From c3d83657ad6a3ee4f86c92d78ab22a32762d23b1 Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Sat, 14 Dec 2024 09:12:11 -0500
Subject: [PATCH] revert last_response default, Response() -> None

---
 libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py   | 2 +-
 .../ai-endpoints/tests/integration_tests/test_chat_models.py | 5 -----
 2 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
index 8cf0d357..bd9f4baf 100644
--- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
+++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
@@ -101,7 +101,7 @@ class _NVIDIAClient(BaseModel):
         default={}, description="Last inputs sent over to the server"
     )
     last_response: Optional[Response] = Field(
-        Response(), description="Last response sent from the server"
+        None, description="Last response sent from the server"
     )
     headers_tmpl: dict = Field(
         {
diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
index 28e264ce..ac657085 100644
--- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
+++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
@@ -236,7 +236,6 @@ def test_ai_endpoints_invoke_max_tokens_negative_a(
     with pytest.raises(Exception):
         llm = ChatNVIDIA(model=chat_model, max_tokens=max_tokens, **mode)
         llm.invoke("Show me the tokens")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code in [400, 422]
     assert "max_tokens" in str(llm._client.last_response.content)
 
@@ -251,7 +250,6 @@ def test_ai_endpoints_invoke_max_tokens_negative_b(
     with pytest.raises(Exception):
         llm = ChatNVIDIA(model=chat_model, max_tokens=max_tokens, **mode)
         llm.invoke("Show me the tokens")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code in [400, 422]
     # custom error string -
     # model inference failed -- ValueError: A requested length of the model output
@@ -308,7 +306,6 @@ def test_ai_endpoints_invoke_seed_default(chat_model: str, mode: dict) -> None:
 def test_ai_endpoints_invoke_seed_range(chat_model: str, mode: dict, seed: int) -> None:
     llm = ChatNVIDIA(model=chat_model, seed=seed, **mode)
     llm.invoke("What's in a seed?")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code == 200
 
 
@@ -335,7 +332,6 @@ def test_ai_endpoints_invoke_temperature_negative(
     with pytest.raises(Exception):
         llm = ChatNVIDIA(model=chat_model, temperature=temperature, **mode)
         llm.invoke("What's in a temperature?")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code in [400, 422]
     assert "temperature" in str(llm._client.last_response.content)
 
@@ -364,7 +360,6 @@ def test_ai_endpoints_invoke_top_p_negative(
     with pytest.raises(Exception):
         llm = ChatNVIDIA(model=chat_model, top_p=top_p, **mode)
         llm.invoke("What's in a top_p?")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code in [400, 422]
     assert "top_p" in str(llm._client.last_response.content)
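
Editor's note, not part of the patch above: with last_response defaulting to None again, any caller has to treat the field as optional before reading Response attributes such as status_code. A minimal sketch of the reverted field and a guarded read, assuming pydantic v2 and requests; the _Client class and check_last_status helper are hypothetical illustrations, not code from this repository:

    from typing import Optional

    from pydantic import BaseModel, ConfigDict, Field
    from requests import Response


    class _Client(BaseModel):
        # Trimmed stand-in for _NVIDIAClient, showing only the reverted field.
        model_config = ConfigDict(arbitrary_types_allowed=True)

        last_response: Optional[Response] = Field(
            None, description="Last response sent from the server"
        )


    def check_last_status(client: _Client) -> int:
        # last_response stays None until a request has actually been recorded,
        # so guard before dereferencing it.
        if client.last_response is None:
            raise RuntimeError("no response recorded yet")
        return client.last_response.status_code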