revert last_response default, Response() -> None
mattf committed Dec 14, 2024
1 parent beff5b3 · commit c3d8365
Showing 2 changed files with 1 addition and 6 deletions.
libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py (2 changes: 1 addition & 1 deletion)
@@ -101,7 +101,7 @@ class _NVIDIAClient(BaseModel):
         default={}, description="Last inputs sent over to the server"
     )
     last_response: Optional[Response] = Field(
-        Response(), description="Last response sent from the server"
+        None, description="Last response sent from the server"
     )
     headers_tmpl: dict = Field(
         {
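
For context, a minimal usage sketch of the reverted default (not part of this commit): with last_response defaulting to None again, callers should confirm a response was actually recorded before reading its attributes. The import is the package's public entry point; the model id is only an illustrative placeholder.

# Usage sketch, not from this diff: guard against last_response being None,
# since None is now the default until a request has actually been sent.
from langchain_nvidia_ai_endpoints import ChatNVIDIA

llm = ChatNVIDIA(model="meta/llama3-8b-instruct")  # placeholder model id
llm.invoke("Hello")

if llm._client.last_response is not None:
    print(llm._client.last_response.status_code)
else:
    print("no HTTP response was recorded")
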
libs/ai-endpoints/tests/integration_tests/test_chat_models.py (5 changes: 0 additions & 5 deletions)
@@ -236,7 +236,6 @@ def test_ai_endpoints_invoke_max_tokens_negative_a(
     with pytest.raises(Exception):
         llm = ChatNVIDIA(model=chat_model, max_tokens=max_tokens, **mode)
         llm.invoke("Show me the tokens")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code in [400, 422]
     assert "max_tokens" in str(llm._client.last_response.content)

@@ -251,7 +250,6 @@ def test_ai_endpoints_invoke_max_tokens_negative_b(
     with pytest.raises(Exception):
         llm = ChatNVIDIA(model=chat_model, max_tokens=max_tokens, **mode)
         llm.invoke("Show me the tokens")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code in [400, 422]
     # custom error string -
     # model inference failed -- ValueError: A requested length of the model output
@@ -308,7 +306,6 @@ def test_ai_endpoints_invoke_seed_default(chat_model: str, mode: dict) -> None:
 def test_ai_endpoints_invoke_seed_range(chat_model: str, mode: dict, seed: int) -> None:
     llm = ChatNVIDIA(model=chat_model, seed=seed, **mode)
     llm.invoke("What's in a seed?")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code == 200


@@ -335,7 +332,6 @@ def test_ai_endpoints_invoke_temperature_negative(
     with pytest.raises(Exception):
         llm = ChatNVIDIA(model=chat_model, temperature=temperature, **mode)
         llm.invoke("What's in a temperature?")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code in [400, 422]
     assert "temperature" in str(llm._client.last_response.content)

@@ -364,7 +360,6 @@ def test_ai_endpoints_invoke_top_p_negative(
     with pytest.raises(Exception):
         llm = ChatNVIDIA(model=chat_model, top_p=top_p, **mode)
         llm.invoke("What's in a top_p?")
-    assert llm._client.last_response
     assert llm._client.last_response.status_code in [400, 422]
     assert "top_p" in str(llm._client.last_response.content)
