From 9e3e05b271dcaa838e53a97b13cfe82f4ecee018 Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Tue, 17 Sep 2024 16:41:28 -0400
Subject: [PATCH] workaround broken vlm endpoints, they do not accept
 stream_options parameter

---
 .../ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py
index 0b56dcbc..c26b92cd 100644
--- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py
+++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py
@@ -323,6 +323,10 @@ def _stream(
             stream_options={"include_usage": True},
             **kwargs,
         )
+        # todo: get vlm endpoints fixed and remove this
+        # vlm endpoints do not accept standard stream_options parameter
+        if self._client.model.model_type == "vlm":
+            payload.pop("stream_options")
         for response in self._client.get_req_stream(payload=payload):
             self._set_callback_out(response, run_manager)
             parsed_response = self._custom_postprocess(response, streaming=True)
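
Illustrative sketch (not part of the patch above): the same guard shown standalone.
The "vlm" model type and the stream_options field come from the diff; the helper
name and the example payload are hypothetical stand-ins for the ChatNVIDIA
internals that build the real request body.

    def strip_unsupported_stream_options(payload: dict, model_type: str) -> dict:
        # VLM endpoints currently reject the OpenAI-style stream_options field,
        # so drop it from the request body before issuing the streaming call.
        if model_type == "vlm":
            payload.pop("stream_options", None)
        return payload

    payload = {
        "messages": [{"role": "user", "content": "describe this image"}],
        "stream": True,
        "stream_options": {"include_usage": True},
    }
    payload = strip_unsupported_stream_options(payload, model_type="vlm")
    assert "stream_options" not in payload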