diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
index 2bde648a..454b1d7d 100644
--- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
+++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
@@ -523,7 +523,7 @@ def get_req_stream(
         }
 
         response = self.get_session_fn().post(
-            **self.__add_authorization(self.last_inputs)
+            stream=True, **self.__add_authorization(self.last_inputs)
         )
         self._try_raise(response)
         call = self.copy()
diff --git a/libs/ai-endpoints/tests/integration_tests/test_streaming.py b/libs/ai-endpoints/tests/integration_tests/test_streaming.py
new file mode 100644
index 00000000..158f81a2
--- /dev/null
+++ b/libs/ai-endpoints/tests/integration_tests/test_streaming.py
@@ -0,0 +1,27 @@
+import time
+
+from langchain_nvidia_ai_endpoints import ChatNVIDIA
+
+
+def test_ttft(chat_model: str, mode: dict) -> None:
+    # we had an issue where streaming took a long time to start. the issue
+    # was all streamed results were collected before yielding them to the
+    # user. this test tries to detect the incorrect behavior.
+    #
+    # warning:
+    #  - this can false positive if the model itself is slow to start
+    #  - this can false negative if there is a delay after the first chunk
+    #
+    # potential mitigation for false negative is to check mean & stdev and
+    # filter outliers.
+    #
+    # credit to Pouyan Rezakhani for finding this issue
+    llm = ChatNVIDIA(model=chat_model, **mode)
+    chunk_times = [time.time()]
+    for chunk in llm.stream("Count to 1000 by 2s, e.g. 2 4 6 8 ...", max_tokens=512):
+        chunk_times.append(time.time())
+    ttft = chunk_times[1] - chunk_times[0]
+    total_time = chunk_times[-1] - chunk_times[0]
+    assert ttft < (
+        total_time / 2
+    ), "potential streaming issue, TTFT should be less than half of the total time"