Skip to content

Commit

Permalink
Merge pull request #95 from langchain-ai/fix-stream-collection
Browse files Browse the repository at this point in the history
bug: fix stream collection
  • Loading branch information
raspawar authored Aug 29, 2024
2 parents 9f9b762 + 103b798 commit af64b4c
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 1 deletion.
2 changes: 1 addition & 1 deletion libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -523,7 +523,7 @@ def get_req_stream(
}

response = self.get_session_fn().post(
**self.__add_authorization(self.last_inputs)
stream=True, **self.__add_authorization(self.last_inputs)
)
self._try_raise(response)
call = self.copy()
Expand Down
27 changes: 27 additions & 0 deletions libs/ai-endpoints/tests/integration_tests/test_streaming.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import time

from langchain_nvidia_ai_endpoints import ChatNVIDIA


def test_ttft(chat_model: str, mode: dict) -> None:
    """Check that streaming yields its first chunk promptly (time-to-first-token).

    We had an issue where streaming took a long time to start: all streamed
    results were collected internally before being yielded to the user. This
    test tries to detect that incorrect behavior by asserting that the first
    chunk arrives in well under half of the total streaming time.

    Warning:
     - this can false positive if the model itself is slow to start
     - this can false negative if there is a delay after the first chunk

    A potential mitigation for the false negative is to check mean & stdev
    and filter outliers.

    Credit to Pouyan Rezakhani for finding this issue.
    """
    llm = ChatNVIDIA(model=chat_model, **mode)
    # Record a timestamp before streaming starts, then one per chunk, so we
    # can compare time-to-first-token against the total stream duration.
    chunk_times = [time.time()]
    for chunk in llm.stream("Count to 1000 by 2s, e.g. 2 4 6 8 ...", max_tokens=512):
        chunk_times.append(time.time())
    # Fail clearly if the model produced no chunks at all, rather than
    # raising an opaque IndexError below.
    assert len(chunk_times) > 1, "no chunks were streamed"
    ttft = chunk_times[1] - chunk_times[0]
    total_time = chunk_times[-1] - chunk_times[0]
    assert ttft < (
        total_time / 2
    ), "potential streaming issue, TTFT should be less than half of the total time"

0 comments on commit af64b4c

Please sign in to comment.