
Remove pending message on error
krassowski committed Jul 11, 2024
1 parent 814bcaa commit cc18864
Showing 3 changed files with 31 additions and 28 deletions.
7 changes: 5 additions & 2 deletions packages/jupyter-ai/jupyter_ai/chat_handlers/base.py
@@ -289,6 +289,8 @@ def close_pending(self, pending_msg: PendingMessage):
             handler.broadcast_message(close_pending_msg)
             break
 
+        pending_msg.closed = True
+
     @contextlib.contextmanager
     def pending(self, text: str, ellipsis: bool = True):
         """
@@ -297,9 +299,10 @@ def pending(self, text: str, ellipsis: bool = True):
         """
         pending_msg = self.start_pending(text, ellipsis=ellipsis)
         try:
-            yield
+            yield pending_msg
         finally:
-            self.close_pending(pending_msg)
+            if not pending_msg.closed:
+                self.close_pending(pending_msg)
 
     def get_llm_chain(self):
         lm_provider = self.config_manager.lm_provider
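
A minimal, self-contained sketch of the pattern this hunk introduces, using toy Handler and PendingMessage stand-ins rather than the actual jupyter-ai classes: the context manager now yields the pending message so the caller can close it early, and the new `closed` flag keeps the `finally` block from closing it a second time.

import contextlib
from dataclasses import dataclass


@dataclass
class PendingMessage:
    body: str
    closed: bool = False


class Handler:
    def start_pending(self, text: str) -> PendingMessage:
        print(f"pending: {text}")
        return PendingMessage(body=text)

    def close_pending(self, msg: PendingMessage) -> None:
        print(f"closing: {msg.body}")
        msg.closed = True

    @contextlib.contextmanager
    def pending(self, text: str):
        msg = self.start_pending(text)
        try:
            # Yield the message so the caller can close it early if it wants.
            yield msg
        finally:
            # Guaranteed cleanup on success and on error, but skip the close
            # if the caller already performed it.
            if not msg.closed:
                self.close_pending(msg)


handler = Handler()
with handler.pending("Generating response") as pending_message:
    handler.close_pending(pending_message)  # early close, e.g. the first chunk arrived
    # Exiting the block does not close the message a second time.
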
51 changes: 25 additions & 26 deletions packages/jupyter-ai/jupyter_ai/chat_handlers/default.py
@@ -97,29 +97,28 @@ async def process_message(self, message: HumanChatMessage):
         received_first_chunk = False
 
         # start with a pending message
-        pending_message = self.start_pending("Generating response")
-
-        # stream response in chunks. this works even if a provider does not
-        # implement streaming, as `astream()` defaults to yielding `_call()`
-        # when `_stream()` is not implemented on the LLM class.
-        async for chunk in self.llm_chain.astream(
-            {"input": message.body},
-            config={"configurable": {"session_id": "static_session"}},
-        ):
-            if not received_first_chunk:
-                # when receiving the first chunk, close the pending message and
-                # start the stream.
-                self.close_pending(pending_message)
-                stream_id = self._start_stream(human_msg=message)
-                received_first_chunk = True
-
-            if isinstance(chunk, AIMessageChunk):
-                self._send_stream_chunk(stream_id, chunk.content)
-            elif isinstance(chunk, str):
-                self._send_stream_chunk(stream_id, chunk)
-            else:
-                self.log.error(f"Unrecognized type of chunk yielded: {type(chunk)}")
-                break
-
-        # complete stream after all chunks have been streamed
-        self._send_stream_chunk(stream_id, "", complete=True)
+        with self.pending("Generating response") as pending_message:
+            # stream response in chunks. this works even if a provider does not
+            # implement streaming, as `astream()` defaults to yielding `_call()`
+            # when `_stream()` is not implemented on the LLM class.
+            async for chunk in self.llm_chain.astream(
+                {"input": message.body},
+                config={"configurable": {"session_id": "static_session"}},
+            ):
+                if not received_first_chunk:
+                    # when receiving the first chunk, close the pending message and
+                    # start the stream.
+                    self.close_pending(pending_message)
+                    stream_id = self._start_stream(human_msg=message)
+                    received_first_chunk = True
+
+                if isinstance(chunk, AIMessageChunk):
+                    self._send_stream_chunk(stream_id, chunk.content)
+                elif isinstance(chunk, str):
+                    self._send_stream_chunk(stream_id, chunk)
+                else:
+                    self.log.error(f"Unrecognized type of chunk yielded: {type(chunk)}")
+                    break
+
+            # complete stream after all chunks have been streamed
+            self._send_stream_chunk(stream_id, "", complete=True)
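
This change is what the commit title refers to. In the old code, close_pending() was only reached once the first chunk arrived, so a provider error raised before that point left the "Generating response" indicator stuck in the chat. Wrapping the loop in `with self.pending(...)` guarantees the pending message is removed even when the body raises. A standalone sketch of that control flow (toy code, not the jupyter-ai implementation):

import contextlib

pending = {"closed": False}

@contextlib.contextmanager
def pending_message():
    try:
        yield pending
    finally:
        # Equivalent of close_pending(): runs even when the body raises.
        if not pending["closed"]:
            pending["closed"] = True

try:
    with pending_message():
        raise RuntimeError("provider failed before the first chunk")
except RuntimeError:
    pass

assert pending["closed"]  # the pending indicator was removed despite the error
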
1 change: 1 addition & 0 deletions packages/jupyter-ai/jupyter_ai/models.py
@@ -103,6 +103,7 @@ class PendingMessage(BaseModel):
     body: str
     persona: Persona
     ellipsis: bool = True
+    closed: bool = False
 
 
 class ClosePendingMessage(BaseModel):
