From d9ba49db44fb06ecb9a88e81188c5dc0372708fe Mon Sep 17 00:00:00 2001
From: david qiu
Date: Mon, 6 Nov 2023 14:01:15 -0800
Subject: [PATCH] Log exceptions in `/generate` to a file (#431)

* rename process_message() => on_message()

* remove unused import

* add handle_exc() method in BaseChatHandler

* add _default_handle_exc() to handle excs from handle_exc()

* log exceptions from /generate to a file

* pre-commit

* improve call to action in GenerateCH.handle_exc()

* prefer period over colon in timestamped filenames

Co-authored-by: Jason Weill <93281816+JasonWeill@users.noreply.github.com>

---------

Co-authored-by: Jason Weill <93281816+JasonWeill@users.noreply.github.com>
---
 .../jupyter_ai/chat_handlers/ask.py        |  2 +-
 .../jupyter_ai/chat_handlers/base.py       | 53 +++++++++++++++----
 .../jupyter_ai/chat_handlers/clear.py      |  2 +-
 .../jupyter_ai/chat_handlers/default.py    |  2 +-
 .../jupyter_ai/chat_handlers/generate.py   | 26 +++++----
 .../jupyter_ai/chat_handlers/help.py       |  2 +-
 .../jupyter_ai/chat_handlers/learn.py      |  2 +-
 packages/jupyter-ai/jupyter_ai/handlers.py |  4 +-
 8 files changed, 67 insertions(+), 26 deletions(-)

diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py
index dbc2bd679..2f3f1388a 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py
@@ -48,7 +48,7 @@ def create_llm_chain(
             verbose=False,
         )
 
-    async def _process_message(self, message: HumanChatMessage):
+    async def process_message(self, message: HumanChatMessage):
         args = self.parse_args(message)
         if args is None:
             return
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py
index 6ad4e4ec8..7894dcfa5 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py
@@ -9,7 +9,6 @@
 from jupyter_ai.config_manager import ConfigManager, Logger
 from jupyter_ai.models import AgentChatMessage, HumanChatMessage
 from jupyter_ai_magics.providers import BaseProvider
-from langchain.chat_models.base import BaseChatModel
 
 if TYPE_CHECKING:
     from jupyter_ai.handlers import RootChatHandler
@@ -33,20 +32,54 @@ def __init__(
         self.llm_params = None
         self.llm_chain = None
 
-    async def process_message(self, message: HumanChatMessage):
-        """Processes the message passed by the root chat handler."""
+    async def on_message(self, message: HumanChatMessage):
+        """
+        Method which receives a human message and processes it via
+        `self.process_message()`, calling `self.handle_exc()` when an exception
+        is raised. This method is called by RootChatHandler when it routes a
+        human message to this chat handler.
+        """
         try:
-            await self._process_message(message)
+            await self.process_message(message)
         except Exception as e:
-            formatted_e = traceback.format_exc()
-            response = f"Sorry, something went wrong and I wasn't able to index that path.\n\n```\n{formatted_e}\n```"
-            self.reply(response, message)
+            try:
+                # we try/except `handle_exc()` in case it was overridden and
+                # raises an exception by accident.
+                await self.handle_exc(e, message)
+            except Exception as e:
+                await self._default_handle_exc(e, message)
 
-    async def _process_message(self, message: HumanChatMessage):
-        """Processes the message passed by the `Router`"""
+    async def process_message(self, message: HumanChatMessage):
+        """
+        Processes a human message routed to this chat handler. Chat handlers
+        (subclasses) must implement this method. Don't forget to call
+        `self.reply(response, message)` at the end!
+
+        The method definition does not need to be wrapped in a try/except block;
+        any exceptions raised here are caught and passed to `self.handle_exc()`.
+        """
         raise NotImplementedError("Should be implemented by subclasses.")
 
-    def reply(self, response, human_msg: Optional[HumanChatMessage] = None):
+    async def handle_exc(self, e: Exception, message: HumanChatMessage):
+        """
+        Handles an exception raised by `self.process_message()`. A default
+        implementation is provided; however, chat handlers (subclasses) should
+        override this method to provide a more helpful error response.
+        """
+        await self._default_handle_exc(e, message)
+
+    async def _default_handle_exc(self, e: Exception, message: HumanChatMessage):
+        """
+        The default implementation of `handle_exc()`, also used as a fallback
+        when an overridden `handle_exc()` itself raises an exception.
+        """
+        formatted_e = traceback.format_exc()
+        response = (
+            f"Sorry, an error occurred. Details below:\n\n```\n{formatted_e}\n```"
+        )
+        self.reply(response, message)
+
+    def reply(self, response: str, human_msg: Optional[HumanChatMessage] = None):
         agent_msg = AgentChatMessage(
             id=uuid4().hex,
             time=time.time(),
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/clear.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/clear.py
index 78adb65fc..a2a39bb00 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/clear.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/clear.py
@@ -10,7 +10,7 @@ def __init__(self, chat_history: List[ChatMessage], *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._chat_history = chat_history
 
-    async def _process_message(self, _):
+    async def process_message(self, _):
         self._chat_history.clear()
         for handler in self._root_chat_handlers.values():
             if not handler:
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py
index c674a383b..3638b6cfb 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py
@@ -82,7 +82,7 @@ def clear_memory(self):
         if self.chat_history:
             self.chat_history.clear()
 
-    async def _process_message(self, message: HumanChatMessage):
+    async def process_message(self, message: HumanChatMessage):
         self.get_llm_chain()
         response = await self.llm_chain.apredict(input=message.body, stop=["\nHuman:"])
         self.reply(response, message)
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/generate.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/generate.py
index b3a7c4335..5036c8dfb 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/generate.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/generate.py
@@ -1,5 +1,8 @@
 import asyncio
 import os
+import time
+import traceback
+from pathlib import Path
 from typing import Dict, List, Optional, Type
 
 import nbformat
@@ -248,18 +251,23 @@ async def _generate_notebook(self, prompt: str):
         nbformat.write(notebook, final_path)
         return final_path
 
-    async def _process_message(self, message: HumanChatMessage):
+    async def process_message(self, message: HumanChatMessage):
         self.get_llm_chain()
 
         # first send a verification message to user
         response = "👍 Great, I will get started on your notebook. It may take a few minutes, but I will reply here when the notebook is ready. In the meantime, you can continue to ask me other questions."
         self.reply(response, message)
 
-        try:
-            final_path = await self._generate_notebook(prompt=message.body)
-            response = f"""🎉 I have created your notebook and saved it to the location {final_path}. I am still learning how to create notebooks, so please review all code before running it."""
-        except Exception as e:
-            self.log.exception(e)
-            response = "An error occurred while generating the notebook. Try running the /generate task again."
-        finally:
-            self.reply(response, message)
+        final_path = await self._generate_notebook(prompt=message.body)
+        response = f"""🎉 I have created your notebook and saved it to the location {final_path}. I am still learning how to create notebooks, so please review all code before running it."""
+        self.reply(response, message)
+
+    async def handle_exc(self, e: Exception, message: HumanChatMessage):
+        timestamp = time.strftime("%Y-%m-%d-%H.%M.%S")
+        log_path = Path(f"jupyter-ai-logs/generate-{timestamp}.log")
+        log_path.parent.mkdir(parents=True, exist_ok=True)
+        with log_path.open("w") as log:
+            traceback.print_exc(file=log)
+
+        response = f"An error occurred while generating the notebook. The error details have been saved to `./{log_path}`.\n\nTry running `/generate` again, as some language models require multiple attempts before a notebook is generated."
+        self.reply(response, message)
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/help.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/help.py
index 5643dafbc..be89d1165 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/help.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/help.py
@@ -32,5 +32,5 @@ class HelpChatHandler(BaseChatHandler):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-    async def _process_message(self, message: HumanChatMessage):
+    async def process_message(self, message: HumanChatMessage):
         self.reply(HELP_MESSAGE, message)
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py
index dac1d82d1..b93685c0e 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py
@@ -77,7 +77,7 @@ def _load(self):
         except Exception as e:
             self.log.error("Could not load vector index from disk.")
 
-    async def _process_message(self, message: HumanChatMessage):
+    async def process_message(self, message: HumanChatMessage):
         # If no embedding provider has been selected
         em_provider_cls, em_provider_args = self.get_embedding_provider()
         if not em_provider_cls:
diff --git a/packages/jupyter-ai/jupyter_ai/handlers.py b/packages/jupyter-ai/jupyter_ai/handlers.py
index 23b96ad7a..38fc9552d 100644
--- a/packages/jupyter-ai/jupyter_ai/handlers.py
+++ b/packages/jupyter-ai/jupyter_ai/handlers.py
@@ -222,9 +222,9 @@ async def _route(self, message):
             start = time.time()
 
             if is_command:
-                await self.chat_handlers[command].process_message(message)
+                await self.chat_handlers[command].on_message(message)
             else:
-                await default.on_message(message)
+                await default.on_message(message)
 
             latency_ms = round((time.time() - start) * 1000)
             command_readable = "Default" if command == "default" else command
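
Reviewer note: below is a minimal sketch of how a downstream chat handler would
adopt the contract introduced by this patch. `EchoChatHandler` and its reply
strings are hypothetical, invented purely for illustration; only
`BaseChatHandler`, `HumanChatMessage`, `process_message()`, `handle_exc()`, and
`reply()` come from the code above.

# Hypothetical subclass (not part of this patch) exercising the new
# on_message()/process_message()/handle_exc() contract.
from jupyter_ai.chat_handlers.base import BaseChatHandler
from jupyter_ai.models import HumanChatMessage


class EchoChatHandler(BaseChatHandler):
    async def process_message(self, message: HumanChatMessage):
        # No try/except needed here: on_message() wraps this call and
        # routes any exception raised to handle_exc().
        self.reply(f"You said: {message.body}", message)

    async def handle_exc(self, e: Exception, message: HumanChatMessage):
        # Optional override for a friendlier error reply. If this method
        # itself raises, on_message() falls back to _default_handle_exc().
        self.reply(f"Sorry, I couldn't echo that message: {e}", message)

As with the `/generate` handler's `handle_exc()` above, an override receives
both the exception and the triggering message, so it can reply in-thread while
the default traceback-formatting behavior remains available as a fallback.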