From 4d5bbfcb09338e383e63b069715a08356c402c0c Mon Sep 17 00:00:00 2001 From: "David L. Qiu" Date: Thu, 26 Dec 2024 09:54:32 -0800 Subject: [PATCH] V3: The Beginning (#1169) * Backport PR #1049: Added new Anthropic Sonnet3.5 v2 models (#1050) Co-authored-by: Sanjiv Das * Backport PR #1051: Added Developer documentation for streaming responses (#1058) Co-authored-by: Sanjiv Das * Backport PR #1048: Implement streaming for `/fix` (#1059) Co-authored-by: Sanjiv Das * Backport PR #1057: [pre-commit.ci] pre-commit autoupdate (#1060) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * Backport PR #1064: Added Ollama to the providers table in user docs (#1066) Co-authored-by: Sanjiv Das * Backport PR #1056: Add examples of using Fields and EnvAuthStrategy to developer documentation (#1073) Co-authored-by: Alan Meeson * Backport PR #1069: Merge Anthropic language model providers (#1076) Co-authored-by: Sanjiv Das * Backport PR #1068: Allow `$` to literally denote quantities of USD in chat (#1079) Co-authored-by: david qiu * Backport PR #1075: Fix magic commands when using non-chat providers w/ history (#1080) Co-authored-by: Alan Meeson * Backport PR #1077: Fix `/export` by including streamed agent messages (#1081) Co-authored-by: Mahmut CAVDAR <4072246+mcavdar@users.noreply.github.com> * Backport PR #1072: Reduced padding in cell around code icons in code toolbar (#1084) Co-authored-by: Sanjiv Das * Backport PR #1087: Improve installation documentation and clarify provider dependencies (#1091) Co-authored-by: Sanjiv Das * Backport PR #1092: Remove retired models and add new `Haiku-3.5` model in Anthropic (#1093) Co-authored-by: Sanjiv Das * Backport PR #1094: Continue to allow `$` symbols to delimit inline math in human messages (#1095) Co-authored-by: david qiu * Backport PR #1097: Update `faiss-cpu` version range (#1101) Co-authored-by: david qiu * Backport PR #1104: Fix rendering of code blocks in JupyterLab 4.3.0+ (#1105) Co-authored-by: david qiu * Backport PR #1106: Catch error on non plaintext files in `@file` and reply gracefully in chat (#1110) Co-authored-by: Sanjiv Das * Backport PR #1109: Bump LangChain minimum versions (#1112) Co-authored-by: david qiu * Backport PR #1119: Downgrade spurious 'error' logs (#1124) Co-authored-by: ctcjab * Backport PR #1127: Removes outdated OpenAI models and adds new ones (#1130) Co-authored-by: Sanjiv Das * Backport PR #1131: [pre-commit.ci] pre-commit autoupdate (#1132) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * Backport PR #1125: Update model fields immediately on save (#1133) Co-authored-by: david qiu * Backport PR #1139: Fix install step in CI (#1140) Co-authored-by: david qiu * Backport PR #1129: Fix JSON serialization error in Ollama models (#1141) Co-authored-by: Mr.W * Backport PR #1137: Update completion model fields immediately on save (#1142) Co-authored-by: david qiu * [v3-dev] Initial migration to `jupyterlab-chat` (#1043) * Very first version of the AI working in jupyterlab_collaborative_chat * Allows both collaborative and regular chat to work with AI * handle the help message in the chat too * Autocompletion (#2) * Fix handler methods' parameters * Add slash commands (autocompletion) to the chat input * Stream messages (#3) * Allow for stream messages * update jupyter collaborative chat dependency * AI settings (#4) * Add a menu option to open the AI settings * Remove the input option from the setting widget * pre-commit * linting * Homogenize
typing for optional arguments * Fix import * Showing that the bot is writing (answering) (#5) * Show that the bot is writing (answering) * Update jupyter chat dependency * Some typing * Update extension to jupyterlab_chat (0.6.0) (#8) * Fix linting * Remove try/except to import jupyterlab_chat (not optional anymore), and fix typing * linter * Python unit tests * Fix typing * lint * Fix lint and mypy all together * Fix web_app settings accessor * Fix jupyter_collaboration version Co-authored-by: david qiu <44106031+dlqqq@users.noreply.github.com> * Remove unnecessary try/except * Dedicate one set of chat handlers per room (#9) * create new set of chat handlers per room * make YChat an instance attribute on BaseChatHandler * revert changes to chat handlers * pre-commit * use room_id local var Co-authored-by: Nicolas Brichet <32258950+brichet@users.noreply.github.com> --------- Co-authored-by: Nicolas Brichet <32258950+brichet@users.noreply.github.com> --------- Co-authored-by: david qiu <44106031+dlqqq@users.noreply.github.com> Co-authored-by: david qiu * Backport PR #1134: Improve user messaging and documentation for Cross-Region Inference on Amazon Bedrock (#1143) Co-authored-by: Sanjiv Das * Backport PR #1136: Add base API URL field for Ollama and OpenAI embedding models (#1149) Co-authored-by: Sanjiv Das * [v3-dev] Remove `/export`, `/clear`, and `/fix` (#1148) * remove /export * remove /clear * remove /fix * Fix CI in `v3-dev` branch (#1154) * fix check release by bumping to impossible version * fix types * Update Playwright Snapshots --------- Co-authored-by: github-actions[bot] * [v3-dev] Dedicate one LangChain history object per chat (#1151) * dedicate a separate LangChain history object per chat * pre-commit * fix mypy * Backport PR #1160: Trigger update snapshots based on commenter's role (#1161) Co-authored-by: david qiu * Backport PR #1155: Fix code output format in IPython (#1162) Co-authored-by: Divyansh Choudhary * Backport PR #1158: Update `/generate` to not split classes & functions across cells (#1164) Co-authored-by: Sanjiv Das * Remove v2 frontend components (#1156) * First pass to remove the front end chat * Remove code-toolbar by using a simplified markdown renderer in settings * Remove chat-message-menu (should be ported in jupyter-chat) * Remove chat handler * Follow up 'Remove chat-message-menu (should be ported in jupyter-chat)' commit * Clean package.json * Remove UI tests * Remove the generative AI menu * Remove unused components * run yarn dedupe --------- Co-authored-by: David L.
Qiu * Upgrade to `jupyterlab-chat>=0.7.0` (#1166) * upgrade to jupyterlab-chat 0.7.0 * pre-commit * upgrade to @jupyter/chat ^0.7.0 in frontend * Remove v2 backend components (#1168) * remove v2 llm memory, implement ReplyStream * remove v2 websockets & REST handlers * remove unused v2 data models * fix slash command autocomplete * fix unit tests * remove unused _learned context provider * fix mypy * pre-commit * fix optional k arg in YChatHistory * bump jupyter chat to 0.7.1 to fix Python 3.9 tests * revert accidentally breaking /learn --------- Co-authored-by: Lumberbot (aka Jack) <39504233+meeseeksmachine@users.noreply.github.com> Co-authored-by: Sanjiv Das Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Alan Meeson Co-authored-by: Mahmut CAVDAR <4072246+mcavdar@users.noreply.github.com> Co-authored-by: ctcjab Co-authored-by: Mr.W Co-authored-by: Nicolas Brichet <32258950+brichet@users.noreply.github.com> Co-authored-by: github-actions[bot] Co-authored-by: Divyansh Choudhary --- .github/workflows/check-release.yml | 2 +- .../slash_command.py | 4 +- .../jupyter_ai_test/test_slash_commands.py | 4 +- .../jupyter_ai/chat_handlers/__init__.py | 8 +- .../jupyter_ai/chat_handlers/ask.py | 29 +- .../jupyter_ai/chat_handlers/base.py | 287 +++------ .../jupyter_ai/chat_handlers/clear.py | 26 - .../jupyter_ai/chat_handlers/default.py | 15 +- .../jupyter_ai/chat_handlers/export.py | 49 -- .../jupyter_ai/chat_handlers/fix.py | 106 ---- .../jupyter_ai/chat_handlers/generate.py | 6 +- .../jupyter_ai/chat_handlers/help.py | 4 +- .../jupyter_ai/chat_handlers/learn.py | 17 +- .../chat_handlers/utils/streaming.py | 82 +++ packages/jupyter-ai/jupyter_ai/constants.py | 8 + .../jupyter_ai/context_providers/_learned.py | 53 -- .../jupyter_ai/context_providers/base.py | 23 +- .../jupyter_ai/context_providers/file.py | 9 +- packages/jupyter-ai/jupyter_ai/extension.py | 234 +++++--- packages/jupyter-ai/jupyter_ai/handlers.py | 417 +------------ packages/jupyter-ai/jupyter_ai/history.py | 143 ++--- packages/jupyter-ai/jupyter_ai/models.py | 203 +------ .../tests/test_context_providers.py | 29 +- .../jupyter_ai/tests/test_extension.py | 3 +- .../jupyter_ai/tests/test_handlers.py | 154 ++--- packages/jupyter-ai/package.json | 3 +- packages/jupyter-ai/pyproject.toml | 1 + packages/jupyter-ai/schema/plugin.json | 21 + packages/jupyter-ai/src/chat_handler.ts | 270 --------- packages/jupyter-ai/src/completions/plugin.ts | 2 +- .../jupyter-ai/src/components/chat-input.tsx | 403 ------------- .../src/components/chat-input/send-button.tsx | 204 ------- .../src/components/chat-messages.tsx | 242 -------- .../chat-messages/chat-message-delete.tsx | 31 - .../chat-messages/chat-message-menu.tsx | 94 --- .../src/components/chat-settings.tsx | 73 +-- packages/jupyter-ai/src/components/chat.tsx | 310 ---------- .../components/code-blocks/code-toolbar.tsx | 197 ------- .../src/components/expandable-text-field.tsx | 80 --- .../mui-extras/tooltipped-button.tsx | 87 --- .../src/components/pending-messages.tsx | 117 ---- .../src/components/rendermime-markdown.tsx | 141 ----- .../src/components/scroll-container.tsx | 69 --- .../settings/rendermime-markdown.tsx | 79 +++ .../src/contexts/active-cell-context.tsx | 329 ----------- .../src/contexts/collaborators-context.tsx | 70 --- packages/jupyter-ai/src/contexts/index.ts | 3 - .../src/contexts/selection-context.tsx | 51 -- .../jupyter-ai/src/contexts/user-context.tsx | 35 -- packages/jupyter-ai/src/handler.ts | 150 ----- 
packages/jupyter-ai/src/hooks/use-copy.ts | 89 --- packages/jupyter-ai/src/hooks/use-replace.ts | 56 -- packages/jupyter-ai/src/index.ts | 152 ++--- .../jupyter-ai/src/plugins/menu-plugin.ts | 158 ----- packages/jupyter-ai/src/selection-watcher.ts | 181 ------ .../jupyter-ai/src/slash-autocompletion.tsx | 93 +++ packages/jupyter-ai/src/tokens.ts | 45 +- packages/jupyter-ai/src/utils.ts | 43 +- .../jupyter-ai/src/widgets/chat-sidebar.tsx | 54 -- .../src/widgets/settings-widget.tsx | 26 + .../ui-tests/tests/jupyter-ai.spec.ts | 17 +- .../sidebar-linux.png | Bin 2246 -> 893 bytes yarn.lock | 552 +++++------------- 63 files changed, 1015 insertions(+), 5428 deletions(-) delete mode 100644 packages/jupyter-ai/jupyter_ai/chat_handlers/clear.py delete mode 100644 packages/jupyter-ai/jupyter_ai/chat_handlers/export.py delete mode 100644 packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py create mode 100644 packages/jupyter-ai/jupyter_ai/chat_handlers/utils/streaming.py create mode 100644 packages/jupyter-ai/jupyter_ai/constants.py delete mode 100644 packages/jupyter-ai/jupyter_ai/context_providers/_learned.py delete mode 100644 packages/jupyter-ai/src/chat_handler.ts delete mode 100644 packages/jupyter-ai/src/components/chat-input.tsx delete mode 100644 packages/jupyter-ai/src/components/chat-input/send-button.tsx delete mode 100644 packages/jupyter-ai/src/components/chat-messages.tsx delete mode 100644 packages/jupyter-ai/src/components/chat-messages/chat-message-delete.tsx delete mode 100644 packages/jupyter-ai/src/components/chat-messages/chat-message-menu.tsx delete mode 100644 packages/jupyter-ai/src/components/chat.tsx delete mode 100644 packages/jupyter-ai/src/components/code-blocks/code-toolbar.tsx delete mode 100644 packages/jupyter-ai/src/components/expandable-text-field.tsx delete mode 100644 packages/jupyter-ai/src/components/mui-extras/tooltipped-button.tsx delete mode 100644 packages/jupyter-ai/src/components/pending-messages.tsx delete mode 100644 packages/jupyter-ai/src/components/rendermime-markdown.tsx delete mode 100644 packages/jupyter-ai/src/components/scroll-container.tsx create mode 100644 packages/jupyter-ai/src/components/settings/rendermime-markdown.tsx delete mode 100644 packages/jupyter-ai/src/contexts/active-cell-context.tsx delete mode 100644 packages/jupyter-ai/src/contexts/collaborators-context.tsx delete mode 100644 packages/jupyter-ai/src/contexts/selection-context.tsx delete mode 100644 packages/jupyter-ai/src/contexts/user-context.tsx delete mode 100644 packages/jupyter-ai/src/hooks/use-copy.ts delete mode 100644 packages/jupyter-ai/src/hooks/use-replace.ts delete mode 100644 packages/jupyter-ai/src/plugins/menu-plugin.ts delete mode 100644 packages/jupyter-ai/src/selection-watcher.ts create mode 100644 packages/jupyter-ai/src/slash-autocompletion.tsx delete mode 100644 packages/jupyter-ai/src/widgets/chat-sidebar.tsx create mode 100644 packages/jupyter-ai/src/widgets/settings-widget.tsx diff --git a/.github/workflows/check-release.yml b/.github/workflows/check-release.yml index 542206dc4..97b1f74d9 100644 --- a/.github/workflows/check-release.yml +++ b/.github/workflows/check-release.yml @@ -24,7 +24,7 @@ jobs: uses: jupyter-server/jupyter_releaser/.github/actions/check-release@v2 with: token: ${{ secrets.GITHUB_TOKEN }} - version_spec: minor + version_spec: "12.34.56" - name: Upload Distributions uses: actions/upload-artifact@v4 diff --git a/packages/jupyter-ai-module-cookiecutter/{{cookiecutter.root_dir_name}}/{{cookiecutter.python_name}}/slash_command.py 
b/packages/jupyter-ai-module-cookiecutter/{{cookiecutter.root_dir_name}}/{{cookiecutter.python_name}}/slash_command.py index f82bd5531..9dcd8de05 100644 --- a/packages/jupyter-ai-module-cookiecutter/{{cookiecutter.root_dir_name}}/{{cookiecutter.python_name}}/slash_command.py +++ b/packages/jupyter-ai-module-cookiecutter/{{cookiecutter.root_dir_name}}/{{cookiecutter.python_name}}/slash_command.py @@ -1,5 +1,5 @@ from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType -from jupyter_ai.models import HumanChatMessage +from jupyterlab_chat.models import Message class TestSlashCommand(BaseChatHandler): @@ -25,5 +25,5 @@ class TestSlashCommand(BaseChatHandler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - async def process_message(self, message: HumanChatMessage): + async def process_message(self, message: Message): self.reply("This is the `/test` slash command.") diff --git a/packages/jupyter-ai-test/jupyter_ai_test/test_slash_commands.py b/packages/jupyter-ai-test/jupyter_ai_test/test_slash_commands.py index f82bd5531..9dcd8de05 100644 --- a/packages/jupyter-ai-test/jupyter_ai_test/test_slash_commands.py +++ b/packages/jupyter-ai-test/jupyter_ai_test/test_slash_commands.py @@ -1,5 +1,5 @@ from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType -from jupyter_ai.models import HumanChatMessage +from jupyterlab_chat.models import Message class TestSlashCommand(BaseChatHandler): @@ -25,5 +25,5 @@ class TestSlashCommand(BaseChatHandler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - async def process_message(self, message: HumanChatMessage): + async def process_message(self, message: Message): self.reply("This is the `/test` slash command.") diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/__init__.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/__init__.py index a8fe9eb50..3e7e45aa8 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/__init__.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/__init__.py @@ -1,9 +1,11 @@ +# The following import is to make sure jupyter_ydoc is imported before +# jupyterlab_chat, otherwise it leads to circular import because of the +# YChat relying on YBaseDoc, and jupyter_ydoc registering YChat from the entry point. 
+import jupyter_ydoc
+
 from .ask import AskChatHandler
 from .base import BaseChatHandler, SlashCommandRoutingType
-from .clear import ClearChatHandler
 from .default import DefaultChatHandler
-from .export import ExportChatHandler
-from .fix import FixChatHandler
 from .generate import GenerateChatHandler
 from .help import HelpChatHandler
 from .learn import LearnChatHandler
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py
index b5c4fa38b..cdd03b7c8 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py
@@ -1,8 +1,8 @@
 import argparse
 from typing import Dict, Type
 
-from jupyter_ai.models import HumanChatMessage
 from jupyter_ai_magics.providers import BaseProvider
+from jupyterlab_chat.models import Message
 from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferWindowMemory
 from langchain_core.prompts import PromptTemplate
@@ -59,7 +59,7 @@ def create_llm_chain(
             verbose=False,
         )
 
-    async def process_message(self, message: HumanChatMessage):
+    async def process_message(self, message: Message):
         args = self.parse_args(message)
         if args is None:
             return
@@ -70,8 +70,8 @@ async def process_message(self, message: HumanChatMessage):
 
         self.get_llm_chain()
 
-        try:
-            with self.pending("Searching learned documents", message):
+        with self.start_reply_stream() as reply_stream:
+            try:
                 assert self.llm_chain
                 # TODO: migrate this class to use a LCEL `Runnable` instead of
                 # `Chain`, then remove the below ignore comment.
@@ -79,12 +79,15 @@ async def process_message(self, message: HumanChatMessage):
                     {"question": query}
                 )
                 response = result["answer"]
-            self.reply(response, message)
-        except AssertionError as e:
-            self.log.error(e)
-            response = """Sorry, an error occurred while reading the from the learned documents.
-            If you have changed the embedding provider, try deleting the existing index by running
-            `/learn -d` command and then re-submitting the `learn <directory>` to learn the documents,
-            and then asking the question again.
-            """
-            self.reply(response, message)
+
+                # old pending message: "Searching learned documents..."
+                # TODO: configure this pending message in jupyterlab-chat
+                reply_stream.write(response)
+            except AssertionError as e:
+                self.log.error(e)
+                response = """Sorry, an error occurred while reading from the learned documents.
+                If you have changed the embedding provider, try deleting the existing index by running
+                the `/learn -d` command, then re-submitting `/learn <directory>` to learn the documents,
+                and then asking the question again.
+                """
+                reply_stream.write(response)
diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py
index c844650ad..327ff5965 100644
--- a/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py
+++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py
@@ -2,40 +2,26 @@
 import asyncio
 import contextlib
 import os
-import time
 import traceback
 from typing import (
     TYPE_CHECKING,
-    Any,
     Awaitable,
     ClassVar,
     Dict,
-    List,
     Literal,
     Optional,
     Type,
     Union,
     cast,
 )
-from typing import get_args as get_type_args
-from uuid import uuid4
 
 from dask.distributed import Client as DaskClient
 from jupyter_ai.callback_handlers import MetadataCallbackHandler
 from jupyter_ai.config_manager import ConfigManager, Logger
-from jupyter_ai.history import WrappedBoundedChatHistory
-from jupyter_ai.models import (
-    AgentChatMessage,
-    AgentStreamChunkMessage,
-    AgentStreamMessage,
-    ChatMessage,
-    ClosePendingMessage,
-    HumanChatMessage,
-    Message,
-    PendingMessage,
-)
-from jupyter_ai_magics import Persona
+from jupyter_ai.constants import BOT
 from jupyter_ai_magics.providers import BaseProvider
+from jupyterlab_chat.models import Message, NewMessage, User
+from jupyterlab_chat.ychat import YChat
 from langchain.pydantic_v1 import BaseModel
 from langchain_core.messages import AIMessageChunk
 from langchain_core.runnables import Runnable
@@ -43,10 +29,10 @@
 from langchain_core.runnables.config import merge_configs as merge_runnable_configs
 from langchain_core.runnables.utils import Input
 
+from .utils.streaming import ReplyStream
+
 if TYPE_CHECKING:
     from jupyter_ai.context_providers import BaseCommandContextProvider
-    from jupyter_ai.handlers import RootChatHandler
-    from jupyter_ai.history import BoundedChatHistory
     from langchain_core.chat_history import BaseChatMessageHistory
@@ -145,10 +131,8 @@ def __init__(
         self,
         log: Logger,
         config_manager: ConfigManager,
-        root_chat_handlers: Dict[str, "RootChatHandler"],
         model_parameters: Dict[str, Dict],
-        chat_history: List[ChatMessage],
-        llm_chat_memory: "BoundedChatHistory",
+        llm_chat_memory: "BaseChatMessageHistory",
         root_dir: str,
         preferred_dir: Optional[str],
         dask_client_future: Awaitable[DaskClient],
@@ -156,12 +140,11 @@
         chat_handlers: Dict[str, "BaseChatHandler"],
         context_providers: Dict[str, "BaseCommandContextProvider"],
         message_interrupted: Dict[str, asyncio.Event],
+        ychat: YChat,
     ):
         self.log = log
         self.config_manager = config_manager
-        self._root_chat_handlers = root_chat_handlers
         self.model_parameters = model_parameters
-        self._chat_history = chat_history
         self.llm_chat_memory = llm_chat_memory
         self.parser = argparse.ArgumentParser(
             add_help=False, description=self.help, formatter_class=MarkdownHelpFormatter
@@ -178,12 +161,13 @@ def __init__(
         self.chat_handlers = chat_handlers
         self.context_providers = context_providers
         self.message_interrupted = message_interrupted
+        self.ychat = ychat
 
         self.llm: Optional[BaseProvider] = None
         self.llm_params: Optional[dict] = None
         self.llm_chain: Optional[Runnable] = None
 
-    async def on_message(self, message: HumanChatMessage):
+    async def on_message(self, message: Message):
         """
         Method which receives a human message, calls `self.get_llm_chain()`,
         and processes the message via `self.process_message()`, calling
@@ -198,7 +182,7 @@ async def on_message(self, message: HumanChatMessage):
         slash_command = "/" + routing_type.slash_id if routing_type.slash_id else ""
         if slash_command in lm_provider_klass.unsupported_slash_commands:
             self.reply(
-                "Sorry, the selected language model does not support this slash command."
+                "Sorry, the selected language model does not support this slash command.",
             )
             return
@@ -234,26 +218,26 @@ async def on_message(self, message: HumanChatMessage):
         finally:
             BaseChatHandler._requests_count -= 1
 
-    async def process_message(self, message: HumanChatMessage):
+    async def process_message(self, _human_message: Message):
         """
         Processes a human message routed to this chat handler. Chat handlers
         (subclasses) must implement this method. Don't forget to call
-        `self.reply(<response>, message)` at the end!
+        `self.reply(<response>, chat, message)` at the end!
 
         The method definition does not need to be wrapped in a try/except block;
         any exceptions raised here are caught by `self.handle_exc()`.
         """
         raise NotImplementedError("Should be implemented by subclasses.")
 
-    async def handle_exc(self, e: Exception, message: HumanChatMessage):
+    async def handle_exc(self, e: Exception, _human_message: Message):
         """
         Handles an exception raised by `self.process_message()`. A default
         implementation is provided, however chat handlers (subclasses) should
         implement this method to provide a more helpful error response.
         """
-        await self._default_handle_exc(e, message)
+        await self._default_handle_exc(e, _human_message)
 
-    async def _default_handle_exc(self, e: Exception, message: HumanChatMessage):
+    async def _default_handle_exc(self, e: Exception, _human_message: Message):
         """
         The default definition of `handle_exc()`. This is the default used when
         the `handle_exc()` excepts.
@@ -263,112 +247,33 @@ async def _default_handle_exc(self, e: Exception, message: HumanChatMessage):
         if lm_provider and lm_provider.is_api_key_exc(e):
             provider_name = getattr(self.config_manager.lm_provider, "name", "")
             response = f"Oops! There's a problem connecting to {provider_name}. Please update your {provider_name} API key in the chat settings."
-            self.reply(response, message)
+            self.reply(response, _human_message)
             return
 
         formatted_e = traceback.format_exc()
         response = (
             f"Sorry, an error occurred. Details below:\n\n```\n{formatted_e}\n```"
         )
-        self.reply(response, message)
+        self.reply(response, _human_message)
 
-    def broadcast_message(self, message: Message):
-        """
-        Broadcasts a message to all WebSocket connections. If there are no
-        WebSocket connections and the message is a chat message, this method
-        directly appends to `self.chat_history`.
+    def reply(self, body: str, _human_message=None) -> str:
         """
-        broadcast = False
-        for websocket in self._root_chat_handlers.values():
-            if not websocket:
-                continue
+        Adds a message to the YChat shared document that this chat handler is
+        assigned to. Returns the new message ID.
 
-            websocket.broadcast_message(message)
-            broadcast = True
-            break
-
-        if not broadcast:
-            if isinstance(message, get_type_args(ChatMessage)):
-                cast(ChatMessage, message)
-                self._chat_history.append(message)
-
-    def reply(self, response: str, human_msg: Optional[HumanChatMessage] = None):
-        """
-        Sends an agent message, usually in response to a received
-        `HumanChatMessage`.
+        TODO: Either properly store & use reply state in YChat, or remove the
+        `human_message` argument here.
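+
+        A minimal usage sketch from within a chat handler subclass (the reply
+        text here is illustrative only):
+
+            msg_id = self.reply("Sorry, I could not process that request.")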
""" - agent_msg = AgentChatMessage( - id=uuid4().hex, - time=time.time(), - body=response, - reply_to=human_msg.id if human_msg else "", - persona=self.persona, - ) + bot = self.ychat.get_user(BOT["username"]) + if not bot: + self.ychat.set_user(User(**BOT)) - self.broadcast_message(agent_msg) + id = self.ychat.add_message(NewMessage(body=body, sender=BOT["username"])) + return id @property def persona(self): return self.config_manager.persona - def start_pending( - self, - text: str, - human_msg: Optional[HumanChatMessage] = None, - *, - ellipsis: bool = True, - ) -> PendingMessage: - """ - Sends a pending message to the client. - - Returns the pending message ID. - """ - persona = self.config_manager.persona - - pending_msg = PendingMessage( - id=uuid4().hex, - time=time.time(), - body=text, - reply_to=human_msg.id if human_msg else "", - persona=Persona(name=persona.name, avatar_route=persona.avatar_route), - ellipsis=ellipsis, - ) - - self.broadcast_message(pending_msg) - return pending_msg - - def close_pending(self, pending_msg: PendingMessage): - """ - Closes a pending message. - """ - if pending_msg.closed: - return - - close_pending_msg = ClosePendingMessage( - id=pending_msg.id, - ) - - self.broadcast_message(close_pending_msg) - pending_msg.closed = True - - @contextlib.contextmanager - def pending( - self, - text: str, - human_msg: Optional[HumanChatMessage] = None, - *, - ellipsis: bool = True, - ): - """ - Context manager that sends a pending message to the client, and closes - it after the block is executed. - """ - pending_msg = self.start_pending(text, human_msg=human_msg, ellipsis=ellipsis) - try: - yield pending_msg - finally: - if not pending_msg.closed: - self.close_pending(pending_msg) - def get_llm_chain(self): lm_provider = self.config_manager.lm_provider lm_provider_params = self.config_manager.lm_provider_params @@ -409,26 +314,19 @@ def create_llm_chain( ): raise NotImplementedError("Should be implemented by subclasses") - def parse_args(self, message, silent=False): - args = message.body.split(" ") + def parse_args(self, message: Message, silent=False): + args = message.body.split(" ")[1:] try: - args = self.parser.parse_args(args[1:]) + arg_namespace = self.parser.parse_args(args) except (argparse.ArgumentError, SystemExit) as e: if not silent: response = f"{self.parser.format_usage()}" self.reply(response, message) return None - return args + return arg_namespace - def get_llm_chat_memory( - self, - last_human_msg: HumanChatMessage, - **kwargs, - ) -> "BaseChatMessageHistory": - return WrappedBoundedChatHistory( - history=self.llm_chat_memory, - last_human_msg=last_human_msg, - ) + def get_llm_chat_memory(self) -> "BaseChatMessageHistory": + return self.llm_chat_memory @property def output_dir(self) -> str: @@ -439,7 +337,7 @@ def output_dir(self) -> str: else: return self.root_dir - def send_help_message(self, human_msg: Optional[HumanChatMessage] = None) -> None: + def send_help_message(self, _human_message: Optional[Message] = None) -> None: """Sends a help message to all connected clients.""" lm_provider = self.config_manager.lm_provider unsupported_slash_commands = ( @@ -470,57 +368,34 @@ def send_help_message(self, human_msg: Optional[HumanChatMessage] = None) -> Non slash_commands_list=slash_commands_list, context_commands_list=context_commands_list, ) - help_message = AgentChatMessage( - id=uuid4().hex, - time=time.time(), - body=help_message_body, - reply_to=human_msg.id if human_msg else "", - persona=self.persona, - ) - 
self.broadcast_message(help_message) + self.reply(help_message_body, None) - def _start_stream(self, human_msg: HumanChatMessage) -> str: - """ - Sends an `agent-stream` message to indicate the start of a response - stream. Returns the ID of the message, denoted as the `stream_id`. + @contextlib.contextmanager + def start_reply_stream(self): """ - stream_id = uuid4().hex - stream_msg = AgentStreamMessage( - id=stream_id, - time=time.time(), - body="", - reply_to=human_msg.id, - persona=self.persona, - complete=False, - ) - - self.broadcast_message(stream_msg) - return stream_id + Context manager which initializes a `ReplyStream`, opens it, and then + yields the `ReplyStream`. Under this context, developers should call + `reply_stream.write()` on the yielded reply stream to send new string + chunks to the chat. - def _send_stream_chunk( - self, - stream_id: str, - content: str, - complete: bool = False, - metadata: Optional[Dict[str, Any]] = None, - ) -> None: - """ - Sends an `agent-stream-chunk` message containing content that should be - appended to an existing `agent-stream` message with ID `stream_id`. + Once the context is closed, the `ReplyStream` is closed automatically. """ - if not metadata: - metadata = {} - - stream_chunk_msg = AgentStreamChunkMessage( - id=stream_id, content=content, stream_complete=complete, metadata=metadata - ) - self.broadcast_message(stream_chunk_msg) + # initialize and open reply stream + reply_stream = ReplyStream(ychat=self.ychat) + reply_stream.open() + # wrap the yield call in try/finally to ensure streams are closed on + # exceptions. + try: + yield reply_stream + finally: + # close the `ReplyStream` on exit. + reply_stream.close() async def stream_reply( self, input: Input, - human_msg: HumanChatMessage, + _human_message: Optional[Message] = None, pending_msg="Generating response", config: Optional[RunnableConfig] = None, ): @@ -535,7 +410,12 @@ async def stream_reply( the runnable in `self.llm_chain`, but is usually a dictionary whose keys refer to input variables in your prompt template. - - `human_msg`: The `HumanChatMessage` being replied to. + - `_human_message`: The human message being replied to. Currently + unused. TODO: Either re-implement this for v3 or remove it. + + - `_pending_msg` (optional): Changes the default pending message from + "Generating response". Not supported at this time. TODO: Re-implement + this for v3. - `config` (optional): A `RunnableConfig` object that specifies additional configuration when streaming from the runnable. @@ -549,64 +429,31 @@ async def stream_reply( received_first_chunk = False metadata_handler = MetadataCallbackHandler() base_config: RunnableConfig = { - "configurable": {"last_human_msg": human_msg}, "callbacks": [metadata_handler], } merged_config: RunnableConfig = merge_runnable_configs(base_config, config) # start with a pending message - with self.pending(pending_msg, human_msg) as pending_message: + with self.start_reply_stream() as reply_stream: # stream response in chunks. this works even if a provider does not # implement streaming, as `astream()` defaults to yielding `_call()` # when `_stream()` is not implemented on the LLM class. chunk_generator = self.llm_chain.astream(input, config=merged_config) + # TODO v3: re-implement stream interrupt stream_interrupted = False async for chunk in chunk_generator: - if not received_first_chunk: - # when receiving the first chunk, close the pending message and - # start the stream. 
- self.close_pending(pending_message) - stream_id = self._start_stream(human_msg=human_msg) - received_first_chunk = True - self.message_interrupted[stream_id] = asyncio.Event() - - if self.message_interrupted[stream_id].is_set(): - try: - # notify the model provider that streaming was interrupted - # (this is essential to allow the model to stop generating) - # - # note: `mypy` flags this line, claiming that `athrow` is - # not defined on `AsyncIterator`. This is why an ignore - # comment is placed here. - await chunk_generator.athrow( # type:ignore[attr-defined] - GenerationInterrupted() - ) - except GenerationInterrupted: - # do not let the exception bubble up in case if - # the provider did not handle it - pass - stream_interrupted = True - break - if isinstance(chunk, AIMessageChunk) and isinstance(chunk.content, str): - self._send_stream_chunk(stream_id, chunk.content) + reply_stream.write(chunk.content) elif isinstance(chunk, str): - self._send_stream_chunk(stream_id, chunk) + reply_stream.write(chunk) else: self.log.error(f"Unrecognized type of chunk yielded: {type(chunk)}") break - # complete stream after all chunks have been streamed - stream_tombstone = ( - "\n\n(AI response stopped by user)" if stream_interrupted else "" - ) - self._send_stream_chunk( - stream_id, - stream_tombstone, - complete=True, - metadata=metadata_handler.jai_metadata, - ) - del self.message_interrupted[stream_id] + # if stream was interrupted, add a tombstone + if stream_interrupted: + stream_tombstone = "\n\n(AI response stopped by user)" + reply_stream.write(stream_tombstone) class GenerationInterrupted(asyncio.CancelledError): diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/clear.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/clear.py deleted file mode 100644 index d5b0ab6c7..000000000 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/clear.py +++ /dev/null @@ -1,26 +0,0 @@ -from jupyter_ai.models import ClearRequest - -from .base import BaseChatHandler, SlashCommandRoutingType - - -class ClearChatHandler(BaseChatHandler): - """Clear the chat panel and show the help menu""" - - id = "clear" - name = "Clear chat messages" - help = "Clear the chat window" - routing_type = SlashCommandRoutingType(slash_id="clear") - - uses_llm = False - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - async def process_message(self, _): - # Clear chat by triggering `RootChatHandler.on_clear_request()`. 
- for handler in self._root_chat_handlers.values(): - if not handler: - continue - - handler.on_clear_request(ClearRequest()) - break diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py index 266ad73ad..e25cfd88c 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py @@ -1,9 +1,8 @@ import asyncio from typing import Dict, Type -from jupyter_ai.models import HumanChatMessage from jupyter_ai_magics.providers import BaseProvider -from langchain_core.runnables import ConfigurableFieldSpec +from jupyterlab_chat.models import Message from langchain_core.runnables.history import RunnableWithMessageHistory from ..context_providers import ContextProviderException, find_commands @@ -44,16 +43,10 @@ def create_llm_chain( get_session_history=self.get_llm_chat_memory, input_messages_key="input", history_messages_key="history", - history_factory_config=[ - ConfigurableFieldSpec( - id="last_human_msg", - annotation=HumanChatMessage, - ), - ], ) self.llm_chain = runnable - async def process_message(self, message: HumanChatMessage): + async def process_message(self, message: Message): self.get_llm_chain() assert self.llm_chain @@ -70,13 +63,13 @@ async def process_message(self, message: HumanChatMessage): await self.stream_reply(inputs, message) - async def make_context_prompt(self, human_msg: HumanChatMessage) -> str: + async def make_context_prompt(self, human_msg: Message) -> str: return "\n\n".join( await asyncio.gather( *[ provider.make_context_prompt(human_msg) for provider in self.context_providers.values() - if find_commands(provider, human_msg.prompt) + if find_commands(provider, human_msg.body) ] ) ) diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/export.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/export.py deleted file mode 100644 index 7323d81c1..000000000 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/export.py +++ /dev/null @@ -1,49 +0,0 @@ -import argparse -import os -from datetime import datetime -from typing import List - -from jupyter_ai.models import AgentChatMessage, AgentStreamMessage, HumanChatMessage - -from .base import BaseChatHandler, SlashCommandRoutingType - - -class ExportChatHandler(BaseChatHandler): - id = "export" - name = "Export chat history" - help = "Export chat history to a Markdown file" - routing_type = SlashCommandRoutingType(slash_id="export") - - uses_llm = False - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.parser.prog = "/export" - self.parser.add_argument("path", nargs=argparse.REMAINDER) - - def chat_message_to_markdown(self, message): - if isinstance(message, (AgentChatMessage, AgentStreamMessage)): - agent = self.config_manager.persona.name - return f"**{agent}**: {message.body}" - elif isinstance(message, HumanChatMessage): - return f"**{message.client.display_name}**: {message.body}" - else: - return "" - - # Write the chat history to a markdown file with a timestamp - async def process_message(self, message: HumanChatMessage): - markdown_content = "\n\n".join( - self.chat_message_to_markdown(msg) for msg in self._chat_history - ) - args = self.parse_args(message) - chat_filename = ( # if no filename, use "chat_history" + timestamp - args.path[0] - if (args.path and args.path[0] != "") - else f"chat_history-{datetime.now():%Y-%m-%d-%H-%M-%S}.md" - ) # Handles both empty args and double tap key - chat_file = os.path.join( - self.output_dir, 
chat_filename - ) # Do not use timestamp if filename is entered as argument - with open(chat_file, "w") as chat_history: - chat_history.write(markdown_content) - self.reply(f"File saved to `{chat_file}`") diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py deleted file mode 100644 index 390b93cf6..000000000 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py +++ /dev/null @@ -1,106 +0,0 @@ -from typing import Dict, Type - -from jupyter_ai.models import CellWithErrorSelection, HumanChatMessage -from jupyter_ai_magics.providers import BaseProvider -from langchain.prompts import PromptTemplate - -from .base import BaseChatHandler, SlashCommandRoutingType - -FIX_STRING_TEMPLATE = """ -You are Jupyternaut, a conversational assistant living in JupyterLab. Please fix -the notebook cell described below. - -Additional instructions: - -{extra_instructions} - -Input cell: - -``` -{cell_content} -``` - -Output error: - -``` -{traceback} - -{error_name}: {error_value} -``` -""".strip() - -FIX_PROMPT_TEMPLATE = PromptTemplate( - input_variables=[ - "extra_instructions", - "cell_content", - "traceback", - "error_name", - "error_value", - ], - template=FIX_STRING_TEMPLATE, -) - - -class FixChatHandler(BaseChatHandler): - """ - Accepts a `HumanChatMessage` that includes a cell with error output and - recommends a fix as a reply. If a cell with error output is not included, - this chat handler does nothing. - - `/fix` also accepts additional instructions in natural language as an - arbitrary number of arguments, e.g. - - ``` - /fix use the numpy library to implement this function instead. - ``` - """ - - id = "fix" - name = "Fix error cell" - help = "Fix an error cell selected in your notebook" - routing_type = SlashCommandRoutingType(slash_id="fix") - uses_llm = True - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.prompt_template = None - - def create_llm_chain( - self, provider: Type[BaseProvider], provider_params: Dict[str, str] - ): - unified_parameters = { - **provider_params, - **(self.get_model_parameters(provider, provider_params)), - } - llm = provider(**unified_parameters) - self.llm = llm - prompt_template = FIX_PROMPT_TEMPLATE - - runnable = prompt_template | llm # type:ignore - self.llm_chain = runnable - - async def process_message(self, message: HumanChatMessage): - if not (message.selection and message.selection.type == "cell-with-error"): - self.reply( - "`/fix` requires an active code cell with error output. Please click on a cell with error output and retry.", - message, - ) - return - - # hint type of selection - selection: CellWithErrorSelection = message.selection - - # parse additional instructions specified after `/fix` - extra_instructions = message.prompt[4:].strip() or "None." 
- - self.get_llm_chain() - assert self.llm_chain - - inputs = { - "extra_instructions": extra_instructions, - "cell_content": selection.source, - "traceback": "\n".join(selection.error.traceback), - "error_name": selection.error.name, - "error_value": selection.error.value, - } - await self.stream_reply(inputs, message, pending_msg="Analyzing error") diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/generate.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/generate.py index 6318e0979..09e7c03eb 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/generate.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/generate.py @@ -8,8 +8,8 @@ import nbformat from jupyter_ai.chat_handlers import BaseChatHandler, SlashCommandRoutingType -from jupyter_ai.models import HumanChatMessage from jupyter_ai_magics.providers import BaseProvider +from jupyterlab_chat.models import Message from langchain.chains import LLMChain from langchain.llms import BaseLLM from langchain.output_parsers import PydanticOutputParser @@ -292,7 +292,7 @@ async def _generate_notebook(self, prompt: str): nbformat.write(notebook, final_path) return final_path - async def process_message(self, message: HumanChatMessage): + async def process_message(self, message: Message): self.get_llm_chain() # first send a verification message to user @@ -303,7 +303,7 @@ async def process_message(self, message: HumanChatMessage): response = f"""🎉 I have created your notebook and saved it to the location {final_path}. I am still learning how to create notebooks, so please review all code before running it.""" self.reply(response, message) - async def handle_exc(self, e: Exception, message: HumanChatMessage): + async def handle_exc(self, e: Exception, message: Message): timestamp = time.strftime("%Y-%m-%d-%H.%M.%S") default_log_dir = Path(self.output_dir) / "jupyter-ai-logs" log_dir = self.log_dir or default_log_dir diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/help.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/help.py index cd8556863..6e8b41f6b 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/help.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/help.py @@ -1,4 +1,4 @@ -from jupyter_ai.models import HumanChatMessage +from jupyterlab_chat.models import Message from .base import BaseChatHandler, SlashCommandRoutingType @@ -15,5 +15,5 @@ class HelpChatHandler(BaseChatHandler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - async def process_message(self, message: HumanChatMessage): + async def process_message(self, message: Message): self.send_help_message(message) diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py index e0c6139c0..ff7c6adce 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py @@ -15,12 +15,12 @@ from jupyter_ai.models import ( DEFAULT_CHUNK_OVERLAP, DEFAULT_CHUNK_SIZE, - HumanChatMessage, IndexedDir, IndexMetadata, ) from jupyter_core.paths import jupyter_data_dir from jupyter_core.utils import ensure_dir_exists +from jupyterlab_chat.models import Message from langchain.schema import BaseRetriever, Document from langchain.text_splitter import ( LatexTextSplitter, @@ -128,12 +128,12 @@ def _load(self): ) self.log.error(e) - async def process_message(self, message: HumanChatMessage): + async def process_message(self, message: Message): # If no embedding provider has been selected em_provider_cls, em_provider_args = 
self.get_embedding_provider() if not em_provider_cls: self.reply( - "Sorry, please select an embedding provider before using the `/learn` command." + "Sorry, please select an embedding provider before using the `/learn` command.", ) return @@ -163,14 +163,15 @@ async def process_message(self, message: HumanChatMessage): except ModuleNotFoundError as e: self.log.error(e) self.reply( - "No `arxiv` package found. " "Install with `pip install arxiv`." + "No `arxiv` package found. " + "Install with `pip install arxiv`.", ) return except Exception as e: self.log.error(e) self.reply( "An error occurred while processing the arXiv file. " - f"Please verify that the arxiv id {id} is correct." + f"Please verify that the arxiv id {id} is correct.", ) return @@ -202,7 +203,9 @@ async def process_message(self, message: HumanChatMessage): # delete and relearn index if embedding model was changed await self.delete_and_relearn() - with self.pending(f"Loading and splitting files for {load_path}", message): + # TODO v3: reinstate pending message + # original pending message: "Loading and splitting files for {load_path}" + with self.start_reply_stream() as reply_stream: try: await self.learn_dir( load_path, args.chunk_size, args.chunk_overlap, args.all_files @@ -218,7 +221,7 @@ async def process_message(self, message: HumanChatMessage): You can ask questions about these docs by prefixing your message with **/ask**.""" % ( load_path.replace("*", r"\*") ) - self.reply(response, message) + reply_stream.write(response) def _build_list_response(self): if not self.metadata.dirs: diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/utils/streaming.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/utils/streaming.py new file mode 100644 index 000000000..69a6727ab --- /dev/null +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/utils/streaming.py @@ -0,0 +1,82 @@ +import time +from typing import Optional + +from jupyter_ai.constants import BOT +from jupyterlab_chat.models import Message, NewMessage, User +from jupyterlab_chat.ychat import YChat + + +class ReplyStreamClosed(Exception): + pass + + +class ReplyStream: + """ + Object yielded by the `BaseChatHandler.start_reply_stream()` context + manager. This provides three methods: + + - `open() -> str`: Opens a new, empty reply stream. This shows "Jupyternaut + is writing..." in the chat UI until the stream is closed. + - `write(chunk: str)`: Appends `chunk` to the reply stream. + - `close()`: Closes the reply stream. + + Note that `open()` and `close()` are automatically called by the + `BaseChatHandler.start_reply_stream()` context manager, so only `write()` + should be called within that context. + + TODO: Re-implement the capability to customize the pending message. + + TODO: Re-implement the capability to add metadata to messages. Message + metadata is important for some usage scenarios like implementing LLM + feedback in the UI, which requires some kind of LLM-specific request ID to + be available in the message metadata. + """ + + def __init__(self, ychat: YChat): + self.ychat = ychat + self._is_open = False + self._stream_id: Optional[str] = None + + def _set_user(self): + bot = self.ychat.get_user(BOT["username"]) + if not bot: + self.ychat.set_user(User(**BOT)) + + def open(self): + self._set_user() + self.ychat.awareness.set_local_state_field("isWriting", True) + self._is_open = True + + def write(self, chunk: str) -> str: + """ + Writes a string chunk to the current reply stream. Returns the ID of the + message that this reply stream is writing to. 
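+
+        A minimal usage sketch, assuming a chat handler that uses the
+        `BaseChatHandler.start_reply_stream()` context manager (which opens
+        and closes the stream automatically):
+
+            with self.start_reply_stream() as reply_stream:
+                for chunk in ("Hello, ", "world!"):
+                    reply_stream.write(chunk)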
+        """
+        try:
+            assert self._is_open
+        except:
+            raise ReplyStreamClosed("Reply stream must be opened first.") from None
+
+        if not self._stream_id:
+            self._set_user()
+            self._stream_id = self.ychat.add_message(
+                NewMessage(body="", sender=BOT["username"])
+            )
+
+        self._set_user()
+        self.ychat.update_message(
+            Message(
+                id=self._stream_id,
+                body=chunk,
+                time=time.time(),
+                sender=BOT["username"],
+                raw_time=False,
+            ),
+            append=True,
+        )
+
+        return self._stream_id
+
+    def close(self):
+        self.ychat.awareness.set_local_state_field("isWriting", False)
+        self._is_open = False
diff --git a/packages/jupyter-ai/jupyter_ai/constants.py b/packages/jupyter-ai/jupyter_ai/constants.py
new file mode 100644
index 000000000..ab212fb23
--- /dev/null
+++ b/packages/jupyter-ai/jupyter_ai/constants.py
@@ -0,0 +1,8 @@
+# The BOT user currently has a fixed username: since this username is used as a
+# key in chats, it needs to be constant. Do we need to change it?
+BOT = {
+    "username": "5f6a7570-7974-6572-6e61-75742d626f74",
+    "name": "Jupyternaut",
+    "display_name": "Jupyternaut",
+    "initials": "J",
+}
diff --git a/packages/jupyter-ai/jupyter_ai/context_providers/_learned.py b/packages/jupyter-ai/jupyter_ai/context_providers/_learned.py
deleted file mode 100644
index 5128487de..000000000
--- a/packages/jupyter-ai/jupyter_ai/context_providers/_learned.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Currently unused as it is duplicating the functionality of the /ask command.
-# TODO: Rename "learned" to something better.
-from typing import List
-
-from jupyter_ai.chat_handlers.learn import Retriever
-from jupyter_ai.models import HumanChatMessage
-
-from .base import BaseCommandContextProvider, ContextCommand
-from .file import FileContextProvider
-
-FILE_CHUNK_TEMPLATE = """
-Snippet from file: {filepath}
-```
-{content}
-```
-""".strip()
-
-
-class LearnedContextProvider(BaseCommandContextProvider):
-    id = "learned"
-    help = "Include content indexed from `/learn`"
-    remove_from_prompt = True
-    header = "Following are snippets from potentially relevant files:"
-
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-        self.retriever = Retriever(learn_chat_handler=self.chat_handlers["/learn"])
-
-    async def _make_context_prompt(
-        self, message: HumanChatMessage, commands: List[ContextCommand]
-    ) -> str:
-        if not self.retriever:
-            return ""
-        query = self._clean_prompt(message.body)
-        docs = await self.retriever.ainvoke(query)
-        excluded = self._get_repeated_files(message)
-        context = "\n\n".join(
-            [
-                FILE_CHUNK_TEMPLATE.format(
-                    filepath=d.metadata["path"], content=d.page_content
-                )
-                for d in docs
-                if d.metadata["path"] not in excluded and d.page_content
-            ]
-        )
-        return self.header + "\n" + context
-
-    def _get_repeated_files(self, message: HumanChatMessage) -> List[str]:
-        # don't include files that are already provided by the file context provider
-        file_context_provider = self.context_providers.get("file")
-        if isinstance(file_context_provider, FileContextProvider):
-            return file_context_provider.get_filepaths(message)
-        return []
diff --git a/packages/jupyter-ai/jupyter_ai/context_providers/base.py b/packages/jupyter-ai/jupyter_ai/context_providers/base.py
index 1b0953e84..d5b40acfa 100644
--- a/packages/jupyter-ai/jupyter_ai/context_providers/base.py
+++ b/packages/jupyter-ai/jupyter_ai/context_providers/base.py
@@ -1,18 +1,15 @@
 import abc
 import os
 import re
-from typing import TYPE_CHECKING, Awaitable, ClassVar, Dict, List, Optional
+from typing import Awaitable, ClassVar, Dict, List, Optional
 
 from
dask.distributed import Client as DaskClient from jupyter_ai.chat_handlers.base import get_preferred_dir from jupyter_ai.config_manager import ConfigManager, Logger -from jupyter_ai.models import ChatMessage, HumanChatMessage, ListOptionsEntry +from jupyter_ai.models import ListOptionsEntry +from jupyterlab_chat.models import Message from langchain.pydantic_v1 import BaseModel -if TYPE_CHECKING: - from jupyter_ai.chat_handlers import BaseChatHandler - from jupyter_ai.history import BoundedChatHistory - class _BaseContextProvider(abc.ABC): id: ClassVar[str] @@ -27,30 +24,24 @@ def __init__( log: Logger, config_manager: ConfigManager, model_parameters: Dict[str, Dict], - chat_history: List[ChatMessage], - llm_chat_memory: "BoundedChatHistory", root_dir: str, preferred_dir: Optional[str], dask_client_future: Awaitable[DaskClient], - chat_handlers: Dict[str, "BaseChatHandler"], context_providers: Dict[str, "BaseCommandContextProvider"], ): preferred_dir = preferred_dir or "" self.log = log self.config_manager = config_manager self.model_parameters = model_parameters - self._chat_history = chat_history - self.llm_chat_memory = llm_chat_memory self.root_dir = os.path.abspath(os.path.expanduser(root_dir)) self.preferred_dir = get_preferred_dir(self.root_dir, preferred_dir) self.dask_client_future = dask_client_future - self.chat_handlers = chat_handlers self.context_providers = context_providers self.llm = None @abc.abstractmethod - async def make_context_prompt(self, message: HumanChatMessage) -> str: + async def make_context_prompt(self, message: Message) -> str: """Returns a context prompt for all commands of the context provider command. """ @@ -149,18 +140,18 @@ def pattern(self) -> str: else rf"(? str: + async def make_context_prompt(self, message: Message) -> str: """Returns a context prompt for all commands of the context provider command. 
""" - commands = find_commands(self, message.prompt) + commands = find_commands(self, message.body) if not commands: return "" return await self._make_context_prompt(message, commands) @abc.abstractmethod async def _make_context_prompt( - self, message: HumanChatMessage, commands: List[ContextCommand] + self, message: Message, commands: List[ContextCommand] ) -> str: """Returns a context prompt for the given commands.""" pass diff --git a/packages/jupyter-ai/jupyter_ai/context_providers/file.py b/packages/jupyter-ai/jupyter_ai/context_providers/file.py index 45619122d..97b467be0 100644 --- a/packages/jupyter-ai/jupyter_ai/context_providers/file.py +++ b/packages/jupyter-ai/jupyter_ai/context_providers/file.py @@ -4,7 +4,8 @@ import nbformat from jupyter_ai.document_loaders.directory import SUPPORTED_EXTS -from jupyter_ai.models import HumanChatMessage, ListOptionsEntry +from jupyter_ai.models import ListOptionsEntry +from jupyterlab_chat.models import Message from .base import ( BaseCommandContextProvider, @@ -90,7 +91,7 @@ def get_file_type(self, filepath): return file_extension async def _make_context_prompt( - self, message: HumanChatMessage, commands: List[ContextCommand] + self, message: Message, commands: List[ContextCommand] ) -> str: context = "\n\n".join( [ @@ -159,9 +160,9 @@ def _replace_command(self, command: ContextCommand) -> str: filepath = command.arg or "" return f"'{filepath}'" - def get_filepaths(self, message: HumanChatMessage) -> List[str]: + def get_filepaths(self, message: Message) -> List[str]: filepaths = [] - for command in find_commands(self, message.prompt): + for command in find_commands(self, message.body): filepath = command.arg or "" if not os.path.isabs(filepath): filepath = os.path.join(self.base_dir, filepath) diff --git a/packages/jupyter-ai/jupyter_ai/extension.py b/packages/jupyter-ai/jupyter_ai/extension.py index 08c8c5a47..7391287a5 100644 --- a/packages/jupyter-ai/jupyter_ai/extension.py +++ b/packages/jupyter-ai/jupyter_ai/extension.py @@ -2,46 +2,66 @@ import re import time import types +from functools import partial +from typing import Dict +import traitlets from dask.distributed import Client as DaskClient from importlib_metadata import entry_points from jupyter_ai.chat_handlers.learn import Retriever from jupyter_ai_magics import BaseProvider, JupyternautPersona from jupyter_ai_magics.utils import get_em_providers, get_lm_providers +from jupyter_events import EventLogger from jupyter_server.extension.application import ExtensionApp +from jupyter_server.utils import url_path_join +from jupyterlab_chat.models import Message +from jupyterlab_chat.ychat import YChat +from pycrdt import ArrayEvent from tornado.web import StaticFileHandler -from traitlets import Dict, Integer, List, Unicode +from traitlets import Integer, List, Unicode from .chat_handlers import ( AskChatHandler, - ClearChatHandler, + BaseChatHandler, DefaultChatHandler, - ExportChatHandler, - FixChatHandler, GenerateChatHandler, HelpChatHandler, LearnChatHandler, ) from .completions.handlers import DefaultInlineCompletionHandler from .config_manager import ConfigManager +from .constants import BOT from .context_providers import BaseCommandContextProvider, FileContextProvider from .handlers import ( ApiKeysHandler, AutocompleteOptionsHandler, - ChatHistoryHandler, EmbeddingsModelProviderHandler, GlobalConfigHandler, ModelProviderHandler, - RootChatHandler, SlashCommandsInfoHandler, ) -from .history import BoundedChatHistory +from .history import YChatHistory + +from 
jupyter_collaboration import ( # type:ignore[import-untyped] # isort:skip + __version__ as jupyter_collaboration_version, +) + JUPYTERNAUT_AVATAR_ROUTE = JupyternautPersona.avatar_route JUPYTERNAUT_AVATAR_PATH = str( os.path.join(os.path.dirname(__file__), "static", "jupyternaut.svg") ) +JCOLLAB_VERSION = int(jupyter_collaboration_version[0]) + +if JCOLLAB_VERSION >= 3: + from jupyter_server_ydoc.utils import ( # type:ignore[import-not-found,import-untyped] + JUPYTER_COLLABORATION_EVENTS_URI, + ) +else: + from jupyter_collaboration.utils import ( # type:ignore[import-not-found,import-untyped] + JUPYTER_COLLABORATION_EVENTS_URI, + ) DEFAULT_HELP_MESSAGE_TEMPLATE = """Hi there! I'm {persona_name}, your programming assistant. You can ask me a question using the text box below. You can also use these commands: @@ -60,8 +80,6 @@ class AiExtension(ExtensionApp): handlers = [ # type:ignore[assignment] (r"api/ai/api_keys/(?P\w+)", ApiKeysHandler), (r"api/ai/config/?", GlobalConfigHandler), - (r"api/ai/chats/?", RootChatHandler), - (r"api/ai/chats/history?", ChatHistoryHandler), (r"api/ai/chats/slash_commands?", SlashCommandsInfoHandler), (r"api/ai/chats/autocomplete_options?", AutocompleteOptionsHandler), (r"api/ai/providers?", ModelProviderHandler), @@ -121,9 +139,9 @@ class AiExtension(ExtensionApp): config=True, ) - model_parameters = Dict( + model_parameters = traitlets.Dict( key_trait=Unicode(), - value_trait=Dict(), + value_trait=traitlets.Dict(), default_value={}, help="""Key-value pairs for model id and corresponding parameters that are passed to the provider class. The values are unpacked and passed to @@ -161,7 +179,7 @@ class AiExtension(ExtensionApp): config=True, ) - default_api_keys = Dict( + default_api_keys = traitlets.Dict( key_trait=Unicode(), value_trait=Unicode(), default_value=None, @@ -204,6 +222,125 @@ class AiExtension(ExtensionApp): config=True, ) + def initialize(self): + super().initialize() + + self.chat_handlers_by_room: Dict[str, Dict[str, BaseChatHandler]] = {} + """ + Nested dictionary that returns the dedicated chat handler instance that + should be used, given the room ID and command ID respectively. + + Example: `self.chat_handlers_by_room[]` yields the set of chat + handlers dedicated to the room identified by ``. + """ + + self.ychats_by_room: Dict[str, YChat] = {} + """Cache of YChat instances, indexed by room ID.""" + + self.event_logger = self.serverapp.web_app.settings["event_logger"] + self.event_logger.add_listener( + schema_id=JUPYTER_COLLABORATION_EVENTS_URI, listener=self.connect_chat + ) + + async def connect_chat( + self, logger: EventLogger, schema_id: str, data: dict + ) -> None: + # ignore events that are not chat room initialization events + if not ( + data["room"].startswith("text:chat:") + and data["action"] == "initialize" + and data["msg"] == "Room initialized" + ): + return + + # log room ID + room_id = data["room"] + self.log.info(f"Connecting to a chat room with room ID: {room_id}.") + + # get YChat document associated with the room + ychat = await self.get_chat(room_id) + if ychat is None: + return + + # Add the bot user to the chat document awareness. 
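+        # The avatar is served by this extension's static file route, so its
+        # URL must be joined with the server's base URL (e.g. when running
+        # behind JupyterHub or a reverse proxy).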
+ BOT["avatar_url"] = url_path_join( + self.settings.get("base_url", "/"), "api/ai/static/jupyternaut.svg" + ) + if ychat.awareness is not None: + ychat.awareness.set_local_state_field("user", BOT) + + # initialize chat handlers for new chat + self.chat_handlers_by_room[room_id] = self._init_chat_handlers(ychat) + + callback = partial(self.on_change, room_id) + ychat.ymessages.observe(callback) + + async def get_chat(self, room_id: str) -> YChat: + """ + Retrieves the YChat instance associated with a room ID. This method + is cached, i.e. successive calls with the same room ID quickly return a + cached value. + """ + if room_id in self.ychats_by_room: + return self.ychats_by_room[room_id] + + assert self.serverapp + if JCOLLAB_VERSION >= 3: + collaboration = self.serverapp.web_app.settings["jupyter_server_ydoc"] + document = await collaboration.get_document(room_id=room_id, copy=False) + else: + collaboration = self.serverapp.web_app.settings["jupyter_collaboration"] + server = collaboration.ywebsocket_server + + room = await server.get_room(room_id) + document = room._document + + assert document + self.ychats_by_room[room_id] = document + return document + + def on_change(self, room_id: str, events: ArrayEvent) -> None: + assert self.serverapp + + for change in events.delta: # type:ignore[attr-defined] + if not "insert" in change.keys(): + continue + messages = change["insert"] + for message_dict in messages: + message = Message(**message_dict) + if message.sender == BOT["username"] or message.raw_time: + continue + + self.serverapp.io_loop.asyncio_loop.create_task( # type:ignore[attr-defined] + self.route_human_message(room_id, message) + ) + + async def route_human_message(self, room_id: str, message: Message): + """ + Method that routes an incoming human message to the appropriate chat + handler. + """ + chat_handlers = self.chat_handlers_by_room[room_id] + default = chat_handlers["default"] + # Split on any whitespace, either spaces or newlines + maybe_command = message.body.split(None, 1)[0] + is_command = ( + message.body.startswith("/") + and maybe_command in chat_handlers.keys() + and maybe_command != "default" + ) + command = maybe_command if is_command else "default" + + start = time.time() + if is_command: + await chat_handlers[command].on_message(message) + else: + await default.on_message(message) + + latency_ms = round((time.time() - start) * 1000) + command_readable = "Default" if command == "default" else command + self.log.info(f"{command_readable} chat handler resolved in {latency_ms} ms.") + def initialize_settings(self): start = time.time() @@ -259,23 +396,6 @@ def initialize_settings(self): self.log.info(f"Registered {self.name} server extension") - # Store chat clients in a dictionary - self.settings["chat_clients"] = {} - self.settings["jai_root_chat_handlers"] = {} - - # list of chat messages to broadcast to new clients - # this is only used to render the UI, and is not the conversational - # memory object used by the LM chain. - self.settings["chat_history"] = [] - - # conversational memory object used by LM chain - self.settings["llm_chat_memory"] = BoundedChatHistory( - k=self.default_max_chat_history - ) - - # list of pending messages - self.settings["pending_messages"] = [] - # get reference to event loop # `asyncio.get_event_loop()` is deprecated in Python 3.11+, in favor of # the more readable `asyncio.get_event_loop_policy().get_event_loop()`. @@ -297,31 +417,12 @@ def initialize_settings(self): # message generation/streaming got interrupted. 
self.settings["jai_message_interrupted"] = {} - # initialize chat handlers - self._init_chat_handlers() - # initialize context providers - self._init_context_provders() - - # show help message at server start - self._show_help_message() + self._init_context_providers() latency_ms = round((time.time() - start) * 1000) self.log.info(f"Initialized Jupyter AI server extension in {latency_ms} ms.") - def _show_help_message(self): - """ - Method that ensures a dynamically-generated help message is included in - the chat history shown to users. - """ - # call `send_help_message()` on any instance of `BaseChatHandler`. The - # `default` chat handler should always exist, so we reference that - # object when calling `send_help_message()`. - default_chat_handler: DefaultChatHandler = self.settings["jai_chat_handlers"][ - "default" - ] - default_chat_handler.send_help_message() - async def _get_dask_client(self): return DaskClient(processes=False, asynchronous=True) @@ -349,17 +450,25 @@ async def _stop_extension(self): await dask_client.close() self.log.debug("Closed Dask client.") - def _init_chat_handlers(self): + def _init_chat_handlers(self, ychat: YChat) -> Dict[str, BaseChatHandler]: + """ + Initializes a set of chat handlers. May accept a YChat instance for + collaborative chats. + + TODO: Make `ychat` required once Jupyter Chat migration is complete. + """ + assert self.serverapp + eps = entry_points() chat_handler_eps = eps.select(group="jupyter_ai.chat_handlers") - chat_handlers = {} + chat_handlers: Dict[str, BaseChatHandler] = {} + llm_chat_memory = YChatHistory(ychat, k=self.default_max_chat_history) + chat_handler_kwargs = { "log": self.log, "config_manager": self.settings["jai_config_manager"], "model_parameters": self.settings["model_parameters"], - "root_chat_handlers": self.settings["jai_root_chat_handlers"], - "chat_history": self.settings["chat_history"], - "llm_chat_memory": self.settings["llm_chat_memory"], + "llm_chat_memory": llm_chat_memory, "root_dir": self.serverapp.root_dir, "dask_client_future": self.settings["dask_client_future"], "preferred_dir": self.serverapp.contents_manager.preferred_dir, @@ -367,9 +476,9 @@ def _init_chat_handlers(self): "chat_handlers": chat_handlers, "context_providers": self.settings["jai_context_providers"], "message_interrupted": self.settings["jai_message_interrupted"], + "ychat": ychat, } default_chat_handler = DefaultChatHandler(**chat_handler_kwargs) - clear_chat_handler = ClearChatHandler(**chat_handler_kwargs) generate_chat_handler = GenerateChatHandler( **chat_handler_kwargs, log_dir=self.error_logs_dir, @@ -378,17 +487,10 @@ def _init_chat_handlers(self): retriever = Retriever(learn_chat_handler=learn_chat_handler) ask_chat_handler = AskChatHandler(**chat_handler_kwargs, retriever=retriever) - export_chat_handler = ExportChatHandler(**chat_handler_kwargs) - - fix_chat_handler = FixChatHandler(**chat_handler_kwargs) - chat_handlers["default"] = default_chat_handler chat_handlers["/ask"] = ask_chat_handler - chat_handlers["/clear"] = clear_chat_handler chat_handlers["/generate"] = generate_chat_handler chat_handlers["/learn"] = learn_chat_handler - chat_handlers["/export"] = export_chat_handler - chat_handlers["/fix"] = fix_chat_handler slash_command_pattern = r"^[a-zA-Z0-9_]+$" for chat_handler_ep in chat_handler_eps: @@ -439,10 +541,9 @@ def _init_chat_handlers(self): # Make help always appear as the last command chat_handlers["/help"] = HelpChatHandler(**chat_handler_kwargs) - # bind chat handlers to settings - 
self.settings["jai_chat_handlers"] = chat_handlers + return chat_handlers - def _init_context_provders(self): + def _init_context_providers(self): eps = entry_points() context_providers_eps = eps.select(group="jupyter_ai.context_providers") context_providers = self.settings["jai_context_providers"] @@ -450,12 +551,9 @@ def _init_context_provders(self): "log": self.log, "config_manager": self.settings["jai_config_manager"], "model_parameters": self.settings["model_parameters"], - "chat_history": self.settings["chat_history"], - "llm_chat_memory": self.settings["llm_chat_memory"], "root_dir": self.serverapp.root_dir, "dask_client_future": self.settings["dask_client_future"], "preferred_dir": self.serverapp.contents_manager.preferred_dir, - "chat_handlers": self.settings["jai_chat_handlers"], "context_providers": self.settings["jai_context_providers"], } context_providers_clses = [ diff --git a/packages/jupyter-ai/jupyter_ai/handlers.py b/packages/jupyter-ai/jupyter_ai/handlers.py index 28b169c00..394994996 100644 --- a/packages/jupyter-ai/jupyter_ai/handlers.py +++ b/packages/jupyter-ai/jupyter_ai/handlers.py @@ -1,44 +1,27 @@ -import getpass -import json -import time -import uuid -from asyncio import AbstractEventLoop, Event -from dataclasses import asdict -from typing import TYPE_CHECKING, Dict, List, Optional, Set, cast - -import tornado -from jupyter_ai.chat_handlers import BaseChatHandler, SlashCommandRoutingType +from typing import TYPE_CHECKING, Dict, List, Optional, Type, cast + +from jupyter_ai.chat_handlers import ( + AskChatHandler, + DefaultChatHandler, + GenerateChatHandler, + HelpChatHandler, + LearnChatHandler, + SlashCommandRoutingType, +) from jupyter_ai.config_manager import ConfigManager, KeyEmptyError, WriteConflictError from jupyter_ai.context_providers import BaseCommandContextProvider, ContextCommand from jupyter_server.base.handlers import APIHandler as BaseAPIHandler -from jupyter_server.base.handlers import JupyterHandler from langchain.pydantic_v1 import ValidationError -from tornado import web, websocket +from tornado import web from tornado.web import HTTPError from .models import ( - AgentChatMessage, - AgentStreamChunkMessage, - AgentStreamMessage, - ChatClient, - ChatHistory, - ChatMessage, - ChatRequest, - ChatUser, - ClearMessage, - ClearRequest, - ClosePendingMessage, - ConnectionMessage, - HumanChatMessage, ListOptionsEntry, ListOptionsResponse, ListProvidersEntry, ListProvidersResponse, ListSlashCommandsEntry, ListSlashCommandsResponse, - Message, - PendingMessage, - StopRequest, UpdateConfigRequest, ) @@ -46,368 +29,18 @@ from jupyter_ai_magics.embedding_providers import BaseEmbeddingsProvider from jupyter_ai_magics.providers import BaseProvider + from .chat_handlers import BaseChatHandler from .context_providers import BaseCommandContextProvider - from .history import BoundedChatHistory - - -class ChatHistoryHandler(BaseAPIHandler): - """Handler to return message history""" - - @property - def chat_history(self) -> List[ChatMessage]: - return self.settings["chat_history"] - - @property - def pending_messages(self) -> List[PendingMessage]: - return self.settings["pending_messages"] - - @tornado.web.authenticated - async def get(self): - history = ChatHistory( - messages=self.chat_history, pending_messages=self.pending_messages - ) - self.finish(history.json()) - - -class RootChatHandler(JupyterHandler, websocket.WebSocketHandler): - """ - A websocket handler for chat. 
- """ - - @property - def root_chat_handlers(self) -> Dict[str, "RootChatHandler"]: - """Dictionary mapping client IDs to their corresponding RootChatHandler - instances.""" - return self.settings["jai_root_chat_handlers"] - - @property - def chat_handlers(self) -> Dict[str, "BaseChatHandler"]: - """Dictionary mapping chat commands to their corresponding - BaseChatHandler instances.""" - return self.settings["jai_chat_handlers"] - - @property - def chat_clients(self) -> Dict[str, ChatClient]: - """Dictionary mapping client IDs to their ChatClient objects that store - metadata.""" - return self.settings["chat_clients"] - - @property - def chat_client(self) -> ChatClient: - """Returns ChatClient object associated with the current connection.""" - return self.chat_clients[self.client_id] - - @property - def chat_history(self) -> List[ChatMessage]: - return self.settings["chat_history"] - - @chat_history.setter - def chat_history(self, new_history): - self.settings["chat_history"] = new_history - - @property - def message_interrupted(self) -> Dict[str, Event]: - return self.settings["jai_message_interrupted"] - - @property - def llm_chat_memory(self) -> "BoundedChatHistory": - return self.settings["llm_chat_memory"] - - @property - def loop(self) -> AbstractEventLoop: - return self.settings["jai_event_loop"] - - @property - def pending_messages(self) -> List[PendingMessage]: - return self.settings["pending_messages"] - - @pending_messages.setter - def pending_messages(self, new_pending_messages): - self.settings["pending_messages"] = new_pending_messages - - def initialize(self): - self.log.debug("Initializing websocket connection %s", self.request.path) - - def pre_get(self): - """Handles authentication/authorization.""" - # authenticate the request before opening the websocket - user = self.current_user - if user is None: - self.log.warning("Couldn't authenticate WebSocket connection") - raise web.HTTPError(403) - - # authorize the user. - if not self.authorizer.is_authorized(self, user, "execute", "events"): - raise web.HTTPError(403) - - async def get(self, *args, **kwargs): - """Get an event socket.""" - self.pre_get() - res = super().get(*args, **kwargs) - await res - - def get_chat_user(self) -> ChatUser: - """Retrieves the current user. If `jupyter_collaboration` is not - installed, one is synthesized from the server's current shell - environment.""" - # Get a dictionary of all loaded extensions. - # (`serverapp` is a property on all `JupyterHandler` subclasses) - assert self.serverapp - extensions = self.serverapp.extension_manager.extensions - collaborative_legacy = ( - "jupyter_collaboration" in extensions - and extensions["jupyter_collaboration"].enabled - ) - collaborative_v3 = ( - "jupyter_server_ydoc" in extensions - and extensions["jupyter_server_ydoc"].enabled - ) - collaborative = collaborative_legacy or collaborative_v3 - - if collaborative: - names = self.current_user.name.split(" ", maxsplit=2) - initials = getattr(self.current_user, "initials", None) - if not initials: - # compute default initials in case IdentityProvider doesn't - # return initials, e.g. 
JupyterHub (#302) - names = self.current_user.name.split(" ", maxsplit=2) - initials = "".join( - [(name.capitalize()[0] if len(name) > 0 else "") for name in names] - ) - chat_user_kwargs = { - **asdict(self.current_user), - "initials": initials, - } - - return ChatUser(**chat_user_kwargs) - - login = getpass.getuser() - initials = login[0].capitalize() - return ChatUser( - username=self.current_user.username, - initials=initials, - name=login, - display_name=login, - color=None, - avatar_url=None, - ) - - def generate_client_id(self): - """Generates a client ID to identify the current WS connection.""" - return uuid.uuid4().hex - - def open(self): - """Handles opening of a WebSocket connection. Client ID can be retrieved - from `self.client_id`.""" - - current_user = self.get_chat_user().dict() - client_id = self.generate_client_id() - - self.root_chat_handlers[client_id] = self - self.chat_clients[client_id] = ChatClient(**current_user, id=client_id) - self.client_id = client_id - self.write_message( - ConnectionMessage( - client_id=client_id, - history=ChatHistory( - messages=self.chat_history, pending_messages=self.pending_messages - ), - ).dict() - ) - - self.log.info(f"Client connected. ID: {client_id}") - self.log.debug("Clients are : %s", self.root_chat_handlers.keys()) - - def broadcast_message(self, message: Message): - """Broadcasts message to all connected clients. - Appends message to chat history. - """ - - # do not broadcast agent messages that are replying to cleared human message - if ( - isinstance(message, (AgentChatMessage, AgentStreamMessage)) - and message.reply_to - and message.reply_to - not in [m.id for m in self.chat_history if isinstance(m, HumanChatMessage)] - ): - return - - self.log.debug("Broadcasting message: %s to all clients...", message) - client_ids = self.root_chat_handlers.keys() - - for client_id in client_ids: - client = self.root_chat_handlers[client_id] - if client: - client.write_message(message.dict()) - - # append all messages of type `ChatMessage` directly to the chat history - if isinstance( - message, (HumanChatMessage, AgentChatMessage, AgentStreamMessage) - ): - self.chat_history.append(message) - elif isinstance(message, AgentStreamChunkMessage): - # for stream chunks, modify the corresponding `AgentStreamMessage` - # by appending its content and potentially marking it as complete. 
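For context on the removal below: the legacy broadcast path folded each stream chunk into its parent `agent-stream` message in place. Reduced to a pure function over plain dicts (a sketch, not the original Pydantic types), the rule was:

```py
def fold_chunk(history: list, chunk_id: str, content: str, complete: bool) -> None:
    """Append a chunk to the newest `agent-stream` message with a
    matching ID, optionally marking the stream complete."""
    for msg in reversed(history):
        if msg["type"] == "agent-stream" and msg["id"] == chunk_id:
            msg["body"] += content
            msg["complete"] = complete
            break


history = [{"type": "agent-stream", "id": "a1", "body": "Hel", "complete": False}]
fold_chunk(history, "a1", "lo!", True)
assert history[0] == {
    "type": "agent-stream", "id": "a1", "body": "Hello!", "complete": True
}
```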
- chunk: AgentStreamChunkMessage = message - - # iterate backwards from the end of the list - for history_message in self.chat_history[::-1]: - if ( - history_message.type == "agent-stream" - and history_message.id == chunk.id - ): - stream_message: AgentStreamMessage = history_message - stream_message.body += chunk.content - stream_message.metadata = chunk.metadata - stream_message.complete = chunk.stream_complete - break - elif isinstance(message, PendingMessage): - self.pending_messages.append(message) - elif isinstance(message, ClosePendingMessage): - self.pending_messages = list( - filter(lambda m: m.id != message.id, self.pending_messages) - ) - - async def on_message(self, message): - self.log.debug("Message received: %s", message) - - try: - message = json.loads(message) - if message.get("type") == "clear": - request = ClearRequest(**message) - elif message.get("type") == "stop": - request = StopRequest(**message) - else: - request = ChatRequest(**message) - except ValidationError as e: - self.log.error(e) - return - - if isinstance(request, ClearRequest): - self.on_clear_request(request) - return - - if isinstance(request, StopRequest): - self.on_stop_request() - return - - chat_request = request - message_body = chat_request.prompt - if chat_request.selection: - message_body += f"\n\n```\n{chat_request.selection.source}\n```\n" - - # message broadcast to chat clients - chat_message_id = str(uuid.uuid4()) - chat_message = HumanChatMessage( - id=chat_message_id, - time=time.time(), - body=message_body, - prompt=chat_request.prompt, - selection=chat_request.selection, - client=self.chat_client, - ) - - # broadcast the message to other clients - self.broadcast_message(message=chat_message) - - # do not await this, as it blocks the parent task responsible for - # handling messages from a websocket. instead, process each message - # as a distinct concurrent task. - self.loop.create_task(self._route(chat_message)) - - def on_clear_request(self, request: ClearRequest): - target = request.target - - # if no target, clear all messages - if not target: - self.chat_history.clear() - self.pending_messages.clear() - self.llm_chat_memory.clear() - self.broadcast_message(ClearMessage()) - self.settings["jai_chat_handlers"]["default"].send_help_message() - return - - # otherwise, clear a single message - for msg in self.chat_history[::-1]: - # interrupt the single message - if msg.type == "agent-stream" and getattr(msg, "reply_to", None) == target: - try: - self.message_interrupted[msg.id].set() - except KeyError: - # do nothing if the message was already interrupted - # or stream got completed (thread-safe way!) - pass - break - - self.chat_history[:] = [ - msg - for msg in self.chat_history - if msg.id != target and getattr(msg, "reply_to", None) != target - ] - self.pending_messages[:] = [ - msg for msg in self.pending_messages if msg.reply_to != target - ] - self.llm_chat_memory.clear([target]) - self.broadcast_message(ClearMessage(targets=[target])) - - def on_stop_request(self): - # set of message IDs that were submitted by this user, determined by the - # username associated with this WebSocket connection. 
- current_user_messages: Set[str] = set() - for message in self.chat_history: - if ( - message.type == "human" - and message.client.username == self.current_user.username - ): - current_user_messages.add(message.id) - - # set of `AgentStreamMessage` IDs to stop - streams_to_stop: Set[str] = set() - for message in self.chat_history: - if ( - message.type == "agent-stream" - and message.reply_to in current_user_messages - ): - streams_to_stop.add(message.id) - - for stream_id in streams_to_stop: - try: - self.message_interrupted[stream_id].set() - except KeyError: - # do nothing if the message was already interrupted - # or stream got completed (thread-safe way!) - pass - - async def _route(self, message): - """Method that routes an incoming message to the appropriate handler.""" - default = self.chat_handlers["default"] - # Split on any whitespace, either spaces or newlines - maybe_command = message.body.split(None, 1)[0] - is_command = ( - message.body.startswith("/") - and maybe_command in self.chat_handlers.keys() - and maybe_command != "default" - ) - command = maybe_command if is_command else "default" - - start = time.time() - if is_command: - await self.chat_handlers[command].on_message(message) - else: - await default.on_message(message) - - latency_ms = round((time.time() - start) * 1000) - command_readable = "Default" if command == "default" else command - self.log.info(f"{command_readable} chat handler resolved in {latency_ms} ms.") - - def on_close(self): - self.log.debug("Disconnecting client with user %s", self.client_id) - - self.root_chat_handlers.pop(self.client_id, None) - self.chat_clients.pop(self.client_id, None) - self.log.info(f"Client disconnected. ID: {self.client_id}") - self.log.debug("Chat clients: %s", self.root_chat_handlers.keys()) +# TODO v3: unify loading of chat handlers in a single place, then read +# from that instead of this hard-coded dict. 
+CHAT_HANDLER_DICT = { + "default": DefaultChatHandler, + "/ask": AskChatHandler, + "/learn": LearnChatHandler, + "/generate": GenerateChatHandler, + "/help": HelpChatHandler, +} class ProviderHandler(BaseAPIHandler): @@ -578,8 +211,8 @@ def config_manager(self) -> ConfigManager: # type:ignore[override] return self.settings["jai_config_manager"] @property - def chat_handlers(self) -> Dict[str, "BaseChatHandler"]: - return self.settings["jai_chat_handlers"] + def chat_handlers(self) -> Dict[str, Type["BaseChatHandler"]]: + return CHAT_HANDLER_DICT @web.authenticated def get(self): @@ -631,8 +264,8 @@ def context_providers(self) -> Dict[str, "BaseCommandContextProvider"]: return self.settings["jai_context_providers"] @property - def chat_handlers(self) -> Dict[str, "BaseChatHandler"]: - return self.settings["jai_chat_handlers"] + def chat_handlers(self) -> Dict[str, Type["BaseChatHandler"]]: + return CHAT_HANDLER_DICT @web.authenticated def get(self): diff --git a/packages/jupyter-ai/jupyter_ai/history.py b/packages/jupyter-ai/jupyter_ai/history.py index 0f1ba7dc0..8d85e3ab7 100644 --- a/packages/jupyter-ai/jupyter_ai/history.py +++ b/packages/jupyter-ai/jupyter_ai/history.py @@ -1,111 +1,62 @@ -import time -from typing import List, Optional, Sequence, Set, Union +from typing import List, Optional +from jupyter_ai.constants import BOT +from jupyterlab_chat.models import Message as JChatMessage +from jupyterlab_chat.ychat import YChat from langchain_core.chat_history import BaseChatMessageHistory -from langchain_core.messages import BaseMessage -from langchain_core.pydantic_v1 import BaseModel, PrivateAttr +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage -from .models import HumanChatMessage -HUMAN_MSG_ID_KEY = "_jupyter_ai_human_msg_id" - - -class BoundedChatHistory(BaseChatMessageHistory, BaseModel): +class YChatHistory(BaseChatMessageHistory): """ - An in-memory implementation of `BaseChatMessageHistory` that stores up to - `k` exchanges between a user and an LLM. - - For example, when `k=2`, `BoundedChatHistory` will store up to 2 human - messages and 2 AI messages. If `k` is set to `None` all messages are kept. - """ - - k: Union[int, None] - clear_time: float = 0.0 - cleared_msgs: Set[str] = set() - _all_messages: List[BaseMessage] = PrivateAttr(default_factory=list) + An implementation of `BaseChatMessageHistory` that returns the preceding `k` + exchanges (`k * 2` messages) from the given YChat model. - @property - def messages(self) -> List[BaseMessage]: # type:ignore[override] - if self.k is None: - return self._all_messages - return self._all_messages[-self.k * 2 :] - - async def aget_messages(self) -> List[BaseMessage]: - return self.messages - - def add_message(self, message: BaseMessage) -> None: - """Add a self-created message to the store""" - if HUMAN_MSG_ID_KEY not in message.additional_kwargs: - # human message id must be added to allow for targeted clearing of messages. - # `WrappedBoundedChatHistory` should be used instead to add messages. - raise ValueError( - "Message must have a human message ID to be added to the store." - ) - self._all_messages.append(message) + If `k` is set to `None`, then this class returns all preceding messages. - async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: - """Add messages to the store""" - self.add_messages(messages) - - def clear(self, human_msg_ids: Optional[List[str]] = None) -> None: - """Clears conversation exchanges. 
If `human_msg_id` is provided, only - clears the respective human message and its reply. Otherwise, clears - all messages.""" - if human_msg_ids: - self._all_messages = [ - m - for m in self._all_messages - if m.additional_kwargs[HUMAN_MSG_ID_KEY] not in human_msg_ids - ] - self.cleared_msgs.update(human_msg_ids) - else: - self._all_messages = [] - self.cleared_msgs = set() - self.clear_time = time.time() - - async def aclear(self) -> None: - self.clear() - - -class WrappedBoundedChatHistory(BaseChatMessageHistory, BaseModel): + TODO: Consider just defining `k` as the number of messages and default to 4. """ - Wrapper around `BoundedChatHistory` that only appends an `AgentChatMessage` - if the `HumanChatMessage` it is replying to was not cleared. If a chat - handler is replying to a `HumanChatMessage`, it should pass this object via - the `last_human_msg` configuration parameter. - - For example, a chat handler that is streaming a reply to a - `HumanChatMessage` should be called via: - ```py - async for chunk in self.llm_chain.astream( - {"input": message.body}, - config={"configurable": {"last_human_msg": message}}, - ): - ... - ``` - - Reference: https://python.langchain.com/v0.1/docs/expression_language/how_to/message_history/ - """ - - history: BoundedChatHistory - last_human_msg: HumanChatMessage + def __init__(self, ychat: YChat, k: Optional[int] = None): + self.ychat = ychat + self.k = k @property def messages(self) -> List[BaseMessage]: # type:ignore[override] - return self.history.messages + """ + Returns the last `2 * k` messages preceding the latest message. If + `k` is set to `None`, return all preceding messages. + """ + # TODO: consider bounding history based on message size (e.g. total + # char/token count) instead of message count. + all_messages = self.ychat.get_messages() + + # gather last k * 2 messages and return + # we exclude the last message since that is the human message just + # submitted by a user. + start_idx = 0 if self.k is None else -2 * self.k - 1 + recent_messages: List[JChatMessage] = all_messages[start_idx:-1] + + return self._convert_to_langchain_messages(recent_messages) + + def _convert_to_langchain_messages(self, jchat_messages: List[JChatMessage]): + """ + Accepts a list of Jupyter Chat messages, and returns them as a list of + LangChain messages. + """ + messages: List[BaseMessage] = [] + for jchat_message in jchat_messages: + if jchat_message.sender == BOT["username"]: + messages.append(AIMessage(content=jchat_message.body)) + else: + messages.append(HumanMessage(content=jchat_message.body)) + + return messages def add_message(self, message: BaseMessage) -> None: - # prevent adding pending messages to the store if clear was triggered. - if ( - self.last_human_msg.time > self.history.clear_time - and self.last_human_msg.id not in self.history.cleared_msgs - ): - message.additional_kwargs[HUMAN_MSG_ID_KEY] = self.last_human_msg.id - self.history.add_message(message) - - async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: - self.add_messages(messages) + # do nothing when other LangChain objects call this method, since + # message history is maintained by the `YChat` shared document. 
+ return - def clear(self) -> None: - self.history.clear() + def clear(self): + raise NotImplementedError() diff --git a/packages/jupyter-ai/jupyter_ai/models.py b/packages/jupyter-ai/jupyter_ai/models.py index 6bd7d4e06..180c3d13f 100644 --- a/packages/jupyter-ai/jupyter_ai/models.py +++ b/packages/jupyter-ai/jupyter_ai/models.py @@ -1,7 +1,7 @@ -import json -from typing import Any, Dict, List, Literal, Optional, Union +from typing import Any, Dict, List, Optional -from jupyter_ai_magics import Persona +# unused import: exports Persona from this module +from jupyter_ai_magics.models.persona import Persona from jupyter_ai_magics.providers import AuthStrategy, Field from langchain.pydantic_v1 import BaseModel, validator @@ -9,203 +9,6 @@ DEFAULT_CHUNK_OVERLAP = 100 -class CellError(BaseModel): - name: str - value: str - traceback: List[str] - - -class TextSelection(BaseModel): - type: Literal["text"] = "text" - source: str - - -class CellSelection(BaseModel): - type: Literal["cell"] = "cell" - source: str - - -class CellWithErrorSelection(BaseModel): - type: Literal["cell-with-error"] = "cell-with-error" - source: str - error: CellError - - -Selection = Union[TextSelection, CellSelection, CellWithErrorSelection] - - -# the type of message used to chat with the agent -class ChatRequest(BaseModel): - prompt: str - selection: Optional[Selection] - - -class StopRequest(BaseModel): - """ - A request from a user to stop streaming all messages that are replying to - messages previously sent by that user. This request does not stop all - streaming responses for all users, but only the user that issued the - request. User identity is determined by the `username` from the - `IdentityProvider` instance available to each WebSocket handler. - """ - - type: Literal["stop"] - - -class ClearRequest(BaseModel): - type: Literal["clear"] = "clear" - target: Optional[str] - """ - Message ID of the HumanChatMessage to delete an exchange at. - If not provided, this requests the backend to clear all messages. - """ - - -class ChatUser(BaseModel): - # User ID assigned by IdentityProvider. - username: str - initials: str - name: str - display_name: str - color: Optional[str] - avatar_url: Optional[str] - - -class ChatClient(ChatUser): - # A unique client ID assigned to identify different JupyterLab clients on - # the same device (i.e. running on multiple tabs/windows), which may have - # the same username assigned to them by the IdentityProvider. - id: str - - -class BaseAgentMessage(BaseModel): - id: str - time: float - body: str - - reply_to: str - """ - Message ID of the HumanChatMessage being replied to. This is set to an empty - string if not applicable. - """ - - persona: Persona - """ - The persona of the selected provider. If the selected provider is `None`, - this defaults to a description of `JupyternautPersona`. - """ - - metadata: Dict[str, Any] = {} - """ - Message metadata set by a provider after fully processing an input. The - contents of this dictionary are provider-dependent, and can be any - dictionary with string keys. This field is not to be displayed directly to - the user, and is intended solely for developer purposes. 
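Stepping back to `YChatHistory.messages` above: the slice arithmetic is easy to misread, so here is the window rule in isolation, with integers standing in for messages (`window` is an illustrative name, not part of this patch):

```py
from typing import List, Optional


def window(messages: List[int], k: Optional[int]) -> List[int]:
    """Mirror `YChatHistory.messages`: return the last `2 * k` messages
    that precede the newest one (the just-submitted human message)."""
    start_idx = 0 if k is None else -2 * k - 1
    return messages[start_idx:-1]


msgs = list(range(10))  # 0..9, where 9 is the just-submitted message
assert window(msgs, 2) == [5, 6, 7, 8]  # 2 exchanges = 4 messages
assert window(msgs, None) == msgs[:-1]  # unbounded: everything but the last
```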
- """ - - -class AgentChatMessage(BaseAgentMessage): - type: Literal["agent"] = "agent" - - -class AgentStreamMessage(BaseAgentMessage): - type: Literal["agent-stream"] = "agent-stream" - complete: bool - # other attrs inherited from `AgentChatMessage` - - -class AgentStreamChunkMessage(BaseModel): - type: Literal["agent-stream-chunk"] = "agent-stream-chunk" - id: str - """ID of the parent `AgentStreamMessage`.""" - content: str - """The string to append to the `AgentStreamMessage` referenced by `id`.""" - stream_complete: bool - """Indicates whether this chunk completes the stream referenced by `id`.""" - metadata: Dict[str, Any] = {} - """ - The metadata of the stream referenced by `id`. Metadata from the latest - chunk should override any metadata from previous chunks. See the docstring - on `BaseAgentMessage.metadata` for information. - """ - - @validator("metadata") - def validate_metadata(cls, v): - """Ensure metadata values are JSON serializable""" - try: - json.dumps(v) - return v - except TypeError as e: - raise ValueError(f"Metadata must be JSON serializable: {str(e)}") - - -class HumanChatMessage(BaseModel): - type: Literal["human"] = "human" - id: str - time: float - body: str - """The formatted body of the message to be rendered in the UI. Includes both - `prompt` and `selection`.""" - prompt: str - """The prompt typed into the chat input by the user.""" - selection: Optional[Selection] - """The selection included with the prompt, if any.""" - client: ChatClient - - -class ClearMessage(BaseModel): - type: Literal["clear"] = "clear" - targets: Optional[List[str]] = None - """ - Message IDs of the HumanChatMessage to delete an exchange at. - If not provided, this instructs the frontend to clear all messages. - """ - - -class PendingMessage(BaseModel): - type: Literal["pending"] = "pending" - id: str - time: float - body: str - reply_to: str - persona: Persona - ellipsis: bool = True - closed: bool = False - - -class ClosePendingMessage(BaseModel): - type: Literal["close-pending"] = "close-pending" - id: str - - -# the type of messages being broadcast to clients -ChatMessage = Union[ - AgentChatMessage, HumanChatMessage, AgentStreamMessage, AgentStreamChunkMessage -] - - -class ChatHistory(BaseModel): - """History of chat messages""" - - messages: List[ChatMessage] - pending_messages: List[PendingMessage] - - -class ConnectionMessage(BaseModel): - type: Literal["connection"] = "connection" - client_id: str - history: ChatHistory - - -Message = Union[ - ChatMessage, - ConnectionMessage, - ClearMessage, - PendingMessage, - ClosePendingMessage, -] - - class ListProvidersEntry(BaseModel): """Model provider with supported models and provider's authentication strategy diff --git a/packages/jupyter-ai/jupyter_ai/tests/test_context_providers.py b/packages/jupyter-ai/jupyter_ai/tests/test_context_providers.py index 132dcf871..bb2218e3a 100644 --- a/packages/jupyter-ai/jupyter_ai/tests/test_context_providers.py +++ b/packages/jupyter-ai/jupyter_ai/tests/test_context_providers.py @@ -4,15 +4,12 @@ import pytest from jupyter_ai.config_manager import ConfigManager from jupyter_ai.context_providers import FileContextProvider, find_commands -from jupyter_ai.history import BoundedChatHistory -from jupyter_ai.models import ChatClient, HumanChatMessage, Persona +from jupyter_ai.models import Persona +from jupyterlab_chat.models import Message @pytest.fixture -def human_chat_message() -> HumanChatMessage: - chat_client = ChatClient( - id=0, username="test", initials="test", name="test", 
display_name="test" - ) +def human_message() -> Message: prompt = ( "@file:test1.py @file @file:dir/test2.md test test\n" "@file:/dir/test3.png\n" @@ -22,13 +19,7 @@ def human_chat_message() -> HumanChatMessage: "@file:'test7.py test\"\n" # do not allow for mixed quotes "```\n@file:fail2.py\n```\n" # do not look within backticks ) - return HumanChatMessage( - id="test", - time=0, - body=prompt, - prompt=prompt, - client=chat_client, - ) + return Message(id="fake-message-uuid", time=0, body=prompt, sender="fake-user-uuid") @pytest.fixture @@ -39,17 +30,14 @@ def file_context_provider() -> FileContextProvider: log=logging.getLogger(__name__), config_manager=config_manager, model_parameters={}, - chat_history=[], - llm_chat_memory=BoundedChatHistory(k=2), root_dir="", preferred_dir="", dask_client_future=None, - chat_handlers={}, context_providers={}, ) -def test_find_instances(file_context_provider, human_chat_message): +def test_find_instances(file_context_provider, human_message): expected = [ "@file:test1.py", "@file:dir/test2.md", @@ -60,13 +48,12 @@ def test_find_instances(file_context_provider, human_chat_message): "@file:'test7.py", ] commands = [ - cmd.cmd - for cmd in find_commands(file_context_provider, human_chat_message.prompt) + cmd.cmd for cmd in find_commands(file_context_provider, human_message.body) ] assert commands == expected -def test_replace_prompt(file_context_provider, human_chat_message): +def test_replace_prompt(file_context_provider, human_message): expected = ( "'test1.py' @file 'dir/test2.md' test test\n" "'/dir/test3.png'\n" @@ -76,5 +63,5 @@ def test_replace_prompt(file_context_provider, human_chat_message): "'test7.py' test\"\n" "```\n@file:fail2.py\n```\n" # do not look within backticks ) - prompt = file_context_provider.replace_prompt(human_chat_message.prompt) + prompt = file_context_provider.replace_prompt(human_message.body) assert prompt == expected diff --git a/packages/jupyter-ai/jupyter_ai/tests/test_extension.py b/packages/jupyter-ai/jupyter_ai/tests/test_extension.py index 9ae52d8a0..8100ff670 100644 --- a/packages/jupyter-ai/jupyter_ai/tests/test_extension.py +++ b/packages/jupyter-ai/jupyter_ai/tests/test_extension.py @@ -4,7 +4,6 @@ import pytest from jupyter_ai.extension import AiExtension -from jupyter_ai.history import HUMAN_MSG_ID_KEY from jupyter_ai_magics import BaseProvider from langchain_core.messages import BaseMessage @@ -78,6 +77,7 @@ def ai_extension(jp_serverapp): (None, 9, 9), ], ) +@pytest.mark.skip("TODO v3: replace this with a unit test for YChatHistory") def test_max_chat_history(ai_extension, max_history, messages_to_add, expected_size): ai = ai_extension ai.default_max_chat_history = max_history @@ -86,7 +86,6 @@ def test_max_chat_history(ai_extension, max_history, messages_to_add, expected_s message = BaseMessage( content=f"Test message {i}", type="test", - additional_kwargs={HUMAN_MSG_ID_KEY: f"message-{i}"}, ) ai.settings["llm_chat_memory"].add_message(message) diff --git a/packages/jupyter-ai/jupyter_ai/tests/test_handlers.py b/packages/jupyter-ai/jupyter_ai/tests/test_handlers.py index 81108bdb7..c009aa918 100644 --- a/packages/jupyter-ai/jupyter_ai/tests/test_handlers.py +++ b/packages/jupyter-ai/jupyter_ai/tests/test_handlers.py @@ -1,27 +1,20 @@ import logging import os import stat -from typing import Optional +from typing import List, Optional from unittest import mock -import pytest from jupyter_ai.chat_handlers import DefaultChatHandler, learn from jupyter_ai.config_manager import ConfigManager from 
jupyter_ai.extension import DEFAULT_HELP_MESSAGE_TEMPLATE -from jupyter_ai.handlers import RootChatHandler -from jupyter_ai.history import BoundedChatHistory -from jupyter_ai.models import ( - ChatClient, - ClosePendingMessage, - HumanChatMessage, - Message, - PendingMessage, - Persona, -) +from jupyter_ai.history import YChatHistory +from jupyter_ai.models import Persona from jupyter_ai_magics import BaseProvider +from jupyterlab_chat.models import NewMessage +from jupyterlab_chat.ychat import YChat from langchain_community.llms import FakeListLLM -from tornado.httputil import HTTPServerRequest -from tornado.web import Application +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage +from pycrdt import Awareness, Doc class MockLearnHandler(learn.LearnChatHandler): @@ -49,28 +42,22 @@ def astream(self, *args, **kwargs): class TestDefaultChatHandler(DefaultChatHandler): def __init__(self, lm_provider=None, lm_provider_params=None): - self.request = HTTPServerRequest() - self.application = Application() - self.messages = [] - self.tasks = [] + # initialize dummy YDoc, YAwareness, YChat, and YChatHistory objects + ydoc = Doc() + awareness = Awareness(ydoc=ydoc) + self.ychat = YChat(ydoc=ydoc, awareness=awareness) + self.ychat_history = YChatHistory(ychat=self.ychat) + + # initialize & configure mock ConfigManager config_manager = mock.create_autospec(ConfigManager) config_manager.lm_provider = lm_provider or MockProvider config_manager.lm_provider_params = lm_provider_params or {"model_id": "model"} config_manager.persona = Persona(name="test", avatar_route="test") - def broadcast_message(message: Message) -> None: - self.messages.append(message) - - root_handler = mock.create_autospec(RootChatHandler) - root_handler.broadcast_message = broadcast_message - super().__init__( log=logging.getLogger(__name__), config_manager=config_manager, - root_chat_handlers={"root": root_handler}, model_parameters={}, - chat_history=[], - llm_chat_memory=BoundedChatHistory(k=2), root_dir="", preferred_dir="", dask_client_future=None, @@ -78,29 +65,45 @@ def broadcast_message(message: Message) -> None: chat_handlers={}, context_providers={}, message_interrupted={}, + llm_chat_memory=self.ychat_history, + ychat=self.ychat, ) + @property + def messages(self) -> List[BaseMessage]: + """ + Test helper method for getting the complete message history, including + the last message. + """ -class TestException(Exception): - pass + return self.ychat_history._convert_to_langchain_messages( + self.ychat.get_messages() + ) + async def send_human_message(self, body: str = "Hello!"): + """ + Test helper method that sends a human message to this chat handler. -@pytest.fixture -def chat_client(): - return ChatClient( - id=0, username="test", initials="test", name="test", display_name="test" - ) + Without the event subscription performed by `AiExtension`, the chat + handler is not called automatically, hence we trigger it manually by + invoking `on_message()`. + """ + id = self.ychat.add_message(NewMessage(body=body, sender="fake-user-uuid")) + message = self.ychat.get_message(id) + return await self.on_message(message) -@pytest.fixture -def human_chat_message(chat_client): - return HumanChatMessage( - id="test", - time=0, - body="test message", - prompt="test message", - client=chat_client, - ) + @property + def is_writing(self) -> bool: + """ + Returns whether Jupyternaut is indicating that it is still writing in + the chat, based on its Yjs awareness. 
+ """ + return self.ychat.awareness.get_local_state()["isWriting"] + + +class TestException(Exception): + pass def test_learn_index_permissions(tmp_path): @@ -112,7 +115,7 @@ def test_learn_index_permissions(tmp_path): assert stat.filemode(mode) == "drwx------" -async def test_default_closes_pending_on_success(human_chat_message): +async def test_default_stops_writing_on_success(): handler = TestDefaultChatHandler( lm_provider=MockProvider, lm_provider_params={ @@ -120,65 +123,24 @@ async def test_default_closes_pending_on_success(human_chat_message): "should_raise": False, }, ) - await handler.process_message(human_chat_message) - - # >=2 because there are additional stream messages that follow - assert len(handler.messages) >= 2 - assert isinstance(handler.messages[0], PendingMessage) - assert isinstance(handler.messages[1], ClosePendingMessage) - - -async def test_default_closes_pending_on_error(human_chat_message): - handler = TestDefaultChatHandler( - lm_provider=MockProvider, - lm_provider_params={ - "model_id": "model", - "should_raise": True, - }, - ) - with pytest.raises(TestException): - await handler.process_message(human_chat_message) - + await handler.send_human_message() assert len(handler.messages) == 2 - assert isinstance(handler.messages[0], PendingMessage) - assert isinstance(handler.messages[1], ClosePendingMessage) + assert isinstance(handler.messages[0], HumanMessage) + assert isinstance(handler.messages[1], AIMessage) + assert not handler.is_writing -async def test_sends_closing_message_at_most_once(human_chat_message): +async def test_default_stops_writing_on_error(): handler = TestDefaultChatHandler( lm_provider=MockProvider, lm_provider_params={ "model_id": "model", - "should_raise": False, + "should_raise": True, }, ) - message = handler.start_pending("Flushing Pipe Network") - assert len(handler.messages) == 1 - assert isinstance(handler.messages[0], PendingMessage) - assert not message.closed - - handler.close_pending(message) - assert len(handler.messages) == 2 - assert isinstance(handler.messages[1], ClosePendingMessage) - assert message.closed - - # closing an already closed message does not lead to another broadcast - handler.close_pending(message) + await handler.send_human_message() + print(handler.messages) assert len(handler.messages) == 2 - assert message.closed - - -# TODO -# import json - - -# async def test_get_example(jp_fetch): -# # When -# response = await jp_fetch("jupyter-ai", "get_example") - -# # Then -# assert response.code == 200 -# payload = json.loads(response.body) -# assert payload == { -# "data": "This is /jupyter-ai/get_example endpoint!" 
-# } + assert isinstance(handler.messages[0], HumanMessage) + assert isinstance(handler.messages[1], AIMessage) + assert not handler.is_writing diff --git a/packages/jupyter-ai/package.json b/packages/jupyter-ai/package.json index 814b4a90a..750a13293 100644 --- a/packages/jupyter-ai/package.json +++ b/packages/jupyter-ai/package.json @@ -61,10 +61,9 @@ "dependencies": { "@emotion/react": "^11.10.5", "@emotion/styled": "^11.10.5", - "@jupyter/collaboration": "^1", + "@jupyter/chat": "^0.7.1", "@jupyterlab/application": "^4.2.0", "@jupyterlab/apputils": "^4.2.0", - "@jupyterlab/cells": "^4.2.0", "@jupyterlab/codeeditor": "^4.2.0", "@jupyterlab/codemirror": "^4.2.0", "@jupyterlab/completer": "^4.2.0", diff --git a/packages/jupyter-ai/pyproject.toml b/packages/jupyter-ai/pyproject.toml index c9d1b5d53..164f423f3 100644 --- a/packages/jupyter-ai/pyproject.toml +++ b/packages/jupyter-ai/pyproject.toml @@ -35,6 +35,7 @@ dependencies = [ "typing_extensions>=4.5.0", "traitlets>=5.0", "deepmerge>=2.0,<3", + "jupyterlab-chat>=0.7.1,<1.0.0", ] dynamic = ["version", "description", "authors", "urls", "keywords"] diff --git a/packages/jupyter-ai/schema/plugin.json b/packages/jupyter-ai/schema/plugin.json index 78804b5c6..37e0a4671 100644 --- a/packages/jupyter-ai/schema/plugin.json +++ b/packages/jupyter-ai/schema/plugin.json @@ -12,6 +12,27 @@ "preventDefault": false } ], + "jupyter.lab.menus": { + "main": [ + { + "id": "jp-mainmenu-settings", + "items": [ + { + "type": "separator", + "rank": 110 + }, + { + "command": "jupyter-ai:open-settings", + "rank": 110 + }, + { + "type": "separator", + "rank": 110 + } + ] + } + ] + }, "additionalProperties": false, "type": "object" } diff --git a/packages/jupyter-ai/src/chat_handler.ts b/packages/jupyter-ai/src/chat_handler.ts deleted file mode 100644 index e1b1e332c..000000000 --- a/packages/jupyter-ai/src/chat_handler.ts +++ /dev/null @@ -1,270 +0,0 @@ -import { IDisposable } from '@lumino/disposable'; -import { ServerConnection } from '@jupyterlab/services'; -import { URLExt } from '@jupyterlab/coreutils'; -import { Signal } from '@lumino/signaling'; - -import { AiService } from './handler'; - -const CHAT_SERVICE_URL = 'api/ai/chats'; - -export class ChatHandler implements IDisposable { - /** - * The server settings used to make API requests. - */ - readonly serverSettings: ServerConnection.ISettings; - - /** - * ID of the connection. Requires `await initialize()`. - */ - id = ''; - - /** - * Create a new chat handler. - */ - constructor(options: AiService.IOptions = {}) { - this.serverSettings = - options.serverSettings ?? ServerConnection.makeSettings(); - } - - /** - * Initializes the WebSocket connection to the Chat backend. Promise is - * resolved when server acknowledges connection and sends the client ID. This - * must be awaited before calling any other method. - */ - public async initialize(): Promise { - await this._initialize(); - } - - /** - * Sends a message across the WebSocket. Promise resolves to the message ID - * when the server sends the same message back, acknowledging receipt. - */ - public sendMessage(message: AiService.Request): Promise { - return new Promise(resolve => { - this._socket?.send(JSON.stringify(message)); - this._sendResolverQueue.push(resolve); - }); - } - - /** - * Returns a Promise that resolves to the agent's reply, given the message ID - * of the human message. Should only be called once per message. 
- */ - public replyFor(messageId: string): Promise { - return new Promise(resolve => { - this._replyForResolverDict[messageId] = resolve; - }); - } - - public addListener(handler: (message: AiService.Message) => void): void { - this._listeners.push(handler); - } - - public removeListener(handler: (message: AiService.Message) => void): void { - const index = this._listeners.indexOf(handler); - if (index > -1) { - this._listeners.splice(index, 1); - } - } - - /** - * Whether the chat handler is disposed. - */ - get isDisposed(): boolean { - return this._isDisposed; - } - - /** - * Dispose the chat handler. - */ - dispose(): void { - if (this.isDisposed) { - return; - } - this._isDisposed = true; - this._listeners = []; - - // Clean up socket. - const socket = this._socket; - if (socket) { - this._socket = null; - socket.onopen = () => undefined; - socket.onerror = () => undefined; - socket.onmessage = () => undefined; - socket.onclose = () => undefined; - socket.close(); - } - } - - get history(): AiService.ChatHistory { - return { - messages: this._messages, - pending_messages: this._pendingMessages - }; - } - - get historyChanged(): Signal { - return this._historyChanged; - } - - private _onMessage(newMessage: AiService.Message): void { - // resolve promise from `sendMessage()` - if (newMessage.type === 'human' && newMessage.client.id === this.id) { - this._sendResolverQueue.shift()?.(newMessage.id); - } - - // resolve promise from `replyFor()` if it exists - if ( - newMessage.type === 'agent' && - newMessage.reply_to in this._replyForResolverDict - ) { - this._replyForResolverDict[newMessage.reply_to](newMessage); - delete this._replyForResolverDict[newMessage.reply_to]; - } - - // call listeners in serial - this._listeners.forEach(listener => listener(newMessage)); - - // append message to chat history. this block should always set `_messages` - // or `_pendingMessages` to a new array instance rather than modifying - // in-place so consumer React components re-render. - switch (newMessage.type) { - case 'connection': - break; - case 'clear': - if (newMessage.targets) { - const targets = newMessage.targets; - this._messages = this._messages.filter( - msg => - !targets.includes(msg.id) && - !('reply_to' in msg && targets.includes(msg.reply_to)) - ); - this._pendingMessages = this._pendingMessages.filter( - msg => !targets.includes(msg.reply_to) - ); - } else { - this._messages = []; - this._pendingMessages = []; - } - break; - case 'pending': - this._pendingMessages = [...this._pendingMessages, newMessage]; - break; - case 'close-pending': - this._pendingMessages = this._pendingMessages.filter( - p => p.id !== newMessage.id - ); - break; - case 'agent-stream-chunk': { - const target = newMessage.id; - const streamMessage = this._messages.find( - (m): m is AiService.AgentStreamMessage => - m.type === 'agent-stream' && m.id === target - ); - if (!streamMessage) { - console.error( - `Received stream chunk with ID ${target}, but no agent-stream message with that ID exists. ` + - 'Ignoring this stream chunk.' 
- ); - break; - } - - streamMessage.body += newMessage.content; - streamMessage.metadata = newMessage.metadata; - if (newMessage.stream_complete) { - streamMessage.complete = true; - } - this._messages = [...this._messages]; - break; - } - default: - // human or agent chat message - this._messages = [...this._messages, newMessage]; - break; - } - - // finally, trigger `historyChanged` signal - this._historyChanged.emit({ - messages: this._messages, - pending_messages: this._pendingMessages - }); - } - - /** - * Queue of Promise resolvers pushed onto by `send()` - */ - private _sendResolverQueue: ((value: string) => void)[] = []; - - /** - * Dictionary mapping message IDs to Promise resolvers, inserted into by - * `replyFor()`. - */ - private _replyForResolverDict: Record< - string, - (value: AiService.AgentChatMessage) => void - > = {}; - - private _onClose(e: CloseEvent, reject: any) { - reject(new Error('Chat UI websocket disconnected')); - console.error('Chat UI websocket disconnected'); - // only attempt re-connect if there was an abnormal closure - // WebSocket status codes defined in RFC 6455: https://www.rfc-editor.org/rfc/rfc6455.html#section-7.4.1 - if (e.code === 1006) { - const delaySeconds = 1; - console.info(`Will try to reconnect in ${delaySeconds} s.`); - setTimeout(async () => await this._initialize(), delaySeconds * 1000); - } - } - - private _initialize(): Promise { - return new Promise((resolve, reject) => { - if (this.isDisposed) { - return; - } - console.log('Creating a new websocket connection for chat...'); - const { token, WebSocket, wsUrl } = this.serverSettings; - const url = - URLExt.join(wsUrl, CHAT_SERVICE_URL) + - (token ? `?token=${encodeURIComponent(token)}` : ''); - - const socket = (this._socket = new WebSocket(url)); - socket.onclose = e => this._onClose(e, reject); - socket.onerror = e => reject(e); - socket.onmessage = msg => - msg.data && this._onMessage(JSON.parse(msg.data)); - - const listenForConnection = (message: AiService.Message) => { - if (message.type !== 'connection') { - return; - } - this.id = message.client_id; - - // initialize chat history from `ConnectionMessage` - this._messages = message.history.messages; - this._pendingMessages = message.history.pending_messages; - - resolve(); - this.removeListener(listenForConnection); - }; - - this.addListener(listenForConnection); - }); - } - - private _isDisposed = false; - private _socket: WebSocket | null = null; - private _listeners: ((msg: any) => void)[] = []; - - /** - * The list of chat messages - */ - private _messages: AiService.ChatMessage[] = []; - private _pendingMessages: AiService.PendingMessage[] = []; - - /** - * Signal for when the chat history is changed. Components rendering the chat - * history should subscribe to this signal and update their state when this - * signal is triggered. 
- */ - private _historyChanged = new Signal(this); -} diff --git a/packages/jupyter-ai/src/completions/plugin.ts b/packages/jupyter-ai/src/completions/plugin.ts index 4487b2752..bcccd5984 100644 --- a/packages/jupyter-ai/src/completions/plugin.ts +++ b/packages/jupyter-ai/src/completions/plugin.ts @@ -8,7 +8,7 @@ import { IEditorLanguageRegistry, IEditorLanguage } from '@jupyterlab/codemirror'; -import { getEditor } from '../selection-watcher'; +import { getEditor } from '../utils'; import { IJaiStatusItem, IJaiCompletionProvider } from '../tokens'; import { displayName, JaiInlineProvider } from './provider'; import { CompletionWebsocketHandler } from './handler'; diff --git a/packages/jupyter-ai/src/components/chat-input.tsx b/packages/jupyter-ai/src/components/chat-input.tsx deleted file mode 100644 index 1e19f7774..000000000 --- a/packages/jupyter-ai/src/components/chat-input.tsx +++ /dev/null @@ -1,403 +0,0 @@ -import React, { useEffect, useRef, useState } from 'react'; - -import { - Autocomplete, - Box, - SxProps, - TextField, - Theme, - InputAdornment, - Typography -} from '@mui/material'; -import { - Download, - FindInPage, - Help, - MoreHoriz, - MenuBook, - School, - HideSource, - AutoFixNormal -} from '@mui/icons-material'; -import { ISignal } from '@lumino/signaling'; - -import { AiService } from '../handler'; -import { SendButton, SendButtonProps } from './chat-input/send-button'; -import { useActiveCellContext } from '../contexts/active-cell-context'; -import { ChatHandler } from '../chat_handler'; - -type ChatInputProps = { - chatHandler: ChatHandler; - focusInputSignal: ISignal; - sendWithShiftEnter: boolean; - sx?: SxProps; - /** - * Name of the persona, set by the selected chat model. This defaults to - * `'Jupyternaut'`, but can differ for custom providers. - */ - personaName: string; - /** - * Whether the backend is streaming a reply to any message sent by the current - * user. - */ - streamingReplyHere: boolean; -}; - -/** - * List of icons per slash command, shown in the autocomplete popup. - * - * This list of icons should eventually be made configurable. However, it is - * unclear whether custom icons should be defined within a Lumino plugin (in the - * frontend) or served from a static server route (in the backend). - */ -const DEFAULT_COMMAND_ICONS: Record = { - '/ask': , - '/clear': , - '/export': , - '/fix': , - '/generate': , - '/help': , - '/learn': , - '@file': , - unknown: -}; - -/** - * Renders an option shown in the slash command autocomplete. - */ -function renderAutocompleteOption( - optionProps: React.HTMLAttributes, - option: AiService.AutocompleteOption -): JSX.Element { - const icon = - option.id in DEFAULT_COMMAND_ICONS - ? DEFAULT_COMMAND_ICONS[option.id] - : DEFAULT_COMMAND_ICONS.unknown; - - return ( -
- <li {...optionProps}> - {icon} - {option.label} - {' — ' + option.description} - </li>
  • - ); -} - -export function ChatInput(props: ChatInputProps): JSX.Element { - const [input, setInput] = useState(''); - const [autocompleteOptions, setAutocompleteOptions] = useState< - AiService.AutocompleteOption[] - >([]); - const [autocompleteCommandOptions, setAutocompleteCommandOptions] = useState< - AiService.AutocompleteOption[] - >([]); - const [autocompleteArgOptions, setAutocompleteArgOptions] = useState< - AiService.AutocompleteOption[] - >([]); - const [currSlashCommand, setCurrSlashCommand] = useState(null); - const activeCell = useActiveCellContext(); - - /** - * Effect: fetch the list of available slash commands from the backend on - * initial mount to populate the slash command autocomplete. - */ - useEffect(() => { - async function getAutocompleteCommandOptions() { - const response = await AiService.listAutocompleteOptions(); - setAutocompleteCommandOptions(response.options); - } - getAutocompleteCommandOptions(); - }, []); - - useEffect(() => { - async function getAutocompleteArgOptions() { - let options: AiService.AutocompleteOption[] = []; - const lastWord = getLastWord(input); - if (lastWord.includes(':')) { - const id = lastWord.split(':', 1)[0]; - // get option that matches the command - const option = autocompleteCommandOptions.find( - option => option.id === id - ); - if (option) { - const response = await AiService.listAutocompleteArgOptions(lastWord); - options = response.options; - } - } - setAutocompleteArgOptions(options); - } - getAutocompleteArgOptions(); - }, [autocompleteCommandOptions, input]); - - // Combine the fixed options with the argument options - useEffect(() => { - if (autocompleteArgOptions.length > 0) { - setAutocompleteOptions(autocompleteArgOptions); - } else { - setAutocompleteOptions(autocompleteCommandOptions); - } - }, [autocompleteCommandOptions, autocompleteArgOptions]); - - // whether any option is highlighted in the autocomplete - const [highlighted, setHighlighted] = useState(false); - - // controls whether the autocomplete is open - const [open, setOpen] = useState(false); - - // store reference to the input element to enable focusing it easily - const inputRef = useRef(); - - /** - * Effect: connect the signal emitted on input focus request. - */ - useEffect(() => { - const focusInputElement = () => { - if (inputRef.current) { - inputRef.current.focus(); - } - }; - props.focusInputSignal.connect(focusInputElement); - return () => { - props.focusInputSignal.disconnect(focusInputElement); - }; - }, []); - - /** - * Effect: Open the autocomplete when the user types a slash into an empty - * chat input. Close the autocomplete when the user clears the chat input. - */ - useEffect(() => { - if (filterAutocompleteOptions(autocompleteOptions, input).length > 0) { - setOpen(true); - return; - } - - if (input === '') { - setOpen(false); - return; - } - }, [input]); - - /** - * Effect: Set current slash command - */ - useEffect(() => { - const matchedSlashCommand = input.match(/^\s*\/\w+/); - setCurrSlashCommand(matchedSlashCommand && matchedSlashCommand[0]); - }, [input]); - - /** - * Effect: ensure that the `highlighted` is never `true` when `open` is - * `false`. 
- * - * For context: https://github.com/jupyterlab/jupyter-ai/issues/849 - */ - useEffect(() => { - if (!open && highlighted) { - setHighlighted(false); - } - }, [open, highlighted]); - - function onSend(selection?: AiService.Selection) { - const prompt = input; - setInput(''); - - // if the current slash command is `/fix`, we always include a code cell - // with error output in the selection. - if (currSlashCommand === '/fix') { - const cellWithError = activeCell.manager.getContent(true); - if (!cellWithError) { - return; - } - - props.chatHandler.sendMessage({ - prompt, - selection: { ...cellWithError, type: 'cell-with-error' } - }); - return; - } - - // otherwise, send a ChatRequest with the prompt and selection - props.chatHandler.sendMessage({ prompt, selection }); - } - - const inputExists = !!input.trim(); - function handleKeyDown(event: React.KeyboardEvent) { - if (event.key !== 'Enter') { - return; - } - - // do not send the message if the user was just trying to select a suggested - // slash command from the Autocomplete component. - if (highlighted) { - return; - } - - if (!inputExists) { - event.stopPropagation(); - event.preventDefault(); - return; - } - - if ( - event.key === 'Enter' && - ((props.sendWithShiftEnter && event.shiftKey) || - (!props.sendWithShiftEnter && !event.shiftKey)) - ) { - onSend(); - event.stopPropagation(); - event.preventDefault(); - } - } - - // Set the helper text based on whether Shift+Enter is used for sending. - const helperText = props.sendWithShiftEnter ? ( - - Press Shift+Enter to send message - - ) : ( - - Press Shift+Enter to add a new line - - ); - - const sendButtonProps: SendButtonProps = { - onSend, - onStop: () => { - props.chatHandler.sendMessage({ - type: 'stop' - }); - }, - streamingReplyHere: props.streamingReplyHere, - sendWithShiftEnter: props.sendWithShiftEnter, - inputExists, - activeCellHasError: activeCell.hasError, - currSlashCommand - }; - - function filterAutocompleteOptions( - options: AiService.AutocompleteOption[], - inputValue: string - ): AiService.AutocompleteOption[] { - const lastWord = getLastWord(inputValue); - if (lastWord === '') { - return []; - } - const isStart = lastWord === inputValue; - return options.filter( - option => - option.label.startsWith(lastWord) && (!option.only_start || isStart) - ); - } - - return ( - - { - return filterAutocompleteOptions(options, inputValue); - }} - onChange={(_, option) => { - const value = typeof option === 'string' ? option : option.label; - let matchLength = 0; - for (let i = 1; i <= value.length; i++) { - if (input.endsWith(value.slice(0, i))) { - matchLength = i; - } - } - setInput(input + value.slice(matchLength)); - }} - onInputChange={(_, newValue: string) => { - setInput(newValue); - }} - onHighlightChange={ - /** - * On highlight change: set `highlighted` to whether an option is - * highlighted by the user. 
- */ - (_, highlightedOption) => { - setHighlighted(!!highlightedOption); - } - } - onClose={(_, reason) => { - if (reason !== 'selectOption' || input.endsWith(' ')) { - setOpen(false); - } - }} - // set this to an empty string to prevent the last selected slash - // command from being shown in blue - value="" - open={open} - options={autocompleteOptions} - // hide default extra right padding in the text field - disableClearable - // ensure the autocomplete popup always renders on top - componentsProps={{ - popper: { - placement: 'top' - }, - paper: { - sx: { - border: '1px solid lightgray' - } - } - }} - renderOption={renderAutocompleteOption} - ListboxProps={{ - sx: { - '& .MuiAutocomplete-option': { - padding: 2 - } - } - }} - renderInput={params => ( - - - - ) - }} - FormHelperTextProps={{ - sx: { marginLeft: 'auto', marginRight: 0 } - }} - helperText={input.length > 2 ? helperText : ' '} - /> - )} - /> - - ); -} - -function getLastWord(input: string): string { - return input.split(/(? unknown; - onStop: () => unknown; - sendWithShiftEnter: boolean; - currSlashCommand: string | null; - inputExists: boolean; - activeCellHasError: boolean; - /** - * Whether the backend is streaming a reply to any message sent by the current - * user. - */ - streamingReplyHere: boolean; -}; - -export function SendButton(props: SendButtonProps): JSX.Element { - const [menuAnchorEl, setMenuAnchorEl] = useState(null); - const [menuOpen, setMenuOpen] = useState(false); - const [textSelection] = useSelectionContext(); - const activeCell = useActiveCellContext(); - - const openMenu = useCallback((el: HTMLElement | null) => { - setMenuAnchorEl(el); - setMenuOpen(true); - }, []); - - const closeMenu = useCallback(() => { - setMenuOpen(false); - }, []); - - let action: 'send' | 'stop' | 'fix' = props.inputExists - ? 'send' - : props.streamingReplyHere - ? 'stop' - : 'send'; - if (props.currSlashCommand === '/fix') { - action = 'fix'; - } - - let disabled = false; - if (action === 'send' && !props.inputExists) { - disabled = true; - } - if (action === 'fix' && !props.activeCellHasError) { - disabled = true; - } - - const includeSelectionDisabled = !(activeCell.exists || textSelection); - - const includeSelectionTooltip = - action === 'fix' - ? FIX_TOOLTIP - : textSelection - ? `${textSelection.text.split('\n').length} lines selected` - : activeCell.exists - ? 'Code from 1 active cell' - : 'No selection or active cell'; - - const defaultTooltip = props.sendWithShiftEnter - ? 'Send message (SHIFT+ENTER)' - : 'Send message (ENTER)'; - - const tooltip = - action === 'fix' && !props.activeCellHasError - ? FIX_TOOLTIP - : action === 'stop' - ? 'Stop streaming' - : !props.inputExists - ? 'Message must not be empty' - : defaultTooltip; - - function sendWithSelection() { - // if the current slash command is `/fix`, `props.onSend()` should always - // include the code cell with error output, so the `selection` argument does - // not need to be defined. - if (action === 'fix') { - props.onSend(); - closeMenu(); - return; - } - - // otherwise, parse the text selection or active cell, with the text - // selection taking precedence. - if (textSelection?.text) { - props.onSend({ - type: 'text', - source: textSelection.text - }); - closeMenu(); - return; - } - - if (activeCell.exists) { - props.onSend({ - type: 'cell', - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - source: activeCell.manager.getContent(false)!.source - }); - closeMenu(); - return; - } - } - - return ( - - (action === 'stop' ? 
props.onStop() : props.onSend())} - disabled={disabled} - tooltip={tooltip} - buttonProps={{ - size: 'small', - title: defaultTooltip, - variant: 'contained' - }} - sx={{ - minWidth: 'unset', - borderRadius: '2px 0px 0px 2px' - }} - > - {action === 'stop' ? : } - - { - openMenu(e.currentTarget); - }} - disabled={disabled} - tooltip="" - buttonProps={{ - variant: 'contained', - onKeyDown: e => { - if (e.key !== 'Enter' && e.key !== ' ') { - return; - } - openMenu(e.currentTarget); - // stopping propagation of this event prevents the prompt from being - // sent when the dropdown button is selected and clicked via 'Enter'. - e.stopPropagation(); - } - }} - sx={{ - minWidth: 'unset', - padding: '4px 0px', - borderRadius: '0px 2px 2px 0px', - borderLeft: '1px solid white' - }} - > - - - - { - sendWithSelection(); - // prevent sending second message with no selection - e.stopPropagation(); - }} - disabled={includeSelectionDisabled} - > - - - Send message with selection - - {includeSelectionTooltip} - - - - - - ); -} diff --git a/packages/jupyter-ai/src/components/chat-messages.tsx b/packages/jupyter-ai/src/components/chat-messages.tsx deleted file mode 100644 index 5c4286f8f..000000000 --- a/packages/jupyter-ai/src/components/chat-messages.tsx +++ /dev/null @@ -1,242 +0,0 @@ -import React, { useState, useEffect } from 'react'; - -import { Avatar, Box, Typography } from '@mui/material'; -import type { SxProps, Theme } from '@mui/material'; -import { IRenderMimeRegistry } from '@jupyterlab/rendermime'; -import { ServerConnection } from '@jupyterlab/services'; -// TODO: delete jupyternaut from frontend package - -import { AiService } from '../handler'; -import { RendermimeMarkdown } from './rendermime-markdown'; -import { useCollaboratorsContext } from '../contexts/collaborators-context'; -import { ChatMessageMenu } from './chat-messages/chat-message-menu'; -import { ChatMessageDelete } from './chat-messages/chat-message-delete'; -import { ChatHandler } from '../chat_handler'; -import { IJaiMessageFooter } from '../tokens'; - -type ChatMessagesProps = { - rmRegistry: IRenderMimeRegistry; - messages: AiService.ChatMessage[]; - chatHandler: ChatHandler; - messageFooter: IJaiMessageFooter | null; -}; - -type ChatMessageHeaderProps = { - message: AiService.ChatMessage; - chatHandler: ChatHandler; - timestamp: string; - sx?: SxProps; -}; - -function sortMessages( - messages: AiService.ChatMessage[] -): AiService.ChatMessage[] { - const timestampsById: Record = {}; - for (const message of messages) { - timestampsById[message.id] = message.time; - } - - return [...messages].sort((a, b) => { - /** - * Use the *origin timestamp* as the primary sort key. This ensures that - * each agent reply is grouped with the human message that triggered it. - * - * - If the message is from an agent, the origin timestamp is the timestamp - * of the message it is replying to. - * - * - Otherwise, the origin timestamp is the *message timestamp*, i.e. - * `message.time` itself. - */ - - const aOriginTimestamp = - 'reply_to' in a && a.reply_to in timestampsById - ? timestampsById[a.reply_to] - : a.time; - const bOriginTimestamp = - 'reply_to' in b && b.reply_to in timestampsById - ? timestampsById[b.reply_to] - : b.time; - - /** - * Use the message timestamp as a secondary sort key. This ensures that each - * agent reply is shown after the human message that triggered it. 
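- *
- * For example: given a human message H1 at t=1, a second human message H2
- * at t=3, and an agent reply A1 with reply_to=H1 at t=5, the origin
- * timestamps are 1, 3, and 1 respectively, so the sorted order is
- * H1, A1, H2 rather than the strict time order H1, H2, A1.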
- */ - const aMessageTimestamp = a.time; - const bMessageTimestamp = b.time; - - return ( - aOriginTimestamp - bOriginTimestamp || - aMessageTimestamp - bMessageTimestamp - ); - }); -} - -export function ChatMessageHeader(props: ChatMessageHeaderProps): JSX.Element { - const collaborators = useCollaboratorsContext(); - - const sharedStyles: SxProps = { - height: '24px', - width: '24px' - }; - - let avatar: JSX.Element; - if (props.message.type === 'human') { - const bgcolor = collaborators?.[props.message.client.username]?.color; - avatar = ( - - - {props.message.client.initials} - - - ); - } else { - const baseUrl = ServerConnection.makeSettings().baseUrl; - const avatar_url = baseUrl + props.message.persona.avatar_route; - avatar = ( - - - - ); - } - - const name = - props.message.type === 'human' - ? props.message.client.display_name - : props.message.persona.name; - - const shouldShowMenu = - props.message.type === 'agent' || - (props.message.type === 'agent-stream' && props.message.complete); - const shouldShowDelete = props.message.type === 'human'; - - return ( - :not(:last-child)': { - marginRight: 3 - }, - ...props.sx - }} - > - {avatar} - - - {name} - - - - {props.timestamp} - - {shouldShowMenu && ( - - )} - {shouldShowDelete && ( - - )} - - - - ); -} - -export function ChatMessages(props: ChatMessagesProps): JSX.Element { - const [timestamps, setTimestamps] = useState>({}); - const [sortedMessages, setSortedMessages] = useState( - [] - ); - - /** - * Effect: update cached timestamp strings upon receiving a new message. - */ - useEffect(() => { - const newTimestamps: Record = { ...timestamps }; - let timestampAdded = false; - - for (const message of props.messages) { - if (!(message.id in newTimestamps)) { - // Use the browser's default locale - newTimestamps[message.id] = new Date(message.time * 1000) // Convert message time to milliseconds - .toLocaleTimeString([], { - hour: 'numeric', // Avoid leading zero for hours; we don't want "03:15 PM" - minute: '2-digit' - }); - - timestampAdded = true; - } - } - if (timestampAdded) { - setTimestamps(newTimestamps); - } - }, [props.messages]); - - useEffect(() => { - setSortedMessages(sortMessages(props.messages)); - }, [props.messages]); - - return ( - :not(:last-child)': { - borderBottom: '1px solid var(--jp-border-color2)' - } - }} - > - {sortedMessages.map(message => { - return ( - - - - {props.messageFooter && ( - - )} - - ); - })} - - ); -} diff --git a/packages/jupyter-ai/src/components/chat-messages/chat-message-delete.tsx b/packages/jupyter-ai/src/components/chat-messages/chat-message-delete.tsx deleted file mode 100644 index d6fc691bd..000000000 --- a/packages/jupyter-ai/src/components/chat-messages/chat-message-delete.tsx +++ /dev/null @@ -1,31 +0,0 @@ -import React from 'react'; -import { SxProps } from '@mui/material'; -import { Close } from '@mui/icons-material'; - -import { AiService } from '../../handler'; -import { ChatHandler } from '../../chat_handler'; -import { TooltippedIconButton } from '../mui-extras/tooltipped-icon-button'; - -type DeleteButtonProps = { - message: AiService.ChatMessage; - chatHandler: ChatHandler; - sx?: SxProps; -}; - -export function ChatMessageDelete(props: DeleteButtonProps): JSX.Element { - const request: AiService.ClearRequest = { - type: 'clear', - target: props.message.id - }; - return ( - props.chatHandler.sendMessage(request)} - sx={props.sx} - tooltip="Delete this exchange" - > - - - ); -} - -export default ChatMessageDelete; diff --git 
a/packages/jupyter-ai/src/components/chat-messages/chat-message-menu.tsx b/packages/jupyter-ai/src/components/chat-messages/chat-message-menu.tsx deleted file mode 100644 index a10c061ee..000000000 --- a/packages/jupyter-ai/src/components/chat-messages/chat-message-menu.tsx +++ /dev/null @@ -1,94 +0,0 @@ -import React, { useRef, useState } from 'react'; - -import { IconButton, Menu, MenuItem, SxProps } from '@mui/material'; -import { MoreVert } from '@mui/icons-material'; -import { - addAboveIcon, - addBelowIcon, - copyIcon -} from '@jupyterlab/ui-components'; - -import { AiService } from '../../handler'; -import { CopyStatus, useCopy } from '../../hooks/use-copy'; -import { useReplace } from '../../hooks/use-replace'; -import { useActiveCellContext } from '../../contexts/active-cell-context'; -import { replaceCellIcon } from '../../icons'; - -type ChatMessageMenuProps = { - message: AiService.ChatMessage; - - /** - * Styles applied to the menu icon button. - */ - sx?: SxProps; -}; - -export function ChatMessageMenu(props: ChatMessageMenuProps): JSX.Element { - const menuButtonRef = useRef(null); - const { copy, copyLabel } = useCopy({ - labelOverrides: { [CopyStatus.None]: 'Copy response' } - }); - const { replace, replaceLabel } = useReplace(); - const activeCell = useActiveCellContext(); - - const [menuOpen, setMenuOpen] = useState(false); - - const [anchorEl, setAnchorEl] = React.useState(null); - const openMenu = (event: React.MouseEvent) => { - setAnchorEl(event.currentTarget); - setMenuOpen(true); - }; - - const insertAboveLabel = activeCell.exists - ? 'Insert response above active cell' - : 'Insert response above active cell (no active cell)'; - - const insertBelowLabel = activeCell.exists - ? 'Insert response below active cell' - : 'Insert response below active cell (no active cell)'; - - const menuItemSx: SxProps = { - display: 'flex', - alignItems: 'center', - gap: '8px', - lineHeight: 0 - }; - - return ( - <> - - - - setMenuOpen(false)} - anchorEl={anchorEl} - > - copy(props.message.body)} sx={menuItemSx}> - - {copyLabel} - - replace(props.message.body)} sx={menuItemSx}> - - {replaceLabel} - - activeCell.manager.insertAbove(props.message.body)} - disabled={!activeCell.exists} - sx={menuItemSx} - > - - {insertAboveLabel} - - activeCell.manager.insertBelow(props.message.body)} - disabled={!activeCell.exists} - sx={menuItemSx} - > - - {insertBelowLabel} - - - - ); -} diff --git a/packages/jupyter-ai/src/components/chat-settings.tsx b/packages/jupyter-ai/src/components/chat-settings.tsx index c32eb46fd..8d936c46a 100644 --- a/packages/jupyter-ai/src/components/chat-settings.tsx +++ b/packages/jupyter-ai/src/components/chat-settings.tsx @@ -26,7 +26,7 @@ import { ExistingApiKeys } from './settings/existing-api-keys'; import { IRenderMimeRegistry } from '@jupyterlab/rendermime'; import { minifyUpdate } from './settings/minify'; import { useStackingAlert } from './mui-extras/stacking-alert'; -import { RendermimeMarkdown } from './rendermime-markdown'; +import { RendermimeMarkdown } from './settings/rendermime-markdown'; import { IJaiCompletionProvider } from '../tokens'; import { getProviderId, getModelLocalId } from '../utils'; @@ -34,6 +34,9 @@ type ChatSettingsProps = { rmRegistry: IRenderMimeRegistry; completionProvider: IJaiCompletionProvider | null; openInlineCompleterSettings: () => void; + // The temporary input options, should be removed when jupyterlab chat is + // the only chat. 
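+ // A minimal usage sketch (hypothetical call site): rendering
+ //   <ChatSettings {...props} inputOptions={false} />
+ // hides the Enter-behavior radio group below, while omitting the prop
+ // keeps it visible via `props.inputOptions ?? true`.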
+ inputOptions?: boolean; }; /** @@ -372,7 +375,6 @@ export function ChatSettings(props: ChatSettingsProps): JSX.Element { )} {lmGlobalId && ( @@ -488,7 +490,6 @@ export function ChatSettings(props: ChatSettingsProps): JSX.Element { )} {clmGlobalId && ( @@ -534,36 +535,42 @@ export function ChatSettings(props: ChatSettingsProps): JSX.Element { onSuccess={server.refetchApiKeys} /> - {/* Input */} -

    Input

    - - - When writing a message, press Enter to: - - { - setSendWse(e.target.value === 'newline'); - }} - > - } - label="Send the message" - /> - } - label={ - <> - Start a new line (use Shift+Enter to send) - - } - /> - - + {/* Input - to remove when jupyterlab chat is the only chat */} + {(props.inputOptions ?? true) && ( + <> +

    Input

    + + + When writing a message, press Enter to: + + { + setSendWse(e.target.value === 'newline'); + }} + > + } + label="Send the message" + /> + } + label={ + <> + Start a new line (use Shift+Enter to + send) + + } + /> + + + + )} + - - - ); - } - - // set of IDs of messages sent by the current user. - const myHumanMessageIds = new Set( - messages - .filter( - m => m.type === 'human' && m.client.username === user?.identity.username - ) - .map(m => m.id) - ); - - // whether the backend is currently streaming a reply to any message sent by - // the current user. - const streamingReplyHere = messages.some( - m => - m.type === 'agent-stream' && - myHumanMessageIds.has(m.reply_to) && - !m.complete - ); - - return ( - <> - - - - - - - ); -} - -export type ChatProps = { - selectionWatcher: SelectionWatcher; - chatHandler: ChatHandler; - globalAwareness: Awareness | null; - themeManager: IThemeManager | null; - rmRegistry: IRenderMimeRegistry; - chatView?: ChatView; - completionProvider: IJaiCompletionProvider | null; - openInlineCompleterSettings: () => void; - activeCellManager: ActiveCellManager; - focusInputSignal: ISignal; - messageFooter: IJaiMessageFooter | null; - telemetryHandler: IJaiTelemetryHandler | null; - userManager: User.IManager; -}; - -enum ChatView { - Chat, - Settings -} - -export function Chat(props: ChatProps): JSX.Element { - const [view, setView] = useState(props.chatView || ChatView.Chat); - const [showWelcomeMessage, setShowWelcomeMessage] = useState(false); - - const openSettingsView = () => { - setShowWelcomeMessage(false); - setView(ChatView.Settings); - }; - - return ( - - - - - - - =4.3.0. - // See: https://jupyterlab.readthedocs.io/en/latest/extension/extension_migration.html#css-styling - className="jp-ThemedContainer" - // root box should not include padding as it offsets the vertical - // scrollbar to the left - sx={{ - width: '100%', - height: '100%', - boxSizing: 'border-box', - background: 'var(--jp-layout-color0)', - display: 'flex', - flexDirection: 'column' - }} - > - {/* top bar */} - - {view !== ChatView.Chat ? ( - setView(ChatView.Chat)}> - - - ) : ( - - )} - {view === ChatView.Chat ? ( - - {!showWelcomeMessage && ( - - props.chatHandler.sendMessage({ type: 'clear' }) - } - tooltip="New chat" - > - - - )} - openSettingsView()}> - - - - ) : ( - - )} - - {/* body */} - {view === ChatView.Chat && ( - - )} - {view === ChatView.Settings && ( - - )} - - - - - - - - ); -} diff --git a/packages/jupyter-ai/src/components/code-blocks/code-toolbar.tsx b/packages/jupyter-ai/src/components/code-blocks/code-toolbar.tsx deleted file mode 100644 index 315e5d4d6..000000000 --- a/packages/jupyter-ai/src/components/code-blocks/code-toolbar.tsx +++ /dev/null @@ -1,197 +0,0 @@ -import React from 'react'; -import { Box } from '@mui/material'; -import { - addAboveIcon, - addBelowIcon, - copyIcon -} from '@jupyterlab/ui-components'; -import { replaceCellIcon } from '../../icons'; - -import { - ActiveCellManager, - useActiveCellContext -} from '../../contexts/active-cell-context'; -import { TooltippedIconButton } from '../mui-extras/tooltipped-icon-button'; -import { useReplace } from '../../hooks/use-replace'; -import { useCopy } from '../../hooks/use-copy'; -import { AiService } from '../../handler'; -import { useTelemetry } from '../../contexts/telemetry-context'; -import { TelemetryEvent } from '../../tokens'; - -export type CodeToolbarProps = { - /** - * The content of the Markdown code block this component is attached to. 
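- *
- * For example, for a fenced Markdown block containing `print(1)`, this
- * prop receives the string 'print(1)', taken from the rendered <pre>
- * element's textContent.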
- */ - code: string; - /** - * Parent message which contains the code referenced by `content`. - */ - parentMessage?: AiService.ChatMessage; -}; - -export function CodeToolbar(props: CodeToolbarProps): JSX.Element { - const activeCell = useActiveCellContext(); - const sharedToolbarButtonProps: ToolbarButtonProps = { - code: props.code, - activeCellManager: activeCell.manager, - activeCellExists: activeCell.exists, - parentMessage: props.parentMessage - }; - - return ( - - - - - - - ); -} - -type ToolbarButtonProps = { - code: string; - activeCellExists: boolean; - activeCellManager: ActiveCellManager; - parentMessage?: AiService.ChatMessage; - // TODO: parentMessage should always be defined, but this can be undefined - // when the code toolbar appears in Markdown help messages in the Settings - // UI. The Settings UI should use a different component to render Markdown, - // and should never render code toolbars within it. -}; - -function buildTelemetryEvent( - type: string, - props: ToolbarButtonProps -): TelemetryEvent { - const charCount = props.code.length; - // number of lines = number of newlines + 1 - const lineCount = (props.code.match(/\n/g) ?? []).length + 1; - - return { - type, - message: { - id: props.parentMessage?.id ?? '', - type: props.parentMessage?.type ?? 'human', - time: props.parentMessage?.time ?? 0, - metadata: - props.parentMessage && 'metadata' in props.parentMessage - ? props.parentMessage.metadata - : {} - }, - code: { - charCount, - lineCount - } - }; -} - -function InsertAboveButton(props: ToolbarButtonProps) { - const telemetryHandler = useTelemetry(); - const tooltip = props.activeCellExists - ? 'Insert above active cell' - : 'Insert above active cell (no active cell)'; - - return ( - { - props.activeCellManager.insertAbove(props.code); - - try { - telemetryHandler.onEvent(buildTelemetryEvent('insert-above', props)); - } catch (e) { - console.error(e); - return; - } - }} - disabled={!props.activeCellExists} - > - - - ); -} - -function InsertBelowButton(props: ToolbarButtonProps) { - const telemetryHandler = useTelemetry(); - const tooltip = props.activeCellExists - ? 
'Insert below active cell' - : 'Insert below active cell (no active cell)'; - - return ( - { - props.activeCellManager.insertBelow(props.code); - - try { - telemetryHandler.onEvent(buildTelemetryEvent('insert-below', props)); - } catch (e) { - console.error(e); - return; - } - }} - > - - - ); -} - -function ReplaceButton(props: ToolbarButtonProps) { - const telemetryHandler = useTelemetry(); - const { replace, replaceDisabled, replaceLabel } = useReplace(); - - return ( - { - replace(props.code); - - try { - telemetryHandler.onEvent(buildTelemetryEvent('replace', props)); - } catch (e) { - console.error(e); - return; - } - }} - > - - - ); -} - -export function CopyButton(props: ToolbarButtonProps): JSX.Element { - const telemetryHandler = useTelemetry(); - const { copy, copyLabel } = useCopy(); - - return ( - { - copy(props.code); - - try { - telemetryHandler.onEvent(buildTelemetryEvent('copy', props)); - } catch (e) { - console.error(e); - return; - } - }} - aria-label="Copy to clipboard" - > - - - ); -} diff --git a/packages/jupyter-ai/src/components/expandable-text-field.tsx b/packages/jupyter-ai/src/components/expandable-text-field.tsx deleted file mode 100644 index 5e05511ef..000000000 --- a/packages/jupyter-ai/src/components/expandable-text-field.tsx +++ /dev/null @@ -1,80 +0,0 @@ -import React, { useEffect, useRef, useState } from 'react'; - -export interface IExpandableTextFieldProps { - label: string; - collapsedTextMaxLength?: number; - id?: string; - text?: string; - style?: React.CSSProperties; - InputProps?: { - startAdornment: JSX.Element; - }; - helperText?: string; - name?: string; -} - -export function ExpandableTextField( - props: IExpandableTextFieldProps -): JSX.Element { - const [expanded, setExpanded] = useState(false); - const [overflowing, setOverflowing] = useState(false); - const { label, style, helperText, InputProps } = props; - const textContainerRef = useRef(null); - - useEffect(() => { - setExpanded(false); - const el = textContainerRef.current; - if (el?.offsetWidth && el?.scrollWidth) { - setOverflowing(el.offsetWidth < el.scrollWidth); - } - }, [props.text]); - - return ( -
    - {label} -
    -
    - {InputProps?.startAdornment} - - {props.text ? props.text : !InputProps?.startAdornment && '\u2014'} - -
    - {overflowing && ( -
    setExpanded(!expanded)} - className="jp-ai-ExpandableTextField-value" - > - {expanded ? 'Show Less' : 'Show More'} -
    - )} - - {helperText} - -
    -
    - ); -} diff --git a/packages/jupyter-ai/src/components/mui-extras/tooltipped-button.tsx b/packages/jupyter-ai/src/components/mui-extras/tooltipped-button.tsx deleted file mode 100644 index 363a4e832..000000000 --- a/packages/jupyter-ai/src/components/mui-extras/tooltipped-button.tsx +++ /dev/null @@ -1,87 +0,0 @@ -import React from 'react'; -import { Button, ButtonProps, SxProps, TooltipProps } from '@mui/material'; - -import { ContrastingTooltip } from './contrasting-tooltip'; - -export type TooltippedButtonProps = { - onClick: React.MouseEventHandler; - tooltip: string; - children: JSX.Element; - disabled?: boolean; - placement?: TooltipProps['placement']; - /** - * The offset of the tooltip popup. - * - * The expected syntax is defined by the Popper library: - * https://popper.js.org/docs/v2/modifiers/offset/ - */ - offset?: [number, number]; - 'aria-label'?: string; - /** - * Props passed directly to the MUI `Button` component. - */ - buttonProps?: ButtonProps; - /** - * Styles applied to the MUI `Button` component. - */ - sx?: SxProps; -}; - -/** - * A component that renders an MUI `Button` with a high-contrast tooltip - * provided by `ContrastingTooltip`. This component differs from the MUI - * defaults in the following ways: - * - * - Shows the tooltip on hover even if disabled. - * - Renders the tooltip above the button by default. - * - Renders the tooltip closer to the button by default. - * - Lowers the opacity of the Button when disabled. - * - Renders the Button with `line-height: 0` to avoid showing extra - * vertical space in SVG icons. - * - * NOTE TO DEVS: Please keep this component's features synchronized with - * features available to `TooltippedIconButton`. - */ -export function TooltippedButton(props: TooltippedButtonProps): JSX.Element { - return ( - - {/* - By default, tooltips never appear when the Button is disabled. The - official way to support this feature in MUI is to wrap the child Button - element in a `span` element. - - See: https://mui.com/material-ui/react-tooltip/#disabled-elements - */} - - - - - ); -} diff --git a/packages/jupyter-ai/src/components/pending-messages.tsx b/packages/jupyter-ai/src/components/pending-messages.tsx deleted file mode 100644 index c258c295e..000000000 --- a/packages/jupyter-ai/src/components/pending-messages.tsx +++ /dev/null @@ -1,117 +0,0 @@ -import React, { useState, useEffect } from 'react'; - -import { Box, Typography } from '@mui/material'; -import { AiService } from '../handler'; -import { ChatMessageHeader } from './chat-messages'; -import { ChatHandler } from '../chat_handler'; - -type PendingMessagesProps = { - messages: AiService.PendingMessage[]; - chatHandler: ChatHandler; -}; - -type PendingMessageElementProps = { - text: string; - ellipsis: boolean; -}; - -function PendingMessageElement(props: PendingMessageElementProps): JSX.Element { - const [dots, setDots] = useState(''); - - useEffect(() => { - const interval = setInterval(() => { - setDots(dots => (dots.length < 3 ? dots + '.' 
: '')); - }, 500); - - return () => clearInterval(interval); - }, []); - - let text = props.text; - if (props.ellipsis) { - text = props.text + dots; - } - - return ( - - {text.split('\n').map((line, index) => ( - {line} - ))} - - ); -} - -export function PendingMessages( - props: PendingMessagesProps -): JSX.Element | null { - const [timestamp, setTimestamp] = useState(''); - const [agentMessage, setAgentMessage] = - useState(null); - - useEffect(() => { - if (props.messages.length === 0) { - setAgentMessage(null); - setTimestamp(''); - return; - } - const lastMessage = props.messages[props.messages.length - 1]; - setAgentMessage({ - type: 'agent', - id: lastMessage.id, - time: lastMessage.time, - body: '', - reply_to: '', - persona: lastMessage.persona, - metadata: {} - }); - - // timestamp format copied from ChatMessage - const newTimestamp = new Date(lastMessage.time * 1000).toLocaleTimeString( - [], - { - hour: 'numeric', - minute: '2-digit' - } - ); - setTimestamp(newTimestamp); - }, [props.messages]); - - if (!agentMessage) { - return null; - } - - return ( - - - :not(:last-child)': { - marginBottom: '2em' - } - }} - > - {props.messages.map(message => ( - - ))} - - - ); -} diff --git a/packages/jupyter-ai/src/components/rendermime-markdown.tsx b/packages/jupyter-ai/src/components/rendermime-markdown.tsx deleted file mode 100644 index 9a0278517..000000000 --- a/packages/jupyter-ai/src/components/rendermime-markdown.tsx +++ /dev/null @@ -1,141 +0,0 @@ -import React, { useState, useEffect, useRef } from 'react'; -import { createPortal } from 'react-dom'; - -import { CodeToolbar, CodeToolbarProps } from './code-blocks/code-toolbar'; -import { IRenderMimeRegistry } from '@jupyterlab/rendermime'; -import { AiService } from '../handler'; - -const MD_MIME_TYPE = 'text/markdown'; -const RENDERMIME_MD_CLASS = 'jp-ai-rendermime-markdown'; - -type RendermimeMarkdownProps = { - markdownStr: string; - rmRegistry: IRenderMimeRegistry; - /** - * Reference to the parent message object in the Jupyter AI chat. - */ - parentMessage?: AiService.ChatMessage; - /** - * Whether the message is complete. This is generally `true` except in the - * case where `markdownStr` contains the incomplete contents of a - * `AgentStreamMessage`, in which case this should be set to `false`. - */ - complete: boolean; -}; - -/** - * Escapes backslashes in LaTeX delimiters such that they appear in the DOM - * after the initial MarkDown render. For example, this function takes '\(` and - * returns `\\(`. - * - * Required for proper rendering of MarkDown + LaTeX markup in the chat by - * `ILatexTypesetter`. - */ -function escapeLatexDelimiters(text: string) { - return text - .replace(/\\\(/g, '\\\\(') - .replace(/\\\)/g, '\\\\)') - .replace(/\\\[/g, '\\\\[') - .replace(/\\\]/g, '\\\\]'); -} - -function RendermimeMarkdownBase(props: RendermimeMarkdownProps): JSX.Element { - // create a single renderer object at component mount - const [renderer] = useState(() => { - return props.rmRegistry.createRenderer(MD_MIME_TYPE); - }); - - // ref that tracks the content container to store the rendermime node in - const renderingContainer = useRef(null); - // ref that tracks whether the rendermime node has already been inserted - const renderingInserted = useRef(false); - - // each element is a two-tuple with the structure [codeToolbarRoot, codeToolbarProps]. 
- const [codeToolbarDefns, setCodeToolbarDefns] = useState< - Array<[HTMLDivElement, CodeToolbarProps]> - >([]); - - /** - * Effect: use Rendermime to render `props.markdownStr` into an HTML element, - * and insert it into `renderingContainer` if not yet inserted. When the - * message is completed, add code toolbars. - */ - useEffect(() => { - const renderContent = async () => { - // initialize mime model - const mdStr = escapeLatexDelimiters(props.markdownStr); - const model = props.rmRegistry.createModel({ - data: { [MD_MIME_TYPE]: mdStr } - }); - - // step 1: render markdown - await renderer.renderModel(model); - if (!renderer.node) { - throw new Error( - 'Rendermime was unable to render Markdown content within a chat message. Please report this upstream to Jupyter AI on GitHub.' - ); - } - - // step 2: render LaTeX via MathJax - props.rmRegistry.latexTypesetter?.typeset(renderer.node); - - // insert the rendering into renderingContainer if not yet inserted - if (renderingContainer.current !== null && !renderingInserted.current) { - renderingContainer.current.appendChild(renderer.node); - renderingInserted.current = true; - } - - // if complete, render code toolbars - if (!props.complete) { - return; - } - const newCodeToolbarDefns: [HTMLDivElement, CodeToolbarProps][] = []; - - // Attach CodeToolbar root element to each
<pre> block
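-      // Each toolbar root is inserted as the immediate next sibling of its
-      // <pre> block, so the rendered DOM alternates <pre>/<div> pairs.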
    -      const preBlocks = renderer.node.querySelectorAll('pre');
    -      preBlocks.forEach(preBlock => {
    -        const codeToolbarRoot = document.createElement('div');
    -        preBlock.parentNode?.insertBefore(
    -          codeToolbarRoot,
    -          preBlock.nextSibling
    -        );
    -        newCodeToolbarDefns.push([
    -          codeToolbarRoot,
    -          {
    -            code: preBlock.textContent || '',
    -            parentMessage: props.parentMessage
    -          }
    -        ]);
    -      });
    -
    -      setCodeToolbarDefns(newCodeToolbarDefns);
    -    };
    -
    -    renderContent();
    -  }, [
    -    props.markdownStr,
    -    props.complete,
    -    props.rmRegistry,
    -    props.parentMessage
    -  ]);
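-
-  // Note: this effect re-runs whenever `markdownStr` changes (e.g. while a
-  // reply streams in), but the rendermime node is appended to the container
-  // only once, guarded by the `renderingInserted` ref.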
    -
    -  return (
-    <div className={RENDERMIME_MD_CLASS}>
-      <div ref={renderingContainer} />
    - { - // Render a `CodeToolbar` element underneath each code block. - // We use ReactDOM.createPortal() so each `CodeToolbar` element is able - // to use the context in the main React tree. - codeToolbarDefns.map(codeToolbarDefn => { - const [codeToolbarRoot, codeToolbarProps] = codeToolbarDefn; - return createPortal( - , - codeToolbarRoot - ); - }) - } -
    - ); -} - -export const RendermimeMarkdown = React.memo(RendermimeMarkdownBase); diff --git a/packages/jupyter-ai/src/components/scroll-container.tsx b/packages/jupyter-ai/src/components/scroll-container.tsx deleted file mode 100644 index d17d8d2a6..000000000 --- a/packages/jupyter-ai/src/components/scroll-container.tsx +++ /dev/null @@ -1,69 +0,0 @@ -import React, { useEffect, useMemo } from 'react'; -import { Box, SxProps, Theme } from '@mui/material'; - -type ScrollContainerProps = { - children: React.ReactNode; - sx?: SxProps; -}; - -/** - * Component that handles intelligent scrolling. - * - * - If viewport is at the bottom of the overflow container, appending new - * children keeps the viewport on the bottom of the overflow container. - * - * - If viewport is in the middle of the overflow container, appending new - * children leaves the viewport unaffected. - * - * Currently only works for Chrome and Firefox due to reliance on - * `overflow-anchor`. - * - * **References** - * - https://css-tricks.com/books/greatest-css-tricks/pin-scrolling-to-bottom/ - */ -export function ScrollContainer(props: ScrollContainerProps): JSX.Element { - const id = useMemo( - () => 'jupyter-ai-scroll-container-' + Date.now().toString(), - [] - ); - - /** - * Effect: Scroll the container to the bottom as soon as it is visible. - */ - useEffect(() => { - const el = document.querySelector(`#${id}`); - if (!el) { - return; - } - - const observer = new IntersectionObserver( - entries => { - entries.forEach(entry => { - if (entry.isIntersecting) { - el.scroll({ top: 999999999 }); - } - }); - }, - { threshold: 1.0 } - ); - - observer.observe(el); - return () => observer.disconnect(); - }, []); - - return ( - - {props.children} - - - ); -} diff --git a/packages/jupyter-ai/src/components/settings/rendermime-markdown.tsx b/packages/jupyter-ai/src/components/settings/rendermime-markdown.tsx new file mode 100644 index 000000000..9d600cac4 --- /dev/null +++ b/packages/jupyter-ai/src/components/settings/rendermime-markdown.tsx @@ -0,0 +1,79 @@ +import React, { useState, useEffect, useRef } from 'react'; +import { IRenderMimeRegistry } from '@jupyterlab/rendermime'; + +const MD_MIME_TYPE = 'text/markdown'; +const RENDERMIME_MD_CLASS = 'jp-ai-rendermime-markdown'; + +type RendermimeMarkdownProps = { + markdownStr: string; + rmRegistry: IRenderMimeRegistry; +}; + +/** + * Escapes backslashes in LaTeX delimiters such that they appear in the DOM + * after the initial MarkDown render. For example, this function takes '\(` and + * returns `\\(`. + * + * Required for proper rendering of MarkDown + LaTeX markup in the chat by + * `ILatexTypesetter`. + */ +function escapeLatexDelimiters(text: string) { + return text + .replace(/\\\(/g, '\\\\(') + .replace(/\\\)/g, '\\\\)') + .replace(/\\\[/g, '\\\\[') + .replace(/\\\]/g, '\\\\]'); +} + +export function RendermimeMarkdown( + props: RendermimeMarkdownProps +): JSX.Element { + // create a single renderer object at component mount + const [renderer] = useState(() => { + return props.rmRegistry.createRenderer(MD_MIME_TYPE); + }); + + // ref that tracks the content container to store the rendermime node in + const renderingContainer = useRef(null); + // ref that tracks whether the rendermime node has already been inserted + const renderingInserted = useRef(false); + + /** + * Effect: use Rendermime to render `props.markdownStr` into an HTML element, + * and insert it into `renderingContainer` if not yet inserted. 
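+ *
+ * Note: the Markdown string is passed through `escapeLatexDelimiters()`
+ * first, so a raw `\(` reaches the renderer as `\\(` and survives the
+ * initial Markdown pass for later typesetting by `latexTypesetter`.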
+ */ + useEffect(() => { + const renderContent = async () => { + // initialize mime model + const mdStr = escapeLatexDelimiters(props.markdownStr); + const model = props.rmRegistry.createModel({ + data: { [MD_MIME_TYPE]: mdStr } + }); + + // step 1: render markdown + await renderer.renderModel(model); + if (!renderer.node) { + throw new Error( + 'Rendermime was unable to render Markdown content. Please report this upstream to Jupyter AI on GitHub.' + ); + } + + // step 2: render LaTeX via MathJax + props.rmRegistry.latexTypesetter?.typeset(renderer.node); + + // insert the rendering into renderingContainer if not yet inserted + if (renderingContainer.current !== null && !renderingInserted.current) { + renderingContainer.current.appendChild(renderer.node); + renderingInserted.current = true; + } + }; + + renderContent(); + }, [props.markdownStr]); + + return ( +
+    <div className={RENDERMIME_MD_CLASS}>
+      <div ref={renderingContainer} />
+    </div>
    + ); +} diff --git a/packages/jupyter-ai/src/contexts/active-cell-context.tsx b/packages/jupyter-ai/src/contexts/active-cell-context.tsx deleted file mode 100644 index 72e93a8ca..000000000 --- a/packages/jupyter-ai/src/contexts/active-cell-context.tsx +++ /dev/null @@ -1,329 +0,0 @@ -import React, { useState, useContext, useEffect } from 'react'; - -import { JupyterFrontEnd } from '@jupyterlab/application'; -import { DocumentWidget } from '@jupyterlab/docregistry'; -import { Notebook, NotebookActions } from '@jupyterlab/notebook'; -import { Cell } from '@jupyterlab/cells'; -import { IError as CellError } from '@jupyterlab/nbformat'; - -import { Widget } from '@lumino/widgets'; -import { Signal } from '@lumino/signaling'; - -function getNotebook(widget: Widget | null): Notebook | null { - if (!(widget instanceof DocumentWidget)) { - return null; - } - - const { content } = widget; - if (!(content instanceof Notebook)) { - return null; - } - - return content; -} - -function getActiveCell(widget: Widget | null): Cell | null { - const notebook = getNotebook(widget); - if (!notebook) { - return null; - } - - return notebook.activeCell; -} - -type CellContent = { - type: string; - source: string; -}; - -type CellWithErrorContent = { - type: 'code'; - source: string; - error: { - name: string; - value: string; - traceback: string[]; - }; -}; - -/** - * A manager that maintains a reference to the current active notebook cell in - * the main panel (if any), and provides methods for inserting or appending - * content to the active cell. - * - * The current active cell should be obtained by listening to the - * `activeCellChanged` signal. - */ -export class ActiveCellManager { - constructor(shell: JupyterFrontEnd.IShell) { - this._shell = shell; - this._shell.currentChanged?.connect((sender, args) => { - this._mainAreaWidget = args.newValue; - }); - - setInterval(() => { - this._pollActiveCell(); - }, 200); - } - - get activeCellChanged(): Signal { - return this._activeCellChanged; - } - - get activeCellErrorChanged(): Signal { - return this._activeCellErrorChanged; - } - - /** - * Returns an `ActiveCellContent` object that describes the current active - * cell. If no active cell exists, this method returns `null`. - * - * When called with `withError = true`, this method returns `null` if the - * active cell does not have an error output. Otherwise it returns an - * `ActiveCellContentWithError` object that describes both the active cell and - * the error output. - */ - getContent(withError: false): CellContent | null; - getContent(withError: true): CellWithErrorContent | null; - getContent(withError = false): CellContent | CellWithErrorContent | null { - const sharedModel = this._activeCell?.model.sharedModel; - if (!sharedModel) { - return null; - } - - // case where withError = false - if (!withError) { - return { - type: sharedModel.cell_type, - source: sharedModel.getSource() - }; - } - - // case where withError = true - const error = this._activeCellError; - if (error) { - return { - type: 'code', - source: sharedModel.getSource(), - error: { - name: error.ename, - value: error.evalue, - traceback: error.traceback - } - }; - } - - return null; - } - - /** - * Inserts `content` in a new cell above the active cell. 
- */ - insertAbove(content: string): void { - const notebook = getNotebook(this._mainAreaWidget); - if (!notebook) { - return; - } - - // create a new cell above the active cell and mark new cell as active - NotebookActions.insertAbove(notebook); - // emit activeCellChanged event to consumers - this._pollActiveCell(); - // replace content of this new active cell - this.replace(content); - } - - /** - * Inserts `content` in a new cell below the active cell. - */ - insertBelow(content: string): void { - const notebook = getNotebook(this._mainAreaWidget); - if (!notebook) { - return; - } - - // create a new cell below the active cell and mark new cell as active - NotebookActions.insertBelow(notebook); - // emit activeCellChanged event to consumers - this._pollActiveCell(); - // replace content of this new active cell - this.replace(content); - } - - /** - * Replaces the contents of the active cell. - */ - async replace(content: string): Promise { - // get reference to active cell directly from Notebook API. this avoids the - // possibility of acting on an out-of-date reference. - const activeCell = getNotebook(this._mainAreaWidget)?.activeCell; - if (!activeCell) { - return; - } - - // wait for editor to be ready - await activeCell.ready; - - // replace the content of the active cell - /** - * NOTE: calling this method sometimes emits an error to the browser console: - * - * ``` - * Error: Calls to EditorView.update are not allowed while an update is in progress - * ``` - * - * However, there seems to be no impact on the behavior/stability of the - * JupyterLab application after this error is logged. Furthermore, this is - * the official API for setting the content of a cell in JupyterLab 4, - * meaning that this is likely unavoidable. - */ - activeCell.editor?.model.sharedModel.setSource(content); - } - - protected _pollActiveCell(): void { - const prevActiveCell = this._activeCell; - const currActiveCell = getActiveCell(this._mainAreaWidget); - - // emit activeCellChanged when active cell changes - if (prevActiveCell !== currActiveCell) { - this._activeCell = currActiveCell; - this._activeCellChanged.emit(currActiveCell); - } - - const currSharedModel = currActiveCell?.model.sharedModel; - const prevExecutionCount = this._activeCellExecutionCount; - const currExecutionCount: number | null = - currSharedModel && 'execution_count' in currSharedModel - ? currSharedModel?.execution_count - : null; - this._activeCellExecutionCount = currExecutionCount; - - // emit activeCellErrorChanged when active cell changes or when the - // execution count changes - if ( - prevActiveCell !== currActiveCell || - prevExecutionCount !== currExecutionCount - ) { - const prevActiveCellError = this._activeCellError; - let currActiveCellError: CellError | null = null; - if (currSharedModel && 'outputs' in currSharedModel) { - currActiveCellError = - currSharedModel.outputs.find( - (output): output is CellError => output.output_type === 'error' - ) || null; - } - - // for some reason, the `CellError` object is not referentially stable, - // meaning that this condition always evaluates to `true` and the - // `activeCellErrorChanged` signal is emitted every 200ms, even when the - // error output is unchanged. this is why we have to rely on - // `execution_count` to track changes to the error output. 
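-      // In other words, the signal below is re-emitted only when the
-      // (active cell, execution count) pair changes, not on every 200ms poll.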
- if (prevActiveCellError !== currActiveCellError) { - this._activeCellError = currActiveCellError; - this._activeCellErrorChanged.emit(this._activeCellError); - } - } - } - - protected _shell: JupyterFrontEnd.IShell; - protected _mainAreaWidget: Widget | null = null; - - /** - * The active cell. - */ - protected _activeCell: Cell | null = null; - /** - * The execution count of the active cell. This is the number shown on the - * left in square brackets after running a cell. Changes to this indicate that - * the error output may have changed. - */ - protected _activeCellExecutionCount: number | null = null; - /** - * The `CellError` output within the active cell, if any. - */ - protected _activeCellError: CellError | null = null; - - protected _activeCellChanged = new Signal(this); - protected _activeCellErrorChanged = new Signal(this); -} - -type ActiveCellContextReturn = { - exists: boolean; - hasError: boolean; - manager: ActiveCellManager; -}; - -type ActiveCellContextValue = { - exists: boolean; - hasError: boolean; - manager: ActiveCellManager | null; -}; - -const defaultActiveCellContext: ActiveCellContextValue = { - exists: false, - hasError: false, - manager: null -}; - -const ActiveCellContext = React.createContext( - defaultActiveCellContext -); - -type ActiveCellContextProps = { - activeCellManager: ActiveCellManager; - children: React.ReactNode; -}; - -export function ActiveCellContextProvider( - props: ActiveCellContextProps -): JSX.Element { - const [exists, setExists] = useState(false); - const [hasError, setHasError] = useState(false); - - useEffect(() => { - const manager = props.activeCellManager; - - manager.activeCellChanged.connect((_, newActiveCell) => { - setExists(!!newActiveCell); - }); - manager.activeCellErrorChanged.connect((_, newActiveCellError) => { - setHasError(!!newActiveCellError); - }); - }, [props.activeCellManager]); - - return ( - - {props.children} - - ); -} - -/** - * Usage: `const activeCell = useActiveCellContext()` - * - * Returns an object `activeCell` with the following properties: - * - `activeCell.exists`: whether an active cell exists - * - `activeCell.hasError`: whether an active cell exists with an error output - * - `activeCell.manager`: the `ActiveCellManager` singleton - */ -export function useActiveCellContext(): ActiveCellContextReturn { - const { exists, hasError, manager } = useContext(ActiveCellContext); - - if (!manager) { - throw new Error( - 'useActiveCellContext() cannot be called outside ActiveCellContextProvider.' - ); - } - - return { - exists, - hasError, - manager - }; -} diff --git a/packages/jupyter-ai/src/contexts/collaborators-context.tsx b/packages/jupyter-ai/src/contexts/collaborators-context.tsx deleted file mode 100644 index 72fee7068..000000000 --- a/packages/jupyter-ai/src/contexts/collaborators-context.tsx +++ /dev/null @@ -1,70 +0,0 @@ -import React, { useContext, useEffect, useState } from 'react'; -import type { Awareness } from 'y-protocols/awareness'; - -import { AiService } from '../handler'; - -const CollaboratorsContext = React.createContext< - Record ->({}); - -/** - * Returns a dictionary mapping each collaborator's username to their associated - * Collaborator object. 
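- *
- * A minimal usage sketch (the username is hypothetical):
- *   const collaborators = useCollaboratorsContext();
- *   const color = collaborators['jovyan']?.color;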
- */ -// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types -export function useCollaboratorsContext() { - return useContext(CollaboratorsContext); -} - -type GlobalAwarenessStates = Map< - number, - { current: string; user: AiService.Collaborator } ->; - -type CollaboratorsContextProviderProps = { - globalAwareness: Awareness | null; - children: JSX.Element; -}; - -// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types -export function CollaboratorsContextProvider({ - globalAwareness, - children -}: CollaboratorsContextProviderProps) { - const [collaborators, setCollaborators] = useState< - Record - >({}); - - /** - * Effect: listen to changes in global awareness and update collaborators - * dictionary. - */ - useEffect(() => { - function handleChange() { - const states = (globalAwareness?.getStates() ?? - new Map()) as GlobalAwarenessStates; - - const collaboratorsDict: Record = {}; - states.forEach(state => { - collaboratorsDict[state.user.username] = state.user; - }); - - setCollaborators(collaboratorsDict); - } - - globalAwareness?.on('change', handleChange); - return () => { - globalAwareness?.off('change', handleChange); - }; - }, [globalAwareness]); - - if (!globalAwareness) { - return children; - } - - return ( - - {children} - - ); -} diff --git a/packages/jupyter-ai/src/contexts/index.ts b/packages/jupyter-ai/src/contexts/index.ts index 0cc0c017f..479d11b15 100644 --- a/packages/jupyter-ai/src/contexts/index.ts +++ b/packages/jupyter-ai/src/contexts/index.ts @@ -1,4 +1 @@ -export * from './active-cell-context'; -export * from './collaborators-context'; -export * from './selection-context'; export * from './telemetry-context'; diff --git a/packages/jupyter-ai/src/contexts/selection-context.tsx b/packages/jupyter-ai/src/contexts/selection-context.tsx deleted file mode 100644 index e36d0de38..000000000 --- a/packages/jupyter-ai/src/contexts/selection-context.tsx +++ /dev/null @@ -1,51 +0,0 @@ -import React, { useCallback, useContext, useEffect, useState } from 'react'; -import { Selection, SelectionWatcher } from '../selection-watcher'; - -const SelectionContext = React.createContext< - [Selection | null, (value: Selection) => unknown] ->([ - null, - () => { - /* noop */ - } -]); - -// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types -export function useSelectionContext() { - return useContext(SelectionContext); -} - -type SelectionContextProviderProps = { - selectionWatcher: SelectionWatcher; - children: React.ReactNode; -}; - -// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types -export function SelectionContextProvider({ - selectionWatcher, - children -}: SelectionContextProviderProps) { - const [selection, setSelection] = useState(null); - - /** - * Effect: subscribe to SelectionWatcher - */ - useEffect(() => { - selectionWatcher.selectionChanged.connect((sender, newSelection) => { - setSelection(newSelection); - }); - }, []); - - const replaceSelection = useCallback( - (value: Selection) => { - selectionWatcher.replaceSelection(value); - }, - [selectionWatcher] - ); - - return ( - - {children} - - ); -} diff --git a/packages/jupyter-ai/src/contexts/user-context.tsx b/packages/jupyter-ai/src/contexts/user-context.tsx deleted file mode 100644 index ff9fe8e3d..000000000 --- a/packages/jupyter-ai/src/contexts/user-context.tsx +++ /dev/null @@ -1,35 +0,0 @@ -import React, { useContext, useEffect, useState } from 'react'; -import type { User } from '@jupyterlab/services'; -import { 
PartialJSONObject } from '@lumino/coreutils'; - -const UserContext = React.createContext(null); - -export function useUserContext(): User.IUser | null { - return useContext(UserContext); -} - -type UserContextProviderProps = { - userManager: User.IManager; - children: React.ReactNode; -}; - -export function UserContextProvider({ - userManager, - children -}: UserContextProviderProps): JSX.Element { - const [user, setUser] = useState(null); - - useEffect(() => { - userManager.ready.then(() => { - setUser({ - identity: userManager.identity!, - permissions: userManager.permissions as PartialJSONObject - }); - }); - userManager.userChanged.connect((sender, newUser) => { - setUser(newUser); - }); - }, []); - - return {children}; -} diff --git a/packages/jupyter-ai/src/handler.ts b/packages/jupyter-ai/src/handler.ts index 1b58ed2af..c2ef14caa 100644 --- a/packages/jupyter-ai/src/handler.ts +++ b/packages/jupyter-ai/src/handler.ts @@ -51,156 +51,6 @@ export namespace AiService { serverSettings?: ServerConnection.ISettings; } - export type CellError = { - name: string; - value: string; - traceback: string[]; - }; - - export type TextSelection = { - type: 'text'; - source: string; - }; - - export type CellSelection = { - type: 'cell'; - source: string; - }; - - export type CellWithErrorSelection = { - type: 'cell-with-error'; - source: string; - error: CellError; - }; - - export type Selection = - | TextSelection - | CellSelection - | CellWithErrorSelection; - - export type ChatRequest = { - prompt: string; - selection?: Selection; - }; - - export type ClearRequest = { - type: 'clear'; - target?: string; - }; - - export type StopRequest = { - type: 'stop'; - }; - - export type Collaborator = { - username: string; - initials: string; - name: string; - display_name: string; - color?: string; - avatar_url?: string; - }; - - export type ChatClient = Collaborator & { - id: string; - }; - - export type Persona = { - name: string; - avatar_route: string; - }; - - export type AgentChatMessage = { - type: 'agent'; - id: string; - time: number; - body: string; - reply_to: string; - persona: Persona; - metadata: Record; - }; - - export type HumanChatMessage = { - type: 'human'; - id: string; - time: number; - /** - * The formatted body of the message to be rendered in the UI. Includes both - * `prompt` and `selection`. - */ - body: string; - /** - * The prompt typed into the chat input by the user. - */ - prompt: string; - /** - * The selection included with the prompt, if any. 
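- *
- * For example: { type: 'text', source: 'x = 1' } for a text selection, or
- * { type: 'cell', source: '...' } for an active cell, per the `Selection`
- * union above.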
- */ - selection?: Selection; - client: ChatClient; - }; - - export type ConnectionMessage = { - type: 'connection'; - client_id: string; - history: ChatHistory; - }; - - export type ClearMessage = { - type: 'clear'; - targets?: string[]; - }; - - export type PendingMessage = { - type: 'pending'; - id: string; - time: number; - body: string; - reply_to: string; - persona: Persona; - ellipsis: boolean; - }; - - export type ClosePendingMessage = { - type: 'close-pending'; - id: string; - }; - - export type AgentStreamMessage = Omit & { - type: 'agent-stream'; - complete: boolean; - }; - - export type AgentStreamChunkMessage = { - type: 'agent-stream-chunk'; - id: string; - content: string; - stream_complete: boolean; - metadata: Record; - }; - - export type Request = ChatRequest | ClearRequest | StopRequest; - - export type ChatMessage = - | AgentChatMessage - | HumanChatMessage - | AgentStreamMessage; - - export type Message = - | AgentChatMessage - | HumanChatMessage - | ConnectionMessage - | ClearMessage - | PendingMessage - | ClosePendingMessage - | AgentStreamMessage - | AgentStreamChunkMessage; - - export type ChatHistory = { - messages: ChatMessage[]; - pending_messages: PendingMessage[]; - }; - export type DescribeConfigResponse = { model_provider_id: string | null; embeddings_provider_id: string | null; diff --git a/packages/jupyter-ai/src/hooks/use-copy.ts b/packages/jupyter-ai/src/hooks/use-copy.ts deleted file mode 100644 index 62f748718..000000000 --- a/packages/jupyter-ai/src/hooks/use-copy.ts +++ /dev/null @@ -1,89 +0,0 @@ -import { useState, useRef, useCallback } from 'react'; - -export enum CopyStatus { - None, - Copying, - Copied -} - -export type UseCopyProps = { - /** - * List of labels by copy status. Used to override the default labels provided - * by this hook. - */ - labelOverrides?: Partial>; -}; - -export type UseCopyReturn = { - /** - * The status of the copy operation. This is set to CopyStatus.None when no - * copy operation was performed, set to CopyStatus.Copying while the copy - * operation is executing, and set to CopyStatus.Copied for 1000ms after the - * copy operation completes. - * - */ - copyStatus: CopyStatus; - /** - * Label that should be shown by the copy button based on the copy status. - * This can be selectively overridden via the `labelOverrides` prop passed to - * the `useCopy()` hook. - */ - copyLabel: string; - /** - * Function that takes a string and copies it to the clipboard. - */ - copy: (value: string) => unknown; -}; - -const DEFAULT_LABELS_BY_COPY_STATUS: Record = { - [CopyStatus.None]: 'Copy to clipboard', - [CopyStatus.Copying]: 'Copying…', - [CopyStatus.Copied]: 'Copied!' -}; - -/** - * Hook that provides a function to copy a string to a clipboard and manages - * related UI state. Should be used by any button that intends to copy text. 
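- *
- * A minimal usage sketch (hypothetical button):
- *   const { copy, copyLabel } = useCopy();
- *   return <button onClick={() => copy(text)} title={copyLabel}>Copy</button>;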
-
-/**
- * Hook that provides a function to copy a string to a clipboard and manages
- * related UI state. Should be used by any button that intends to copy text.
- */
-export function useCopy(props?: UseCopyProps): UseCopyReturn {
-  const [copyStatus, setCopyStatus] = useState<CopyStatus>(CopyStatus.None);
-  const timeoutId = useRef<number | null>(null);
-
-  const copy = useCallback(
-    async (value: string) => {
-      // ignore if we are already copying
-      if (copyStatus === CopyStatus.Copying) {
-        return;
-      }
-
-      try {
-        await navigator.clipboard.writeText(value);
-      } catch (err) {
-        console.error('Failed to copy text: ', err);
-        setCopyStatus(CopyStatus.None);
-        return;
-      }
-
-      setCopyStatus(CopyStatus.Copied);
-      if (timeoutId.current) {
-        clearTimeout(timeoutId.current);
-      }
-      timeoutId.current = setTimeout(
-        () => setCopyStatus(CopyStatus.None),
-        1000
-      );
-    },
-    [copyStatus]
-  );
-
-  const copyLabel = {
-    ...DEFAULT_LABELS_BY_COPY_STATUS,
-    ...props?.labelOverrides
-  }[copyStatus];
-
-  return {
-    copyStatus,
-    copyLabel,
-    copy
-  };
-}
diff --git a/packages/jupyter-ai/src/hooks/use-replace.ts b/packages/jupyter-ai/src/hooks/use-replace.ts
deleted file mode 100644
index f4486b588..000000000
--- a/packages/jupyter-ai/src/hooks/use-replace.ts
+++ /dev/null
@@ -1,56 +0,0 @@
-import { useActiveCellContext } from '../contexts/active-cell-context';
-import { useSelectionContext } from '../contexts/selection-context';
-
-export type UseReplaceReturn = {
-  /**
-   * If a user has a range of text selected, this function replaces the
-   * selection range with `value`. Otherwise, if the user has an active notebook
-   * cell, this function replaces the contents of the active cell with `value`.
-   *
-   * Otherwise (if a user does not have a text selection or active cell), this
-   * function does nothing.
-   */
-  replace: (value: string) => unknown;
-  /**
-   * Whether the replace button should be disabled, i.e. the user does not have
-   * a text selection or active cell.
-   */
-  replaceDisabled: boolean;
-  /**
-   * Label that should be shown by the replace button using this hook.
-   */
-  replaceLabel: string;
-};
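// Illustrative sketch (not part of the patch): how a toolbar button would have
// consumed the hook defined below; `value` stands in for whatever text the
// button wants to insert.
//
//   const { replace, replaceDisabled, replaceLabel } = useReplace();
//   return (
//     <button disabled={replaceDisabled} onClick={() => replace(value)}>
//       {replaceLabel}
//     </button>
//   );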
-
-/**
- * Hook that provides a function to either replace a text selection or an active
- * cell. Manages related UI state. Should be used by any button that intends to
- * replace some user selection.
- */
-export function useReplace(): UseReplaceReturn {
-  const [textSelection, replaceTextSelection] = useSelectionContext();
-  const activeCell = useActiveCellContext();
-
-  const replace = (value: string) => {
-    if (textSelection) {
-      replaceTextSelection({ ...textSelection, text: value });
-    } else if (activeCell.exists) {
-      activeCell.manager.replace(value);
-    }
-  };
-
-  const replaceDisabled = !(textSelection || activeCell.exists);
-
-  const numLines = textSelection?.text.split('\n').length || 0;
-  const replaceLabel = textSelection
-    ? `Replace selection (${numLines} ${numLines === 1 ? 'line' : 'lines'})`
-    : activeCell.exists
-      ? 'Replace selection (1 active cell)'
-      : 'Replace selection (no selection or active cell)';
-
-  return {
-    replace,
-    replaceDisabled,
-    replaceLabel
-  };
-}
diff --git a/packages/jupyter-ai/src/index.ts b/packages/jupyter-ai/src/index.ts
index f24fbfa00..14528b51b 100644
--- a/packages/jupyter-ai/src/index.ts
+++ b/packages/jupyter-ai/src/index.ts
@@ -1,141 +1,115 @@
+import { IAutocompletionRegistry } from '@jupyter/chat';
 import {
   JupyterFrontEnd,
-  JupyterFrontEndPlugin,
-  ILayoutRestorer
+  JupyterFrontEndPlugin
 } from '@jupyterlab/application';
-
 import {
   IWidgetTracker,
   ReactWidget,
-  IThemeManager
+  IThemeManager,
+  MainAreaWidget,
+  ICommandPalette
 } from '@jupyterlab/apputils';
 import { IDocumentWidget } from '@jupyterlab/docregistry';
-import { IGlobalAwareness } from '@jupyter/collaboration';
-import type { Awareness } from 'y-protocols/awareness';
-import { buildChatSidebar } from './widgets/chat-sidebar';
-import { SelectionWatcher } from './selection-watcher';
-import { ChatHandler } from './chat_handler';
-import { buildErrorWidget } from './widgets/chat-error';
+import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
+
 import { completionPlugin } from './completions';
+import { autocompletion } from './slash-autocompletion';
 import { statusItemPlugin } from './status';
-import {
-  IJaiCompletionProvider,
-  IJaiCore,
-  IJaiMessageFooter,
-  IJaiTelemetryHandler
-} from './tokens';
-import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
-import { ActiveCellManager } from './contexts/active-cell-context';
-import { Signal } from '@lumino/signaling';
-import { menuPlugin } from './plugins/menu-plugin';
+import { IJaiCompletionProvider } from './tokens';
+import { buildErrorWidget } from './widgets/chat-error';
+import { buildAiSettings } from './widgets/settings-widget';
 
 export type DocumentTracker = IWidgetTracker<IDocumentWidget>;
 
 export namespace CommandIDs {
   /**
-   * Command to focus the input.
+   * Command to open the AI settings.
    */
-  export const focusChatInput = 'jupyter-ai:focus-chat-input';
+  export const openAiSettings = 'jupyter-ai:open-settings';
 }
 
 /**
  * Initialization data for the jupyter_ai extension.
  */
-const plugin: JupyterFrontEndPlugin<IJaiCore> = {
+const plugin: JupyterFrontEndPlugin<void> = {
   id: '@jupyter-ai/core:plugin',
   autoStart: true,
   requires: [IRenderMimeRegistry],
-  optional: [
-    IGlobalAwareness,
-    ILayoutRestorer,
-    IThemeManager,
-    IJaiCompletionProvider,
-    IJaiMessageFooter,
-    IJaiTelemetryHandler
-  ],
-  provides: IJaiCore,
+  optional: [ICommandPalette, IThemeManager, IJaiCompletionProvider],
   activate: async (
     app: JupyterFrontEnd,
     rmRegistry: IRenderMimeRegistry,
-    globalAwareness: Awareness | null,
-    restorer: ILayoutRestorer | null,
+    palette: ICommandPalette | null,
     themeManager: IThemeManager | null,
-    completionProvider: IJaiCompletionProvider | null,
-    messageFooter: IJaiMessageFooter | null,
-    telemetryHandler: IJaiTelemetryHandler | null
+    completionProvider: IJaiCompletionProvider | null
   ) => {
-    /**
-     * Initialize selection watcher singleton
-     */
-    const selectionWatcher = new SelectionWatcher(app.shell);
-
-    /**
-     * Initialize active cell manager singleton
-     */
-    const activeCellManager = new ActiveCellManager(app.shell);
-
-    /**
-     * Initialize chat handler, open WS connection
-     */
-    const chatHandler = new ChatHandler();
-
     const openInlineCompleterSettings = () => {
       app.commands.execute('settingeditor:open', {
        query: 'Inline Completer'
      });
    };
 
-    const focusInputSignal = new Signal<unknown, void>({});
-
-    let chatWidget: ReactWidget;
+    // Create an AI settings widget.
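+    // The MainAreaWidget wrapper is created lazily inside the command below,
+    // and re-created whenever the previous instance was disposed (e.g. after
+    // the user closed the settings tab).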
+    let aiSettings: MainAreaWidget;
+    let settingsWidget: ReactWidget;
     try {
-      await chatHandler.initialize();
-      chatWidget = buildChatSidebar(
-        selectionWatcher,
-        chatHandler,
-        globalAwareness,
-        themeManager,
+      settingsWidget = buildAiSettings(
         rmRegistry,
         completionProvider,
-        openInlineCompleterSettings,
-        activeCellManager,
-        focusInputSignal,
-        messageFooter,
-        telemetryHandler,
-        app.serviceManager.user
+        openInlineCompleterSettings
       );
     } catch (e) {
-      chatWidget = buildErrorWidget(themeManager);
-    }
-
-    /**
-     * Add Chat widget to right sidebar
-     */
-    app.shell.add(chatWidget, 'left', { rank: 2000 });
-
-    if (restorer) {
-      restorer.add(chatWidget, 'jupyter-ai-chat');
+      settingsWidget = buildErrorWidget(themeManager);
     }
 
-    // Define jupyter-ai commands
-    app.commands.addCommand(CommandIDs.focusChatInput, {
+    // Add a command to open settings widget in main area.
+    app.commands.addCommand(CommandIDs.openAiSettings, {
       execute: () => {
-        app.shell.activateById(chatWidget.id);
-        focusInputSignal.emit();
+        if (!aiSettings || aiSettings.isDisposed) {
+          aiSettings = new MainAreaWidget({ content: settingsWidget });
+          aiSettings.id = 'jupyter-ai-settings';
+          aiSettings.title.label = 'AI settings';
+          aiSettings.title.closable = true;
+        }
+        if (!aiSettings.isAttached) {
+          app?.shell.add(aiSettings, 'main');
+        }
+        app.shell.activateById(aiSettings.id);
       },
-      label: 'Focus the jupyter-ai chat'
+      label: 'AI settings'
     });
 
-    return {
-      activeCellManager,
-      chatHandler,
-      chatWidget,
-      selectionWatcher
-    };
+    if (palette) {
+      palette.addItem({
+        category: 'jupyter-ai',
+        command: CommandIDs.openAiSettings
+      });
+    }
   }
 };
 
+/**
+ * Add slash commands to jupyterlab chat.
+ */
+const chat_autocompletion: JupyterFrontEndPlugin<void> = {
+  id: '@jupyter-ai/core:autocompletion',
+  autoStart: true,
+  requires: [IAutocompletionRegistry],
+  activate: async (
+    app: JupyterFrontEnd,
+    autocompletionRegistry: IAutocompletionRegistry
+  ) => {
+    autocompletionRegistry.add('ai', autocompletion);
+  }
+};
 
-export default [plugin, statusItemPlugin, completionPlugin, menuPlugin];
+export default [
+  plugin,
+  statusItemPlugin,
+  completionPlugin,
+  chat_autocompletion
+];
 
 export * from './contexts';
 export * from './tokens';
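// Illustrative sketch (not part of the patch): a third-party extension could
// register its own autocompletion properties against the same registry that
// `chat_autocompletion` uses above. The plugin ID, registry key 'my-ai', and
// opener character are hypothetical.
//
//   const myAutocompletion: JupyterFrontEndPlugin<void> = {
//     id: 'my-extension:autocompletion',
//     autoStart: true,
//     requires: [IAutocompletionRegistry],
//     activate: (app: JupyterFrontEnd, registry: IAutocompletionRegistry) => {
//       registry.add('my-ai', { opener: '#', commands: async () => [] });
//     }
//   };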
diff --git a/packages/jupyter-ai/src/plugins/menu-plugin.ts b/packages/jupyter-ai/src/plugins/menu-plugin.ts
deleted file mode 100644
index 8994a552d..000000000
--- a/packages/jupyter-ai/src/plugins/menu-plugin.ts
+++ /dev/null
@@ -1,158 +0,0 @@
-import {
-  JupyterFrontEnd,
-  JupyterFrontEndPlugin
-} from '@jupyterlab/application';
-
-import { IJaiCore } from '../tokens';
-import { AiService } from '../handler';
-import { Menu } from '@lumino/widgets';
-import { CommandRegistry } from '@lumino/commands';
-
-export namespace CommandIDs {
-  export const explain = 'jupyter-ai:explain';
-  export const fix = 'jupyter-ai:fix';
-  export const optimize = 'jupyter-ai:optimize';
-  export const refactor = 'jupyter-ai:refactor';
-}
-
-/**
- * Optional plugin that adds a "Generative AI" submenu to the context menu.
- * These implement UI shortcuts that explain, fix, refactor, or optimize code in
- * a notebook or file.
- *
- * **This plugin is experimental and may be removed in a future release.**
- */
-export const menuPlugin: JupyterFrontEndPlugin<void> = {
-  id: '@jupyter-ai/core:menu-plugin',
-  autoStart: true,
-  requires: [IJaiCore],
-  activate: (app: JupyterFrontEnd, jaiCore: IJaiCore) => {
-    const { activeCellManager, chatHandler, chatWidget, selectionWatcher } =
-      jaiCore;
-
-    function activateChatSidebar() {
-      app.shell.activateById(chatWidget.id);
-    }
-
-    function getSelection(): AiService.Selection | null {
-      const textSelection = selectionWatcher.selection;
-      const activeCell = activeCellManager.getContent(false);
-      const selection: AiService.Selection | null = textSelection
-        ? { type: 'text', source: textSelection.text }
-        : activeCell
-          ? { type: 'cell', source: activeCell.source }
-          : null;
-
-      return selection;
-    }
-
-    function buildLabelFactory(baseLabel: string): () => string {
-      return () => {
-        const textSelection = selectionWatcher.selection;
-        const activeCell = activeCellManager.getContent(false);
-
-        return textSelection
-          ? `${baseLabel} (${textSelection.numLines} lines selected)`
-          : activeCell
-            ? `${baseLabel} (1 active cell)`
-            : baseLabel;
-      };
-    }
-
-    // register commands
-    const menuCommands = new CommandRegistry();
-    menuCommands.addCommand(CommandIDs.explain, {
-      execute: () => {
-        const selection = getSelection();
-        if (!selection) {
-          return;
-        }
-
-        activateChatSidebar();
-        chatHandler.sendMessage({
-          prompt: 'Explain the code below.',
-          selection
-        });
-      },
-      label: buildLabelFactory('Explain code'),
-      isEnabled: () => !!getSelection()
-    });
-    menuCommands.addCommand(CommandIDs.fix, {
-      execute: () => {
-        const activeCellWithError = activeCellManager.getContent(true);
-        if (!activeCellWithError) {
-          return;
-        }
-
-        chatHandler.sendMessage({
-          prompt: '/fix',
-          selection: {
-            type: 'cell-with-error',
-            error: activeCellWithError.error,
-            source: activeCellWithError.source
-          }
-        });
-      },
-      label: () => {
-        const activeCellWithError = activeCellManager.getContent(true);
-        return activeCellWithError
-          ? 
'Fix code cell (1 error cell)' - : 'Fix code cell (no error cell)'; - }, - isEnabled: () => { - const activeCellWithError = activeCellManager.getContent(true); - return !!activeCellWithError; - } - }); - menuCommands.addCommand(CommandIDs.optimize, { - execute: () => { - const selection = getSelection(); - if (!selection) { - return; - } - - activateChatSidebar(); - chatHandler.sendMessage({ - prompt: 'Optimize the code below.', - selection - }); - }, - label: buildLabelFactory('Optimize code'), - isEnabled: () => !!getSelection() - }); - menuCommands.addCommand(CommandIDs.refactor, { - execute: () => { - const selection = getSelection(); - if (!selection) { - return; - } - - activateChatSidebar(); - chatHandler.sendMessage({ - prompt: 'Refactor the code below.', - selection - }); - }, - label: buildLabelFactory('Refactor code'), - isEnabled: () => !!getSelection() - }); - - // add commands as a context menu item containing a "Generative AI" submenu - const submenu = new Menu({ - commands: menuCommands - }); - submenu.id = 'jupyter-ai:submenu'; - submenu.title.label = 'Generative AI'; - submenu.addItem({ command: CommandIDs.explain }); - submenu.addItem({ command: CommandIDs.fix }); - submenu.addItem({ command: CommandIDs.optimize }); - submenu.addItem({ command: CommandIDs.refactor }); - - app.contextMenu.addItem({ - type: 'submenu', - selector: '.jp-Editor', - rank: 1, - submenu - }); - } -}; diff --git a/packages/jupyter-ai/src/selection-watcher.ts b/packages/jupyter-ai/src/selection-watcher.ts deleted file mode 100644 index 9cbb67f31..000000000 --- a/packages/jupyter-ai/src/selection-watcher.ts +++ /dev/null @@ -1,181 +0,0 @@ -import { JupyterFrontEnd } from '@jupyterlab/application'; -import { DocumentWidget } from '@jupyterlab/docregistry'; -import { CodeEditor } from '@jupyterlab/codeeditor'; -import { CodeMirrorEditor } from '@jupyterlab/codemirror'; -import { FileEditor } from '@jupyterlab/fileeditor'; -import { Notebook } from '@jupyterlab/notebook'; - -import { find } from '@lumino/algorithm'; -import { Widget } from '@lumino/widgets'; -import { Signal } from '@lumino/signaling'; - -import { getCellIndex } from './utils'; - -/** - * Gets the editor instance used by a document widget. Returns `null` if unable. - */ -export function getEditor( - widget: Widget | null -): CodeMirrorEditor | null | undefined { - if (!(widget instanceof DocumentWidget)) { - return null; - } - - let editor: CodeEditor.IEditor | null | undefined; - const { content } = widget; - - if (content instanceof FileEditor) { - editor = content.editor; - } else if (content instanceof Notebook) { - editor = content.activeCell?.editor; - } - - if (!(editor instanceof CodeMirrorEditor)) { - return undefined; - } - - return editor; -} - -/** - * Gets a Selection object from a document widget. Returns `null` if unable. 
- */
-function getTextSelection(widget: Widget | null): Selection | null {
-  const editor = getEditor(widget);
-  // widget type check is redundant but hints the type to TypeScript
-  if (!editor || !(widget instanceof DocumentWidget)) {
-    return null;
-  }
-
-  let cellId: string | undefined = undefined;
-  if (widget.content instanceof Notebook) {
-    cellId = widget.content.activeCell?.model.id;
-  }
-
-  const selectionObj = editor.getSelection();
-  let { start, end } = selectionObj;
-  const startOffset = editor.getOffsetAt(start);
-  const endOffset = editor.getOffsetAt(end);
-  const text = editor.model.sharedModel
-    .getSource()
-    .substring(startOffset, endOffset);
-
-  // Do not return a Selection object if no text is selected
-  if (!text) {
-    return null;
-  }
-
-  // ensure start <= end
-  // required for editor.model.sharedModel.updateSource()
-  if (startOffset > endOffset) {
-    [start, end] = [end, start];
-  }
-
-  return {
-    ...selectionObj,
-    start,
-    end,
-    text,
-    numLines: text.split('\n').length,
-    widgetId: widget.id,
-    ...(cellId && {
-      cellId
-    })
-  };
-}
-
-export type Selection = CodeEditor.ITextSelection & {
-  /**
-   * The text within the selection as a string.
-   */
-  text: string;
-  /**
-   * Number of lines contained by the text selection.
-   */
-  numLines: number;
-  /**
-   * The ID of the document widget in which the selection was made.
-   */
-  widgetId: string;
-  /**
-   * The ID of the cell in which the selection was made, if the original widget
-   * was a notebook.
-   */
-  cellId?: string;
-};
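// Illustrative sketch (not part of the patch): v2 consumers observed selection
// updates via the `selectionChanged` signal on the class below; `watcher` and
// the callback body are hypothetical.
//
//   const watcher = new SelectionWatcher(app.shell);
//   watcher.selectionChanged.connect((_, selection) => {
//     console.log(`${selection?.numLines ?? 0} lines selected`);
//   });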
-
-export class SelectionWatcher {
-  constructor(shell: JupyterFrontEnd.IShell) {
-    this._shell = shell;
-    this._shell.currentChanged?.connect((sender, args) => {
-      this._mainAreaWidget = args.newValue;
-    });
-
-    setInterval(this._poll.bind(this), 200);
-  }
-
-  get selection(): Selection | null {
-    return this._selection;
-  }
-
-  get selectionChanged(): Signal<this, Selection | null> {
-    return this._selectionChanged;
-  }
-
-  replaceSelection(selection: Selection): void {
-    // unfortunately shell.currentWidget doesn't update synchronously after
-    // shell.activateById(), which is why we have to get a reference to the
-    // widget manually.
-    const widget = find(
-      this._shell.widgets(),
-      widget => widget.id === selection.widgetId
-    );
-    if (!(widget instanceof DocumentWidget)) {
-      return;
-    }
-
-    // activate the widget if not already active
-    this._shell.activateById(selection.widgetId);
-
-    // activate notebook cell if specified
-    if (widget.content instanceof Notebook && selection.cellId) {
-      const cellIndex = getCellIndex(widget.content, selection.cellId);
-      if (cellIndex !== -1) {
-        widget.content.activeCellIndex = cellIndex;
-      }
-    }
-
-    // get editor instance
-    const editor = getEditor(widget);
-    if (!editor) {
-      return;
-    }
-
-    editor.model.sharedModel.updateSource(
-      editor.getOffsetAt(selection.start),
-      editor.getOffsetAt(selection.end),
-      selection.text
-    );
-    const newPosition = editor.getPositionAt(
-      editor.getOffsetAt(selection.start) + selection.text.length
-    );
-    editor.setSelection({ start: newPosition, end: newPosition });
-  }
-
-  protected _poll(): void {
-    const prevSelection = this._selection;
-    const currSelection = getTextSelection(this._mainAreaWidget);
-
-    if (prevSelection?.text === currSelection?.text) {
-      return;
-    }
-
-    this._selection = currSelection;
-    this._selectionChanged.emit(currSelection);
-  }
-
-  protected _shell: JupyterFrontEnd.IShell;
-  protected _mainAreaWidget: Widget | null = null;
-  protected _selection: Selection | null = null;
-  protected _selectionChanged = new Signal<this, Selection | null>(this);
-}
diff --git a/packages/jupyter-ai/src/slash-autocompletion.tsx b/packages/jupyter-ai/src/slash-autocompletion.tsx
new file mode 100644
index 000000000..50aad1a0c
--- /dev/null
+++ b/packages/jupyter-ai/src/slash-autocompletion.tsx
@@ -0,0 +1,93 @@
+import {
+  AutocompleteCommand,
+  IAutocompletionCommandsProps
+} from '@jupyter/chat';
+import {
+  Download,
+  FindInPage,
+  Help,
+  MoreHoriz,
+  MenuBook,
+  School,
+  HideSource,
+  AutoFixNormal
+} from '@mui/icons-material';
+import { Box, Typography } from '@mui/material';
+import React from 'react';
+import { AiService } from './handler';
+
+type SlashCommandOption = AutocompleteCommand & {
+  id: string;
+  description: string;
+};
+
+/**
+ * List of icons per slash command, shown in the autocomplete popup.
+ *
+ * This list of icons should eventually be made configurable. However, it is
+ * unclear whether custom icons should be defined within a Lumino plugin (in the
+ * frontend) or served from a static server route (in the backend).
+ */
+const DEFAULT_SLASH_COMMAND_ICONS: Record<string, JSX.Element> = {
+  ask: <FindInPage />,
+  clear: <HideSource />,
+  export: <Download />,
+  fix: <AutoFixNormal />,
+  generate: <MenuBook />,
+  help: <Help />,
+  learn: <School />,
+  unknown: <MoreHoriz />
+};
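+// Note: the options rendered below are produced by `commands()` further down,
+// e.g. { id: 'ask', label: '/ask ', description: '...' }, where the
+// description text comes from the server's list of slash commands.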
+
+/**
+ * Renders an option shown in the slash command autocomplete.
+ */
+function renderSlashCommandOption(
+  optionProps: React.HTMLAttributes<HTMLLIElement>,
+  option: SlashCommandOption
+): JSX.Element {
+  const icon =
+    option.id in DEFAULT_SLASH_COMMAND_ICONS
+      ? DEFAULT_SLASH_COMMAND_ICONS[option.id]
+      : DEFAULT_SLASH_COMMAND_ICONS.unknown;
+
+  return (
+    <li {...optionProps}>
+      {icon}
+      <Box sx={{ marginLeft: 2 }}>
+        <Typography component="span">{option.label}</Typography>
+        <Typography component="span" sx={{ opacity: 0.618 }}>
+          {' — ' + option.description}
+        </Typography>
+      </Box>
+    </li>
+  );
+}
+
+/**
+ * The autocompletion command properties to add to the registry.
+ */
+export const autocompletion: IAutocompletionCommandsProps = {
+  opener: '/',
+  commands: async () => {
+    const slashCommands = (await AiService.listSlashCommands()).slash_commands;
+    return slashCommands.map(slashCommand => ({
+      id: slashCommand.slash_id,
+      label: '/' + slashCommand.slash_id + ' ',
+      description: slashCommand.description
+    }));
+  },
+  props: {
+    renderOption: renderSlashCommandOption
+  }
+};
diff --git a/packages/jupyter-ai/src/tokens.ts b/packages/jupyter-ai/src/tokens.ts
index 1b1c2eb11..57b7d9a6c 100644
--- a/packages/jupyter-ai/src/tokens.ts
+++ b/packages/jupyter-ai/src/tokens.ts
@@ -1,12 +1,6 @@
-import React from 'react';
 import { Token } from '@lumino/coreutils';
 import { ISignal } from '@lumino/signaling';
-import type { IRankedMenu, ReactWidget } from '@jupyterlab/ui-components';
-
-import { AiService } from './handler';
-import { ChatHandler } from './chat_handler';
-import { ActiveCellManager } from './contexts/active-cell-context';
-import { SelectionWatcher } from './selection-watcher';
+import type { IRankedMenu } from '@jupyterlab/ui-components';
 
 export interface IJaiStatusItem {
   addItem(item: IRankedMenu.IItemOptions): void;
@@ -33,41 +27,6 @@ export const IJaiCompletionProvider = new Token<IJaiCompletionProvider>(
   'The jupyter-ai inline completion provider API'
 );
 
-export type IJaiMessageFooterProps = {
-  message: AiService.ChatMessage;
-};
-
-export interface IJaiMessageFooter {
-  component: React.FC<IJaiMessageFooterProps>;
-}
-
-/**
- * The message footer provider token. Another extension should provide this
- * token to add a footer to each message.
- */
-export const IJaiMessageFooter = new Token<IJaiMessageFooter>(
-  'jupyter_ai:IJaiMessageFooter',
-  'Optional component that is used to render a footer on each Jupyter AI chat message, when provided.'
-);
-
-export interface IJaiCore {
-  chatWidget: ReactWidget;
-  chatHandler: ChatHandler;
-  activeCellManager: ActiveCellManager;
-  selectionWatcher: SelectionWatcher;
-}
-
-/**
- * The Jupyter AI core provider token. Frontend plugins that want to extend the
- * Jupyter AI frontend by adding features which send messages or observe the
- * current text selection & active cell should require this plugin.
- */
-export const IJaiCore = new Token<IJaiCore>(
-  'jupyter_ai:core',
-  'The core implementation of the frontend.'
-);
-
 /**
  * An object that describes an interaction event from the user.
  *
@@ -94,7 +53,7 @@ export type TelemetryEvent = {
     /**
      * Type of the message.
      */
-    type: AiService.ChatMessage['type'];
+    type: 'human' | 'agent';
     /**
      * UNIX timestamp of the message.
      */
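// Illustrative sketch (not part of the patch): with the narrowed type above, a
// telemetry handler can discriminate on the two remaining message types; the
// handler name and body are hypothetical.
//
//   function handleTelemetry(event: TelemetryEvent): void {
//     if (event.message.type === 'human') {
//       // record a user-authored message
//     }
//   }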
diff --git a/packages/jupyter-ai/src/utils.ts b/packages/jupyter-ai/src/utils.ts
index e21b47c6b..8790a90d6 100644
--- a/packages/jupyter-ai/src/utils.ts
+++ b/packages/jupyter-ai/src/utils.ts
@@ -1,39 +1,34 @@
 /**
  * Contains various utility functions shared throughout the project.
  */
-import { Notebook } from '@jupyterlab/notebook';
-import { FileEditor } from '@jupyterlab/fileeditor';
 import { CodeEditor } from '@jupyterlab/codeeditor';
+import { CodeMirrorEditor } from '@jupyterlab/codemirror';
+import { DocumentWidget } from '@jupyterlab/docregistry';
+import { FileEditor } from '@jupyterlab/fileeditor';
+import { Notebook } from '@jupyterlab/notebook';
 import { Widget } from '@lumino/widgets';
 
 /**
- * Get text selection from an editor widget (DocumentWidget#content).
+ * Gets the editor instance used by a document widget. Returns `null` if unable.
  */
-export function getTextSelection(widget: Widget): string {
-  const editor = getEditor(widget);
-  if (!editor) {
-    return '';
+export function getEditor(
+  widget: Widget | null
+): CodeMirrorEditor | null | undefined {
+  if (!(widget instanceof DocumentWidget)) {
+    return null;
   }
 
-  const selectionObj = editor.getSelection();
-  const start = editor.getOffsetAt(selectionObj.start);
-  const end = editor.getOffsetAt(selectionObj.end);
-  const text = editor.model.sharedModel.getSource().substring(start, end);
+  let editor: CodeEditor.IEditor | null | undefined;
+  const { content } = widget;
 
-  return text;
-}
+  if (content instanceof FileEditor) {
+    editor = content.editor;
+  } else if (content instanceof Notebook) {
+    editor = content.activeCell?.editor;
+  }
 
-/**
- * Get editor instance from an editor widget (i.e. `DocumentWidget#content`).
- */
-export function getEditor(
-  widget: Widget
-): CodeEditor.IEditor | null | undefined {
-  let editor: CodeEditor.IEditor | null | undefined;
-  if (widget instanceof FileEditor) {
-    editor = widget.editor;
-  } else if (widget instanceof Notebook) {
-    editor = widget.activeCell?.editor;
+  if (!(editor instanceof CodeMirrorEditor)) {
+    return undefined;
   }
 
   return editor;
diff --git a/packages/jupyter-ai/src/widgets/chat-sidebar.tsx b/packages/jupyter-ai/src/widgets/chat-sidebar.tsx
deleted file mode 100644
index 732eedd3c..000000000
--- a/packages/jupyter-ai/src/widgets/chat-sidebar.tsx
+++ /dev/null
@@ -1,54 +0,0 @@
-import React from 'react';
-import { ISignal } from '@lumino/signaling';
-import { ReactWidget } from '@jupyterlab/apputils';
-import type { IThemeManager } from '@jupyterlab/apputils';
-import type { User } from '@jupyterlab/services';
-import type { Awareness } from 'y-protocols/awareness';
-
-import { Chat } from '../components/chat';
-import { chatIcon } from '../icons';
-import { SelectionWatcher } from '../selection-watcher';
-import { ChatHandler } from '../chat_handler';
-import {
-  IJaiCompletionProvider,
-  IJaiMessageFooter,
-  IJaiTelemetryHandler
-} from '../tokens';
-import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
-import type { ActiveCellManager } from '../contexts/active-cell-context';
-
-export function buildChatSidebar(
-  selectionWatcher: SelectionWatcher,
-  chatHandler: ChatHandler,
-  globalAwareness: Awareness | null,
-  themeManager: IThemeManager | null,
-  rmRegistry: IRenderMimeRegistry,
-  completionProvider: IJaiCompletionProvider | null,
-  openInlineCompleterSettings: () => void,
-  activeCellManager: ActiveCellManager,
-  focusInputSignal: ISignal<unknown, void>,
-  messageFooter: IJaiMessageFooter | null,
-  telemetryHandler: IJaiTelemetryHandler | null,
-  userManager: User.IManager
-): ReactWidget {
-  const ChatWidget = ReactWidget.create(
-    <Chat
-      selectionWatcher={selectionWatcher}
-      chatHandler={chatHandler}
-      globalAwareness={globalAwareness}
-      themeManager={themeManager}
-      rmRegistry={rmRegistry}
-      completionProvider={completionProvider}
-      openInlineCompleterSettings={openInlineCompleterSettings}
-      activeCellManager={activeCellManager}
-      focusInputSignal={focusInputSignal}
-      messageFooter={messageFooter}
-      telemetryHandler={telemetryHandler}
-      userManager={userManager}
-    />
-  );
-  ChatWidget.id = 'jupyter-ai::chat';
-  ChatWidget.title.icon = chatIcon;
-  ChatWidget.title.caption = 'Jupyter AI Chat'; // TODO: i18n
-  return ChatWidget;
-}
diff --git a/packages/jupyter-ai/src/widgets/settings-widget.tsx b/packages/jupyter-ai/src/widgets/settings-widget.tsx
new file mode 100644
index 000000000..75436f3c5
--- /dev/null
+++ b/packages/jupyter-ai/src/widgets/settings-widget.tsx
@@ -0,0 +1,26 @@
+import React from 'react';
+import { ReactWidget } from '@jupyterlab/apputils';
+import { settingsIcon } from '@jupyterlab/ui-components';
+
+import { IJaiCompletionProvider } from '../tokens';
+import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
+import { ChatSettings } from '../components/chat-settings';
+
+export function buildAiSettings(
+  rmRegistry:
IRenderMimeRegistry, + completionProvider: IJaiCompletionProvider | null, + openInlineCompleterSettings: () => void +): ReactWidget { + const SettingsWidget = ReactWidget.create( + + ); + SettingsWidget.id = 'jupyter-ai::settings'; + SettingsWidget.title.icon = settingsIcon; + SettingsWidget.title.caption = 'Jupyter AI Settings'; // TODO: i18n + return SettingsWidget; +} diff --git a/packages/jupyter-ai/ui-tests/tests/jupyter-ai.spec.ts b/packages/jupyter-ai/ui-tests/tests/jupyter-ai.spec.ts index 3b6c53b70..d2064d9de 100644 --- a/packages/jupyter-ai/ui-tests/tests/jupyter-ai.spec.ts +++ b/packages/jupyter-ai/ui-tests/tests/jupyter-ai.spec.ts @@ -1,5 +1,4 @@ -import { expect, test } from '@jupyterlab/galata'; -import { AIHelper } from './helpers/AIHelper'; +import { test } from '@jupyterlab/galata'; enum FILENAMES { SIDEBAR = 'sidebar.png', @@ -13,17 +12,7 @@ enum FILENAMES { test.use({ autoGoto: false }); test.describe('Jupyter AI', () => { - let ai: AIHelper; - test.beforeEach(async ({ page }) => { - ai = new AIHelper(page); - await page.goto(); - }); - - test('shows sidebar chat icon', async () => { - await ai.assertSnapshot(FILENAMES.SIDEBAR, { locator: ai.sidebar }); - }); - - test('shows chat welcome message', async () => { - await ai.assertSnapshot(FILENAMES.CHAT_WELCOME_MESSAGE); + test('Should be tested', () => { + // no-op }); }); diff --git a/packages/jupyter-ai/ui-tests/tests/jupyter-ai.spec.ts-snapshots/sidebar-linux.png b/packages/jupyter-ai/ui-tests/tests/jupyter-ai.spec.ts-snapshots/sidebar-linux.png index 7b310395501cc8eaadefc72ef2f078b896f92f74..acfd60c1d929f7b67ab8e89506ed883860a3426a 100644 GIT binary patch delta 881 zcmX>m_?K;hWIY2A_@8Py2Ba8^gWR1M)}51i$-uyD?&;zfQgQ3;jobdhjv{OY|E{iI z+UVr)l+$^Si+O{1$HykakaxS1nglqS#BYU&6n1EEwF)@wEYb8henh>K|Hp;A?VGJ{ z_p&4@+^>A~box!>^s+Rg;4%S`R1pz(rA^7rUOj(HI`7wq7P!8Z4~^*1efDUr@3zp5 ztp}NZ2islxuXTy_qxSU7&GphhC2Y{}pd})8o)qT}p+?n0xe{OyBk$7HmlVT$=9f{rt$Z`5xshMW+*XST7d6oIZEyro1raWzI)eXZ~E{Ub8+#{LbcS z(MRrNe-fU4LyNs(hCsa&$3;GdtJSL{*cm$Rh%++G2y)#WK4aC6r=q#5c03j>554$Q z)OJ-zwQG>*Z9#??=f7@~XINytuB&vH_O}zUm%mDh&hf5x%UifhVov0>o_|GinO|N1 zk{_V+pJ~eXcR%*FN+-*jNAG$hmafXmdi16E%7^Fc9!0<3`c8BwOG1$TrTVQ5tIXOM zj@T&uUSOJus!L?9lVxX+3@7S%!vFp`kWx4bMM^ALfYtb%UMZMX>AqL<#@nhZ!0!Tv(Dh zEA38nf5?q{4QaKj?uFb~r1y*C`GxTMc~`q+FLQS18?gf2mw5Es^_+$sO%Fp4cYZwF z@nE^Z<^K~kK{B2K-7^_3Hh64W@`Gu?)+HUxE53^^?rjU?UvS8v=l5zU#VOT$(t15K z7X6#iGUfAh*}%jtXS1qicWQ+1jr6sh(tUEmQ&IgXA@6q1pJWu>^>3YU_B@}*Km9`L ze|%HrW0)$xq;zqLt(=$Ub%qCf+;l|OP3X?+id~+ec%*Ae5SLW&EyQ5w7_D5EX+O+xnJOhaV|%Wr}us z9I;Eid+%ze#i93e^j>}T@OU1!DfarY(|7I#b7c8T?+!ZEmNZer_SfH8W-%Xi0!}=1 vP%O82uA!l^L%2x&iQfrJcSlk{;jE+aOq|n-UVMu`0m@yTu6{1-oD!M|004~6p7B4gsZapWexj$VIi17%m9giINb z460rIdnQ_<$RYP!llB{TgoTw2q5VZ(^64gqGwY69|CU$b59}>`u@@UnudlB++ZBGi ze;NP@GJH=#8$ls@b~+!G>W@CxUS@z5Yog$djg7ec{QRe~4-Q&-7ZvS^ZA(Xwz2fnN zi*5*n9^mlR!2KcV+qZA;e*5ilrqLhfum5>gSm3M?5X(FRDGFD)<~oAEd)PR9j^4Zp=e!6OEc(>Wq%K8$V}jJxdIul zAKp*%%r`MJGs9cY@cC3aoqoEXUA%WRdHPO3V4$ER!*_gS1h{$gX2KWU?d>D`d+LwA z$V4UN+ z-6jO;=5~0N9`oZ{8PBZ(xH>lG>+0r)^gu<0hhG(oajOf=nG}|?<`$Fb?wP-)6B84o zU(%^TM6mICroFSXula-f-QCkW%Xc#oKYo4B>NutOrFoLh@G#Ifrz#wFWTsx&veiwtyYGy2wU~KDBDL zvzr;$afQ#ektSW?Q5RW|_aObl&HrwD?ipjTSn$O<(`MnjkJ$qQmAw88TT_EkSx#$9 zi&hZDavn6MgeiB{CBJNXS5;NP4XYVCa3pe2oNNyhnIx%mvT}1##1aW!zKA03Za;gCdzt8NFKEs~=B!$L5*)0x8%4}h3IbX=$E!~`Igy#^ zdnRlai`5AKTwR_w|K|Q94o4G7wyJMha&1}??Se~5k;-*rPS(}cS#+~P(*iKx&HSbA z?(eJbzcbT^Dzh*sCr_(HVsV0o>y7}TYD2%PtBdltymj%@fuM~Wfr_sw+1c51{TVb- zML}WVvzZxemf4vgz0bqL;|d!QD$lvLV@*~q5HR@1uIT=y%>w+MDqBO;V`U(XlGyOd 
z-^Q3srSd4jRn(M=$DAZO5?_R6V03i!0s%~(eG!8$2@%&m8`qcs2UZMWDx#N0!iaoJ zD4C|&-9GJ?>3h=IxwW^~Sd-{LaP^%DyV_UM`jnIu@BDms4-XGA=$ZZM`L!I(5b=m+ z{R7t(#KuT)@$rM!UMz9hm1Ce0skGg8yy6mRGr25dVIY2Ee42*7#HO@O!g?LlB!Yyy zJXUV_NC=_baP49+UT!a_+Mrj_4UT(z>VDZkC_nu+#@^+x#kqQa;^d$E{Qv3y%(~~= aO6$lTyx6iPcTV#$0nP@5`O|&lO8*7nyXA}k diff --git a/yarn.lock b/yarn.lock index 76ddf97ca..f98994fe2 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1615,14 +1615,14 @@ __metadata: languageName: node linkType: hard -"@codemirror/state@npm:^6.0.0, @codemirror/state@npm:^6.2.0, @codemirror/state@npm:^6.4.0, @codemirror/state@npm:^6.4.1": +"@codemirror/state@npm:^6.0.0, @codemirror/state@npm:^6.4.0, @codemirror/state@npm:^6.4.1": version: 6.4.1 resolution: "@codemirror/state@npm:6.4.1" checksum: b81b55574091349eed4d32fc0eadb0c9688f1f7c98b681318f59138ee0f527cb4c4a97831b70547c0640f02f3127647838ae6730782de4a3dd2cc58836125d01 languageName: node linkType: hard -"@codemirror/view@npm:^6.0.0, @codemirror/view@npm:^6.17.0, @codemirror/view@npm:^6.23.0, @codemirror/view@npm:^6.26.0, @codemirror/view@npm:^6.27.0, @codemirror/view@npm:^6.7.0": +"@codemirror/view@npm:^6.0.0, @codemirror/view@npm:^6.17.0, @codemirror/view@npm:^6.23.0, @codemirror/view@npm:^6.26.0, @codemirror/view@npm:^6.27.0": version: 6.28.0 resolution: "@codemirror/view@npm:6.28.0" dependencies: @@ -2220,11 +2220,10 @@ __metadata: "@babel/preset-env": ^7.0.0 "@emotion/react": ^11.10.5 "@emotion/styled": ^11.10.5 - "@jupyter/collaboration": ^1 + "@jupyter/chat": ^0.7.1 "@jupyterlab/application": ^4.2.0 "@jupyterlab/apputils": ^4.2.0 "@jupyterlab/builder": ^4.2.0 - "@jupyterlab/cells": ^4.2.0 "@jupyterlab/codeeditor": ^4.2.0 "@jupyterlab/codemirror": ^4.2.0 "@jupyterlab/completer": ^4.2.0 @@ -2285,45 +2284,33 @@ __metadata: languageName: unknown linkType: soft -"@jupyter/collaboration@npm:^1": - version: 1.2.1 - resolution: "@jupyter/collaboration@npm:1.2.1" - dependencies: - "@codemirror/state": ^6.2.0 - "@codemirror/view": ^6.7.0 - "@jupyter/docprovider": ^1.2.1 - "@jupyterlab/apputils": ^4.0.0 - "@jupyterlab/coreutils": ^6.0.0 - "@jupyterlab/services": ^7.0.0 - "@jupyterlab/ui-components": ^4.0.0 - "@lumino/coreutils": ^2.1.0 - "@lumino/virtualdom": ^2.0.0 - "@lumino/widgets": ^2.1.0 +"@jupyter/chat@npm:^0.7.1": + version: 0.7.1 + resolution: "@jupyter/chat@npm:0.7.1" + dependencies: + "@emotion/react": ^11.10.5 + "@emotion/styled": ^11.10.5 + "@jupyter/react-components": ^0.15.2 + "@jupyterlab/application": ^4.2.0 + "@jupyterlab/apputils": ^4.3.0 + "@jupyterlab/fileeditor": ^4.2.0 + "@jupyterlab/notebook": ^4.2.0 + "@jupyterlab/rendermime": ^4.2.0 + "@jupyterlab/ui-components": ^4.2.0 + "@lumino/commands": ^2.0.0 + "@lumino/coreutils": ^2.0.0 + "@lumino/disposable": ^2.0.0 + "@lumino/signaling": ^2.0.0 + "@mui/icons-material": ^5.11.0 + "@mui/material": ^5.11.0 + clsx: ^2.1.0 react: ^18.2.0 - y-protocols: ^1.0.5 - yjs: ^13.5.40 - checksum: f38885112c337415df963782653866a058b68d1509faed9ca092c3d729cd27cdecb3e36365e49b78135d626a4d76c7c5479601a997f76142d61e7edf41015fc3 - languageName: node - linkType: hard - -"@jupyter/docprovider@npm:^1.2.1": - version: 1.2.1 - resolution: "@jupyter/docprovider@npm:1.2.1" - dependencies: - "@jupyter/ydoc": ^1.0.2 - "@jupyterlab/coreutils": ^6.0.0 - "@jupyterlab/services": ^7.0.0 - "@lumino/coreutils": ^2.1.0 - "@lumino/disposable": ^2.1.0 - "@lumino/signaling": ^2.1.0 - y-protocols: ^1.0.5 - y-websocket: ^1.3.15 - yjs: ^13.5.40 - checksum: 
5a8ae37ec44f39754e30f0d7d697a0147bc15b2e9fe37aa98770971dfad77724cf83177220841e4a1ad21a2f5b021fc21bac95499e6476b281ec9491fd3a89b1 + react-dom: ^18.2.0 + checksum: cf92a3412a49619809a94fb27ec849bb399a5e53da5b36bb57509f5e1f84607a5588ca34624a47101baf09bccd9c8bb096ed8d1f6e0189b7ca33065a506d5d52 languageName: node linkType: hard -"@jupyter/react-components@npm:^0.15.3": +"@jupyter/react-components@npm:^0.15.2, @jupyter/react-components@npm:^0.15.3": version: 0.15.3 resolution: "@jupyter/react-components@npm:0.15.3" dependencies: @@ -2346,20 +2333,6 @@ __metadata: languageName: node linkType: hard -"@jupyter/ydoc@npm:^1.0.2": - version: 1.1.1 - resolution: "@jupyter/ydoc@npm:1.1.1" - dependencies: - "@jupyterlab/nbformat": ^3.0.0 || ^4.0.0-alpha.21 || ^4.0.0 - "@lumino/coreutils": ^1.11.0 || ^2.0.0 - "@lumino/disposable": ^1.10.0 || ^2.0.0 - "@lumino/signaling": ^1.10.0 || ^2.0.0 - y-protocols: ^1.0.5 - yjs: ^13.5.40 - checksum: a239b1dd57cfc9ba36c06ac5032a1b6388849ae01a1d0db0d45094f71fdadf4d473b4bf8becbef0cfcdc85cae505361fbec0822b02da5aa48e06b66f742dd7a0 - languageName: node - linkType: hard - "@jupyter/ydoc@npm:^2.0.1": version: 2.0.1 resolution: "@jupyter/ydoc@npm:2.0.1" @@ -2402,19 +2375,19 @@ __metadata: languageName: node linkType: hard -"@jupyterlab/apputils@npm:^4.0.0, @jupyterlab/apputils@npm:^4.2.0, @jupyterlab/apputils@npm:^4.3.2": - version: 4.3.2 - resolution: "@jupyterlab/apputils@npm:4.3.2" +"@jupyterlab/apputils@npm:^4.2.0, @jupyterlab/apputils@npm:^4.3.0, @jupyterlab/apputils@npm:^4.3.2": + version: 4.3.5 + resolution: "@jupyterlab/apputils@npm:4.3.5" dependencies: - "@jupyterlab/coreutils": ^6.2.2 - "@jupyterlab/observables": ^5.2.2 - "@jupyterlab/rendermime-interfaces": ^3.10.2 - "@jupyterlab/services": ^7.2.2 - "@jupyterlab/settingregistry": ^4.2.2 - "@jupyterlab/statedb": ^4.2.2 - "@jupyterlab/statusbar": ^4.2.2 - "@jupyterlab/translation": ^4.2.2 - "@jupyterlab/ui-components": ^4.2.2 + "@jupyterlab/coreutils": ^6.2.5 + "@jupyterlab/observables": ^5.2.5 + "@jupyterlab/rendermime-interfaces": ^3.10.5 + "@jupyterlab/services": ^7.2.5 + "@jupyterlab/settingregistry": ^4.2.5 + "@jupyterlab/statedb": ^4.2.5 + "@jupyterlab/statusbar": ^4.2.5 + "@jupyterlab/translation": ^4.2.5 + "@jupyterlab/ui-components": ^4.2.5 "@lumino/algorithm": ^2.0.1 "@lumino/commands": ^2.3.0 "@lumino/coreutils": ^2.1.2 @@ -2427,7 +2400,7 @@ __metadata: "@types/react": ^18.0.26 react: ^18.2.0 sanitize-html: ~2.12.1 - checksum: 4a49f2b56abc80ab1ca144d39901da5250e7394ace3ceb2e14cba9cc638c6ea720a3f8a3a90cd1f878c34d91b1ce8fe63206d2c314d048b3d83ade0e2e787c89 + checksum: a2307657bfab1aff687eccfdb7a2c378a40989beea618ad6e5a811dbd250753588ea704a11250ddef42a551c8360717c1fe4c8827c5e2c3bfff1e84fc7fdc836 languageName: node linkType: hard @@ -2486,7 +2459,7 @@ __metadata: languageName: node linkType: hard -"@jupyterlab/cells@npm:^4.2.0, @jupyterlab/cells@npm:^4.2.2": +"@jupyterlab/cells@npm:^4.2.2": version: 4.2.2 resolution: "@jupyterlab/cells@npm:4.2.2" dependencies: @@ -2616,9 +2589,9 @@ __metadata: languageName: node linkType: hard -"@jupyterlab/coreutils@npm:^6.0.0, @jupyterlab/coreutils@npm:^6.2.0, @jupyterlab/coreutils@npm:^6.2.2": - version: 6.2.2 - resolution: "@jupyterlab/coreutils@npm:6.2.2" +"@jupyterlab/coreutils@npm:^6.2.0, @jupyterlab/coreutils@npm:^6.2.2, @jupyterlab/coreutils@npm:^6.2.5": + version: 6.2.5 + resolution: "@jupyterlab/coreutils@npm:6.2.5" dependencies: "@lumino/coreutils": ^2.1.2 "@lumino/disposable": ^2.1.2 @@ -2626,7 +2599,7 @@ __metadata: minimist: ~1.2.0 path-browserify: ^1.0.0 
url-parse: ~1.5.4 - checksum: cea1ec210ce60b32ccd213a75e10d85aed149437817e81ea89230552b33cec4be61472880669035228a156b89dcf99dccac3fe2e19191f8690d8870a732fa30b + checksum: 3b6a10b117ee82a437b6535801fe012bb5af7769a850be95c8ffa666ee2d6f7c29041ba546c9cfca0ab32b65f91c661570541f4f785f48af9022d08407c0a3e5 languageName: node linkType: hard @@ -2777,12 +2750,12 @@ __metadata: languageName: node linkType: hard -"@jupyterlab/nbformat@npm:^3.0.0 || ^4.0.0-alpha.21 || ^4.0.0, @jupyterlab/nbformat@npm:^4.2.2": - version: 4.2.2 - resolution: "@jupyterlab/nbformat@npm:4.2.2" +"@jupyterlab/nbformat@npm:^3.0.0 || ^4.0.0-alpha.21 || ^4.0.0, @jupyterlab/nbformat@npm:^4.2.2, @jupyterlab/nbformat@npm:^4.2.5": + version: 4.2.5 + resolution: "@jupyterlab/nbformat@npm:4.2.5" dependencies: "@lumino/coreutils": ^2.1.2 - checksum: a60774bcf3e9735bc80dc411b4b79ad2da0dd4df596fef0a74537bfbfb8b168b70b34619638d0abaca6243ac337520275002a27dc13d6951efd681527643d25b + checksum: b3ad2026969bfa59f8cfb7b1a991419f96f7e6dc8c4acf4ac166c210d7ab99631350c785e9b04350095488965d2824492c8adbff24a2e26db615457545426b3c languageName: node linkType: hard @@ -2824,16 +2797,16 @@ __metadata: languageName: node linkType: hard -"@jupyterlab/observables@npm:^5.2.2": - version: 5.2.2 - resolution: "@jupyterlab/observables@npm:5.2.2" +"@jupyterlab/observables@npm:^5.2.2, @jupyterlab/observables@npm:^5.2.5": + version: 5.2.5 + resolution: "@jupyterlab/observables@npm:5.2.5" dependencies: "@lumino/algorithm": ^2.0.1 "@lumino/coreutils": ^2.1.2 "@lumino/disposable": ^2.1.2 "@lumino/messaging": ^2.0.1 "@lumino/signaling": ^2.1.2 - checksum: 916363cb75bd58f109d81ba84649379a848c23b8ced30f9283108fb4133bd5d4f62ebdf9648f053df744701193d4fadbae4491561dd02d14157bf23a0b813dda + checksum: 21fd2828463c08a770714692ff44aeca500f8ea8f3a743ad203a61fbf04cfa81921a47b432d8e65f4935fb45c08fce2b8858cb7e2198cc9bf0fa51f482ec37bd languageName: node linkType: hard @@ -2859,13 +2832,13 @@ __metadata: languageName: node linkType: hard -"@jupyterlab/rendermime-interfaces@npm:^3.10.2": - version: 3.10.2 - resolution: "@jupyterlab/rendermime-interfaces@npm:3.10.2" +"@jupyterlab/rendermime-interfaces@npm:^3.10.2, @jupyterlab/rendermime-interfaces@npm:^3.10.5": + version: 3.10.5 + resolution: "@jupyterlab/rendermime-interfaces@npm:3.10.5" dependencies: "@lumino/coreutils": ^1.11.0 || ^2.1.2 "@lumino/widgets": ^1.37.2 || ^2.3.2 - checksum: 4ace6cda40bc3cdd59e36afb8dce6f4448f974a8214086d2541860b0e5c0de95fe22969fa4f5537e6e7fa06c00543655feaf77825dbb57da0147c38c51686707 + checksum: acfb10315a3ed4d0b0ef664437b33f8938968c61993351fd4067b0eaf6cb6ccd4c5caf50ae050d184a34b35b88d844eee6689d00244e54a02b228c02eab544b4 languageName: node linkType: hard @@ -2889,31 +2862,31 @@ __metadata: languageName: node linkType: hard -"@jupyterlab/services@npm:^7.0.0, @jupyterlab/services@npm:^7.2.0, @jupyterlab/services@npm:^7.2.2": - version: 7.2.2 - resolution: "@jupyterlab/services@npm:7.2.2" +"@jupyterlab/services@npm:^7.2.0, @jupyterlab/services@npm:^7.2.2, @jupyterlab/services@npm:^7.2.5": + version: 7.2.5 + resolution: "@jupyterlab/services@npm:7.2.5" dependencies: "@jupyter/ydoc": ^2.0.1 - "@jupyterlab/coreutils": ^6.2.2 - "@jupyterlab/nbformat": ^4.2.2 - "@jupyterlab/settingregistry": ^4.2.2 - "@jupyterlab/statedb": ^4.2.2 + "@jupyterlab/coreutils": ^6.2.5 + "@jupyterlab/nbformat": ^4.2.5 + "@jupyterlab/settingregistry": ^4.2.5 + "@jupyterlab/statedb": ^4.2.5 "@lumino/coreutils": ^2.1.2 "@lumino/disposable": ^2.1.2 "@lumino/polling": ^2.1.2 "@lumino/properties": ^2.0.1 "@lumino/signaling": ^2.1.2 ws: 
^8.11.0 - checksum: dace4f2838cefb129c63cc2b20b35ce2b593e9da4db51dea2963c1109c7f9867faf0a7f428bfd53889f8560953924bf51b355f555ce4fd756b358cfaf8f145c7 + checksum: 72d7578a86af1277b574095423fafb4176bc66373662fdc0e243a7d20e4baf8f291377b6c80300841dba6486767f16664f0e893174c2761658aedb74024e1db6 languageName: node linkType: hard -"@jupyterlab/settingregistry@npm:^4.2.0, @jupyterlab/settingregistry@npm:^4.2.2": - version: 4.2.2 - resolution: "@jupyterlab/settingregistry@npm:4.2.2" +"@jupyterlab/settingregistry@npm:^4.2.0, @jupyterlab/settingregistry@npm:^4.2.2, @jupyterlab/settingregistry@npm:^4.2.5": + version: 4.2.5 + resolution: "@jupyterlab/settingregistry@npm:4.2.5" dependencies: - "@jupyterlab/nbformat": ^4.2.2 - "@jupyterlab/statedb": ^4.2.2 + "@jupyterlab/nbformat": ^4.2.5 + "@jupyterlab/statedb": ^4.2.5 "@lumino/commands": ^2.3.0 "@lumino/coreutils": ^2.1.2 "@lumino/disposable": ^2.1.2 @@ -2923,28 +2896,28 @@ __metadata: json5: ^2.2.3 peerDependencies: react: ">=16" - checksum: 610a43c2308ea7b35c58bc4fdffa0613cd04bbd56bae3f64ee7d7869ae8e484e26102726f5a31f6ae2ffc6f3e77527473fb1a8a9869fdbdac93d5a12984bd56d + checksum: 2403e3198f2937fb9e4c12f96121e8bfc4f2a9ed47a9ad64182c88c8c19d59fcdf7443d0bf7d04527e89ac06378ceb39d6b4196c7f575c2a21fea23283ad3892 languageName: node linkType: hard -"@jupyterlab/statedb@npm:^4.2.2": - version: 4.2.2 - resolution: "@jupyterlab/statedb@npm:4.2.2" +"@jupyterlab/statedb@npm:^4.2.2, @jupyterlab/statedb@npm:^4.2.5": + version: 4.2.5 + resolution: "@jupyterlab/statedb@npm:4.2.5" dependencies: "@lumino/commands": ^2.3.0 "@lumino/coreutils": ^2.1.2 "@lumino/disposable": ^2.1.2 "@lumino/properties": ^2.0.1 "@lumino/signaling": ^2.1.2 - checksum: 6fbeed16a659b3f0d9b7a86cca91a0fd082c35b500264d58206f8a79640ea34ac00192c749a96c10f8762c6153ef26d3face6e6ce30b0e84479a0a5896254c38 + checksum: 236e7628070971af167eb4fdeac96a0090b2256cfa14b6a75aee5ef23b156cd57a8b25518125fbdc58dea09490f8f473740bc4b454d8ad7c23949f64a61b757e languageName: node linkType: hard -"@jupyterlab/statusbar@npm:^4.2.2": - version: 4.2.2 - resolution: "@jupyterlab/statusbar@npm:4.2.2" +"@jupyterlab/statusbar@npm:^4.2.2, @jupyterlab/statusbar@npm:^4.2.5": + version: 4.2.5 + resolution: "@jupyterlab/statusbar@npm:4.2.5" dependencies: - "@jupyterlab/ui-components": ^4.2.2 + "@jupyterlab/ui-components": ^4.2.5 "@lumino/algorithm": ^2.0.1 "@lumino/coreutils": ^2.1.2 "@lumino/disposable": ^2.1.2 @@ -2952,7 +2925,7 @@ __metadata: "@lumino/signaling": ^2.1.2 "@lumino/widgets": ^2.3.2 react: ^18.2.0 - checksum: f687fe87f693036edabaf7273aa3b1da89dac4636daf6632bb8d76bf79693ca713f83105247a90b1b378bfc42f61313d3ebc6177a01d2647b957c3c1b01e25f3 + checksum: fa429b88a5bcd6889b9ac32b5f2500cb10a968cc636ca8dede17972535cc47454cb7fc96518fc8def76935f826b66b071752d0fd26afdacba579f6f3785e97b2 languageName: node linkType: hard @@ -3014,29 +2987,29 @@ __metadata: languageName: node linkType: hard -"@jupyterlab/translation@npm:^4.2.2": - version: 4.2.2 - resolution: "@jupyterlab/translation@npm:4.2.2" +"@jupyterlab/translation@npm:^4.2.2, @jupyterlab/translation@npm:^4.2.5": + version: 4.2.5 + resolution: "@jupyterlab/translation@npm:4.2.5" dependencies: - "@jupyterlab/coreutils": ^6.2.2 - "@jupyterlab/rendermime-interfaces": ^3.10.2 - "@jupyterlab/services": ^7.2.2 - "@jupyterlab/statedb": ^4.2.2 + "@jupyterlab/coreutils": ^6.2.5 + "@jupyterlab/rendermime-interfaces": ^3.10.5 + "@jupyterlab/services": ^7.2.5 + "@jupyterlab/statedb": ^4.2.5 "@lumino/coreutils": ^2.1.2 - checksum: 
faeda0940384b5d204e5f7ca0e50cdf0122d6be8618a10c9c77ba57b675fc7045c65da8c1fc51fb4803361b7d0bbbbd1d6d224d5905677f3782231bdad2f8164 + checksum: 8983efad2b0d54381cb94799a10eab30f284a87103f93e844bd87106e2df3c304e260b9c95540317819cc2b2520c74ad78cb724816c81e0c315fdb43d0bdaab3 languageName: node linkType: hard -"@jupyterlab/ui-components@npm:^4.0.0, @jupyterlab/ui-components@npm:^4.2.0, @jupyterlab/ui-components@npm:^4.2.2": - version: 4.2.2 - resolution: "@jupyterlab/ui-components@npm:4.2.2" +"@jupyterlab/ui-components@npm:^4.2.0, @jupyterlab/ui-components@npm:^4.2.2, @jupyterlab/ui-components@npm:^4.2.5": + version: 4.2.5 + resolution: "@jupyterlab/ui-components@npm:4.2.5" dependencies: "@jupyter/react-components": ^0.15.3 "@jupyter/web-components": ^0.15.3 - "@jupyterlab/coreutils": ^6.2.2 - "@jupyterlab/observables": ^5.2.2 - "@jupyterlab/rendermime-interfaces": ^3.10.2 - "@jupyterlab/translation": ^4.2.2 + "@jupyterlab/coreutils": ^6.2.5 + "@jupyterlab/observables": ^5.2.5 + "@jupyterlab/rendermime-interfaces": ^3.10.5 + "@jupyterlab/translation": ^4.2.5 "@lumino/algorithm": ^2.0.1 "@lumino/commands": ^2.3.0 "@lumino/coreutils": ^2.1.2 @@ -3054,7 +3027,7 @@ __metadata: typestyle: ^2.0.4 peerDependencies: react: ^18.2.0 - checksum: 5e0f7c835dd64db51332966cb56b5b5f12a22b4b42b229ade772b853dc31aab92ec323125a2e7781e3c7acd41949cd5600b1f1421e64ebafe1c05957e1176501 + checksum: 9d2b887910a3b0d41645388c5ac6183d6fd2f3af4567de9b077b2492b1a9380f98c4598a4ae6d1c3186624ed4f956bedf8ba37adb5f772c96555761384a93e1e languageName: node linkType: hard @@ -3317,10 +3290,10 @@ __metadata: languageName: node linkType: hard -"@lumino/algorithm@npm:^2.0.1": - version: 2.0.1 - resolution: "@lumino/algorithm@npm:2.0.1" - checksum: cbf7fcf6ee6b785ea502cdfddc53d61f9d353dcb9659343511d5cd4b4030be2ff2ca4c08daec42f84417ab0318a3d9972a17319fa5231693e109ab112dcf8000 +"@lumino/algorithm@npm:^2.0.1, @lumino/algorithm@npm:^2.0.2": + version: 2.0.2 + resolution: "@lumino/algorithm@npm:2.0.2" + checksum: 34b25684b845f1bdbf78ca45ebd99a97b67b2992440c9643aafe5fc5a99fae1ddafa9e5890b246b233dc3a12d9f66aa84afe4a2aac44cf31071348ed217740db languageName: node linkType: hard @@ -3344,41 +3317,43 @@ __metadata: languageName: node linkType: hard -"@lumino/commands@npm:^2.3.0": - version: 2.3.0 - resolution: "@lumino/commands@npm:2.3.0" +"@lumino/commands@npm:^2.0.0, @lumino/commands@npm:^2.3.0": + version: 2.3.1 + resolution: "@lumino/commands@npm:2.3.1" dependencies: - "@lumino/algorithm": ^2.0.1 - "@lumino/coreutils": ^2.1.2 - "@lumino/disposable": ^2.1.2 - "@lumino/domutils": ^2.0.1 - "@lumino/keyboard": ^2.0.1 - "@lumino/signaling": ^2.1.2 - "@lumino/virtualdom": ^2.0.1 - checksum: a9b83bbfcc0421ff501e818dd234c65db438a8abb450628db0dea9ee05e8077d10b2275e7e2289f6df9c20dc26d2af458b1db88ccf43ec69f185eb207dbad419 + "@lumino/algorithm": ^2.0.2 + "@lumino/coreutils": ^2.2.0 + "@lumino/disposable": ^2.1.3 + "@lumino/domutils": ^2.0.2 + "@lumino/keyboard": ^2.0.2 + "@lumino/signaling": ^2.1.3 + "@lumino/virtualdom": ^2.0.2 + checksum: 83bc6d66de37e58582b00f70ce66e797c9fcf84e36041c6881631ed0d281305e2a49927f5b2fe6c5c965733f3cd6fb4a233c7b7967fc050497024a941659bd65 languageName: node linkType: hard -"@lumino/coreutils@npm:^1.11.0 || ^2.0.0, @lumino/coreutils@npm:^1.11.0 || ^2.1.2, @lumino/coreutils@npm:^2.1.0, @lumino/coreutils@npm:^2.1.2": - version: 2.1.2 - resolution: "@lumino/coreutils@npm:2.1.2" - checksum: 7865317ac0676b448d108eb57ab5d8b2a17c101995c0f7a7106662d9fe6c859570104525f83ee3cda12ae2e326803372206d6f4c1f415a5b59e4158a7b81066f 
+"@lumino/coreutils@npm:^1.11.0 || ^2.0.0, @lumino/coreutils@npm:^1.11.0 || ^2.1.2, @lumino/coreutils@npm:^2.0.0, @lumino/coreutils@npm:^2.1.2, @lumino/coreutils@npm:^2.2.0": + version: 2.2.0 + resolution: "@lumino/coreutils@npm:2.2.0" + dependencies: + "@lumino/algorithm": ^2.0.2 + checksum: 345fcd5d7493d745831dd944edfbd8eda06cc59a117e71023fc97ce53badd697be2bd51671f071f5ff0064f75f104575d9695f116a07517bafbedd38e5c7a785 languageName: node linkType: hard -"@lumino/disposable@npm:^1.10.0 || ^2.0.0, @lumino/disposable@npm:^2.1.0, @lumino/disposable@npm:^2.1.2": - version: 2.1.2 - resolution: "@lumino/disposable@npm:2.1.2" +"@lumino/disposable@npm:^1.10.0 || ^2.0.0, @lumino/disposable@npm:^2.0.0, @lumino/disposable@npm:^2.1.2, @lumino/disposable@npm:^2.1.3": + version: 2.1.3 + resolution: "@lumino/disposable@npm:2.1.3" dependencies: - "@lumino/signaling": ^2.1.2 - checksum: ac2fb2bf18d0b2939fda454f3db248a0ff6e8a77b401e586d1caa9293b3318f808b93a117c9c3ac27cd17aab545aea83b49108d099b9b2f5503ae2a012fbc6e2 + "@lumino/signaling": ^2.1.3 + checksum: b9a346fa2752b3cd1b053cb637ee173501d33082a73423429070e8acc508b034ea0babdae0549b923cbdd287ee1fc7f6159f0539c9fff7574393a214eef07c57 languageName: node linkType: hard -"@lumino/domutils@npm:^2.0.1": - version: 2.0.1 - resolution: "@lumino/domutils@npm:2.0.1" - checksum: 61fa0ab226869dfbb763fc426790cf5a43b7d6f4cea1364c6dd56d61c44bff05eea188d33ff847449608ef58ed343161bee15c19b96f35410e4ee35815dc611a +"@lumino/domutils@npm:^2.0.1, @lumino/domutils@npm:^2.0.2": + version: 2.0.2 + resolution: "@lumino/domutils@npm:2.0.2" + checksum: 037b8d0b62af43887fd7edd506fa551e2af104a4b46d62e6fef256e16754dba40d351513beb5083834d468b2c7806aae0fe205fd6aac8ef24759451ee998bbd9 languageName: node linkType: hard @@ -3392,10 +3367,10 @@ __metadata: languageName: node linkType: hard -"@lumino/keyboard@npm:^2.0.1": - version: 2.0.1 - resolution: "@lumino/keyboard@npm:2.0.1" - checksum: cf33f13427a418efd7cc91061233321e860d5404f3d86397781028309bef86c8ad2d88276ffe335c1db0fe619bd9d1e60641c81f881696957a58703ee4652c3e +"@lumino/keyboard@npm:^2.0.1, @lumino/keyboard@npm:^2.0.2": + version: 2.0.2 + resolution: "@lumino/keyboard@npm:2.0.2" + checksum: 198e8c17825c9a831fa0770f58a71574b936acb0f0bbbe7f8feb73d89686dda7ff41fcb02d12b401f5d462b45fe0bba24f7f38befb7cefe0826576559f0bee6d languageName: node linkType: hard @@ -3427,26 +3402,26 @@ __metadata: languageName: node linkType: hard -"@lumino/signaling@npm:^1.10.0 || ^2.0.0, @lumino/signaling@npm:^2.1.0, @lumino/signaling@npm:^2.1.2": - version: 2.1.2 - resolution: "@lumino/signaling@npm:2.1.2" +"@lumino/signaling@npm:^1.10.0 || ^2.0.0, @lumino/signaling@npm:^2.0.0, @lumino/signaling@npm:^2.1.2, @lumino/signaling@npm:^2.1.3": + version: 2.1.3 + resolution: "@lumino/signaling@npm:2.1.3" dependencies: - "@lumino/algorithm": ^2.0.1 - "@lumino/coreutils": ^2.1.2 - checksum: ad7d7153db57980da899c43e412e6130316ef30b231a70250e7af49058db16cadb018c1417a2ea8083d83c48623cfe6b705fa82bf10216b1a8949aed9f4aca4e + "@lumino/algorithm": ^2.0.2 + "@lumino/coreutils": ^2.2.0 + checksum: ce59383bd75fe30df5800e0442dbc4193cc6778e2530b9be0f484d159f1d8668be5c6ee92cee9df36d5a0c3dbd9126d0479a82581dee1df889d5c9f922d3328d languageName: node linkType: hard -"@lumino/virtualdom@npm:^2.0.0, @lumino/virtualdom@npm:^2.0.1": - version: 2.0.1 - resolution: "@lumino/virtualdom@npm:2.0.1" +"@lumino/virtualdom@npm:^2.0.1, @lumino/virtualdom@npm:^2.0.2": + version: 2.0.2 + resolution: "@lumino/virtualdom@npm:2.0.2" dependencies: - "@lumino/algorithm": ^2.0.1 - checksum: 
cf59b6f15b430e13e9e657b7a0619b9056cd9ea7b2a87f407391d071c501b77403c302b6a66dca510382045e75b2e3fe551630bb391f1c6b33678057d4bec164
+    "@lumino/algorithm": ^2.0.2
+  checksum: 0e1220d5b3b2441e7668f3542a6341e015bdbea0c8bd6d4be962009386c034336540732596d5dedcd54ca57fbde61c2942549129a3e1b0fccb1aa143685fcd15
   languageName: node
   linkType: hard

-"@lumino/widgets@npm:^1.37.2 || ^2.3.2, @lumino/widgets@npm:^2.1.0, @lumino/widgets@npm:^2.3.2":
+"@lumino/widgets@npm:^1.37.2 || ^2.3.2, @lumino/widgets@npm:^2.3.2":
   version: 2.3.2
   resolution: "@lumino/widgets@npm:2.3.2"
   dependencies:
@@ -4964,19 +4939,6 @@ __metadata:
   languageName: node
   linkType: hard

-"abstract-leveldown@npm:^6.2.1, abstract-leveldown@npm:~6.2.1, abstract-leveldown@npm:~6.2.3":
-  version: 6.2.3
-  resolution: "abstract-leveldown@npm:6.2.3"
-  dependencies:
-    buffer: ^5.5.0
-    immediate: ^3.2.3
-    level-concat-iterator: ~2.0.0
-    level-supports: ~1.0.0
-    xtend: ~4.0.0
-  checksum: 00202b2eb7955dd7bc04f3e44d225e60160cedb8f96fe6ae0e6dca9c356d57071f001ece8ae1d53f48095c4c036d92b3440f2bc7666730610ddea030f9fbde4a
-  languageName: node
-  linkType: hard
-
 "acorn-globals@npm:^7.0.0":
   version: 7.0.1
   resolution: "acorn-globals@npm:7.0.1"
   dependencies:
@@ -5312,13 +5274,6 @@ __metadata:
   languageName: node
   linkType: hard

-"async-limiter@npm:~1.0.0":
-  version: 1.0.1
-  resolution: "async-limiter@npm:1.0.1"
-  checksum: 2b849695b465d93ad44c116220dee29a5aeb63adac16c1088983c339b0de57d76e82533e8e364a93a9f997f28bbfc6a92948cefc120652bd07f3b59f8d75cf2b
-  languageName: node
-  linkType: hard
-
 "async@npm:^3.2.3":
   version: 3.2.5
   resolution: "async@npm:3.2.5"
@@ -5606,7 +5561,7 @@ __metadata:
   languageName: node
   linkType: hard

-"buffer@npm:^5.5.0, buffer@npm:^5.6.0":
+"buffer@npm:^5.5.0":
   version: 5.7.1
   resolution: "buffer@npm:5.7.1"
   dependencies:
@@ -6518,16 +6473,6 @@ __metadata:
   languageName: node
   linkType: hard

-"deferred-leveldown@npm:~5.3.0":
-  version: 5.3.0
-  resolution: "deferred-leveldown@npm:5.3.0"
-  dependencies:
-    abstract-leveldown: ~6.2.1
-    inherits: ^2.0.3
-  checksum: 5631e153528bb9de1aa60d59a5065d1a519374c5e4c1d486f2190dba4008dcf5c2ee8dd7f2f81396fc4d5a6bb6e7d0055e3dfe68afe00da02adaa3bf329addf7
-  languageName: node
-  linkType: hard
-
 "define-data-property@npm:^1.0.1, define-data-property@npm:^1.1.2":
   version: 1.1.2
   resolution: "define-data-property@npm:1.1.2"
@@ -6788,18 +6733,6 @@ __metadata:
   languageName: node
   linkType: hard

-"encoding-down@npm:^6.3.0":
-  version: 6.3.0
-  resolution: "encoding-down@npm:6.3.0"
-  dependencies:
-    abstract-leveldown: ^6.2.1
-    inherits: ^2.0.3
-    level-codec: ^9.0.0
-    level-errors: ^2.0.0
-  checksum: 74043e6d9061a470614ff61d708c849259ab32932a428fd5ddfb0878719804f56a52f59b31cccd95fddc2e636c0fd22dc3e02481fb98d5bf1bdbbbc44ca09bdc
-  languageName: node
-  linkType: hard
-
 "encoding@npm:^0.1.13":
   version: 0.1.13
   resolution: "encoding@npm:0.1.13"
@@ -6867,17 +6800,6 @@ __metadata:
   languageName: node
   linkType: hard

-"errno@npm:~0.1.1":
-  version: 0.1.8
-  resolution: "errno@npm:0.1.8"
-  dependencies:
-    prr: ~1.0.1
-  bin:
-    errno: cli.js
-  checksum: 1271f7b9fbb3bcbec76ffde932485d1e3561856d21d847ec613a9722ee924cdd4e523a62dc71a44174d91e898fe21fdc8d5b50823f4b5e0ce8c35c8271e6ef4a
-  languageName: node
-  linkType: hard
-
 "error-ex@npm:^1.3.1":
   version: 1.3.2
   resolution: "error-ex@npm:1.3.2"
@@ -8294,13 +8216,6 @@ __metadata:
   languageName: node
   linkType: hard

-"immediate@npm:^3.2.3":
-  version: 3.3.0
-  resolution: "immediate@npm:3.3.0"
-  checksum: 634b4305101e2452eba6c07d485bf3e415995e533c94b9c3ffbc37026fa1be34def6e4f2276b0dc2162a3f91628564a4bfb26280278b89d3ee54624e854d2f5f
-  languageName: node
-  linkType: hard
-
 "import-fresh@npm:^3.0.0, import-fresh@npm:^3.2.1":
   version: 3.3.0
   resolution: "import-fresh@npm:3.3.0"
@@ -9693,109 +9608,6 @@ __metadata:
   languageName: node
   linkType: hard

-"level-codec@npm:^9.0.0":
-  version: 9.0.2
-  resolution: "level-codec@npm:9.0.2"
-  dependencies:
-    buffer: ^5.6.0
-  checksum: 289003d51b8afcdd24c4d318606abf2bae81975e4b527d7349abfdbacc8fef26711f2f24e2d20da0e1dce0bb216a856c9433ccb9ca25fa78a96aed9f51e506ed
-  languageName: node
-  linkType: hard
-
-"level-concat-iterator@npm:~2.0.0":
-  version: 2.0.1
-  resolution: "level-concat-iterator@npm:2.0.1"
-  checksum: 562583ef1292215f8e749c402510cb61c4d6fccf4541082b3d21dfa5ecde9fcccfe52bdcb5cfff9d2384e7ce5891f44df9439a6ddb39b0ffe31015600b4a828a
-  languageName: node
-  linkType: hard
-
-"level-errors@npm:^2.0.0, level-errors@npm:~2.0.0":
-  version: 2.0.1
-  resolution: "level-errors@npm:2.0.1"
-  dependencies:
-    errno: ~0.1.1
-  checksum: aca5d7670e2a40609db8d7743fce289bb5202c0bc13e4a78f81f36a6642e9abc0110f48087d3d3c2c04f023d70d4ee6f2db0e20c63d29b3fda323a67bfff6526
-  languageName: node
-  linkType: hard
-
-"level-iterator-stream@npm:~4.0.0":
-  version: 4.0.2
-  resolution: "level-iterator-stream@npm:4.0.2"
-  dependencies:
-    inherits: ^2.0.4
-    readable-stream: ^3.4.0
-    xtend: ^4.0.2
-  checksum: 239e2c7e62bffb485ed696bcd3b98de7a2bc455d13be4fce175ae3544fe9cda81c2ed93d3e88b61380ae6d28cce02511862d77b86fb2ba5b5cf00471f3c1eccc
-  languageName: node
-  linkType: hard
-
-"level-js@npm:^5.0.0":
-  version: 5.0.2
-  resolution: "level-js@npm:5.0.2"
-  dependencies:
-    abstract-leveldown: ~6.2.3
-    buffer: ^5.5.0
-    inherits: ^2.0.3
-    ltgt: ^2.1.2
-  checksum: 3c7f75979bb8c042e95a58245b8fe1230bb0f56a11ee418e08156e3eadda371efae6eb7b9bf10bf1e08e0b1b2a25d80c026858ca99ffd49109d6541e3d9d3b37
-  languageName: node
-  linkType: hard
-
-"level-packager@npm:^5.1.0":
-  version: 5.1.1
-  resolution: "level-packager@npm:5.1.1"
-  dependencies:
-    encoding-down: ^6.3.0
-    levelup: ^4.3.2
-  checksum: befe2aa54f2010a6ecf7ddce392c8dee225e1839205080a2704d75e560e28b01191b345494696196777b70d376e3eaae4c9e7c330cc70d3000839f5b18dd78f2
-  languageName: node
-  linkType: hard
-
-"level-supports@npm:~1.0.0":
-  version: 1.0.1
-  resolution: "level-supports@npm:1.0.1"
-  dependencies:
-    xtend: ^4.0.2
-  checksum: 5d6bdb88cf00c3d9adcde970db06a548c72c5a94bf42c72f998b58341a105bfe2ea30d313ce1e84396b98cc9ddbc0a9bd94574955a86e929f73c986e10fc0df0
-  languageName: node
-  linkType: hard
-
-"level@npm:^6.0.1":
-  version: 6.0.1
-  resolution: "level@npm:6.0.1"
-  dependencies:
-    level-js: ^5.0.0
-    level-packager: ^5.1.0
-    leveldown: ^5.4.0
-  checksum: bd4981f94162469a82a6c98d267d814d9d4a7beed4fc3d18fbe3b156f71cf4c6d35b424d14c46d401dbf0cd91425e842950a7cd17ddf7bf57acdab5af4c278da
-  languageName: node
-  linkType: hard
-
-"leveldown@npm:^5.4.0":
-  version: 5.6.0
-  resolution: "leveldown@npm:5.6.0"
-  dependencies:
-    abstract-leveldown: ~6.2.1
-    napi-macros: ~2.0.0
-    node-gyp: latest
-    node-gyp-build: ~4.1.0
-  checksum: 06d4683170d7fc661acd65457e531b42ad66480e9339d3154ba6d0de38ff0503d7d017c1c6eba12732b5488ecd2915c70c8dc3a7d67f4a836f3de34b8a993949
-  languageName: node
-  linkType: hard
-
-"levelup@npm:^4.3.2":
-  version: 4.4.0
-  resolution: "levelup@npm:4.4.0"
-  dependencies:
-    deferred-leveldown: ~5.3.0
-    level-errors: ~2.0.0
-    level-iterator-stream: ~4.0.0
-    level-supports: ~1.0.0
-    xtend: ~4.0.0
-  checksum: 5a09e34c78cd7c23f9f6cb73563f1ebe8121ffc5f9f5f232242529d4fbdd40e8d1ffb337d2defa0b842334e0dbd4028fbfe7a072eebfe2c4d07174f0aa4aabca
-  languageName: node
-  linkType: hard
-
 "leven@npm:^3.1.0":
   version: 3.1.0
   resolution: "leven@npm:3.1.0"
@@ -9813,7 +9625,7 @@ __metadata:
   languageName: node
   linkType: hard

-"lib0@npm:^0.2.31, lib0@npm:^0.2.52, lib0@npm:^0.2.85, lib0@npm:^0.2.86":
+"lib0@npm:^0.2.85, lib0@npm:^0.2.86":
   version: 0.2.88
   resolution: "lib0@npm:0.2.88"
   dependencies:
@@ -10066,13 +9878,6 @@ __metadata:
   languageName: node
   linkType: hard

-"ltgt@npm:^2.1.2":
-  version: 2.2.1
-  resolution: "ltgt@npm:2.2.1"
-  checksum: 7e3874296f7538bc8087b428ac4208008d7b76916354b34a08818ca7c83958c1df10ec427eeeaad895f6b81e41e24745b18d30f89abcc21d228b94f6961d50a2
-  languageName: node
-  linkType: hard
-
 "make-dir@npm:3.1.0":
   version: 3.1.0
   resolution: "make-dir@npm:3.1.0"
@@ -10594,13 +10399,6 @@ __metadata:
   languageName: node
   linkType: hard

-"napi-macros@npm:~2.0.0":
-  version: 2.0.0
-  resolution: "napi-macros@npm:2.0.0"
-  checksum: 30384819386977c1f82034757014163fa60ab3c5a538094f778d38788bebb52534966279956f796a92ea771c7f8ae072b975df65de910d051ffbdc927f62320c
-  languageName: node
-  linkType: hard
-
 "natural-compare@npm:^1.4.0":
   version: 1.4.0
   resolution: "natural-compare@npm:1.4.0"
@@ -10663,17 +10461,6 @@ __metadata:
   languageName: node
   linkType: hard

-"node-gyp-build@npm:~4.1.0":
-  version: 4.1.1
-  resolution: "node-gyp-build@npm:4.1.1"
-  bin:
-    node-gyp-build: ./bin.js
-    node-gyp-build-optional: ./optional.js
-    node-gyp-build-test: ./build-test.js
-  checksum: 959d42221cc44b92700003efae741652bc4e379e4cf375830ddde03ba43c89f99694bf0883078ed0d4e03ffe2f85decab0572e04068d3900b8538d165dbc17df
-  languageName: node
-  linkType: hard
-
 "node-gyp@npm:^9.0.0":
   version: 9.4.1
   resolution: "node-gyp@npm:9.4.1"
@@ -11827,13 +11614,6 @@ __metadata:
   languageName: node
   linkType: hard

-"prr@npm:~1.0.1":
-  version: 1.0.1
-  resolution: "prr@npm:1.0.1"
-  checksum: 3bca2db0479fd38f8c4c9439139b0c42dcaadcc2fbb7bb8e0e6afaa1383457f1d19aea9e5f961d5b080f1cfc05bfa1fe9e45c97a1d3fd6d421950a73d3108381
-  languageName: node
-  linkType: hard
-
 "psl@npm:^1.1.33":
   version: 1.9.0
   resolution: "psl@npm:1.9.0"
@@ -14411,15 +14191,6 @@ __metadata:
   languageName: node
   linkType: hard

-"ws@npm:^6.2.1":
-  version: 6.2.2
-  resolution: "ws@npm:6.2.2"
-  dependencies:
-    async-limiter: ~1.0.0
-  checksum: aec3154ec51477c094ac2cb5946a156e17561a581fa27005cbf22c53ac57f8d4e5f791dd4bbba6a488602cb28778c8ab7df06251d590507c3c550fd8ebeee949
-  languageName: node
-  linkType: hard
-
 "ws@npm:^8.11.0":
   version: 8.16.0
   resolution: "ws@npm:8.16.0"
@@ -14456,25 +14227,13 @@ __metadata:
   languageName: node
   linkType: hard

-"xtend@npm:^4.0.1, xtend@npm:^4.0.2, xtend@npm:~4.0.0, xtend@npm:~4.0.1":
+"xtend@npm:^4.0.1, xtend@npm:~4.0.1":
   version: 4.0.2
   resolution: "xtend@npm:4.0.2"
   checksum: ac5dfa738b21f6e7f0dd6e65e1b3155036d68104e67e5d5d1bde74892e327d7e5636a076f625599dc394330a731861e87343ff184b0047fef1360a7ec0a5a36a
   languageName: node
   linkType: hard

-"y-leveldb@npm:^0.1.0":
-  version: 0.1.2
-  resolution: "y-leveldb@npm:0.1.2"
-  dependencies:
-    level: ^6.0.1
-    lib0: ^0.2.31
-  peerDependencies:
-    yjs: ^13.0.0
-  checksum: 38e3293cfc5e754ba50af4c6bd03a96efde34c92809baf504b38cb4f45959187f896fe6971fa6a91823763e178807aaa14e190d1f7bea1b3a1e9b7265bb88b6d
-  languageName: node
-  linkType: hard
-
 "y-protocols@npm:^1.0.5":
   version: 1.0.6
   resolution: "y-protocols@npm:1.0.6"
@@ -14486,29 +14245,6 @@ __metadata:
   languageName: node
   linkType: hard

-"y-websocket@npm:^1.3.15":
-  version: 1.5.3
-  resolution: "y-websocket@npm:1.5.3"
-  dependencies:
-    lib0: ^0.2.52
-    lodash.debounce: ^4.0.8
-    ws: ^6.2.1
-    y-leveldb: ^0.1.0
-    y-protocols: ^1.0.5
-  peerDependencies:
-    yjs: ^13.5.6
-  dependenciesMeta:
-    ws:
-      optional: true
-    y-leveldb:
-      optional: true
-  bin:
-    y-websocket: bin/server.js
-    y-websocket-server: bin/server.js
-  checksum: 4e658318a64feb131015cf4e284da23ad3f6b818a5a1c1e927404db3432c858d6ce4efe7e74f9a86ea70e003b7690aba517a34b8c5b6f3cd9eac86a94bf5c67f
-  languageName: node
-  linkType: hard
-
 "y18n@npm:^5.0.5":
   version: 5.0.8
   resolution: "y18n@npm:5.0.8"