From 196d9217d07480c478fcafa64e2227e248e82b91 Mon Sep 17 00:00:00 2001 From: rchan Date: Tue, 11 Jun 2024 23:28:59 +0100 Subject: [PATCH 01/11] add streaming option for CLI --- reginald/cli.py | 8 ++ reginald/models/models/__init__.py | 4 +- reginald/models/models/base.py | 3 + reginald/models/models/chat_completion.py | 41 +++++- reginald/models/models/hello.py | 5 + reginald/models/models/llama_index.py | 152 +++++++++++++--------- reginald/run.py | 20 ++- tests/test_chat_interact.py | 11 ++ 8 files changed, 176 insertions(+), 68 deletions(-) diff --git a/reginald/cli.py b/reginald/cli.py index 85a78de8..ef86c942 100644 --- a/reginald/cli.py +++ b/reginald/cli.py @@ -25,6 +25,7 @@ "device": "Device to use (ignored if not using llama-index).", "api_url": "API URL for the Reginald app.", "emoji": "Emoji to use for the bot.", + "streaming": "Whether to use streaming for the chat interaction.", } cli = typer.Typer() @@ -288,6 +289,12 @@ def chat( Optional[str], typer.Option(envvar="REGINALD_MODEL_NAME", help=HELP_TEXT["model_name"]), ] = None, + streaming: Annotated[ + bool, + typer.Option( + help=HELP_TEXT["streaming"], + ), + ] = True, mode: Annotated[ str, typer.Option(envvar="LLAMA_INDEX_MODE", help=HELP_TEXT["mode"]) ] = DEFAULT_ARGS["mode"], @@ -342,6 +349,7 @@ def chat( set_up_logging_config(level=40) main( cli="chat", + streaming=streaming, model=model, model_name=model_name, mode=mode, diff --git a/reginald/models/models/__init__.py b/reginald/models/models/__init__.py index 561b750c..d018a5c4 100644 --- a/reginald/models/models/__init__.py +++ b/reginald/models/models/__init__.py @@ -25,13 +25,13 @@ } DEFAULTS = { - "chat-completion-azure": "reginald-curie", + "chat-completion-azure": "reginald-gpt4", "chat-completion-openai": "gpt-3.5-turbo", "hello": None, "llama-index-ollama": "llama3", "llama-index-llama-cpp": "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q6_K.gguf", "llama-index-hf": "microsoft/phi-1_5", - "llama-index-gpt-azure": "reginald-gpt35-turbo", + "llama-index-gpt-azure": "reginald-gpt4", "llama-index-gpt-openai": "gpt-3.5-turbo", } diff --git a/reginald/models/models/base.py b/reginald/models/models/base.py index e93377d9..16cb43a3 100644 --- a/reginald/models/models/base.py +++ b/reginald/models/models/base.py @@ -34,3 +34,6 @@ def direct_message(self, message: str, user_id: str) -> MessageResponse: def channel_mention(self, message: str, user_id: str) -> MessageResponse: raise NotImplementedError + + def stream_message(self, message: str, user_id: str) -> None: + raise NotImplementedError diff --git a/reginald/models/models/chat_completion.py b/reginald/models/models/chat_completion.py index 9112b5c4..cd38487a 100644 --- a/reginald/models/models/chat_completion.py +++ b/reginald/models/models/chat_completion.py @@ -1,5 +1,4 @@ import logging -import os import sys from typing import Any @@ -155,6 +154,36 @@ def channel_mention(self, message: str, user_id: str) -> MessageResponse: """ return self._respond(message=message, user_id=user_id) + def stream_message(self, message: str, user_id: str) -> None: + if self.mode == "chat": + response = self.client.chat.completions.create( + model=self.engine, + messages=[{"role": "user", "content": message}], + frequency_penalty=self.frequency_penalty, + max_tokens=self.max_tokens, + presence_penalty=self.presence_penalty, + stop=None, + temperature=self.temperature, + top_p=self.top_p, + stream=True, + ) + elif self.mode == "query": + response = self.client.completions.create( + 
model=self.engine, + frequency_penalty=self.frequency_penalty, + max_tokens=self.max_tokens, + presence_penalty=self.presence_penalty, + prompt=message, + stop=None, + temperature=self.temperature, + top_p=self.top_p, + stream=True, + ) + + print("Reginald: ", end="") + for chunk in response: + print(chunk.choices[0].delta.content) + class ChatCompletionOpenAI(ChatCompletionBase): def __init__( @@ -233,3 +262,13 @@ def channel_mention(self, message: str, user_id: str) -> MessageResponse: Response from the query engine. """ return self._respond(message=message, user_id=user_id) + + def stream_message(self, message: str, user_id: str) -> None: + response = self.client.chat.completions.create( + model=self.model_name, + messages=[{"role": "user", "content": message}], + stream=True, + ) + print("Reginald: ", end="") + for chunk in response: + print(chunk["choices"][0]["delta"]["content"]) diff --git a/reginald/models/models/hello.py b/reginald/models/models/hello.py index 2560613e..19607185 100644 --- a/reginald/models/models/hello.py +++ b/reginald/models/models/hello.py @@ -16,3 +16,8 @@ def direct_message(self, message: str, user_id: str) -> MessageResponse: def channel_mention(self, message: str, user_id: str) -> MessageResponse: return MessageResponse(f"Hello <@{user_id}>") + + def stream_message(self, message: str, user_id: str) -> None: + print("\nReginald: ", end="") + for token in ["Hello", "!", " How", " are", " you", "?"]: + print(token, end="") diff --git a/reginald/models/models/llama_index.py b/reginald/models/models/llama_index.py index 94da9bc4..bb792e0f 100644 --- a/reginald/models/models/llama_index.py +++ b/reginald/models/models/llama_index.py @@ -647,13 +647,14 @@ def __init__( settings=settings, ) - response_mode = "simple_summarize" + self.response_mode = "simple_summarize" if self.mode == "chat": self.chat_engine = {} logging.info("Done setting up Huggingface backend for chat engine.") elif self.mode == "query": self.query_engine = self.index.as_query_engine( - response_mode=response_mode, similarity_top_k=k + response_mode=self.response_mode, + similarity_top_k=k, ) logging.info("Done setting up Huggingface backend for query engine.") @@ -693,12 +694,48 @@ def _format_sources(response: RESPONSE_TYPE) -> str: result = "I read the following documents to compose this answer:\n" result += "\n\n".join(texts) + return result - def _get_response(self, msg_in: str, user_id: str) -> str: + def _prep_llm(self) -> BaseLLM: + """ + Method to prepare the LLM to be used. + + Returns + ------- + BaseLLM + LLM to be used. + + Raises + ------ + NotImplemented + This must be implemented by a subclass of LlamaIndex. + """ + raise NotImplementedError( + "_prep_llm needs to be implemented by a subclass of LlamaIndex." + ) + + def _prep_tokenizer(self) -> callable[str] | None: + """ + Method to prepare the Tokenizer to be used. + + Returns + ------- + callable[str] | None + Tokenizer to use. A callable function on a string. + Can also be None if using the default set by LlamaIndex. + + Raises + ------ + NotImplemented """ - Method to obtain a response from the query/chat engine given - a message and a user id. + raise NotImplementedError( + "_prep_tokenizer needs to be implemented by a subclass of LlamaIndex." + ) + + def _get_response(self, message: str, user_id: str) -> MessageResponse: + """ + Method to respond to a message in Slack. 
Parameters ---------- @@ -709,25 +746,25 @@ def _get_response(self, msg_in: str, user_id: str) -> str: Returns ------- - str - String containing the response from the query engine. + MessageResponse + Response from the query engine. """ - response_mode = "simple_summarize" try: if self.mode == "chat": # create chat engine for user if does not exist if self.chat_engine.get(user_id) is None: self.chat_engine[user_id] = self.index.as_chat_engine( - chat_mode="context", - response_mode=response_mode, + chat_mode="condense_plus_context", + response_mode=self.response_mode, similarity_top_k=self.k, ) # obtain chat engine for particular user chat_engine = self.chat_engine[user_id] - response = chat_engine.chat(msg_in) + response = chat_engine.chat(message) elif self.mode == "query": - response = self.query_engine.query(msg_in) + self.query_engine._response_synthesizer._streaming = False + response = self.query_engine.query(message) # concatenate the response with the resources that it used formatted_response = ( @@ -735,14 +772,16 @@ def _get_response(self, msg_in: str, user_id: str) -> str: ) except Exception as e: # ignore: broad-except formatted_response = self.error_response_template.format(repr(e)) + pattern = ( r"(?s)^Context information is" r".*" r"Given the context information and not prior knowledge, answer the question: " - rf"{msg_in}" + rf"{message}" r"\n(.*)" ) m = re.search(pattern, formatted_response) + if m: answer = m.group(1) else: @@ -750,47 +789,12 @@ def _get_response(self, msg_in: str, user_id: str) -> str: "Was expecting a backend response with a regular expression but couldn't find a match." ) answer = formatted_response - return answer - def _prep_llm(self) -> BaseLLM: - """ - Method to prepare the LLM to be used. + return MessageResponse(answer) - Returns - ------- - BaseLLM - LLM to be used. - - Raises - ------ - NotImplemented - This must be implemented by a subclass of LlamaIndex. - """ - raise NotImplementedError( - "_prep_llm needs to be implemented by a subclass of LlamaIndex." - ) - - def _prep_tokenizer(self) -> callable[str] | None: - """ - Method to prepare the Tokenizer to be used. - - Returns - ------- - callable[str] | None - Tokenizer to use. A callable function on a string. - Can also be None if using the default set by LlamaIndex. - - Raises - ------ - NotImplemented - """ - raise NotImplementedError( - "_prep_tokenizer needs to be implemented by a subclass of LlamaIndex." - ) - - def _respond(self, message: str, user_id: str) -> MessageResponse: + def direct_message(self, message: str, user_id: str) -> MessageResponse: """ - Method to respond to a message in Slack. + Method to respond to a direct message in Slack. Parameters ---------- @@ -804,13 +808,11 @@ def _respond(self, message: str, user_id: str) -> MessageResponse: MessageResponse Response from the query engine. """ - backend_response = self._get_response(message, user_id) - - return MessageResponse(backend_response) + return self._get_response(message=message, user_id=user_id) - def direct_message(self, message: str, user_id: str) -> MessageResponse: + def channel_mention(self, message: str, user_id: str) -> MessageResponse: """ - Method to respond to a direct message in Slack. + Method to respond to a channel mention in Slack. Parameters ---------- @@ -824,11 +826,11 @@ def direct_message(self, message: str, user_id: str) -> MessageResponse: MessageResponse Response from the query engine. 
""" - return self._respond(message=message, user_id=user_id) + return self._get_response(message=message, user_id=user_id) - def channel_mention(self, message: str, user_id: str) -> MessageResponse: + def stream_message(self, message: str, user_id: str) -> None: """ - Method to respond to a channel mention in Slack. + Method to respond to a stream message in Slack. Parameters ---------- @@ -842,7 +844,37 @@ def channel_mention(self, message: str, user_id: str) -> MessageResponse: MessageResponse Response from the query engine. """ - return self._respond(message=message, user_id=user_id) + try: + if self.mode == "chat": + # create chat engine for user if does not exist + if self.chat_engine.get(user_id) is None: + self.chat_engine[user_id] = self.index.as_chat_engine( + chat_mode="condense_plus_context", + response_mode=self.response_mode, + similarity_top_k=self.k, + streaming=True, + ) + + # obtain chat engine for particular user + chat_engine = self.chat_engine[user_id] + response_stream = chat_engine.stream_chat(message) + elif self.mode == "query": + self.query_engine._response_synthesizer._streaming = True + response_stream = self.query_engine.query(message) + + print("\nReginald: ", end="") + for token in response_stream.response_gen: + print(token, end="") + + formatted_response = "\n\n\n" + self._format_sources(response_stream) + + for token in re.split(r"(\s+)", formatted_response): + print(token, end="") + except Exception as e: # ignore: broad-except + for token in re.split( + r"(\s+)", self.error_response_template.format(repr(e)) + ): + print(token, end="") class LlamaIndexOllama(LlamaIndex): diff --git a/reginald/run.py b/reginald/run.py index 956257d0..c69c6d4c 100644 --- a/reginald/run.py +++ b/reginald/run.py @@ -53,7 +53,7 @@ async def run_full_pipeline(**kwargs): await connect_client(client) -def run_chat_interact(**kwargs) -> ResponseModel: +def run_chat_interact(streaming: bool = False, **kwargs) -> ResponseModel: # set up response model response_model = setup_llm(**kwargs) while True: @@ -61,8 +61,12 @@ def run_chat_interact(**kwargs) -> ResponseModel: if message == "exit": return response_model - response = response_model.direct_message(message=message, user_id="chat") - print(f"\nReginald: {response.message}") + if streaming: + response = response_model.stream_message(message=message, user_id="chat") + print("") + else: + response = response_model.direct_message(message=message, user_id="chat") + print(f"\nReginald: {response.message}") async def connect_client(client: SocketModeClient): @@ -73,7 +77,13 @@ async def connect_client(client: SocketModeClient): await asyncio.sleep(float("inf")) -def main(cli: str, api_url: str | None = None, emoji: str = EMOJI_DEFAULT, **kwargs): +def main( + cli: str, + api_url: str | None = None, + emoji: str = EMOJI_DEFAULT, + streaming: bool = False, + **kwargs, +): # initialise logging if cli == "run_all": asyncio.run(run_full_pipeline(**kwargs)) @@ -82,7 +92,7 @@ def main(cli: str, api_url: str | None = None, emoji: str = EMOJI_DEFAULT, **kwa elif cli == "app": asyncio.run(run_reginald_app(**kwargs)) elif cli == "chat": - run_chat_interact(**kwargs) + run_chat_interact(streaming=streaming, **kwargs) elif cli == "create_index": create_index(**kwargs) else: diff --git a/tests/test_chat_interact.py b/tests/test_chat_interact.py index c7f935e4..02e28ea2 100644 --- a/tests/test_chat_interact.py +++ b/tests/test_chat_interact.py @@ -16,6 +16,17 @@ def test_chat_cli(): result = runner.invoke(cli, ["chat"], input="What's up dock?\nexit\n") 
term_stdout_lines: list[str] = result.stdout.split("\n") assert term_stdout_lines[0] == ">>> " + assert term_stdout_lines[1] == "Reginald: Hello! How are you?" + assert term_stdout_lines[2] == ">>> " + + +def test_chat_cli_no_stream(): + """Test sending an input `str` via `cli` and then exiting.""" + result = runner.invoke( + cli, ["chat", "--no-streaming"], input="What's up dock?\nexit\n" + ) + term_stdout_lines: list[str] = result.stdout.split("\n") + assert term_stdout_lines[0] == ">>> " assert term_stdout_lines[1] == "Reginald: Let's discuss this in a channel!" assert term_stdout_lines[2] == ">>> " From 2f2d1c732e0d122c584e1f4f1c0091f3a80c54bc Mon Sep 17 00:00:00 2001 From: rchan Date: Tue, 11 Jun 2024 23:44:06 +0100 Subject: [PATCH 02/11] add more exit strings for chat interface --- reginald/models/models/llama_utils.py | 5 +++-- reginald/run.py | 2 +- tests/test_chat_interact.py | 18 ++++++++++++++++++ 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/reginald/models/models/llama_utils.py b/reginald/models/models/llama_utils.py index 21a7181a..372f8856 100644 --- a/reginald/models/models/llama_utils.py +++ b/reginald/models/models/llama_utils.py @@ -7,11 +7,12 @@ B_SYS, E_SYS = "<>\n", "\n<>\n\n" # use for Llama2 # B_SYS, E_SYS = "", "\n\n" # use for Mistral DEFAULT_SYSTEM_PROMPT = """\ -You are a helpful, respectful and honest assistant. \ +You are a helpful, respectful and honest assistant named Reginald. \ Always answer as helpfully as possible and follow ALL given instructions. \ Do not speculate or make up information. \ Do not reference any given instructions or context. \ -If the content is not relevant, just ignore it and provide a helpful response. \ +If the content is not relevant, just ignore it and provide a helpful \ +response without mentioning the context. 
\ """ diff --git a/reginald/run.py b/reginald/run.py index c69c6d4c..aa716eb5 100644 --- a/reginald/run.py +++ b/reginald/run.py @@ -58,7 +58,7 @@ def run_chat_interact(streaming: bool = False, **kwargs) -> ResponseModel: response_model = setup_llm(**kwargs) while True: message = input(">>> ") - if message == "exit": + if message in ["exit", "exit()", "quit()", "bye Reginald"]: return response_model if streaming: diff --git a/tests/test_chat_interact.py b/tests/test_chat_interact.py index 02e28ea2..9b22b348 100644 --- a/tests/test_chat_interact.py +++ b/tests/test_chat_interact.py @@ -35,3 +35,21 @@ def test_chat_interact_exit(): with mock.patch.object(builtins, "input", lambda _: "exit"): interaction = run_chat_interact(model="hello") assert isinstance(interaction, Hello) + + +def test_chat_interact_exit_with_bracket(): + with mock.patch.object(builtins, "input", lambda _: "exit()"): + interaction = run_chat_interact(model="hello") + assert isinstance(interaction, Hello) + + +def test_chat_interact_quit_with_bracket(): + with mock.patch.object(builtins, "input", lambda _: "quit()"): + interaction = run_chat_interact(model="hello") + assert isinstance(interaction, Hello) + + +def test_chat_interact_bye(): + with mock.patch.object(builtins, "input", lambda _: "bye Reginald"): + interaction = run_chat_interact(model="hello") + assert isinstance(interaction, Hello) From a04b0c0b444da9e2e7e5419876cae49fecbd7107 Mon Sep 17 00:00:00 2001 From: rchan Date: Wed, 12 Jun 2024 11:54:51 +0100 Subject: [PATCH 03/11] add option to clear history in cli --- reginald/cli.py | 17 ++++++++++++++--- reginald/models/models/base.py | 1 + reginald/run.py | 22 +++++++++++++++++----- tests/test_chat_interact.py | 16 ++++++++++++++++ 4 files changed, 48 insertions(+), 8 deletions(-) diff --git a/reginald/cli.py b/reginald/cli.py index ef86c942..ec86d970 100644 --- a/reginald/cli.py +++ b/reginald/cli.py @@ -103,6 +103,11 @@ def run_all( str, typer.Option(envvar="LLAMA_INDEX_DEVICE", help=HELP_TEXT["device"]) ] = DEFAULT_ARGS["device"], ) -> None: + """ + Run all the components of the Reginald slack bot. + Establishes the connection to the Slack API, sets up the bot, + and creates a Reginald model to query from. + """ set_up_logging_config(level=20) main( cli="run_all", @@ -136,7 +141,7 @@ def bot( ] = EMOJI_DEFAULT, ) -> None: """ - Main function to run the Slack bot which sets up the bot + Run the Slack bot which sets up the bot (which uses an API for responding to messages) and then establishes a WebSocket connection to the Socket Mode servers and listens for events. @@ -214,8 +219,8 @@ def app( ] = DEFAULT_ARGS["device"], ) -> None: """ - Main function to run the app which sets up the response model - and then creates a FastAPI app to serve the model. + Sets up the response model and then creates a + FastAPI app to serve the model. The app listens on port 8000 and has two endpoints: - /direct_message: for obtaining responses from direct messages @@ -263,6 +268,9 @@ def create_index( int, typer.Option(envvar="LLAMA_INDEX_NUM_OUTPUT") ] = DEFAULT_ARGS["num_output"], ) -> None: + """ + Create an index for the Reginald model. + """ set_up_logging_config(level=20) main( cli="create_index", @@ -346,6 +354,9 @@ def chat( str, typer.Option(envvar="LLAMA_INDEX_DEVICE", help=HELP_TEXT["device"]) ] = DEFAULT_ARGS["device"], ) -> None: + """ + Run the chat interaction with the Reginald model. 
+ """ set_up_logging_config(level=40) main( cli="chat", diff --git a/reginald/models/models/base.py b/reginald/models/models/base.py index 16cb43a3..786fd57b 100644 --- a/reginald/models/models/base.py +++ b/reginald/models/models/base.py @@ -28,6 +28,7 @@ def __init__(self, emoji: Optional[str], *args: Any, **kwargs: Any): Emoji to use for the bot's response """ self.emoji = emoji + self.mode = "NA" def direct_message(self, message: str, user_id: str) -> MessageResponse: raise NotImplementedError diff --git a/reginald/run.py b/reginald/run.py index aa716eb5..d09d347d 100644 --- a/reginald/run.py +++ b/reginald/run.py @@ -21,7 +21,7 @@ LISTENING_MSG: Final[str] = "Listening for requests..." -async def run_bot(api_url: str | None, emoji: str): +async def run_bot(api_url: str | None, emoji: str) -> None: if api_url is None: logging.error( "API URL is not set. Please set the REGINALD_API_URL " @@ -44,7 +44,7 @@ async def run_reginald_app(**kwargs) -> None: uvicorn.run(app, host="0.0.0.0", port=8000) -async def run_full_pipeline(**kwargs): +async def run_full_pipeline(**kwargs) -> None: # set up response model response_model = setup_llm(**kwargs) bot = setup_slack_bot(response_model) @@ -56,20 +56,32 @@ async def run_full_pipeline(**kwargs): def run_chat_interact(streaming: bool = False, **kwargs) -> ResponseModel: # set up response model response_model = setup_llm(**kwargs) + user_id = "command_line_chat" + while True: message = input(">>> ") if message in ["exit", "exit()", "quit()", "bye Reginald"]: return response_model + if message in ["clear_history", "\clear_history"]: + if ( + response_model.mode == "chat" + and response_model.chat_engine.get(user_id) is not None + ): + response_model.chat_engine[user_id].reset() + print("\nReginald: History cleared.") + else: + print("\nReginald: No history to clear.") + continue if streaming: - response = response_model.stream_message(message=message, user_id="chat") + response = response_model.stream_message(message=message, user_id=user_id) print("") else: - response = response_model.direct_message(message=message, user_id="chat") + response = response_model.direct_message(message=message, user_id=user_id) print(f"\nReginald: {response.message}") -async def connect_client(client: SocketModeClient): +async def connect_client(client: SocketModeClient) -> None: await client.connect() # listen for events logging.info(LISTENING_MSG) diff --git a/tests/test_chat_interact.py b/tests/test_chat_interact.py index 9b22b348..2379372c 100644 --- a/tests/test_chat_interact.py +++ b/tests/test_chat_interact.py @@ -53,3 +53,19 @@ def test_chat_interact_bye(): with mock.patch.object(builtins, "input", lambda _: "bye Reginald"): interaction = run_chat_interact(model="hello") assert isinstance(interaction, Hello) + + +def test_chat_interact_clear_history(): + result = runner.invoke(cli, ["chat"], input="clear_history\n") + term_stdout_lines: list[str] = result.stdout.split("\n") + assert term_stdout_lines[0] == ">>> " + assert term_stdout_lines[1] == "Reginald: No history to clear." + assert term_stdout_lines[2] == ">>> " + + +def test_chat_interact_slash_clear_history(): + result = runner.invoke(cli, ["chat"], input="\clear_history\n") + term_stdout_lines: list[str] = result.stdout.split("\n") + assert term_stdout_lines[0] == ">>> " + assert term_stdout_lines[1] == "Reginald: No history to clear." 
+ assert term_stdout_lines[2] == ">>> " From 2a63e25ecae15f52e235b71abcbcdc5e2b295271 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 12 Jun 2024 14:21:36 +0100 Subject: [PATCH 04/11] feat: add `stream_progress_wrapper` `util` --- poetry.lock | 3 +- pyproject.toml | 5 +++- reginald/models/models/hello.py | 6 ++-- reginald/models/models/llama_index.py | 6 ++-- reginald/utils.py | 40 +++++++++++++++++++++++++++ 5 files changed, 53 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8351547c..4a2fdfe5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -4630,6 +4630,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -6731,4 +6732,4 @@ llama-index-notebooks = ["bitsandbytes", "gradio", "ipykernel", "nbconvert"] [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.12" -content-hash = "9c31a7068b0c587ac336dcd9fbccd44865ddd836af349d81e725507819b7b844" +content-hash = "1fc58571fc197416364d44dd56dfbb448bf212fa93668cb8ce1555abec625b16" diff --git a/pyproject.toml b/pyproject.toml index b7a9b80a..af339625 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,7 @@ typer = {extras = ["all"], version = "^0.12.3"} langchain-community = "^0.2.4" tiktoken = "^0.7.0" llama-index-embeddings-huggingface = "^0.2.1" +rich = "^13.7.1" [tool.poetry.group.dev.dependencies] @@ -105,10 +106,12 @@ build-backend = "poetry.core.masonry.api" minversion = "6.0" testpaths = [ "tests", + "reginald", ] addopts = """ - --cov=estios + --cov=reginald --cov-report=term:skip-covered --cov-append --pdbcls=IPython.terminal.debugger:TerminalPdb + --doctest-modules """ diff --git a/reginald/models/models/hello.py b/reginald/models/models/hello.py index 19607185..8a5da641 100644 --- a/reginald/models/models/hello.py +++ b/reginald/models/models/hello.py @@ -1,6 +1,7 @@ from typing import Any from reginald.models.models.base import MessageResponse, ResponseModel +from reginald.utils import stream_progress_wrapper class Hello(ResponseModel): @@ -18,6 +19,7 @@ def channel_mention(self, message: str, user_id: str) -> MessageResponse: return MessageResponse(f"Hello <@{user_id}>") def stream_message(self, message: str, user_id: str) -> None: - print("\nReginald: ", end="") - for token in ["Hello", "!", " How", " are", " you", "?"]: + # print("\nReginald: ", end="") + token_list: tuple[str, ...] 
= ("Hello", "!", " How", " are", " you", "?") + for token in stream_progress_wrapper(token_list): print(token, end="") diff --git a/reginald/models/models/llama_index.py b/reginald/models/models/llama_index.py index bb792e0f..b7be3619 100644 --- a/reginald/models/models/llama_index.py +++ b/reginald/models/models/llama_index.py @@ -44,7 +44,7 @@ from reginald.models.models.base import MessageResponse, ResponseModel from reginald.models.models.llama_utils import completion_to_prompt, messages_to_prompt -from reginald.utils import get_env_var +from reginald.utils import get_env_var, stream_progress_wrapper nest_asyncio.apply() @@ -862,8 +862,8 @@ def stream_message(self, message: str, user_id: str) -> None: self.query_engine._response_synthesizer._streaming = True response_stream = self.query_engine.query(message) - print("\nReginald: ", end="") - for token in response_stream.response_gen: + # print("\nReginald: ", end="") + for token in stream_progress_wrapper(response_stream.response_gen): print(token, end="") formatted_response = "\n\n\n" + self._format_sources(response_stream) diff --git a/reginald/utils.py b/reginald/utils.py index ec20fb30..a59793f8 100644 --- a/reginald/utils.py +++ b/reginald/utils.py @@ -1,5 +1,45 @@ import logging import os +from time import sleep +from typing import Any, Callable, Final, Iterable + +from rich.progress import Progress, SpinnerColumn, TextColumn + +REGINAL_PROMPT: Final[str] = "Reginald: " + + +def stream_progress_wrapper( + streamer: Callable | Iterable, + task_str: str = REGINAL_PROMPT, + progress_bar: bool = True, + *args, + **kwargs, +) -> Any: + """Add a progress bar for iteration. + + Examples + -------- + >>> from time import sleep + >>> def sleeper() -> str: + ... sleep(1) + ... return 'hi' + >>> stream_progress_wrapper(streamer=sleeper) + + Reginald: 'hi' + >>> stream_progress_wrapper(streamer=sleeper, progress_bar=False) + Reginald: 'hi' + """ + if isinstance(streamer, Callable): + streamer = streamer(*args, **kwargs) + if progress_bar: + with Progress( + TextColumn("{task.description}[progress.description]"), + SpinnerColumn(), + transient=True, + ) as progress: + progress.add_task(task_str) + print(task_str, end="") + return streamer def get_env_var( From 0f957ec19446402fde891a9460617fb27eb3fac8 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 12 Jun 2024 14:53:41 +0100 Subject: [PATCH 05/11] feat: add `end: str = \n` parameter to `stream_progress_wrapper` --- reginald/utils.py | 9 ++++++--- tests/test_chat_interact.py | 5 +++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/reginald/utils.py b/reginald/utils.py index a59793f8..4710ba26 100644 --- a/reginald/utils.py +++ b/reginald/utils.py @@ -12,6 +12,7 @@ def stream_progress_wrapper( streamer: Callable | Iterable, task_str: str = REGINAL_PROMPT, progress_bar: bool = True, + end: str = "\n", *args, **kwargs, ) -> Any: @@ -25,9 +26,11 @@ def stream_progress_wrapper( ... 
return 'hi' >>> stream_progress_wrapper(streamer=sleeper) - Reginald: 'hi' + Reginald: + 'hi' >>> stream_progress_wrapper(streamer=sleeper, progress_bar=False) - Reginald: 'hi' + Reginald: + 'hi' """ if isinstance(streamer, Callable): streamer = streamer(*args, **kwargs) @@ -38,7 +41,7 @@ def stream_progress_wrapper( transient=True, ) as progress: progress.add_task(task_str) - print(task_str, end="") + print(task_str, end=end) return streamer diff --git a/tests/test_chat_interact.py b/tests/test_chat_interact.py index 2379372c..da28c1c8 100644 --- a/tests/test_chat_interact.py +++ b/tests/test_chat_interact.py @@ -16,8 +16,9 @@ def test_chat_cli(): result = runner.invoke(cli, ["chat"], input="What's up dock?\nexit\n") term_stdout_lines: list[str] = result.stdout.split("\n") assert term_stdout_lines[0] == ">>> " - assert term_stdout_lines[1] == "Reginald: Hello! How are you?" - assert term_stdout_lines[2] == ">>> " + assert term_stdout_lines[1] == "Reginald: " + assert term_stdout_lines[2] == "Hello! How are you?" + assert term_stdout_lines[3] == ">>> " def test_chat_cli_no_stream(): From f211e316839aff6f966eed9c3d9e17ccf586b7db Mon Sep 17 00:00:00 2001 From: rchan Date: Wed, 12 Jun 2024 15:49:00 +0100 Subject: [PATCH 06/11] add flush=True to stream prints --- reginald/models/models/chat_completion.py | 13 ++++++------- reginald/models/models/hello.py | 2 +- reginald/models/models/llama_index.py | 14 ++++++++------ reginald/run.py | 2 ++ 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/reginald/models/models/chat_completion.py b/reginald/models/models/chat_completion.py index cd38487a..efb80080 100644 --- a/reginald/models/models/chat_completion.py +++ b/reginald/models/models/chat_completion.py @@ -6,7 +6,7 @@ from openai import AzureOpenAI, OpenAI from reginald.models.models.base import MessageResponse, ResponseModel -from reginald.utils import get_env_var +from reginald.utils import get_env_var, stream_progress_wrapper class ChatCompletionBase(ResponseModel): @@ -180,9 +180,8 @@ def stream_message(self, message: str, user_id: str) -> None: stream=True, ) - print("Reginald: ", end="") - for chunk in response: - print(chunk.choices[0].delta.content) + for chunk in stream_progress_wrapper(response): + print(chunk.choices[0].delta.content, end="", flush=True) class ChatCompletionOpenAI(ChatCompletionBase): @@ -269,6 +268,6 @@ def stream_message(self, message: str, user_id: str) -> None: messages=[{"role": "user", "content": message}], stream=True, ) - print("Reginald: ", end="") - for chunk in response: - print(chunk["choices"][0]["delta"]["content"]) + + for chunk in stream_progress_wrapper(response): + print(chunk.choices[0].delta.content, end="", flush=True) diff --git a/reginald/models/models/hello.py b/reginald/models/models/hello.py index 8a5da641..6030529b 100644 --- a/reginald/models/models/hello.py +++ b/reginald/models/models/hello.py @@ -22,4 +22,4 @@ def stream_message(self, message: str, user_id: str) -> None: # print("\nReginald: ", end="") token_list: tuple[str, ...] 
= ("Hello", "!", " How", " are", " you", "?") for token in stream_progress_wrapper(token_list): - print(token, end="") + print(token, end="", flush=True) diff --git a/reginald/models/models/llama_index.py b/reginald/models/models/llama_index.py index b7be3619..20f79d18 100644 --- a/reginald/models/models/llama_index.py +++ b/reginald/models/models/llama_index.py @@ -637,8 +637,11 @@ def __init__( else: logging.info("Loading the storage context") - storage_context = StorageContext.from_defaults( - persist_dir=self.data_dir / LLAMA_INDEX_DIR / self.which_index + storage_context = stream_progress_wrapper( + streamer=StorageContext.from_defaults( + persist_dir=self.data_dir / LLAMA_INDEX_DIR / self.which_index + ), + task_str="Loading the storage context...", ) logging.info("Loading the pre-processed index") @@ -862,19 +865,18 @@ def stream_message(self, message: str, user_id: str) -> None: self.query_engine._response_synthesizer._streaming = True response_stream = self.query_engine.query(message) - # print("\nReginald: ", end="") for token in stream_progress_wrapper(response_stream.response_gen): - print(token, end="") + print(token, end="", flush=True) formatted_response = "\n\n\n" + self._format_sources(response_stream) for token in re.split(r"(\s+)", formatted_response): - print(token, end="") + print(token, end="", flush=True) except Exception as e: # ignore: broad-except for token in re.split( r"(\s+)", self.error_response_template.format(repr(e)) ): - print(token, end="") + print(token, end="", flush=True) class LlamaIndexOllama(LlamaIndex): diff --git a/reginald/run.py b/reginald/run.py index d09d347d..99b617a9 100644 --- a/reginald/run.py +++ b/reginald/run.py @@ -62,6 +62,8 @@ def run_chat_interact(streaming: bool = False, **kwargs) -> ResponseModel: message = input(">>> ") if message in ["exit", "exit()", "quit()", "bye Reginald"]: return response_model + if message == "": + continue if message in ["clear_history", "\clear_history"]: if ( response_model.mode == "chat" From 7735f01f2e90080f585105090b72afc620fc1cdc Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 12 Jun 2024 16:30:06 +0100 Subject: [PATCH 07/11] fix: change `stream_progress_wrapper` to return `chain` --- reginald/utils.py | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/reginald/utils.py b/reginald/utils.py index 4710ba26..e021cafe 100644 --- a/reginald/utils.py +++ b/reginald/utils.py @@ -1,7 +1,8 @@ import logging import os +from itertools import chain from time import sleep -from typing import Any, Callable, Final, Iterable +from typing import Any, Callable, Final, Generator, Iterable from rich.progress import Progress, SpinnerColumn, TextColumn @@ -9,28 +10,30 @@ def stream_progress_wrapper( - streamer: Callable | Iterable, + streamer: Generator | list | tuple | Callable | chain, task_str: str = REGINAL_PROMPT, progress_bar: bool = True, end: str = "\n", *args, **kwargs, -) -> Any: +) -> chain | Generator | list | tuple | Callable: """Add a progress bar for iteration. Examples -------- >>> from time import sleep - >>> def sleeper() -> str: - ... sleep(1) - ... return 'hi' - >>> stream_progress_wrapper(streamer=sleeper) + >>> def sleeper(naps: int = 3) -> Generator[str, None, None]: + ... for nap in range(naps): + ... sleep(1) + ... 
yield f'nap: {nap}' + >>> tuple(stream_progress_wrapper(streamer=sleeper)) Reginald: - 'hi' - >>> stream_progress_wrapper(streamer=sleeper, progress_bar=False) + ('nap: 0', 'nap: 1', 'nap: 2') + >>> tuple(stream_progress_wrapper( + ... streamer=sleeper, progress_bar=False)) Reginald: - 'hi' + ('nap: 0', 'nap: 1', 'nap: 2') """ if isinstance(streamer, Callable): streamer = streamer(*args, **kwargs) @@ -40,7 +43,12 @@ def stream_progress_wrapper( SpinnerColumn(), transient=True, ) as progress: + if isinstance(streamer, list | tuple): + streamer = (item for item in streamer) + assert isinstance(streamer, Generator) progress.add_task(task_str) + first_item = next(streamer) + streamer = chain((first_item,), streamer) print(task_str, end=end) return streamer From 7efa70301cdf5c3f45cc6a534d6bd44577e7a846 Mon Sep 17 00:00:00 2001 From: rchan Date: Wed, 12 Jun 2024 16:55:19 +0100 Subject: [PATCH 08/11] remove stream_progress_wrapper call on storage --- reginald/models/models/llama_index.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/reginald/models/models/llama_index.py b/reginald/models/models/llama_index.py index 20f79d18..77eedb91 100644 --- a/reginald/models/models/llama_index.py +++ b/reginald/models/models/llama_index.py @@ -637,11 +637,8 @@ def __init__( else: logging.info("Loading the storage context") - storage_context = stream_progress_wrapper( - streamer=StorageContext.from_defaults( - persist_dir=self.data_dir / LLAMA_INDEX_DIR / self.which_index - ), - task_str="Loading the storage context...", + storage_context = StorageContext.from_defaults( + persist_dir=self.data_dir / LLAMA_INDEX_DIR / self.which_index ) logging.info("Loading the pre-processed index") From 7b2347c29a6167265ea0f31c0a1674bc866528cd Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 12 Jun 2024 17:17:33 +0100 Subject: [PATCH 09/11] feat: add `stream_iter_progress_wrapper` and refactor `stream_progress_wrapper` --- reginald/utils.py | 54 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 10 deletions(-) diff --git a/reginald/utils.py b/reginald/utils.py index e021cafe..00e9ad1e 100644 --- a/reginald/utils.py +++ b/reginald/utils.py @@ -9,14 +9,14 @@ REGINAL_PROMPT: Final[str] = "Reginald: " -def stream_progress_wrapper( - streamer: Generator | list | tuple | Callable | chain, +def stream_iter_progress_wrapper( + streamer: Iterable | Callable | chain, task_str: str = REGINAL_PROMPT, progress_bar: bool = True, - end: str = "\n", + end: str = "", *args, **kwargs, -) -> chain | Generator | list | tuple | Callable: +) -> Iterable: """Add a progress bar for iteration. Examples @@ -26,14 +26,12 @@ def stream_progress_wrapper( ... for nap in range(naps): ... sleep(1) ... yield f'nap: {nap}' - >>> tuple(stream_progress_wrapper(streamer=sleeper)) + >>> tuple(stream_iter_progress_wrapper(streamer=sleeper)) - Reginald: - ('nap: 0', 'nap: 1', 'nap: 2') - >>> tuple(stream_progress_wrapper( + Reginald: ('nap: 0', 'nap: 1', 'nap: 2') + >>> tuple(stream_iter_progress_wrapper( ... 
streamer=sleeper, progress_bar=False)) - Reginald: - ('nap: 0', 'nap: 1', 'nap: 2') + Reginald: ('nap: 0', 'nap: 1', 'nap: 2') """ if isinstance(streamer, Callable): streamer = streamer(*args, **kwargs) @@ -53,6 +51,42 @@ def stream_progress_wrapper( return streamer +def stream_progress_wrapper( + streamer: Callable, + task_str: str = REGINAL_PROMPT, + progress_bar: bool = True, + end: str = "\n", + *args, + **kwargs, +) -> chain | Generator | list | tuple | Callable: + """Add a progress bar for iteration. + + Examples + -------- + >>> from time import sleep + >>> def sleeper(seconds: int = 3) -> str: + ... sleep(seconds) + ... return f'{seconds} seconds nap' + >>> stream_progress_wrapper(sleeper) + + Reginald: + '3 seconds nap' + """ + if progress_bar: + with Progress( + TextColumn("{task.description}[progress.description]"), + SpinnerColumn(), + transient=True, + ) as progress: + progress.add_task(task_str) + results: Any = streamer(*args, **kwargs) + print(task_str, end=end) + return results + else: + print(task_str, end=end) + return streamer(*args, **kwargs) + + def get_env_var( var: str, log: bool = True, secret_value: bool = True, default: str = None ) -> str | None: From 72d92bdf64407cb05e4c8e71c0383685e02bb941 Mon Sep 17 00:00:00 2001 From: rchan Date: Wed, 12 Jun 2024 17:25:48 +0100 Subject: [PATCH 10/11] ahhfevq --- reginald/models/models/chat_completion.py | 6 ++--- reginald/models/models/hello.py | 4 ++-- reginald/models/models/llama_index.py | 28 +++++++++++++++++------ reginald/utils.py | 2 +- tests/test_chat_interact.py | 5 ++-- 5 files changed, 29 insertions(+), 16 deletions(-) diff --git a/reginald/models/models/chat_completion.py b/reginald/models/models/chat_completion.py index efb80080..fb0c0bcc 100644 --- a/reginald/models/models/chat_completion.py +++ b/reginald/models/models/chat_completion.py @@ -6,7 +6,7 @@ from openai import AzureOpenAI, OpenAI from reginald.models.models.base import MessageResponse, ResponseModel -from reginald.utils import get_env_var, stream_progress_wrapper +from reginald.utils import get_env_var, stream_iter_progress_wrapper class ChatCompletionBase(ResponseModel): @@ -180,7 +180,7 @@ def stream_message(self, message: str, user_id: str) -> None: stream=True, ) - for chunk in stream_progress_wrapper(response): + for chunk in stream_iter_progress_wrapper(response): print(chunk.choices[0].delta.content, end="", flush=True) @@ -269,5 +269,5 @@ def stream_message(self, message: str, user_id: str) -> None: stream=True, ) - for chunk in stream_progress_wrapper(response): + for chunk in stream_iter_progress_wrapper(response): print(chunk.choices[0].delta.content, end="", flush=True) diff --git a/reginald/models/models/hello.py b/reginald/models/models/hello.py index 6030529b..01ba40a3 100644 --- a/reginald/models/models/hello.py +++ b/reginald/models/models/hello.py @@ -1,7 +1,7 @@ from typing import Any from reginald.models.models.base import MessageResponse, ResponseModel -from reginald.utils import stream_progress_wrapper +from reginald.utils import stream_iter_progress_wrapper class Hello(ResponseModel): @@ -21,5 +21,5 @@ def channel_mention(self, message: str, user_id: str) -> MessageResponse: def stream_message(self, message: str, user_id: str) -> None: # print("\nReginald: ", end="") token_list: tuple[str, ...] 
= ("Hello", "!", " How", " are", " you", "?") - for token in stream_progress_wrapper(token_list): + for token in stream_iter_progress_wrapper(token_list): print(token, end="", flush=True) diff --git a/reginald/models/models/llama_index.py b/reginald/models/models/llama_index.py index 77eedb91..3d08355b 100644 --- a/reginald/models/models/llama_index.py +++ b/reginald/models/models/llama_index.py @@ -44,7 +44,11 @@ from reginald.models.models.base import MessageResponse, ResponseModel from reginald.models.models.llama_utils import completion_to_prompt, messages_to_prompt -from reginald.utils import get_env_var, stream_progress_wrapper +from reginald.utils import ( + get_env_var, + stream_iter_progress_wrapper, + stream_progress_wrapper, +) nest_asyncio.apply() @@ -632,17 +636,27 @@ def __init__( data_dir=self.data_dir, settings=settings, ) - self.index = data_creator.create_index() - data_creator.save_index() + self.index = stream_progress_wrapper( + data_creator.create_index, + task_str="Generating the index from scratch...", + ) + stream_progress_wrapper( + data_creator.save_index, + task_str="Saving the index...", + ) else: logging.info("Loading the storage context") - storage_context = StorageContext.from_defaults( - persist_dir=self.data_dir / LLAMA_INDEX_DIR / self.which_index + storage_context = stream_progress_wrapper( + StorageContext.from_defaults, + task_str="Loading the storage context...", + persist_dir=self.data_dir / LLAMA_INDEX_DIR / self.which_index, ) logging.info("Loading the pre-processed index") - self.index = load_index_from_storage( + self.index = stream_progress_wrapper( + load_index_from_storage, + task_str="Loading the pre-processed index...", storage_context=storage_context, settings=settings, ) @@ -862,7 +876,7 @@ def stream_message(self, message: str, user_id: str) -> None: self.query_engine._response_synthesizer._streaming = True response_stream = self.query_engine.query(message) - for token in stream_progress_wrapper(response_stream.response_gen): + for token in stream_iter_progress_wrapper(response_stream.response_gen): print(token, end="", flush=True) formatted_response = "\n\n\n" + self._format_sources(response_stream) diff --git a/reginald/utils.py b/reginald/utils.py index 00e9ad1e..ff54dadd 100644 --- a/reginald/utils.py +++ b/reginald/utils.py @@ -58,7 +58,7 @@ def stream_progress_wrapper( end: str = "\n", *args, **kwargs, -) -> chain | Generator | list | tuple | Callable: +) -> Any: """Add a progress bar for iteration. Examples diff --git a/tests/test_chat_interact.py b/tests/test_chat_interact.py index da28c1c8..2379372c 100644 --- a/tests/test_chat_interact.py +++ b/tests/test_chat_interact.py @@ -16,9 +16,8 @@ def test_chat_cli(): result = runner.invoke(cli, ["chat"], input="What's up dock?\nexit\n") term_stdout_lines: list[str] = result.stdout.split("\n") assert term_stdout_lines[0] == ">>> " - assert term_stdout_lines[1] == "Reginald: " - assert term_stdout_lines[2] == "Hello! How are you?" - assert term_stdout_lines[3] == ">>> " + assert term_stdout_lines[1] == "Reginald: Hello! How are you?" 
+ assert term_stdout_lines[2] == ">>> " def test_chat_cli_no_stream(): From 9afe1c484b4a19cdddb9cbf81d3cadca7c0a6587 Mon Sep 17 00:00:00 2001 From: Dr Griffith Rees Date: Wed, 12 Jun 2024 17:31:20 +0100 Subject: [PATCH 11/11] fix(test): ignore `WHITESPACE` and `ELLIPSES` --- poetry.lock | 6 +++--- pyproject.toml | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4a2fdfe5..982350f1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -4298,13 +4298,13 @@ files = [ [[package]] name = "pydantic" -version = "2.7.3" +version = "2.7.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.3-py3-none-any.whl", hash = "sha256:ea91b002777bf643bb20dd717c028ec43216b24a6001a280f83877fd2655d0b4"}, - {file = "pydantic-2.7.3.tar.gz", hash = "sha256:c46c76a40bb1296728d7a8b99aa73dd70a48c3510111ff290034f860c99c419e"}, + {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, + {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index af339625..6ca2093f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -115,3 +115,4 @@ addopts = """ --pdbcls=IPython.terminal.debugger:TerminalPdb --doctest-modules """ +doctest_optionflags = ["NORMALIZE_WHITESPACE", "ELLIPSIS",]
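
Usage note: a minimal, illustrative sketch of the two helpers this series adds in reginald/utils.py — stream_iter_progress_wrapper (spinner until the first streamed token arrives, then tokens pass through) and stream_progress_wrapper (spinner around a blocking call, returning its result). The fake_token_stream and slow_setup functions are placeholder stand-ins, not repository code, and the sketch assumes the reginald package (with rich) is importable.

from time import sleep
from typing import Generator

from reginald.utils import stream_iter_progress_wrapper, stream_progress_wrapper


def fake_token_stream() -> Generator[str, None, None]:
    # Placeholder stand-in for a real token stream, e.g. response_stream.response_gen.
    for token in ("Hello", ",", " world", "!"):
        sleep(0.2)
        yield token


def slow_setup() -> str:
    # Placeholder stand-in for a blocking call, e.g. loading an index from storage.
    sleep(1)
    return "index loaded"


# Spinner is shown only until the first token is produced; tokens then print as they stream.
for token in stream_iter_progress_wrapper(fake_token_stream):
    print(token, end="", flush=True)
print()

# Spinner is shown for the whole blocking call; the call's return value is passed through.
print(stream_progress_wrapper(slow_setup, task_str="Loading index: "))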