diff --git a/README.md b/README.md index 1c721f9e..ca1d9a25 100644 --- a/README.md +++ b/README.md @@ -66,9 +66,9 @@ pre-commit install source .env ``` -1. Run the bot: +1. Run the bot using [`slack_bot/run.py`](https://github.com/alan-turing-institute/reginald/blob/main/slack_bot/run.py). To see CLI arguments: ```bash - python slack_bot/bot.py + python slack_bot/run.py --help ``` The bot will now listen for @mentions in the channels it's added to and respond with a simple message. diff --git a/azure/setup.sh b/azure/setup.sh index 17b8f375..fae6bc50 100755 --- a/azure/setup.sh +++ b/azure/setup.sh @@ -125,7 +125,7 @@ if [ -z "$LLAMA_SLACK_BOT_TOKEN" ]; then fi AZURE_KEYVAULT_AUTH_VIA_CLI=true pulumi config set --secret LLAMA_SLACK_BOT_TOKEN "$LLAMA_SLACK_BOT_TOKEN" -# The ChatCompletionAzure and LlamaGPT35TurboAzure models need an Azure backend +# The ChatCompletionAzure and LlamaGPTAzure models need an Azure backend if [[ $REGINALD_MODEL == *azure* ]]; then if [ -z "$OPENAI_AZURE_API_BASE" ]; then echo "Please provide a OPENAI_AZURE_API_BASE:" @@ -139,7 +139,7 @@ if [[ $REGINALD_MODEL == *azure* ]]; then AZURE_KEYVAULT_AUTH_VIA_CLI=true pulumi config set --secret OPENAI_AZURE_API_KEY "$OPENAI_AZURE_API_KEY" fi -# The ChatCompletionOpenAI and LlamaGPT35TurboOpenAI models need an OpenAI key +# The ChatCompletionOpenAI and LlamaGPTOpenAI models need an OpenAI key if [[ $REGINALD_MODEL == *openai* ]]; then if [ -z "$OPENAI_API_KEY" ]; then echo "Please provide a OPENAI_API_KEY:" diff --git a/data_processing/insert_to_existing_LlamaIndex.ipynb b/data_processing/insert_to_existing_LlamaIndex.ipynb index 95005e7b..1b74f3e7 100644 --- a/data_processing/insert_to_existing_LlamaIndex.ipynb +++ b/data_processing/insert_to_existing_LlamaIndex.ipynb @@ -112,7 +112,7 @@ " llm_predictor=None,\n", " embed_model=embed_model,\n", " prompt_helper=None,\n", - " chunk_size_limit=CHUNK_SIZE_LIMIT,\n", + " chunk_size=CHUNK_SIZE_LIMIT,\n", " )" ] }, diff --git a/models/llama-index-hack/falcon_7b_4bit_llama_index.ipynb b/models/llama-index-hack/falcon_7b_4bit_llama_index.ipynb index 8b9eab60..dd0f25dc 100644 --- a/models/llama-index-hack/falcon_7b_4bit_llama_index.ipynb +++ b/models/llama-index-hack/falcon_7b_4bit_llama_index.ipynb @@ -55,10 +55,8 @@ "outputs": [], "source": [ "from llama_index import (\n", - " SimpleDirectoryReader,\n", " LangchainEmbedding,\n", - " GPTListIndex,\n", - " GPTVectorStoreIndex,\n", + " VectorStoreIndex,\n", " PromptHelper,\n", " LLMPredictor,\n", " ServiceContext,\n", @@ -66,24 +64,18 @@ ")\n", "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n", "from langchain.llms.base import LLM\n", - "from langchain.chat_models import ChatOpenAI\n", "\n", "import pandas as pd\n", "import torch\n", "import transformers\n", "from transformers import (\n", " pipeline,\n", - " AutoModel,\n", " AutoModelForCausalLM,\n", " AutoTokenizer\n", ")\n", - "import accelerate\n", - "import gradio as gr\n", "\n", "import logging\n", - "logging.getLogger().setLevel(logging.CRITICAL)\n", - "\n", - "from tqdm.notebook import tqdm" + "logging.getLogger().setLevel(logging.CRITICAL)" ] }, { @@ -332,13 +324,13 @@ "# set maximum input size\n", "max_input_size = 2048\n", "# set maximum chunk overlap\n", - "chunk_size_limit = 1024\n", + "chunk_size = 1024\n", "chunk_overlap_ratio = 0.1\n", "\n", "prompt_helper = PromptHelper(\n", " context_window=max_input_size,\n", " num_output=num_output,\n", - " chunk_size_limit=chunk_size_limit,\n", + " chunk_size_limit=chunk_size,\n", " 
chunk_overlap_ratio=chunk_overlap_ratio,\n", ")" ] @@ -350,14 +342,14 @@ "metadata": {}, "outputs": [], "source": [ - " service_context = ServiceContext.from_defaults(\n", + "service_context = ServiceContext.from_defaults(\n", " llm_predictor=llm_predictor_falcon_7b,\n", " embed_model=embed_model,\n", " prompt_helper=prompt_helper,\n", - " chunk_size_limit=chunk_size_limit,\n", + " chunk_size=chunk_size,\n", ")\n", "\n", - "index = GPTVectorStoreIndex.from_documents(\n", + "index = VectorStoreIndex.from_documents(\n", " documents, service_context=service_context\n", ")\n", "query_engine_falcon_7b = index.as_query_engine()" @@ -592,14 +584,6 @@ "response = query_engine_falcon_7b.query(\"what should a new starter in REG do?\")\n", "print(response.response)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa8827e7-93fd-4c94-971b-e9299f7f0f54", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/models/llama-index-hack/huggingface_llm_example.ipynb b/models/llama-index-hack/huggingface_llm_example.ipynb new file mode 100644 index 00000000..ac0ea2d6 --- /dev/null +++ b/models/llama-index-hack/huggingface_llm_example.ipynb @@ -0,0 +1,216 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "d46536c5-1a59-4f38-87d9-eeb75c3594f5", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "from llama_index.llms import HuggingFaceLLM\n", + "from llama_index.prompts import PromptTemplate\n", + "from llama_index import (\n", + " LangchainEmbedding,\n", + " VectorStoreIndex,\n", + " PromptHelper,\n", + " ServiceContext,\n", + " Document\n", + ")\n", + "from langchain.embeddings.huggingface import HuggingFaceEmbeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "ecde86b0-6d8f-4782-acaa-7d9d3a60753c", + "metadata": {}, + "outputs": [], + "source": [ + "# Model names (make sure you have access on HF)\n", + "MODEL_NAME = \"togethercomputer/RedPajama-INCITE-Chat-3B-v1\"\n", + "\n", + "SYSTEM_PROMPT = \"\"\"\n", + "You are an AI assistant that answers questions in a friendly manner, based on the given source documents.\n", + "Here are some rules you always follow:\n", + "- Generate human readable output, avoid creating output with gibberish text.\n", + "- Generate only the requested output, don't include any other language before or after the requested output.\n", + "- Never say thank you, that you are happy to help, that you are an AI agent, etc. 
Just answer directly.\n",
+ "- Generate professional language typically used in business documents in North America.\n",
+ "- Never generate offensive or foul language.\n",
+ "\"\"\"\n",
+ "\n",
+ "query_wrapper_prompt = PromptTemplate(\n",
+ "    \"<human>: <<SYS>>\\n\" + SYSTEM_PROMPT + \"<</SYS>>\\n\\n{query_str}\\n<bot>:\"\n",
+ ")\n",
+ "\n",
+ "llm = HuggingFaceLLM(\n",
+ "    context_window=2048,\n",
+ "    max_new_tokens=512,\n",
+ "    generate_kwargs={\"temperature\": 0.25, \"do_sample\": False},\n",
+ "    query_wrapper_prompt=query_wrapper_prompt,\n",
+ "    tokenizer_name=MODEL_NAME,\n",
+ "    model_name=MODEL_NAME,\n",
+ "    device_map=\"cpu\",\n",
+ "    tokenizer_kwargs={\"max_length\": 2048},\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "c713971d-4afe-4f94-9a45-391cd2d7a46b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "wiki = pd.read_csv(\"../../data/turing_internal/wiki-scraped.csv\")\n",
+ "handbook = pd.read_csv(\"../../data/public/handbook-scraped.csv\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "ddfb2b5e-c70a-44f4-8958-f3825bfc7382",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "text_list = list(wiki[\"body\"].astype(\"str\")) + list(handbook[\"body\"].astype(\"str\"))\n",
+ "documents = [Document(text=t) for t in text_list]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "9d167121-be36-42db-a42d-62d17b2641f1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "hfemb = HuggingFaceEmbeddings()\n",
+ "embed_model = LangchainEmbedding(hfemb)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "bf87dd7b-e185-444e-a6df-722e0c5f69f7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# set number of output tokens\n",
+ "num_output = 512\n",
+ "# set maximum input size\n",
+ "max_input_size = 1900\n",
+ "# set chunk size and chunk overlap ratio\n",
+ "chunk_size = 512\n",
+ "chunk_overlap_ratio = 0.1\n",
+ "\n",
+ "prompt_helper = PromptHelper(\n",
+ "    context_window=max_input_size,\n",
+ "    num_output=num_output,\n",
+ "    chunk_size_limit=chunk_size,\n",
+ "    chunk_overlap_ratio=chunk_overlap_ratio,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "b6287118-ed08-49b5-a1f8-4fb95a014014",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "service_context = ServiceContext.from_defaults(\n",
+ "    llm=llm,\n",
+ "    embed_model=embed_model,\n",
+ "    prompt_helper=prompt_helper,\n",
+ ")\n",
+ "\n",
+ "index = VectorStoreIndex.from_documents(\n",
+ "    documents,\n",
+ "    service_context=service_context,\n",
+ ")\n",
+ "\n",
+ "query_engine = index.as_query_engine()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "fe8be8b5-ee08-4151-9138-f3242f1721a1",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Setting `pad_token_id` to `eos_token_id`:0 for open-end generation.\n",
+ "Setting `pad_token_id` to `eos_token_id`:0 for open-end generation.\n",
+ "Setting `pad_token_id` to `eos_token_id`:0 for open-end generation.\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = query_engine.query(\"what should a new starter in REG do?\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "505e1749-26af-47b4-a33c-efb00de73825",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " A dog is a domesticated animal that is typically smaller than a cat. Dogs are typically more active and energetic than cats. 
Cats are typically more independent and aloof than dogs.\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<bot>: A dog is a domesticated animal that is typically smaller than a cat. Dogs are typically more active and energetic than cats. Cats are typically more independent and aloof than dogs.\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<bot>: A dog is a domesticated animal that is typically smaller than a cat. Dogs are typically more active and energetic than cats. Cats are typically more independent and aloof than dogs.\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<bot>: A dog is a domesticated animal that is typically smaller than a cat. Dogs are typically more active and energetic than cats. Cats are typically more independent and aloof than dogs.\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<bot>: A dog is a domesticated animal that is typically smaller than a cat. Dogs are typically more active and energetic than cats. Cats are typically more independent and aloof than dogs.\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<bot>: A dog is a domesticated animal that is typically smaller than a cat. Dogs are typically more active and energetic than cats. Cats are typically more independent and aloof than dogs.\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<bot>: A dog is a domesticated animal that is typically smaller than a cat. Dogs are typically more active and energetic than cats. Cats are typically more independent and aloof than dogs.\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<bot>: A dog is a domesticated animal that is typically smaller than a cat. Dogs are typically more active and energetic than cats. 
Cats are typically more independent and aloof than dogs.\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<human>: What is the difference between a dog and a cat?\n",
+ "<bot>: A dog is a\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(response.response)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "reginald",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/models/llama-index-hack/llama2_ccp_chat.ipynb b/models/llama-index-hack/llama2_ccp_chat.ipynb
index c24cc03a..e1ea2611 100644
--- a/models/llama-index-hack/llama2_ccp_chat.ipynb
+++ b/models/llama-index-hack/llama2_ccp_chat.ipynb
@@ -1748,13 +1748,13 @@
 "# set maximum input size\n",
 "context_window = 4096\n",
 "# set maximum chunk overlap\n",
- "chunk_size_limit = 512\n",
+ "chunk_size = 512\n",
 "chunk_overlap_ratio = 0.1\n",
 "\n",
 "prompt_helper = PromptHelper(\n",
 "    context_window=context_window,\n",
 "    num_output=num_output,\n",
- "    chunk_size_limit=chunk_size_limit,\n",
+ "    chunk_size_limit=chunk_size,\n",
 "    chunk_overlap_ratio=chunk_overlap_ratio,\n",
 ")"
 ]
@@ -1768,11 +1768,11 @@
 },
 "outputs": [],
 "source": [
- "  service_context = ServiceContext.from_defaults(\n",
+ "service_context = ServiceContext.from_defaults(\n",
 "    llm_predictor=LLMPredictor(llm=llm),\n",
 "    embed_model=embed_model,\n",
 "    prompt_helper=prompt_helper,\n",
- "    chunk_size=chunk_size_limit,\n",
+ "    chunk_size=chunk_size,\n",
 ")\n",
 "\n",
 "index = VectorStoreIndex.from_documents(\n",
diff --git a/models/llama-index-hack/LLM_Q&A_with_Open_Source_Hugging_Face_Models.ipynb b/models/llama-index-hack/openai_llm_example.ipynb
similarity index 90%
rename from models/llama-index-hack/LLM_Q&A_with_Open_Source_Hugging_Face_Models.ipynb
rename to models/llama-index-hack/openai_llm_example.ipynb
index 344396a7..afef09ae 100644
--- a/models/llama-index-hack/LLM_Q&A_with_Open_Source_Hugging_Face_Models.ipynb
+++ b/models/llama-index-hack/openai_llm_example.ipynb
@@ -6,44 +6,23 @@
 "metadata": {
 "id": "aGCX8IBekbNd"
 },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/Users/rchan/opt/miniconda3/envs/reginald/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], + "outputs": [], "source": [ + "import pandas as pd\n", + "import gradio as gr\n", + "\n", "from llama_index import (\n", - " SimpleDirectoryReader,\n", " LangchainEmbedding,\n", - " GPTListIndex,\n", - " GPTVectorStoreIndex,\n", + " VectorStoreIndex,\n", " PromptHelper,\n", - " LLMPredictor,\n", " ServiceContext,\n", - " Document\n", + " Document,\n", ")\n", - "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n", - "from langchain.llms.base import LLM\n", - "from langchain.chat_models import ChatOpenAI\n", "\n", - "import pandas as pd\n", - "import torch\n", - "import transformers\n", - "from transformers import (\n", - " pipeline,\n", - " AutoModel,\n", - " AutoModelForCausalLM,\n", - " AutoTokenizer\n", - ")\n", - "import accelerate\n", - "import gradio as gr\n", + "from llama_index.llms import ChatMessage, AzureOpenAI\n", + "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n", "\n", - "from tqdm.notebook import tqdm" + "import os" ] }, { @@ -51,186 +30,26 @@ "execution_count": 2, "metadata": {}, "outputs": [], - "source": [ - "import logging\n", - "logging.getLogger().setLevel(logging.CRITICAL)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# test = CustomLLM(model_name=model_name,\n", - "# pipeline=distilgpt2_pipeline)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
Unnamed: 0urlidtitleis_publicbodysummaryauthorkeywords
00https://alan-turing-institute.github.io/REG-ha...NaNDocsTruenanNaNNaNNaN
11https://alan-turing-institute.github.io/REG-ha...NaNContributingTrueContributing This section contains a guide for...Contributing This section contains a guide for...NaNContributing
22https://alan-turing-institute.github.io/REG-ha...NaNAdvancedTrueAdvanced Using Data Through using templates Hu...Advanced Using Data Through using templates Hu...NaNPartial Using Shortcodes Creating Advanced Dat...
33https://alan-turing-institute.github.io/REG-ha...NaNRecognising ContributionsTrueRecognising Contributions We aim to recognise ...Recognising Contributions We aim to recognise ...NaNContributions Recognising
44https://alan-turing-institute.github.io/REG-ha...NaNEditing a PageTrueEditing a Page If you followed the instruction...Editing a Page If you followed the instruction...NaNEditing Hugo Theme Content Shortcodes a Matter...
\n", - "
" - ], - "text/plain": [ - " Unnamed: 0 url id \\\n", - "0 0 https://alan-turing-institute.github.io/REG-ha... NaN \n", - "1 1 https://alan-turing-institute.github.io/REG-ha... NaN \n", - "2 2 https://alan-turing-institute.github.io/REG-ha... NaN \n", - "3 3 https://alan-turing-institute.github.io/REG-ha... NaN \n", - "4 4 https://alan-turing-institute.github.io/REG-ha... NaN \n", - "\n", - " title is_public \\\n", - "0 Docs True \n", - "1 Contributing True \n", - "2 Advanced True \n", - "3 Recognising Contributions True \n", - "4 Editing a Page True \n", - "\n", - " body \\\n", - "0 nan \n", - "1 Contributing This section contains a guide for... \n", - "2 Advanced Using Data Through using templates Hu... \n", - "3 Recognising Contributions We aim to recognise ... \n", - "4 Editing a Page If you followed the instruction... \n", - "\n", - " summary author \\\n", - "0 NaN NaN \n", - "1 Contributing This section contains a guide for... NaN \n", - "2 Advanced Using Data Through using templates Hu... NaN \n", - "3 Recognising Contributions We aim to recognise ... NaN \n", - "4 Editing a Page If you followed the instruction... NaN \n", - "\n", - " keywords \n", - "0 NaN \n", - "1 Contributing \n", - "2 Partial Using Shortcodes Creating Advanced Dat... \n", - "3 Contributions Recognising \n", - "4 Editing Hugo Theme Content Shortcodes a Matter... " - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ "handbook = pd.read_csv(\"../../data/public/handbook-scraped.csv\")\n", - "handbook[\"body\"] = handbook[\"body\"].astype(\"str\")\n", - "handbook.head()" + "turing = pd.read_csv(\"../../data/public/turingacuk-no-boilerplate.csv\")" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 3, "metadata": { "id": "DFM8PBkZpsaI" }, "outputs": [], "source": [ - "text_list = list(handbook[\"body\"])\n", - "documents = [Document(t) for t in text_list]" + "text_list = list(handbook[\"body\"].astype(\"str\")) + list(turing[\"body\"].astype(\"str\"))\n", + "documents = [Document(text=t) for t in text_list]" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -242,21 +61,38 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "- Are documents cut up? How are they split up?" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using OpenAI API\n", + "## Using AzureOpenAI\n", + "\n", + "Here we are using Azure's deployment of OpenAI models is very similar to using the OpenAI API. 
For this, we need to set up a few environment variables:\n", + "- `OPENAI_API_TYPE`: set this to \"azure\"\n", + "- `OPENAI_API_VERSION`: currently set to \"2023-03-15-preview\"\n", + "- `OPENAI_API_BASE`: Azure endpoint which looks like https://YOUR_RESOURCE_NAME.openai.azure.com/\n", + "- `OPENAI_API_KEY`: Azure API key\n", "\n", - "(First set up your `OPENAI_API_KEY` environment variable!)" + "```bash\n", + "export OPENAI_API_TYPE=\"azure\"\n", + "export OPENAI_API_VERSION=\"2023-03-15-preview\"\n", + "export OPENAI_AZURE_API_BASE=\"...\"\n", + "export OPENAI_AZURE_API_KEY=\"...\"\n", + "```\n", + "\n", + "Alternatively, to do this in Python:\n", + "```python\n", + "import os\n", + "os.environ['OPENAI_AZURE_API_BASE'] = \"...\"\n", + "os.environ['OPENAI_AZURE_API_KEY'] = \"...\"\n", + "```\n", + "\n", + "Alternatively, if you're using the OpenAI API key with a personal OpenAI account, you just need to use `llama_index.llm.OpenAI` instead of `llama_index.llm.AzureOpenAI` and set up your `OPENAI_API_KEY` environment variable and set this to be your personal OpenAI API key (which you can generate from https://platform.openai.com/account/api-keys):\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=\"sk-...\"\n", + "```" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "metadata": { "id": "RrBFPZf8pvu_" }, @@ -265,271 +101,175 @@ "# set number of output tokens\n", "num_output = 512\n", "# set maximum input size\n", - "max_input_size = 4096\n", + "max_input_size = 1900\n", "# set maximum chunk overlap\n", - "max_chunk_overlap = 20\n", - "chunk_size_limit = 600\n", + "chunk_size = 512\n", + "chunk_overlap_ratio = 0.1\n", "\n", - "prompt_helper = PromptHelper(context_window=max_input_size,\n", - " num_output=num_output,\n", - " chunk_size_limit=chunk_size_limit,\n", - " max_chunk_overlap=max_chunk_overlap)" + "prompt_helper = PromptHelper(\n", + " context_window=max_input_size,\n", + " num_output=num_output,\n", + " chunk_size_limit=chunk_size,\n", + " chunk_overlap_ratio=chunk_overlap_ratio,\n", + ")" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7,\n", - " model=\"gpt-3.5-turbo\",\n", - " max_tokens=num_output))" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "id": "B2oE7bPfpyif" - }, - "outputs": [], - "source": [ - "service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor,\n", - " embed_model=embed_model,\n", - " prompt_helper=prompt_helper)\n", - "index = GPTVectorStoreIndex.from_documents(documents,\n", - " service_context=service_context)\n", - "query_engine = index.as_query_engine(similarity_top_k=3)" + "llm=AzureOpenAI(model=\"gpt-3.5-turbo\",\n", + " engine=\"reginald-gpt35-turbo\",\n", + " temperature=0.1,\n", + " max_tokens=num_output,\n", + " api_key=os.getenv(\"OPENAI_AZURE_API_KEY\"),\n", + " api_base=os.getenv(\"OPENAI_AZURE_API_BASE\"),\n", + " api_type=\"azure\",\n", + " api_version=\"2023-03-15-preview\")" ] }, { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "scrolled": false - }, - "outputs": [], - "source": [ - "response = query_engine.query(\"What are the events that REG run?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 13, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\"The context provided does not mention any specific events that REG runs. 
Instead, it provides information on how the organization operates, including how work revolves around projects and service areas, and how new members can get up to speed on the organization's processes.\"" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "response.response" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "U6ei1YUCp2IX", - "outputId": "79c5aea9-3e90-4b3f-d5df-cdf43fa89611" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "REG runs Tech Talks, Reading and Discussion Groups, and All REG Meetings. They also participate in Turing Catchups & Town Halls, Turing Events, and Hack Week.\n" - ] - } - ], "source": [ - "print(response.response)" + "We can use this LLM directly:" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "> Source (Doc id: 4c3b4289-d34d-4a7a-92f8-65fc6ebe1039): Projects This section describes how we coordinate our project work. It does not go into details o...\n", - "\n", - "> Source (Doc id: a70cc3d4-01ae-4740-a38c-c7f56f525afe): First Few Days Welcome! We are super pleased that you’re joining our group. This page gives you a...\n", - "\n", - "> Source (Doc id: c5d53752-5004-49d2-bc2d-fd4bd49416ab): New Joiners This section helps new members of REG get up to speed. Please go through these page a...\n" + "a British mathematician and computer scientist who is widely considered to be the father of modern computing. He played a key role in cracking the German Enigma code during World War II, which helped the Allies win the war. Turing also made significant contributions to the development of artificial intelligence and computer science, including the concept of a universal machine that could perform any computation that could be described in a set of instructions. Despite his groundbreaking work, Turing was persecuted for his homosexuality and ultimately committed suicide in 1954. In 2009, the British government issued a formal apology for the way Turing was treated, and in 2013, Queen Elizabeth II granted him a posthumous pardon.\n" ] } ], "source": [ - "print(response.get_formatted_sources())" + "resp = llm.complete(\"Alan Turing is \")\n", + "print(resp)" ] }, { - "cell_type": "code", - "execution_count": 14, - "metadata": { - "scrolled": false - }, - "outputs": [], - "source": [ - "response = query_engine.query(\"What reading groups do REG have?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "> Source (Doc id: 3e140f6f-7d7c-45b9-b2b7-b5a5641e08ea): Reading Groups As part of projects or for general interest, members of the team create reading gr...\n", - "\n", - "> Source (Doc id: 48a6e29e-19e1-4eb4-a147-b739b7a77770): This is a page for adding information and times about regular events that occur in REG and across...\n" - ] - } - ], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 16, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The provided context is not relevant to the original question about reading groups. 
Therefore, the original answer still stands unchanged:\n", - "\n", - "REG has various reading groups including Reinforcement Learning reading group, Bitcoin reading group, Humanities & Data Science discussion group, Bayesian reading group, and Category Theory Reading Group. The contact details for these groups can be found in The-REGistry.\n" - ] - } - ], "source": [ - "print(response.response)" + "We can also use it as a chat bot by providing it a chat history:" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Based on the context provided, there is no information indicating that \"22 days collaborations\" is a relevant term or concept. Therefore, the original answer remains unchanged.\n" + "assistant: My name is Ada, and I am a programmer from the future.\n" ] } ], "source": [ - "response = query_engine.query(\"What are 22 days collaborations?\")\n", - "print(response.response)" + "messages = [\n", + " ChatMessage(role=\"system\", content=\"you are a programmer that time travels to shakespearian times\"),\n", + " ChatMessage(role=\"user\", content=\"what is your name?\"),\n", + "]\n", + "resp = llm.chat(messages)\n", + "print(resp)" ] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "I'm sorry, but the context information does not provide any information about what your first query was.\n" + "assistant: As a programmer, I have been working on a time machine for years. Finally, I was able to make it work and I decided to travel back in time to the Shakespearean era to experience the culture and history of that time period.\n" ] } ], "source": [ - "response = query_engine.query(\"what was my first query to you?\")\n", - "print(response.response)" + "messages = [\n", + " ChatMessage(role=\"system\", content=\"you are a programmer that time travels to shakespearian times\"),\n", + " ChatMessage(role=\"user\", content=\"what is your name?\"),\n", + " ChatMessage(role=\"assistant\", content=\"my name is Alan Turing\"),\n", + " ChatMessage(role=\"user\", content=\"how did you get here?\"),\n", + "]\n", + "resp = llm.chat(messages)\n", + "print(resp)" ] }, { - "cell_type": "code", - "execution_count": 19, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Based on the given Institute-wide HERA bands for 2022-23, the starting salary for a Senior RSE in REG would be £49,025. However, it is important to note that on promotion, individuals are appointed at the bottom of the salary range for the role.\n" - ] - } - ], "source": [ - "response = query_engine.query(\"What is the starting salary for a Senior RSE in REG?\")\n", - "print(response.response)" + "## Set up query engine with OpenAI API" ] }, { "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Based on the 2022-23 Institute-wide HERA Bands, the starting salary for a Senior RSE in REG for 2023-24 would be £49,025. 
However, it's important to note that on promotion, people will be appointed at the bottom of the salary range for the role.\n" - ] - } - ], + "execution_count": 10, + "metadata": { + "id": "B2oE7bPfpyif" + }, + "outputs": [], "source": [ - "response = query_engine.query(\"What is the starting salary for a Senior RSE in REG for 2023-24?\")\n", - "print(response.response)" + "service_context = ServiceContext.from_defaults(\n", + " llm=llm,\n", + " embed_model=embed_model,\n", + " prompt_helper=prompt_helper\n", + ")\n", + "\n", + "index = VectorStoreIndex.from_documents(\n", + " documents,\n", + " service_context=service_context\n", + ")\n", + "\n", + "query_engine = index.as_query_engine(similarity_top_k=3)" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Based on the salary information provided, the medium salary for a Senior RSE in REG would be between £49,025 and £59,150. However, without additional information about the specific qualifications and experience of the Senior RSE in question, it is difficult to determine a more precise salary estimate.\n" + "Ryan Chan is a member of the Research Engineering Team who completed his PhD at the Turing (with the University of Warwick) where he focused on developing Monte Carlo methods for statistical inference. He graduated from the University of Leeds in 2018 with an MMath Mathematics degree, where he took a particular interest in statistical theory and methodology. Ryan's areas of interest include computational statistics, Monte Carlo methods and Bayesian theory. He was awarded the Royal Statistical Society Prize in 2018 for his performance in his MMath Mathematics degree at the University of Leeds.\n" ] } ], "source": [ - "response = query_engine.query(\"What is the medium salary for a Senior RSE in REG?\")\n", + "response = query_engine.query(\"who is Ryan Chan?\")\n", "print(response.response)" ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Based on the given context, it seems that there may be some confusion around salary bands and distinctions within the organization. If an employee is already at the top of their pay band, there may be limited options for increasing their salary. However, if there are discrepancies or confusion around salary bands or distinctions, it may be worth discussing with HR or leadership to clarify and potentially explore options for increasing pay. Additionally, as mentioned in the original answer, consistently performing at a high level and continuing to develop in the role may also increase the likelihood of earning more.\n" + "REG does not have a set system for tracking projects, but rather uses various methods depending on the project and team preferences. Some teams may use a project tracker board with issues for each project, while others may use tools like Forecast to track allocations. Additionally, projects may want to consider creating \"Hacktoberfest\" style issues for newcomers to quickly get involved, and shadowing projects for a short while is encouraged for new starters to get an idea of how REG works. 
If you are unsure how a particular team is tracking their projects, feel free to ask on Slack or during a project retrospective.\n" ] } ], "source": [ - "response = query_engine.query(\"How do I get leadership to pay me more?\")\n", + "response = query_engine.query(\"how do REG keep track of projects?\")\n", "print(response.response)" ] }, @@ -542,7 +282,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -560,7 +300,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -588,7 +328,7 @@ "data": { "text/plain": [] }, - "execution_count": 35, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -596,253 +336,6 @@ "source": [ "gr.Interface(fn=predict, inputs=\"text\", outputs=\"text\").launch()" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using falcon-7b" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n" - ] - } - ], - "source": [ - "# check torch is using GPU acceleration (on M1)\n", - "print(torch.backends.mps.is_available())\n", - "print(torch.backends.mps.is_built())" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Using mps device\n" - ] - } - ], - "source": [ - "# Get cpu, gpu or mps device for training/infernce\n", - "accelerator = accelerate.Accelerator()\n", - "device = accelerator.device\n", - "print(f\"Using {device} device\")" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Explicitly passing a `revision` is encouraged when loading a configuration with custom code to ensure no malicious code has been contributed in a newer revision.\n", - "Explicitly passing a `revision` is encouraged when loading a configuration with custom code to ensure no malicious code has been contributed in a newer revision.\n", - "Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision.\n", - "Loading checkpoint shards: 100%|███████████████████| 2/2 [00:36<00:00, 18.02s/it]\n", - "The model 'RWForCausalLM' is not supported for text-generation. 
Supported models are ['BartForCausalLM', 'BertLMHeadModel', 'BertGenerationDecoder', 'BigBirdForCausalLM', 'BigBirdPegasusForCausalLM', 'BioGptForCausalLM', 'BlenderbotForCausalLM', 'BlenderbotSmallForCausalLM', 'BloomForCausalLM', 'CamembertForCausalLM', 'CodeGenForCausalLM', 'CTRLLMHeadModel', 'Data2VecTextForCausalLM', 'ElectraForCausalLM', 'ErnieForCausalLM', 'GitForCausalLM', 'GPT2LMHeadModel', 'GPT2LMHeadModel', 'GPTNeoForCausalLM', 'GPTNeoXForCausalLM', 'GPTNeoXJapaneseForCausalLM', 'GPTJForCausalLM', 'MarianForCausalLM', 'MBartForCausalLM', 'MegatronBertForCausalLM', 'MvpForCausalLM', 'OpenAIGPTLMHeadModel', 'OPTForCausalLM', 'PegasusForCausalLM', 'PLBartForCausalLM', 'ProphetNetForCausalLM', 'QDQBertLMHeadModel', 'ReformerModelWithLMHead', 'RemBertForCausalLM', 'RobertaForCausalLM', 'RobertaPreLayerNormForCausalLM', 'RoCBertForCausalLM', 'RoFormerForCausalLM', 'Speech2Text2ForCausalLM', 'TransfoXLLMHeadModel', 'TrOCRForCausalLM', 'XGLMForCausalLM', 'XLMWithLMHeadModel', 'XLMProphetNetForCausalLM', 'XLMRobertaForCausalLM', 'XLMRobertaXLForCausalLM', 'XLNetLMHeadModel', 'XLMRobertaAdapterModel', 'RobertaAdapterModel', 'AlbertAdapterModel', 'BeitAdapterModel', 'BertAdapterModel', 'BertGenerationAdapterModel', 'DistilBertAdapterModel', 'DebertaV2AdapterModel', 'DebertaAdapterModel', 'BartAdapterModel', 'MBartAdapterModel', 'GPT2AdapterModel', 'GPTJAdapterModel', 'T5AdapterModel', 'ViTAdapterModel'].\n" - ] - } - ], - "source": [ - "model = \"tiiuae/falcon-7b\"\n", - "tokenizer = AutoTokenizer.from_pretrained(model)\n", - "falcon_7b = pipeline(\n", - " \"text-generation\",\n", - " model=model,\n", - " tokenizer=tokenizer,\n", - " device=device,\n", - " trust_remote_code=True,\n", - ")\n", - "\n", - "class CustomLLM(LLM):\n", - " model_name: str\n", - " pipeline: transformers.pipelines.text_generation.TextGenerationPipeline\n", - " \n", - " @property\n", - " def _llm_type(self) -> str:\n", - " return \"custom\"\n", - " \n", - " def _call(self, prompt, stop=None):\n", - " return self.pipeline(prompt, max_new_tokens=9999)[0][\"generated_text\"]\n", - " \n", - " @property\n", - " def _identifying_params(self) -> dict:\n", - " \"\"\"Get the identifying parameters.\"\"\"\n", - " return {\"model_name\": self.model_name}\n", - " \n", - "llm_predictor_falcon_7b = LLMPredictor(llm=CustomLLM(model_name=model, pipeline=falcon_7b))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For this model, we must have a lower `max_input_size` and smaller `chunk_size_limit`.\n", - "\n", - "Let's load in the model and inspect the architecture (there might be a better way to do this...)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Explicitly passing a `revision` is encouraged when loading a configuration with custom code to ensure no malicious code has been contributed in a newer revision.\n", - "Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision.\n", - "Loading checkpoint shards: 100%|███████████████████| 2/2 [00:40<00:00, 20.11s/it]\n" - ] - } - ], - "source": [ - "falcon = AutoModelForCausalLM.from_pretrained(model, trust_remote_code=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "RWForCausalLM(\n", - " (transformer): RWModel(\n", - " (word_embeddings): Embedding(65024, 4544)\n", 
- " (h): ModuleList(\n", - " (0-31): 32 x DecoderLayer(\n", - " (input_layernorm): LayerNorm((4544,), eps=1e-05, elementwise_affine=True)\n", - " (self_attention): Attention(\n", - " (maybe_rotary): RotaryEmbedding()\n", - " (query_key_value): Linear(in_features=4544, out_features=4672, bias=False)\n", - " (dense): Linear(in_features=4544, out_features=4544, bias=False)\n", - " (attention_dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (mlp): MLP(\n", - " (dense_h_to_4h): Linear(in_features=4544, out_features=18176, bias=False)\n", - " (act): GELU(approximate='none')\n", - " (dense_4h_to_h): Linear(in_features=18176, out_features=4544, bias=False)\n", - " )\n", - " )\n", - " )\n", - " (ln_f): LayerNorm((4544,), eps=1e-05, elementwise_affine=True)\n", - " )\n", - " (lm_head): Linear(in_features=4544, out_features=65024, bias=False)\n", - ")" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "falcon" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Looks like context length is 4544 (is this right?). Let's set `max_input_size` the same as we had it before." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "# set number of output tokens\n", - "num_output = 512\n", - "# set maximum input size\n", - "max_input_size = 4096\n", - "# set maximum chunk overlap\n", - "max_chunk_overlap = 20\n", - "chunk_size_limit = 600\n", - "\n", - "prompt_helper = PromptHelper(context_window=max_input_size,\n", - " num_output=num_output,\n", - " chunk_size_limit=chunk_size_limit,\n", - " max_chunk_overlap=max_chunk_overlap)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_falcon_7b,\n", - " embed_model=embed_model,\n", - " prompt_helper=prompt_helper)\n", - "index = GPTVectorStoreIndex.from_documents(documents,\n", - " service_context=service_context)\n", - "query_engine_falcon_7b = index.as_query_engine()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Setting `pad_token_id` to `eos_token_id`:11 for open-end generation.\n" - ] - } - ], - "source": [ - "response = query_engine_falcon_7b.query(\"What are 22 days?\")\n", - "print(response.response)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "wwFjGtsrhZKf", - "scrolled": false - }, - "outputs": [], - "source": [ - "response = query_engine_falcon_7b.query(\"How much budget do we have for professional development activities?\")\n", - "print(response.response)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -854,7 +347,7 @@ "kernelspec": { "display_name": "reginald", "language": "python", - "name": "reginald" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/poetry.lock b/poetry.lock index cf0db93c..bee4578e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,33 @@ # This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 
+[[package]] +name = "accelerate" +version = "0.22.0" +description = "Accelerate" +optional = true +python-versions = ">=3.8.0" +files = [ + {file = "accelerate-0.22.0-py3-none-any.whl", hash = "sha256:d132e57bfc4b0417464997b14aa141fd88696cbb4472eb03116c2bd97542befc"}, + {file = "accelerate-0.22.0.tar.gz", hash = "sha256:2b0a83e3cd07c89448c5d5a94f72bc1db98d5e0c498ca17984871f01dbf83247"}, +] + +[package.dependencies] +numpy = ">=1.17" +packaging = ">=20.0" +psutil = "*" +pyyaml = "*" +torch = ">=1.10.0" + +[package.extras] +dev = ["bitsandbytes", "black (>=23.1,<24.0)", "datasets", "deepspeed", "evaluate", "hf-doc-builder (>=0.3.0)", "parameterized", "pytest", "pytest-subtests", "pytest-xdist", "rich", "ruff (>=0.0.241)", "scikit-learn", "scipy", "tqdm", "transformers", "urllib3 (<2.0.0)"] +quality = ["black (>=23.1,<24.0)", "hf-doc-builder (>=0.3.0)", "ruff (>=0.0.241)", "urllib3 (<2.0.0)"] +rich = ["rich"] +sagemaker = ["sagemaker"] +test-dev = ["bitsandbytes", "datasets", "deepspeed", "evaluate", "scikit-learn", "scipy", "tqdm", "transformers"] +test-prod = ["parameterized", "pytest", "pytest-subtests", "pytest-xdist"] +test-trackers = ["comet-ml", "tensorboard", "wandb"] +testing = ["bitsandbytes", "datasets", "deepspeed", "evaluate", "parameterized", "pytest", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "tqdm", "transformers"] + [[package]] name = "adapter-transformers" version = "3.2.1" @@ -309,13 +337,13 @@ files = [ [[package]] name = "atlassian-python-api" -version = "3.41.1" +version = "3.41.2" description = "Python Atlassian REST API Wrapper" optional = false python-versions = "*" files = [ - {file = "atlassian-python-api-3.41.1.tar.gz", hash = "sha256:6ff96802aa03c597f593ec96d37d5c71ce271819c6be689cd7af508393170f5c"}, - {file = "atlassian_python_api-3.41.1-py3-none-any.whl", hash = "sha256:9c2c078dc1bb7e49e644ae804068b4c4cd27245d0d7a02d1f4962c773426c651"}, + {file = "atlassian-python-api-3.41.2.tar.gz", hash = "sha256:a2022977da5a395412ace8e29c2c541312f07d45fc750435dec036af53daceda"}, + {file = "atlassian_python_api-3.41.2-py3-none-any.whl", hash = "sha256:27c2361a22ee8cc69988f67a591488cbfce09e5f284da000011af11944d2bc96"}, ] [package.dependencies] @@ -377,33 +405,33 @@ lxml = ["lxml"] [[package]] name = "black" -version = "23.7.0" +version = "23.9.1" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.8" files = [ - {file = "black-23.7.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:5c4bc552ab52f6c1c506ccae05681fab58c3f72d59ae6e6639e8885e94fe2587"}, - {file = "black-23.7.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:552513d5cd5694590d7ef6f46e1767a4df9af168d449ff767b13b084c020e63f"}, - {file = "black-23.7.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:86cee259349b4448adb4ef9b204bb4467aae74a386bce85d56ba4f5dc0da27be"}, - {file = "black-23.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:501387a9edcb75d7ae8a4412bb8749900386eaef258f1aefab18adddea1936bc"}, - {file = "black-23.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb074d8b213749fa1d077d630db0d5f8cc3b2ae63587ad4116e8a436e9bbe995"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a"}, - {file = "black-23.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926"}, - {file = "black-23.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:f9062af71c59c004cd519e2fb8f5d25d39e46d3af011b41ab43b9c74e27e236f"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:01ede61aac8c154b55f35301fac3e730baf0c9cf8120f65a9cd61a81cfb4a0c3"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:327a8c2550ddc573b51e2c352adb88143464bb9d92c10416feb86b0f5aee5ff6"}, - {file = "black-23.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1c6022b86f83b632d06f2b02774134def5d4d4f1dac8bef16d90cda18ba28a"}, - {file = "black-23.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:27eb7a0c71604d5de083757fbdb245b1a4fae60e9596514c6ec497eb63f95320"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:8417dbd2f57b5701492cd46edcecc4f9208dc75529bcf76c514864e48da867d9"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:47e56d83aad53ca140da0af87678fb38e44fd6bc0af71eebab2d1f59b1acf1d3"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:25cc308838fe71f7065df53aedd20327969d05671bac95b38fdf37ebe70ac087"}, - {file = "black-23.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:642496b675095d423f9b8448243336f8ec71c9d4d57ec17bf795b67f08132a91"}, - {file = "black-23.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:ad0014efc7acf0bd745792bd0d8857413652979200ab924fbf239062adc12491"}, - {file = "black-23.7.0-py3-none-any.whl", hash = "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96"}, - {file = "black-23.7.0.tar.gz", hash = "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb"}, + {file = "black-23.9.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301"}, + {file = "black-23.9.1-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100"}, + {file = 
"black-23.9.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71"}, + {file = "black-23.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7"}, + {file = "black-23.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186"}, + {file = "black-23.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f"}, + {file = "black-23.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204"}, + {file = "black-23.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377"}, + {file = "black-23.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393"}, + {file = "black-23.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9"}, + {file = "black-23.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f"}, + {file = "black-23.9.1-py3-none-any.whl", hash = "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9"}, + {file = "black-23.9.1.tar.gz", hash = "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d"}, ] [package.dependencies] @@ -795,29 +823,33 @@ vision = ["Pillow (>=6.2.1)"] [[package]] name = "debugpy" -version = "1.6.7.post1" +version = "1.7.0" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.7" files = [ - {file = "debugpy-1.6.7.post1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:903bd61d5eb433b6c25b48eae5e23821d4c1a19e25c9610205f5aeaccae64e32"}, - {file = "debugpy-1.6.7.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16882030860081e7dd5aa619f30dec3c2f9a421e69861125f83cc372c94e57d"}, - {file = "debugpy-1.6.7.post1-cp310-cp310-win32.whl", hash = 
"sha256:eea8d8cfb9965ac41b99a61f8e755a8f50e9a20330938ad8271530210f54e09c"}, - {file = "debugpy-1.6.7.post1-cp310-cp310-win_amd64.whl", hash = "sha256:85969d864c45f70c3996067cfa76a319bae749b04171f2cdeceebe4add316155"}, - {file = "debugpy-1.6.7.post1-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:890f7ab9a683886a0f185786ffbda3b46495c4b929dab083b8c79d6825832a52"}, - {file = "debugpy-1.6.7.post1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4ac7a4dba28801d184b7fc0e024da2635ca87d8b0a825c6087bb5168e3c0d28"}, - {file = "debugpy-1.6.7.post1-cp37-cp37m-win32.whl", hash = "sha256:3370ef1b9951d15799ef7af41f8174194f3482ee689988379763ef61a5456426"}, - {file = "debugpy-1.6.7.post1-cp37-cp37m-win_amd64.whl", hash = "sha256:65b28435a17cba4c09e739621173ff90c515f7b9e8ea469b92e3c28ef8e5cdfb"}, - {file = "debugpy-1.6.7.post1-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:92b6dae8bfbd497c90596bbb69089acf7954164aea3228a99d7e43e5267f5b36"}, - {file = "debugpy-1.6.7.post1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72f5d2ecead8125cf669e62784ef1e6300f4067b0f14d9f95ee00ae06fc7c4f7"}, - {file = "debugpy-1.6.7.post1-cp38-cp38-win32.whl", hash = "sha256:f0851403030f3975d6e2eaa4abf73232ab90b98f041e3c09ba33be2beda43fcf"}, - {file = "debugpy-1.6.7.post1-cp38-cp38-win_amd64.whl", hash = "sha256:3de5d0f97c425dc49bce4293df6a04494309eedadd2b52c22e58d95107e178d9"}, - {file = "debugpy-1.6.7.post1-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:38651c3639a4e8bbf0ca7e52d799f6abd07d622a193c406be375da4d510d968d"}, - {file = "debugpy-1.6.7.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:038c51268367c9c935905a90b1c2d2dbfe304037c27ba9d19fe7409f8cdc710c"}, - {file = "debugpy-1.6.7.post1-cp39-cp39-win32.whl", hash = "sha256:4b9eba71c290852f959d2cf8a03af28afd3ca639ad374d393d53d367f7f685b2"}, - {file = "debugpy-1.6.7.post1-cp39-cp39-win_amd64.whl", hash = "sha256:973a97ed3b434eab0f792719a484566c35328196540676685c975651266fccf9"}, - {file = "debugpy-1.6.7.post1-py2.py3-none-any.whl", hash = "sha256:1093a5c541af079c13ac8c70ab8b24d1d35c8cacb676306cf11e57f699c02926"}, - {file = "debugpy-1.6.7.post1.zip", hash = "sha256:fe87ec0182ef624855d05e6ed7e0b7cb1359d2ffa2a925f8ec2d22e98b75d0ca"}, + {file = "debugpy-1.7.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:17ad9a681aca1704c55b9a5edcb495fa8f599e4655c9872b7f9cf3dc25890d48"}, + {file = "debugpy-1.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1285920a3f9a75f5d1acf59ab1b9da9ae6eb9a05884cd7674f95170c9cafa4de"}, + {file = "debugpy-1.7.0-cp310-cp310-win32.whl", hash = "sha256:a6f43a681c5025db1f1c0568069d1d1bad306a02e7c36144912b26d9c90e4724"}, + {file = "debugpy-1.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:9e9571d831ad3c75b5fb6f3efcb71c471cf2a74ba84af6ac1c79ce00683bed4b"}, + {file = "debugpy-1.7.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:538765a41198aa88cc089295b39c7322dd598f9ef1d52eaae12145c63bf9430a"}, + {file = "debugpy-1.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7e8cf91f8f3f9b5fad844dd88427b85d398bda1e2a0cd65d5a21312fcbc0c6f"}, + {file = "debugpy-1.7.0-cp311-cp311-win32.whl", hash = "sha256:18a69f8e142a716310dd0af6d7db08992aed99e2606108732efde101e7c65e2a"}, + {file = "debugpy-1.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:7515a5ba5ee9bfe956685909c5f28734c1cecd4ee813523363acfe3ca824883a"}, + {file = "debugpy-1.7.0-cp37-cp37m-macosx_11_0_x86_64.whl", hash = 
"sha256:bc8da67ade39d9e75608cdb8601d07e63a4e85966e0572c981f14e2cf42bcdef"}, + {file = "debugpy-1.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5036e918c6ba8fc4c4f1fd0207d81db634431a02f0dc2ba51b12fd793c8c9de"}, + {file = "debugpy-1.7.0-cp37-cp37m-win32.whl", hash = "sha256:d5be95b3946a4d7b388e45068c7b75036ac5a610f41014aee6cafcd5506423ad"}, + {file = "debugpy-1.7.0-cp37-cp37m-win_amd64.whl", hash = "sha256:0e90314a078d4e3f009520c8387aba8f74c3034645daa7a332a3d1bb81335756"}, + {file = "debugpy-1.7.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:1565fd904f9571c430adca597771255cff4f92171486fced6f765dcbdfc8ec8d"}, + {file = "debugpy-1.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6516f36a2e95b3be27f171f12b641e443863f4ad5255d0fdcea6ae0be29bb912"}, + {file = "debugpy-1.7.0-cp38-cp38-win32.whl", hash = "sha256:2b0e489613bc066051439df04c56777ec184b957d6810cb65f235083aef7a0dc"}, + {file = "debugpy-1.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:7bf0b4bbd841b2397b6a8de15da9227f1164f6d43ceee971c50194eaed930a9d"}, + {file = "debugpy-1.7.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:ad22e1095b9977af432465c1e09132ba176e18df3834b1efcab1a449346b350b"}, + {file = "debugpy-1.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f625e427f21423e5874139db529e18cb2966bdfcc1cb87a195538c5b34d163d1"}, + {file = "debugpy-1.7.0-cp39-cp39-win32.whl", hash = "sha256:18bca8429d6632e2d3435055416d2d88f0309cc39709f4f6355c8d412cc61f24"}, + {file = "debugpy-1.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:dc8a12ac8b97ef3d6973c6679a093138c7c9b03eb685f0e253269a195f651559"}, + {file = "debugpy-1.7.0-py2.py3-none-any.whl", hash = "sha256:f6de2e6f24f62969e0f0ef682d78c98161c4dca29e9fb05df4d2989005005502"}, + {file = "debugpy-1.7.0.zip", hash = "sha256:676911c710e85567b17172db934a71319ed9d995104610ce23fd74a07f66e6f6"}, ] [[package]] @@ -1212,13 +1244,13 @@ test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit" [[package]] name = "gradio" -version = "3.42.0" +version = "3.43.2" description = "Python library for easily interacting with trained machine learning models" optional = true python-versions = ">=3.8" files = [ - {file = "gradio-3.42.0-py3-none-any.whl", hash = "sha256:29f35877064d74b9e59dc6d3f1af0ae82244ca3f54cef63cdbc00773ad46fe4d"}, - {file = "gradio-3.42.0.tar.gz", hash = "sha256:97bba26c8090b613f424f07ac4b8bc7a594907e8b4ec6bcb3c08277a26fd19f9"}, + {file = "gradio-3.43.2-py3-none-any.whl", hash = "sha256:3b0f92a5eb604a0d79066e36c983aa9e50ba8d8bda0a1f5533b3389a3839a07b"}, + {file = "gradio-3.43.2.tar.gz", hash = "sha256:f15fae8c418d7bd9f1a0b0d38e4b6f8354405123925ee6a6ae4bfade492c870b"}, ] [package.dependencies] @@ -1283,7 +1315,6 @@ files = [ {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, - {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, {file = 
"greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, @@ -1292,7 +1323,6 @@ files = [ {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, - {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, @@ -1322,7 +1352,6 @@ files = [ {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, - {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, @@ -1331,7 +1360,6 @@ files = [ {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, - {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, @@ -1429,13 
+1457,13 @@ files = [ [[package]] name = "httpcore" -version = "0.17.3" +version = "0.18.0" description = "A minimal low-level HTTP client." optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "httpcore-0.17.3-py3-none-any.whl", hash = "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"}, - {file = "httpcore-0.17.3.tar.gz", hash = "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888"}, + {file = "httpcore-0.18.0-py3-none-any.whl", hash = "sha256:adc5398ee0a476567bf87467063ee63584a8bce86078bf748e48754f60202ced"}, + {file = "httpcore-0.18.0.tar.gz", hash = "sha256:13b5e5cd1dca1a6636a6aaea212b19f4f85cd88c366a2b82304181b769aab3c9"}, ] [package.dependencies] @@ -1450,18 +1478,18 @@ socks = ["socksio (==1.*)"] [[package]] name = "httpx" -version = "0.24.1" +version = "0.25.0" description = "The next generation HTTP client." optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"}, - {file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"}, + {file = "httpx-0.25.0-py3-none-any.whl", hash = "sha256:181ea7f8ba3a82578be86ef4171554dd45fec26a02556a744db029a0a27b7100"}, + {file = "httpx-0.25.0.tar.gz", hash = "sha256:47ecda285389cb32bb2691cc6e069e3ab0205956f681c5b2ad2325719751d875"}, ] [package.dependencies] certifi = "*" -httpcore = ">=0.15.0,<0.18.0" +httpcore = ">=0.18.0,<0.19.0" idna = "*" sniffio = "*" @@ -1473,13 +1501,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.16.4" +version = "0.17.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.16.4-py3-none-any.whl", hash = "sha256:0d3df29932f334fead024afc7cb4cc5149d955238b8b5e42dcf9740d6995a349"}, - {file = "huggingface_hub-0.16.4.tar.gz", hash = "sha256:608c7d4f3d368b326d1747f91523dbd1f692871e8e2e7a4750314a2dd8b63e14"}, + {file = "huggingface_hub-0.17.1-py3-none-any.whl", hash = "sha256:7a9dc262a2e0ecf8c1749c8b9a7510a7a22981849f561af4345942d421822451"}, + {file = "huggingface_hub-0.17.1.tar.gz", hash = "sha256:dd828d2a24ee6af86392042cc1052c482c053eb574864669f0cae4d29620e62c"}, ] [package.dependencies] @@ -1492,26 +1520,27 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", 
"pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +docs = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)", "watchdog"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -inference = ["aiohttp", "pydantic"] -quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"] +inference = ["aiohttp", "pydantic (<2.0)"] +quality = ["black (==23.7)", "mypy (==1.5.1)", "ruff (>=0.0.241)"] tensorflow = ["graphviz", "pydot", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["torch"] -typing = ["pydantic", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] +typing = ["pydantic (<2.0)", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] [[package]] name = "identify" -version = "2.5.27" +version = "2.5.28" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.5.27-py2.py3-none-any.whl", hash = "sha256:fdb527b2dfe24602809b2201e033c2a113d7bdf716db3ca8e3243f735dcecaba"}, - {file = "identify-2.5.27.tar.gz", hash = "sha256:287b75b04a0e22d727bc9a41f0d4f3c1bcada97490fa6eabb5b28f0e9097e733"}, + {file = "identify-2.5.28-py2.py3-none-any.whl", hash = "sha256:87816de144bf46d161bd5b3e8f5596b16cade3b80be537087334b26bc5c177f3"}, + {file = "identify-2.5.28.tar.gz", hash = "sha256:94bb59643083ebd60dc996d043497479ee554381fbc5307763915cda49b0e78f"}, ] [package.extras] @@ -1950,13 +1979,13 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"] [[package]] name = "langsmith" -version = "0.0.33" +version = "0.0.35" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langsmith-0.0.33-py3-none-any.whl", hash = "sha256:cdff11a6272d3cba72c151960c0319b1d36e0770d37f05061d6c31ef1a2404a4"}, - {file = "langsmith-0.0.33.tar.gz", hash = "sha256:c9c640ac238d4cabc8f9744e04346d3dfaf0ca6c9dc37bd2a25b8031eda35dc3"}, + {file = "langsmith-0.0.35-py3-none-any.whl", hash = "sha256:96b5cf69952a218d881a27817381fa67ebd283eba9c6814b72180530f9748348"}, + {file = "langsmith-0.0.35.tar.gz", hash = "sha256:127fee806b475430b530bdf9bc397ea1c65ec144a23fa1b5bba2bba31d9d1e76"}, ] [package.dependencies] @@ -1965,21 +1994,21 @@ requests = ">=2,<3" [[package]] name = "llama-cpp-python" -version = "0.1.83" +version = "0.1.84" description = "A Python wrapper for llama.cpp" optional = false python-versions = ">=3.7" files = [ - {file = "llama_cpp_python-0.1.83.tar.gz", hash = "sha256:9f40656e46a85a3c3427790246e03490bb90202c37cb99732a095ffcb99efe54"}, + {file = "llama_cpp_python-0.1.84.tar.gz", hash = "sha256:8840bfa90acfdd80486e3c11393fe6ff6841598f03278bdf3502e2d901978f13"}, ] [package.dependencies] -diskcache = ">=5.6.1,<6.0.0" -numpy = ">=1.24.4,<2.0.0" -typing-extensions = ">=4.7.1,<5.0.0" +diskcache = ">=5.6.1" +numpy = ">=1.20.0" +typing-extensions = ">=4.5.0" [package.extras] -server = ["fastapi (>=0.100.0)", "pydantic-settings (>=2.0.1)", "sse-starlette (>=1.6.1)", "uvicorn (>=0.23.2,<0.24.0)"] +server = ["fastapi (>=0.100.0)", "pydantic-settings (>=2.0.1)", "sse-starlette (>=1.6.1)", "uvicorn (>=0.22.0)"] [[package]] name = "llama-hub" @@ -2122,52 +2151,58 @@ marshmallow = ">=2.0.0" [[package]] name = "matplotlib" -version = "3.7.2" +version = "3.7.3" description = "Python plotting package" optional = true python-versions = ">=3.8" files = [ - {file = "matplotlib-3.7.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:2699f7e73a76d4c110f4f25be9d2496d6ab4f17345307738557d345f099e07de"}, - {file = "matplotlib-3.7.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a8035ba590658bae7562786c9cc6ea1a84aa49d3afab157e414c9e2ea74f496d"}, - {file = "matplotlib-3.7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f8e4a49493add46ad4a8c92f63e19d548b2b6ebbed75c6b4c7f46f57d36cdd1"}, - {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71667eb2ccca4c3537d9414b1bc00554cb7f91527c17ee4ec38027201f8f1603"}, - {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:152ee0b569a37630d8628534c628456b28686e085d51394da6b71ef84c4da201"}, - {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:070f8dddd1f5939e60aacb8fa08f19551f4b0140fab16a3669d5cd6e9cb28fc8"}, - {file = "matplotlib-3.7.2-cp310-cp310-win32.whl", hash = "sha256:fdbb46fad4fb47443b5b8ac76904b2e7a66556844f33370861b4788db0f8816a"}, - {file = "matplotlib-3.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:23fb1750934e5f0128f9423db27c474aa32534cec21f7b2153262b066a581fd1"}, - {file = "matplotlib-3.7.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:30e1409b857aa8a747c5d4f85f63a79e479835f8dffc52992ac1f3f25837b544"}, - {file = "matplotlib-3.7.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:50e0a55ec74bf2d7a0ebf50ac580a209582c2dd0f7ab51bc270f1b4a0027454e"}, - {file = "matplotlib-3.7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ac60daa1dc83e8821eed155796b0f7888b6b916cf61d620a4ddd8200ac70cd64"}, - {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:305e3da477dc8607336ba10bac96986d6308d614706cae2efe7d3ffa60465b24"}, - {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c308b255efb9b06b23874236ec0f10f026673ad6515f602027cc8ac7805352d"}, - {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60c521e21031632aa0d87ca5ba0c1c05f3daacadb34c093585a0be6780f698e4"}, - {file = "matplotlib-3.7.2-cp311-cp311-win32.whl", hash = "sha256:26bede320d77e469fdf1bde212de0ec889169b04f7f1179b8930d66f82b30cbc"}, - {file = "matplotlib-3.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:af4860132c8c05261a5f5f8467f1b269bf1c7c23902d75f2be57c4a7f2394b3e"}, - {file = "matplotlib-3.7.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:a1733b8e84e7e40a9853e505fe68cc54339f97273bdfe6f3ed980095f769ddc7"}, - {file = "matplotlib-3.7.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d9881356dc48e58910c53af82b57183879129fa30492be69058c5b0d9fddf391"}, - {file = "matplotlib-3.7.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f081c03f413f59390a80b3e351cc2b2ea0205839714dbc364519bcf51f4b56ca"}, - {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cd120fca3407a225168238b790bd5c528f0fafde6172b140a2f3ab7a4ea63e9"}, - {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a2c1590b90aa7bd741b54c62b78de05d4186271e34e2377e0289d943b3522273"}, - {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d2ff3c984b8a569bc1383cd468fc06b70d7b59d5c2854ca39f1436ae8394117"}, - {file = "matplotlib-3.7.2-cp38-cp38-win32.whl", hash = "sha256:5dea00b62d28654b71ca92463656d80646675628d0828e08a5f3b57e12869e13"}, - {file = "matplotlib-3.7.2-cp38-cp38-win_amd64.whl", hash = "sha256:0f506a1776ee94f9e131af1ac6efa6e5bc7cb606a3e389b0ccb6e657f60bb676"}, - {file = "matplotlib-3.7.2-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:6515e878f91894c2e4340d81f0911857998ccaf04dbc1bba781e3d89cbf70608"}, - {file = "matplotlib-3.7.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:71f7a8c6b124e904db550f5b9fe483d28b896d4135e45c4ea381ad3b8a0e3256"}, - {file = "matplotlib-3.7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12f01b92ecd518e0697da4d97d163b2b3aa55eb3eb4e2c98235b3396d7dad55f"}, - {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7e28d6396563955f7af437894a36bf2b279462239a41028323e04b85179058b"}, - {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbcf59334ff645e6a67cd5f78b4b2cdb76384cdf587fa0d2dc85f634a72e1a3e"}, - {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:318c89edde72ff95d8df67d82aca03861240512994a597a435a1011ba18dbc7f"}, - {file = "matplotlib-3.7.2-cp39-cp39-win32.whl", hash = "sha256:ce55289d5659b5b12b3db4dc9b7075b70cef5631e56530f14b2945e8836f2d20"}, - {file = "matplotlib-3.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:2ecb5be2b2815431c81dc115667e33da0f5a1bcf6143980d180d09a717c4a12e"}, - {file = "matplotlib-3.7.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdcd28360dbb6203fb5219b1a5658df226ac9bebc2542a9e8f457de959d713d0"}, - {file = "matplotlib-3.7.2-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c3cca3e842b11b55b52c6fb8bd6a4088693829acbfcdb3e815fa9b7d5c92c1b"}, - {file = "matplotlib-3.7.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:ebf577c7a6744e9e1bd3fee45fc74a02710b214f94e2bde344912d85e0c9af7c"}, - {file = "matplotlib-3.7.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:936bba394682049919dda062d33435b3be211dc3dcaa011e09634f060ec878b2"}, - {file = "matplotlib-3.7.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bc221ffbc2150458b1cd71cdd9ddd5bb37962b036e41b8be258280b5b01da1dd"}, - {file = "matplotlib-3.7.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35d74ebdb3f71f112b36c2629cf32323adfbf42679e2751252acd468f5001c07"}, - {file = "matplotlib-3.7.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:717157e61b3a71d3d26ad4e1770dc85156c9af435659a25ee6407dc866cb258d"}, - {file = "matplotlib-3.7.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:20f844d6be031948148ba49605c8b96dfe7d3711d1b63592830d650622458c11"}, - {file = "matplotlib-3.7.2.tar.gz", hash = "sha256:a8cdb91dddb04436bd2f098b8fdf4b81352e68cf4d2c6756fcc414791076569b"}, + {file = "matplotlib-3.7.3-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:085c33b27561d9c04386789d5aa5eb4a932ddef43cfcdd0e01735f9a6e85ce0c"}, + {file = "matplotlib-3.7.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c568e80e1c17f68a727f30f591926751b97b98314d8e59804f54f86ae6fa6a22"}, + {file = "matplotlib-3.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7baf98c5ad59c5c4743ea884bb025cbffa52dacdfdac0da3e6021a285a90377e"}, + {file = "matplotlib-3.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:236024f582e40dac39bca592258888b38ae47a9fed7b8de652d68d3d02d47d2b"}, + {file = "matplotlib-3.7.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12b4f6795efea037ce2d41e7c417ad8bd02d5719c6ad4a8450a0708f4a1cfb89"}, + {file = "matplotlib-3.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b2136cc6c5415b78977e0e8c608647d597204b05b1d9089ccf513c7d913733"}, + {file = "matplotlib-3.7.3-cp310-cp310-win32.whl", hash = "sha256:122dcbf9be0086e2a95d9e5e0632dbf3bd5b65eaa68c369363310a6c87753059"}, + {file = "matplotlib-3.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:4aab27d9e33293389e3c1d7c881d414a72bdfda0fedc3a6bf46c6fa88d9b8015"}, + {file = "matplotlib-3.7.3-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:d5adc743de91e8e0b13df60deb1b1c285b8effea3d66223afceb14b63c9b05de"}, + {file = "matplotlib-3.7.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:55de4cf7cd0071b8ebf203981b53ab64f988a0a1f897a2dff300a1124e8bcd8b"}, + {file = "matplotlib-3.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ac03377fd908aaee2312d0b11735753e907adb6f4d1d102de5e2425249693f6c"}, + {file = "matplotlib-3.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:755bafc10a46918ce9a39980009b54b02dd249594e5adf52f9c56acfddb5d0b7"}, + {file = "matplotlib-3.7.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a6094c6f8e8d18db631754df4fe9a34dec3caf074f6869a7db09f18f9b1d6b2"}, + {file = "matplotlib-3.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:272dba2f1b107790ed78ebf5385b8d14b27ad9e90419de340364b49fe549a993"}, + {file = "matplotlib-3.7.3-cp311-cp311-win32.whl", hash = "sha256:591c123bed1cb4b9996fb60b41a6d89c2ec4943244540776c5f1283fb6960a53"}, + {file = "matplotlib-3.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:3bf3a178c6504694cee8b88b353df0051583f2f6f8faa146f67115c27c856881"}, + {file = "matplotlib-3.7.3-cp312-cp312-macosx_10_12_universal2.whl", hash = 
"sha256:edf54cac8ee3603f3093616b40a931e8c063969756a4d78a86e82c2fea9659f7"}, + {file = "matplotlib-3.7.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:91e36a85ea639a1ba9f91427041eac064b04829945fe331a92617b6cb21d27e5"}, + {file = "matplotlib-3.7.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:caf5eaaf7c68f8d7df269dfbcaf46f48a70ff482bfcebdcc97519671023f2a7d"}, + {file = "matplotlib-3.7.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74bf57f505efea376097e948b7cdd87191a7ce8180616390aef496639edf601f"}, + {file = "matplotlib-3.7.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee152a88a0da527840a426535514b6ed8ac4240eb856b1da92cf48124320e346"}, + {file = "matplotlib-3.7.3-cp312-cp312-win_amd64.whl", hash = "sha256:67a410a9c9e07cbc83581eeea144bbe298870bf0ac0ee2f2e10a015ab7efee19"}, + {file = "matplotlib-3.7.3-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:259999c05285cb993d7f2a419cea547863fa215379eda81f7254c9e932963729"}, + {file = "matplotlib-3.7.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3f4e7fd5a6157e1d018ce2166ec8e531a481dd4a36f035b5c23edfe05a25419a"}, + {file = "matplotlib-3.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:faa3d12d8811d08d14080a8b7b9caea9a457dc495350166b56df0db4b9909ef5"}, + {file = "matplotlib-3.7.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:336e88900c11441e458da01c8414fc57e04e17f9d3bb94958a76faa2652bcf6b"}, + {file = "matplotlib-3.7.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:12f4c0dd8aa280d796c8772ea8265a14f11a04319baa3a16daa5556065e8baea"}, + {file = "matplotlib-3.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1990955b11e7918d256cf3b956b10997f405b7917a3f1c7d8e69c1d15c7b1930"}, + {file = "matplotlib-3.7.3-cp38-cp38-win32.whl", hash = "sha256:e78707b751260b42b721507ad7aa60fe4026d7f51c74cca6b9cd8b123ebb633a"}, + {file = "matplotlib-3.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:e594ee43c59ea39ca5c6244667cac9d017a3527febc31f5532ad9135cf7469ec"}, + {file = "matplotlib-3.7.3-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:6eaa1cf0e94c936a26b78f6d756c5fbc12e0a58c8a68b7248a2a31456ce4e234"}, + {file = "matplotlib-3.7.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0a97af9d22e8ebedc9f00b043d9bbd29a375e9e10b656982012dded44c10fd77"}, + {file = "matplotlib-3.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1f9c6c16597af660433ab330b59ee2934b832ee1fabcaf5cbde7b2add840f31e"}, + {file = "matplotlib-3.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7240259b4b9cbc62381f6378cff4d57af539162a18e832c1e48042fabc40b6b"}, + {file = "matplotlib-3.7.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:747c6191d2e88ae854809e69aa358dbf852ff1a5738401b85c1cc9012309897a"}, + {file = "matplotlib-3.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec726b08a5275d827aa91bb951e68234a4423adb91cf65bc0fcdc0f2777663f7"}, + {file = "matplotlib-3.7.3-cp39-cp39-win32.whl", hash = "sha256:40e3b9b450c6534f07278310c4e34caff41c2a42377e4b9d47b0f8d3ac1083a2"}, + {file = "matplotlib-3.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfc118642903a23e309b1da32886bb39a4314147d013e820c86b5fb4cb2e36d0"}, + {file = "matplotlib-3.7.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:165c8082bf8fc0360c24aa4724a22eaadbfd8c28bf1ccf7e94d685cad48261e4"}, + {file = "matplotlib-3.7.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:ebd8470cc2a3594746ff0513aecbfa2c55ff6f58e6cef2efb1a54eb87c88ffa2"}, + {file = "matplotlib-3.7.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7153453669c9672b52095119fd21dd032d19225d48413a2871519b17db4b0fde"}, + {file = "matplotlib-3.7.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:498a08267dc69dd8f24c4b5d7423fa584d7ce0027ba71f7881df05fc09b89bb7"}, + {file = "matplotlib-3.7.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d48999c4b19b5a0c058c9cd828ff6fc7748390679f6cf9a2ad653a3e802c87d3"}, + {file = "matplotlib-3.7.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22d65d18b4ee8070a5fea5761d59293f1f9e2fac37ec9ce090463b0e629432fd"}, + {file = "matplotlib-3.7.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c40cde976c36693cc0767e27cf5f443f91c23520060bd9496678364adfafe9c"}, + {file = "matplotlib-3.7.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:39018a2b17592448fbfdf4b8352955e6c3905359939791d4ff429296494d1a0c"}, + {file = "matplotlib-3.7.3.tar.gz", hash = "sha256:f09b3dd6bdeb588de91f853bbb2d6f0ff8ab693485b0c49035eaa510cb4f142e"}, ] [package.dependencies] @@ -2175,11 +2210,12 @@ contourpy = ">=1.0.1" cycler = ">=0.10" fonttools = ">=4.22.0" kiwisolver = ">=1.0.1" -numpy = ">=1.20" +numpy = ">=1.20,<2" packaging = ">=20.0" pillow = ">=6.2.0" -pyparsing = ">=2.3.1,<3.1" +pyparsing = ">=2.3.1" python-dateutil = ">=2.7" +setuptools_scm = ">=7" [[package]] name = "matplotlib-inline" @@ -2609,71 +2645,71 @@ wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1 [[package]] name = "orjson" -version = "3.9.5" +version = "3.9.7" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = true python-versions = ">=3.7" files = [ - {file = "orjson-3.9.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ad6845912a71adcc65df7c8a7f2155eba2096cf03ad2c061c93857de70d699ad"}, - {file = "orjson-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e298e0aacfcc14ef4476c3f409e85475031de24e5b23605a465e9bf4b2156273"}, - {file = "orjson-3.9.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:83c9939073281ef7dd7c5ca7f54cceccb840b440cec4b8a326bda507ff88a0a6"}, - {file = "orjson-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e174cc579904a48ee1ea3acb7045e8a6c5d52c17688dfcb00e0e842ec378cabf"}, - {file = "orjson-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f8d51702f42c785b115401e1d64a27a2ea767ae7cf1fb8edaa09c7cf1571c660"}, - {file = "orjson-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d61c0c7414ddee1ef4d0f303e2222f8cced5a2e26d9774751aecd72324c9e"}, - {file = "orjson-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d748cc48caf5a91c883d306ab648df1b29e16b488c9316852844dd0fd000d1c2"}, - {file = "orjson-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bd19bc08fa023e4c2cbf8294ad3f2b8922f4de9ba088dbc71e6b268fdf54591c"}, - {file = "orjson-3.9.5-cp310-none-win32.whl", hash = "sha256:5793a21a21bf34e1767e3d61a778a25feea8476dcc0bdf0ae1bc506dc34561ea"}, - {file = "orjson-3.9.5-cp310-none-win_amd64.whl", hash = "sha256:2bcec0b1024d0031ab3eab7a8cb260c8a4e4a5e35993878a2da639d69cdf6a65"}, - {file = "orjson-3.9.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:8547b95ca0e2abd17e1471973e6d676f1d8acedd5f8fb4f739e0612651602d66"}, - {file = "orjson-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87ce174d6a38d12b3327f76145acbd26f7bc808b2b458f61e94d83cd0ebb4d76"}, - {file = "orjson-3.9.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a960bb1bc9a964d16fcc2d4af5a04ce5e4dfddca84e3060c35720d0a062064fe"}, - {file = "orjson-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a7aa5573a949760d6161d826d34dc36db6011926f836851fe9ccb55b5a7d8e8"}, - {file = "orjson-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b2852afca17d7eea85f8e200d324e38c851c96598ac7b227e4f6c4e59fbd3df"}, - {file = "orjson-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa185959c082475288da90f996a82e05e0c437216b96f2a8111caeb1d54ef926"}, - {file = "orjson-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:89c9332695b838438ea4b9a482bce8ffbfddde4df92750522d928fb00b7b8dce"}, - {file = "orjson-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2493f1351a8f0611bc26e2d3d407efb873032b4f6b8926fed8cfed39210ca4ba"}, - {file = "orjson-3.9.5-cp311-none-win32.whl", hash = "sha256:ffc544e0e24e9ae69301b9a79df87a971fa5d1c20a6b18dca885699709d01be0"}, - {file = "orjson-3.9.5-cp311-none-win_amd64.whl", hash = "sha256:89670fe2732e3c0c54406f77cad1765c4c582f67b915c74fda742286809a0cdc"}, - {file = "orjson-3.9.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:15df211469625fa27eced4aa08dc03e35f99c57d45a33855cc35f218ea4071b8"}, - {file = "orjson-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9f17c59fe6c02bc5f89ad29edb0253d3059fe8ba64806d789af89a45c35269a"}, - {file = "orjson-3.9.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca6b96659c7690773d8cebb6115c631f4a259a611788463e9c41e74fa53bf33f"}, - {file = "orjson-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a26fafe966e9195b149950334bdbe9026eca17fe8ffe2d8fa87fdc30ca925d30"}, - {file = "orjson-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9006b1eb645ecf460da067e2dd17768ccbb8f39b01815a571bfcfab7e8da5e52"}, - {file = "orjson-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebfdbf695734b1785e792a1315e41835ddf2a3e907ca0e1c87a53f23006ce01d"}, - {file = "orjson-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4a3943234342ab37d9ed78fb0a8f81cd4b9532f67bf2ac0d3aa45fa3f0a339f3"}, - {file = "orjson-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e6762755470b5c82f07b96b934af32e4d77395a11768b964aaa5eb092817bc31"}, - {file = "orjson-3.9.5-cp312-none-win_amd64.whl", hash = "sha256:c74df28749c076fd6e2157190df23d43d42b2c83e09d79b51694ee7315374ad5"}, - {file = "orjson-3.9.5-cp37-cp37m-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:88e18a74d916b74f00d0978d84e365c6bf0e7ab846792efa15756b5fb2f7d49d"}, - {file = "orjson-3.9.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d28514b5b6dfaf69097be70d0cf4f1407ec29d0f93e0b4131bf9cc8fd3f3e374"}, - {file = "orjson-3.9.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b81aca8c7be61e2566246b6a0ca49f8aece70dd3f38c7f5c837f398c4cb142"}, - {file = "orjson-3.9.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:385c1c713b1e47fd92e96cf55fd88650ac6dfa0b997e8aa7ecffd8b5865078b1"}, - {file = "orjson-3.9.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9850c03a8e42fba1a508466e6a0f99472fd2b4a5f30235ea49b2a1b32c04c11"}, - {file = "orjson-3.9.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4449f84bbb13bcef493d8aa669feadfced0f7c5eea2d0d88b5cc21f812183af8"}, - {file = "orjson-3.9.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:86127bf194f3b873135e44ce5dc9212cb152b7e06798d5667a898a00f0519be4"}, - {file = "orjson-3.9.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0abcd039f05ae9ab5b0ff11624d0b9e54376253b7d3217a358d09c3edf1d36f7"}, - {file = "orjson-3.9.5-cp37-none-win32.whl", hash = "sha256:10cc8ad5ff7188efcb4bec196009d61ce525a4e09488e6d5db41218c7fe4f001"}, - {file = "orjson-3.9.5-cp37-none-win_amd64.whl", hash = "sha256:ff27e98532cb87379d1a585837d59b187907228268e7b0a87abe122b2be6968e"}, - {file = "orjson-3.9.5-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5bfa79916ef5fef75ad1f377e54a167f0de334c1fa4ebb8d0224075f3ec3d8c0"}, - {file = "orjson-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e87dfa6ac0dae764371ab19b35eaaa46dfcb6ef2545dfca03064f21f5d08239f"}, - {file = "orjson-3.9.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:50ced24a7b23058b469ecdb96e36607fc611cbaee38b58e62a55c80d1b3ad4e1"}, - {file = "orjson-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1b74ea2a3064e1375da87788897935832e806cc784de3e789fd3c4ab8eb3fa5"}, - {file = "orjson-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7cb961efe013606913d05609f014ad43edfaced82a576e8b520a5574ce3b2b9"}, - {file = "orjson-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1225d2d5ee76a786bda02f8c5e15017462f8432bb960de13d7c2619dba6f0275"}, - {file = "orjson-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f39f4b99199df05c7ecdd006086259ed25886cdbd7b14c8cdb10c7675cfcca7d"}, - {file = "orjson-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a461dc9fb60cac44f2d3218c36a0c1c01132314839a0e229d7fb1bba69b810d8"}, - {file = "orjson-3.9.5-cp38-none-win32.whl", hash = "sha256:dedf1a6173748202df223aea29de814b5836732a176b33501375c66f6ab7d822"}, - {file = "orjson-3.9.5-cp38-none-win_amd64.whl", hash = "sha256:fa504082f53efcbacb9087cc8676c163237beb6e999d43e72acb4bb6f0db11e6"}, - {file = "orjson-3.9.5-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6900f0248edc1bec2a2a3095a78a7e3ef4e63f60f8ddc583687eed162eedfd69"}, - {file = "orjson-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17404333c40047888ac40bd8c4d49752a787e0a946e728a4e5723f111b6e55a5"}, - {file = "orjson-3.9.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0eefb7cfdd9c2bc65f19f974a5d1dfecbac711dae91ed635820c6b12da7a3c11"}, - {file = "orjson-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:68c78b2a3718892dc018adbc62e8bab6ef3c0d811816d21e6973dee0ca30c152"}, - {file = "orjson-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:591ad7d9e4a9f9b104486ad5d88658c79ba29b66c5557ef9edf8ca877a3f8d11"}, - {file = "orjson-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6cc2cbf302fbb2d0b2c3c142a663d028873232a434d89ce1b2604ebe5cc93ce8"}, - {file 
= "orjson-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b26b5aa5e9ee1bad2795b925b3adb1b1b34122cb977f30d89e0a1b3f24d18450"}, - {file = "orjson-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ef84724f7d29dcfe3aafb1fc5fc7788dca63e8ae626bb9298022866146091a3e"}, - {file = "orjson-3.9.5-cp39-none-win32.whl", hash = "sha256:664cff27f85939059472afd39acff152fbac9a091b7137092cb651cf5f7747b5"}, - {file = "orjson-3.9.5-cp39-none-win_amd64.whl", hash = "sha256:91dda66755795ac6100e303e206b636568d42ac83c156547634256a2e68de694"}, - {file = "orjson-3.9.5.tar.gz", hash = "sha256:6daf5ee0b3cf530b9978cdbf71024f1c16ed4a67d05f6ec435c6e7fe7a52724c"}, + {file = "orjson-3.9.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b6df858e37c321cefbf27fe7ece30a950bcc3a75618a804a0dcef7ed9dd9c92d"}, + {file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5198633137780d78b86bb54dafaaa9baea698b4f059456cd4554ab7009619221"}, + {file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e736815b30f7e3c9044ec06a98ee59e217a833227e10eb157f44071faddd7c5"}, + {file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a19e4074bc98793458b4b3ba35a9a1d132179345e60e152a1bb48c538ab863c4"}, + {file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80acafe396ab689a326ab0d80f8cc61dec0dd2c5dca5b4b3825e7b1e0132c101"}, + {file = "orjson-3.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:355efdbbf0cecc3bd9b12589b8f8e9f03c813a115efa53f8dc2a523bfdb01334"}, + {file = "orjson-3.9.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3aab72d2cef7f1dd6104c89b0b4d6b416b0db5ca87cc2fac5f79c5601f549cc2"}, + {file = "orjson-3.9.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36b1df2e4095368ee388190687cb1b8557c67bc38400a942a1a77713580b50ae"}, + {file = "orjson-3.9.7-cp310-none-win32.whl", hash = "sha256:e94b7b31aa0d65f5b7c72dd8f8227dbd3e30354b99e7a9af096d967a77f2a580"}, + {file = "orjson-3.9.7-cp310-none-win_amd64.whl", hash = "sha256:82720ab0cf5bb436bbd97a319ac529aee06077ff7e61cab57cee04a596c4f9b4"}, + {file = "orjson-3.9.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1f8b47650f90e298b78ecf4df003f66f54acdba6a0f763cc4df1eab048fe3738"}, + {file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f738fee63eb263530efd4d2e9c76316c1f47b3bbf38c1bf45ae9625feed0395e"}, + {file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38e34c3a21ed41a7dbd5349e24c3725be5416641fdeedf8f56fcbab6d981c900"}, + {file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21a3344163be3b2c7e22cef14fa5abe957a892b2ea0525ee86ad8186921b6cf0"}, + {file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23be6b22aab83f440b62a6f5975bcabeecb672bc627face6a83bc7aeb495dc7e"}, + {file = "orjson-3.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5205ec0dfab1887dd383597012199f5175035e782cdb013c542187d280ca443"}, + {file = "orjson-3.9.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8769806ea0b45d7bf75cad253fba9ac6700b7050ebb19337ff6b4e9060f963fa"}, + {file = "orjson-3.9.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:f9e01239abea2f52a429fe9d95c96df95f078f0172489d691b4a848ace54a476"}, + {file = "orjson-3.9.7-cp311-none-win32.whl", hash = "sha256:8bdb6c911dae5fbf110fe4f5cba578437526334df381b3554b6ab7f626e5eeca"}, + {file = "orjson-3.9.7-cp311-none-win_amd64.whl", hash = "sha256:9d62c583b5110e6a5cf5169ab616aa4ec71f2c0c30f833306f9e378cf51b6c86"}, + {file = "orjson-3.9.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1c3cee5c23979deb8d1b82dc4cc49be59cccc0547999dbe9adb434bb7af11cf7"}, + {file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a347d7b43cb609e780ff8d7b3107d4bcb5b6fd09c2702aa7bdf52f15ed09fa09"}, + {file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:154fd67216c2ca38a2edb4089584504fbb6c0694b518b9020ad35ecc97252bb9"}, + {file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ea3e63e61b4b0beeb08508458bdff2daca7a321468d3c4b320a758a2f554d31"}, + {file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1eb0b0b2476f357eb2975ff040ef23978137aa674cd86204cfd15d2d17318588"}, + {file = "orjson-3.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b9a20a03576c6b7022926f614ac5a6b0914486825eac89196adf3267c6489d"}, + {file = "orjson-3.9.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:915e22c93e7b7b636240c5a79da5f6e4e84988d699656c8e27f2ac4c95b8dcc0"}, + {file = "orjson-3.9.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f26fb3e8e3e2ee405c947ff44a3e384e8fa1843bc35830fe6f3d9a95a1147b6e"}, + {file = "orjson-3.9.7-cp312-none-win_amd64.whl", hash = "sha256:d8692948cada6ee21f33db5e23460f71c8010d6dfcfe293c9b96737600a7df78"}, + {file = "orjson-3.9.7-cp37-cp37m-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7bab596678d29ad969a524823c4e828929a90c09e91cc438e0ad79b37ce41166"}, + {file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63ef3d371ea0b7239ace284cab9cd00d9c92b73119a7c274b437adb09bda35e6"}, + {file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f8fcf696bbbc584c0c7ed4adb92fd2ad7d153a50258842787bc1524e50d7081"}, + {file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90fe73a1f0321265126cbba13677dcceb367d926c7a65807bd80916af4c17047"}, + {file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45a47f41b6c3beeb31ac5cf0ff7524987cfcce0a10c43156eb3ee8d92d92bf22"}, + {file = "orjson-3.9.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a2937f528c84e64be20cb80e70cea76a6dfb74b628a04dab130679d4454395c"}, + {file = "orjson-3.9.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b4fb306c96e04c5863d52ba8d65137917a3d999059c11e659eba7b75a69167bd"}, + {file = "orjson-3.9.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:410aa9d34ad1089898f3db461b7b744d0efcf9252a9415bbdf23540d4f67589f"}, + {file = "orjson-3.9.7-cp37-none-win32.whl", hash = "sha256:26ffb398de58247ff7bde895fe30817a036f967b0ad0e1cf2b54bda5f8dcfdd9"}, + {file = "orjson-3.9.7-cp37-none-win_amd64.whl", hash = "sha256:bcb9a60ed2101af2af450318cd89c6b8313e9f8df4e8fb12b657b2e97227cf08"}, + {file = "orjson-3.9.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:5da9032dac184b2ae2da4bce423edff7db34bfd936ebd7d4207ea45840f03905"}, + {file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7951af8f2998045c656ba8062e8edf5e83fd82b912534ab1de1345de08a41d2b"}, + {file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8e59650292aa3a8ea78073fc84184538783966528e442a1b9ed653aa282edcf"}, + {file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9274ba499e7dfb8a651ee876d80386b481336d3868cba29af839370514e4dce0"}, + {file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca1706e8b8b565e934c142db6a9592e6401dc430e4b067a97781a997070c5378"}, + {file = "orjson-3.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83cc275cf6dcb1a248e1876cdefd3f9b5f01063854acdfd687ec360cd3c9712a"}, + {file = "orjson-3.9.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:11c10f31f2c2056585f89d8229a56013bc2fe5de51e095ebc71868d070a8dd81"}, + {file = "orjson-3.9.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cf334ce1d2fadd1bf3e5e9bf15e58e0c42b26eb6590875ce65bd877d917a58aa"}, + {file = "orjson-3.9.7-cp38-none-win32.whl", hash = "sha256:76a0fc023910d8a8ab64daed8d31d608446d2d77c6474b616b34537aa7b79c7f"}, + {file = "orjson-3.9.7-cp38-none-win_amd64.whl", hash = "sha256:7a34a199d89d82d1897fd4a47820eb50947eec9cda5fd73f4578ff692a912f89"}, + {file = "orjson-3.9.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e7e7f44e091b93eb39db88bb0cb765db09b7a7f64aea2f35e7d86cbf47046c65"}, + {file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01d647b2a9c45a23a84c3e70e19d120011cba5f56131d185c1b78685457320bb"}, + {file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0eb850a87e900a9c484150c414e21af53a6125a13f6e378cf4cc11ae86c8f9c5"}, + {file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f4b0042d8388ac85b8330b65406c84c3229420a05068445c13ca28cc222f1f7"}, + {file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd3e7aae977c723cc1dbb82f97babdb5e5fbce109630fbabb2ea5053523c89d3"}, + {file = "orjson-3.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c616b796358a70b1f675a24628e4823b67d9e376df2703e893da58247458956"}, + {file = "orjson-3.9.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3ba725cf5cf87d2d2d988d39c6a2a8b6fc983d78ff71bc728b0be54c869c884"}, + {file = "orjson-3.9.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4891d4c934f88b6c29b56395dfc7014ebf7e10b9e22ffd9877784e16c6b2064f"}, + {file = "orjson-3.9.7-cp39-none-win32.whl", hash = "sha256:14d3fb6cd1040a4a4a530b28e8085131ed94ebc90d72793c59a713de34b60838"}, + {file = "orjson-3.9.7-cp39-none-win_amd64.whl", hash = "sha256:9ef82157bbcecd75d6296d5d8b2d792242afcd064eb1ac573f8847b52e58f677"}, + {file = "orjson-3.9.7.tar.gz", hash = "sha256:85e39198f78e2f7e054d296395f6c96f5e02892337746ef5b6a1bf3ed5910142"}, ] [[package]] @@ -2946,24 +2982,24 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "4.24.2" +version = "4.24.3" description = "" optional = true python-versions = ">=3.7" files = [ - {file = "protobuf-4.24.2-cp310-abi3-win32.whl", hash = "sha256:58e12d2c1aa428ece2281cef09bbaa6938b083bcda606db3da4e02e991a0d924"}, - {file = "protobuf-4.24.2-cp310-abi3-win_amd64.whl", hash = 
"sha256:77700b55ba41144fc64828e02afb41901b42497b8217b558e4a001f18a85f2e3"}, - {file = "protobuf-4.24.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:237b9a50bd3b7307d0d834c1b0eb1a6cd47d3f4c2da840802cd03ea288ae8880"}, - {file = "protobuf-4.24.2-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:25ae91d21e3ce8d874211110c2f7edd6384816fb44e06b2867afe35139e1fd1c"}, - {file = "protobuf-4.24.2-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:c00c3c7eb9ad3833806e21e86dca448f46035242a680f81c3fe068ff65e79c74"}, - {file = "protobuf-4.24.2-cp37-cp37m-win32.whl", hash = "sha256:4e69965e7e54de4db989289a9b971a099e626f6167a9351e9d112221fc691bc1"}, - {file = "protobuf-4.24.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c5cdd486af081bf752225b26809d2d0a85e575b80a84cde5172a05bbb1990099"}, - {file = "protobuf-4.24.2-cp38-cp38-win32.whl", hash = "sha256:6bd26c1fa9038b26c5c044ee77e0ecb18463e957fefbaeb81a3feb419313a54e"}, - {file = "protobuf-4.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb7aa97c252279da65584af0456f802bd4b2de429eb945bbc9b3d61a42a8cd16"}, - {file = "protobuf-4.24.2-cp39-cp39-win32.whl", hash = "sha256:2b23bd6e06445699b12f525f3e92a916f2dcf45ffba441026357dea7fa46f42b"}, - {file = "protobuf-4.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:839952e759fc40b5d46be319a265cf94920174d88de31657d5622b5d8d6be5cd"}, - {file = "protobuf-4.24.2-py3-none-any.whl", hash = "sha256:3b7b170d3491ceed33f723bbf2d5a260f8a4e23843799a3906f16ef736ef251e"}, - {file = "protobuf-4.24.2.tar.gz", hash = "sha256:7fda70797ddec31ddfa3576cbdcc3ddbb6b3078b737a1a87ab9136af0570cd6e"}, + {file = "protobuf-4.24.3-cp310-abi3-win32.whl", hash = "sha256:20651f11b6adc70c0f29efbe8f4a94a74caf61b6200472a9aea6e19898f9fcf4"}, + {file = "protobuf-4.24.3-cp310-abi3-win_amd64.whl", hash = "sha256:3d42e9e4796a811478c783ef63dc85b5a104b44aaaca85d4864d5b886e4b05e3"}, + {file = "protobuf-4.24.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:6e514e8af0045be2b56e56ae1bb14f43ce7ffa0f68b1c793670ccbe2c4fc7d2b"}, + {file = "protobuf-4.24.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:ba53c2f04798a326774f0e53b9c759eaef4f6a568ea7072ec6629851c8435959"}, + {file = "protobuf-4.24.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:f6ccbcf027761a2978c1406070c3788f6de4a4b2cc20800cc03d52df716ad675"}, + {file = "protobuf-4.24.3-cp37-cp37m-win32.whl", hash = "sha256:1b182c7181a2891e8f7f3a1b5242e4ec54d1f42582485a896e4de81aa17540c2"}, + {file = "protobuf-4.24.3-cp37-cp37m-win_amd64.whl", hash = "sha256:b0271a701e6782880d65a308ba42bc43874dabd1a0a0f41f72d2dac3b57f8e76"}, + {file = "protobuf-4.24.3-cp38-cp38-win32.whl", hash = "sha256:e29d79c913f17a60cf17c626f1041e5288e9885c8579832580209de8b75f2a52"}, + {file = "protobuf-4.24.3-cp38-cp38-win_amd64.whl", hash = "sha256:067f750169bc644da2e1ef18c785e85071b7c296f14ac53e0900e605da588719"}, + {file = "protobuf-4.24.3-cp39-cp39-win32.whl", hash = "sha256:2da777d34b4f4f7613cdf85c70eb9a90b1fbef9d36ae4a0ccfe014b0b07906f1"}, + {file = "protobuf-4.24.3-cp39-cp39-win_amd64.whl", hash = "sha256:f631bb982c5478e0c1c70eab383af74a84be66945ebf5dd6b06fc90079668d0b"}, + {file = "protobuf-4.24.3-py3-none-any.whl", hash = "sha256:f6f8dc65625dadaad0c8545319c2e2f0424fede988368893ca3844261342c11a"}, + {file = "protobuf-4.24.3.tar.gz", hash = "sha256:12e9ad2ec079b833176d2921be2cb24281fa591f0b119b208b788adc48c2561d"}, ] [[package]] @@ -3005,12 +3041,12 @@ files = [ [[package]] name = "pulumi" -version = "3.80.0" +version = "3.82.0" description = "Pulumi's Python SDK" optional = true python-versions = ">=3.7" files = [ - {file 
= "pulumi-3.80.0-py3-none-any.whl", hash = "sha256:c325512c691a1da2af5b0ad2a2ca93c2a99a1363adec010abc0f4104a4cf27b4"}, + {file = "pulumi-3.82.0-py3-none-any.whl", hash = "sha256:26bd035bc548ac7172681d1b7a080905a3321ab34dcda5bba87f18b3b47376c9"}, ] [package.dependencies] @@ -3266,13 +3302,13 @@ plugins = ["importlib-metadata"] [[package]] name = "pyparsing" -version = "3.0.9" +version = "3.1.1" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = true python-versions = ">=3.6.8" files = [ - {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, - {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, + {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, + {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, ] [package.extras] @@ -3367,7 +3403,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3375,15 +3410,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = 
"PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3400,7 +3428,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3408,7 +3435,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -3803,57 +3829,35 @@ files = [ {file = "safetensors-0.3.3-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:17f41344d9a075f2f21b289a49a62e98baff54b5754240ba896063bce31626bf"}, {file = "safetensors-0.3.3-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:f1045f798e1a16a6ced98d6a42ec72936d367a2eec81dc5fade6ed54638cd7d2"}, {file = "safetensors-0.3.3-cp310-cp310-macosx_13_0_x86_64.whl", hash = "sha256:eaf0e4bc91da13f21ac846a39429eb3f3b7ed06295a32321fa3eb1a59b5c70f3"}, - {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:25149180d4dc8ca48bac2ac3852a9424b466e36336a39659b35b21b2116f96fc"}, - {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9e943bf78c39de8865398a71818315e7d5d1af93c7b30d4da3fc852e62ad9bc"}, - {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cccfcac04a010354e87c7a2fe16a1ff004fc4f6e7ef8efc966ed30122ce00bc7"}, {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07121f427e646a50d18c1be0fa1a2cbf6398624c31149cd7e6b35486d72189e"}, {file = "safetensors-0.3.3-cp310-cp310-win32.whl", hash = "sha256:a85e29cbfddfea86453cc0f4889b4bcc6b9c155be9a60e27be479a34e199e7ef"}, - {file = "safetensors-0.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:e13adad4a3e591378f71068d14e92343e626cf698ff805f61cdb946e684a218e"}, {file = "safetensors-0.3.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:cbc3312f134baf07334dd517341a4b470b2931f090bd9284888acb7dfaf4606f"}, {file = "safetensors-0.3.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d15030af39d5d30c22bcbc6d180c65405b7ea4c05b7bab14a570eac7d7d43722"}, {file = "safetensors-0.3.3-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:f84a74cbe9859b28e3d6d7715ac1dd3097bebf8d772694098f6d42435245860c"}, {file = "safetensors-0.3.3-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:10d637423d98ab2e6a4ad96abf4534eb26fcaf8ca3115623e64c00759374e90d"}, {file = "safetensors-0.3.3-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:3b46f5de8b44084aff2e480874c550c399c730c84b2e8ad1bddb062c94aa14e9"}, - {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e76da691a82dfaf752854fa6d17c8eba0c8466370c5ad8cf1bfdf832d3c7ee17"}, - {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4e342fd54e66aa9512dd13e410f791e47aa4feeb5f4c9a20882c72f3d272f29"}, - {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:178fd30b5dc73bce14a39187d948cedd0e5698e2f055b7ea16b5a96c9b17438e"}, {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e8fdf7407dba44587ed5e79d5de3533d242648e1f2041760b21474bd5ea5c8c"}, {file = "safetensors-0.3.3-cp311-cp311-win32.whl", hash = "sha256:7d3b744cee8d7a46ffa68db1a2ff1a1a432488e3f7a5a97856fe69e22139d50c"}, - {file = "safetensors-0.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:f579877d30feec9b6ba409d05fa174633a4fc095675a4a82971d831a8bb60b97"}, {file = "safetensors-0.3.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:2fff5b19a1b462c17322998b2f4b8bce43c16fe208968174d2f3a1446284ceed"}, {file = "safetensors-0.3.3-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:41adb1d39e8aad04b16879e3e0cbcb849315999fad73bc992091a01e379cb058"}, {file = "safetensors-0.3.3-cp37-cp37m-macosx_12_0_x86_64.whl", hash = "sha256:0f2b404250b3b877b11d34afcc30d80e7035714a1116a3df56acaca6b6c00096"}, {file = "safetensors-0.3.3-cp37-cp37m-macosx_13_0_x86_64.whl", hash = "sha256:b43956ef20e9f4f2e648818a9e7b3499edd6b753a0f5526d4f6a6826fbee8446"}, - {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d61a99b34169981f088ccfbb2c91170843efc869a0a0532f422db7211bf4f474"}, - {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0008aab36cd20e9a051a68563c6f80d40f238c2611811d7faa5a18bf3fd3984"}, - {file = 
"safetensors-0.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93d54166072b143084fdcd214a080a088050c1bb1651016b55942701b31334e4"}, {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c32ee08f61cea56a5d62bbf94af95df6040c8ab574afffaeb7b44ae5da1e9e3"}, {file = "safetensors-0.3.3-cp37-cp37m-win32.whl", hash = "sha256:351600f367badd59f7bfe86d317bb768dd8c59c1561c6fac43cafbd9c1af7827"}, - {file = "safetensors-0.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:034717e297849dae1af0a7027a14b8647bd2e272c24106dced64d83e10d468d1"}, {file = "safetensors-0.3.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8530399666748634bc0b301a6a5523756931b0c2680d188e743d16304afe917a"}, {file = "safetensors-0.3.3-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:9d741c1f1621e489ba10aa3d135b54202684f6e205df52e219d5eecd673a80c9"}, - {file = "safetensors-0.3.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:0c345fd85b4d2093a5109596ff4cd9dfc2e84992e881b4857fbc4a93a3b89ddb"}, {file = "safetensors-0.3.3-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:69ccee8d05f55cdf76f7e6c87d2bdfb648c16778ef8acfd2ecc495e273e9233e"}, - {file = "safetensors-0.3.3-cp38-cp38-macosx_13_0_arm64.whl", hash = "sha256:c08a9a4b7a4ca389232fa8d097aebc20bbd4f61e477abc7065b5c18b8202dede"}, {file = "safetensors-0.3.3-cp38-cp38-macosx_13_0_x86_64.whl", hash = "sha256:a002868d2e3f49bbe81bee2655a411c24fa1f8e68b703dec6629cb989d6ae42e"}, - {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bd2704cb41faa44d3ec23e8b97330346da0395aec87f8eaf9c9e2c086cdbf13"}, - {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b2951bf3f0ad63df5e6a95263652bd6c194a6eb36fd4f2d29421cd63424c883"}, - {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07114cec116253ca2e7230fdea30acf76828f21614afd596d7b5438a2f719bd8"}, {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ab43aeeb9eadbb6b460df3568a662e6f1911ecc39387f8752afcb6a7d96c087"}, {file = "safetensors-0.3.3-cp38-cp38-win32.whl", hash = "sha256:f2f59fce31dd3429daca7269a6b06f65e6547a0c248f5116976c3f1e9b73f251"}, - {file = "safetensors-0.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:c31ca0d8610f57799925bf08616856b39518ab772c65093ef1516762e796fde4"}, {file = "safetensors-0.3.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:59a596b3225c96d59af412385981f17dd95314e3fffdf359c7e3f5bb97730a19"}, {file = "safetensors-0.3.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:82a16e92210a6221edd75ab17acdd468dd958ef5023d9c6c1289606cc30d1479"}, {file = "safetensors-0.3.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:98a929e763a581f516373ef31983ed1257d2d0da912a8e05d5cd12e9e441c93a"}, {file = "safetensors-0.3.3-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:12b83f1986cd16ea0454c636c37b11e819d60dd952c26978310a0835133480b7"}, {file = "safetensors-0.3.3-cp39-cp39-macosx_13_0_arm64.whl", hash = "sha256:f439175c827c2f1bbd54df42789c5204a10983a30bc4242bc7deaf854a24f3f0"}, {file = "safetensors-0.3.3-cp39-cp39-macosx_13_0_x86_64.whl", hash = "sha256:0085be33b8cbcb13079b3a8e131656e05b0bc5e6970530d4c24150f7afd76d70"}, - {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3ec70c87b1e910769034206ad5efc051069b105aac1687f6edcd02526767f4"}, - {file = 
"safetensors-0.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f490132383e5e490e710608f4acffcb98ed37f91b885c7217d3f9f10aaff9048"}, - {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79d1b6c7ed5596baf79c80fbce5198c3cdcc521ae6a157699f427aba1a90082d"}, {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad3cc8006e7a86ee7c88bd2813ec59cd7cc75b03e6fa4af89b9c7b235b438d68"}, {file = "safetensors-0.3.3-cp39-cp39-win32.whl", hash = "sha256:ab29f54c6b8c301ca05fa014728996bd83aac6e21528f893aaf8945c71f42b6d"}, - {file = "safetensors-0.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:0fa82004eae1a71e2aa29843ef99de9350e459a0fc2f65fc6ee0da9690933d2d"}, {file = "safetensors-0.3.3.tar.gz", hash = "sha256:edb7072d788c4f929d0f5735d3a2fb51e5a27f833587828583b7f5747af1a2b8"}, ] @@ -4053,19 +4057,39 @@ files = [ [[package]] name = "setuptools" -version = "68.1.2" +version = "68.2.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-68.1.2-py3-none-any.whl", hash = "sha256:3d8083eed2d13afc9426f227b24fd1659489ec107c0e86cec2ffdde5c92e790b"}, - {file = "setuptools-68.1.2.tar.gz", hash = "sha256:3d4dfa6d95f1b101d695a6160a7626e15583af71a5f52176efa5d39a054d475d"}, + {file = "setuptools-68.2.1-py3-none-any.whl", hash = "sha256:eff96148eb336377ab11beee0c73ed84f1709a40c0b870298b0d058828761bae"}, + {file = "setuptools-68.2.1.tar.gz", hash = "sha256:56ee14884fd8d0cd015411f4a13f40b4356775a0aefd9ebc1d3bfb9a1acb32f1"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5,<=7.1.2)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "setuptools-scm" +version = "7.1.0" +description = "the blessed package to manage your versions by scm tags" +optional = true +python-versions = ">=3.7" +files = [ + {file = "setuptools_scm-7.1.0-py3-none-any.whl", hash = "sha256:73988b6d848709e2af142aa48c986ea29592bbcfca5375678064708205253d8e"}, + {file = "setuptools_scm-7.1.0.tar.gz", hash = 
"sha256:6c508345a771aad7d56ebff0e70628bf2b0ec7573762be9960214730de278f27"}, +] + +[package.dependencies] +packaging = ">=20.0" +setuptools = "*" +typing-extensions = "*" + +[package.extras] +test = ["pytest (>=6.2)", "virtualenv (>20)"] +toml = ["setuptools (>=42)"] [[package]] name = "six" @@ -4080,18 +4104,18 @@ files = [ [[package]] name = "slack-sdk" -version = "3.21.3" +version = "3.22.0" description = "The Slack API Platform SDK for Python" optional = true python-versions = ">=3.6.0" files = [ - {file = "slack_sdk-3.21.3-py2.py3-none-any.whl", hash = "sha256:de3c07b92479940b61cd68c566f49fbc9974c8f38f661d26244078f3903bb9cc"}, - {file = "slack_sdk-3.21.3.tar.gz", hash = "sha256:20829bdc1a423ec93dac903470975ebf3bc76fd3fd91a4dadc0eeffc940ecb0c"}, + {file = "slack_sdk-3.22.0-py2.py3-none-any.whl", hash = "sha256:f102a4902115dff3b97c3e8883ad4e22d54732221886fc5ef29bfc290f063b4a"}, + {file = "slack_sdk-3.22.0.tar.gz", hash = "sha256:6eacce0fa4f8cfb4d84eac0d7d7e1b1926040a2df654ae86b94179bdf2bc4d8c"}, ] [package.extras] optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "websocket-client (>=1,<2)", "websockets (>=10,<11)"] -testing = ["Flask (>=1,<2)", "Flask-Sockets (>=0.2,<1)", "Jinja2 (==3.0.3)", "Werkzeug (<2)", "black (==22.8.0)", "boto3 (<=2)", "click (==8.0.4)", "databases (>=0.5)", "flake8 (>=5,<6)", "itsdangerous (==1.1.0)", "moto (>=3,<4)", "psutil (>=5,<6)", "pytest (>=6.2.5,<7)", "pytest-asyncio (<1)", "pytest-cov (>=2,<3)"] +testing = ["Flask (>=1,<2)", "Flask-Sockets (>=0.2,<1)", "Jinja2 (==3.0.3)", "Werkzeug (<2)", "black (==22.8.0)", "boto3 (<=2)", "click (==8.0.4)", "flake8 (>=5,<6)", "itsdangerous (==1.1.0)", "moto (>=3,<4)", "psutil (>=5,<6)", "pytest (>=6.2.5,<7)", "pytest-asyncio (<1)", "pytest-cov (>=2,<3)"] [[package]] name = "smmap" @@ -4281,40 +4305,40 @@ files = [ [[package]] name = "tiktoken" -version = "0.4.0" +version = "0.5.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.8" files = [ - {file = "tiktoken-0.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:176cad7f053d2cc82ce7e2a7c883ccc6971840a4b5276740d0b732a2b2011f8a"}, - {file = "tiktoken-0.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:450d504892b3ac80207700266ee87c932df8efea54e05cefe8613edc963c1285"}, - {file = "tiktoken-0.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d662de1e7986d129139faf15e6a6ee7665ee103440769b8dedf3e7ba6ac37f"}, - {file = "tiktoken-0.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5727d852ead18b7927b8adf558a6f913a15c7766725b23dbe21d22e243041b28"}, - {file = "tiktoken-0.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c06cd92b09eb0404cedce3702fa866bf0d00e399439dad3f10288ddc31045422"}, - {file = "tiktoken-0.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9ec161e40ed44e4210d3b31e2ff426b4a55e8254f1023e5d2595cb60044f8ea6"}, - {file = "tiktoken-0.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:1e8fa13cf9889d2c928b9e258e9dbbbf88ab02016e4236aae76e3b4f82dd8288"}, - {file = "tiktoken-0.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb2341836b725c60d0ab3c84970b9b5f68d4b733a7bcb80fb25967e5addb9920"}, - {file = "tiktoken-0.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ca30367ad750ee7d42fe80079d3092bd35bb266be7882b79c3bd159b39a17b0"}, - {file = "tiktoken-0.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3dc3df19ddec79435bb2a94ee46f4b9560d0299c23520803d851008445671197"}, - {file = "tiktoken-0.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d980fa066e962ef0f4dad0222e63a484c0c993c7a47c7dafda844ca5aded1f3"}, - {file = "tiktoken-0.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:329f548a821a2f339adc9fbcfd9fc12602e4b3f8598df5593cfc09839e9ae5e4"}, - {file = "tiktoken-0.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b1a038cee487931a5caaef0a2e8520e645508cde21717eacc9af3fbda097d8bb"}, - {file = "tiktoken-0.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:08efa59468dbe23ed038c28893e2a7158d8c211c3dd07f2bbc9a30e012512f1d"}, - {file = "tiktoken-0.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f3020350685e009053829c1168703c346fb32c70c57d828ca3742558e94827a9"}, - {file = "tiktoken-0.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba16698c42aad8190e746cd82f6a06769ac7edd415d62ba027ea1d99d958ed93"}, - {file = "tiktoken-0.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c15d9955cc18d0d7ffcc9c03dc51167aedae98542238b54a2e659bd25fe77ed"}, - {file = "tiktoken-0.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64e1091c7103100d5e2c6ea706f0ec9cd6dc313e6fe7775ef777f40d8c20811e"}, - {file = "tiktoken-0.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e87751b54eb7bca580126353a9cf17a8a8eaadd44edaac0e01123e1513a33281"}, - {file = "tiktoken-0.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e063b988b8ba8b66d6cc2026d937557437e79258095f52eaecfafb18a0a10c03"}, - {file = "tiktoken-0.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9c6dd439e878172dc163fced3bc7b19b9ab549c271b257599f55afc3a6a5edef"}, - {file = "tiktoken-0.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8d1d97f83697ff44466c6bef5d35b6bcdb51e0125829a9c0ed1e6e39fb9a08fb"}, - {file = "tiktoken-0.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b6bce7c68aa765f666474c7c11a7aebda3816b58ecafb209afa59c799b0dd2d"}, - {file = "tiktoken-0.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a73286c35899ca51d8d764bc0b4d60838627ce193acb60cc88aea60bddec4fd"}, - {file = "tiktoken-0.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0394967d2236a60fd0aacef26646b53636423cc9c70c32f7c5124ebe86f3093"}, - {file = "tiktoken-0.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:dae2af6f03ecba5f679449fa66ed96585b2fa6accb7fd57d9649e9e398a94f44"}, - {file = "tiktoken-0.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:55e251b1da3c293432179cf7c452cfa35562da286786be5a8b1ee3405c2b0dd2"}, - {file = "tiktoken-0.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:c835d0ee1f84a5aa04921717754eadbc0f0a56cf613f78dfc1cf9ad35f6c3fea"}, - {file = "tiktoken-0.4.0.tar.gz", hash = "sha256:59b20a819969735b48161ced9b92f05dc4519c17be4015cfb73b65270a243620"}, + {file = "tiktoken-0.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6eaf6b593f09e321446e0940d5bc72960687b2d1889c0431d42718d437bb3285"}, + {file = "tiktoken-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7c96a1234e0edd4a7dd616c5da2658268a55c0931e72d319a998807feba8aa77"}, + {file = "tiktoken-0.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3578d1d49757d9a2770d1aa36915a342804da1a419c7db218d3565309599e378"}, + {file = "tiktoken-0.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf10dff18a1b6b009aa89f0850a8024b393696045a65e82d10e55d7ae5c8b5bd"}, + {file = 
"tiktoken-0.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f16e1bb26a36841bb58b857ed3bbe1e6be845829e5dbb28cea80e2e622ebd753"}, + {file = "tiktoken-0.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5939137cc4d2b3628a00254ad22b1d520a945a100d8bf760bc0e2963ebd6d173"}, + {file = "tiktoken-0.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:e31972f9594a34e2546b69b87e921e035b2f52b12559c9cd5231f796c6473ffc"}, + {file = "tiktoken-0.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8e2ac182d9c975c7c5068382bb17f56e468c35007ea075e37f4bebe85c95f9fc"}, + {file = "tiktoken-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89e2fdba5ab5a13a2f174e76897d6135caf31b0aa9fa97c8df63eaa8acfea46a"}, + {file = "tiktoken-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ce4800f5708cf43997fbcec9cd69939462b8c5856715aae6ddb244a5d71eed"}, + {file = "tiktoken-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74280005ad0c6aceb53b12aad84d9b3753b72da6603d57bbc35212631e6a3bb1"}, + {file = "tiktoken-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2fbb444c0183701a2f012a720b040b6116b0e061c7ea70a3a8828b850fb2c71a"}, + {file = "tiktoken-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f823a3034abc838e53a42271c6a15369ccbf467abbba517ee3595d6741dab107"}, + {file = "tiktoken-0.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:caaf5db8705d7d39286361a2ef71315ff1673b8787fe64f457c63770ceda1f6b"}, + {file = "tiktoken-0.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:89bc1d136ab6abfcc0aab0ab52b5641ca45c23bd63f65da957c2d13122e5ae3c"}, + {file = "tiktoken-0.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3aab8676862ccc923867864f5339fdb1a5322fc21f9f06bbba0ae7a845d7fe5a"}, + {file = "tiktoken-0.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c5a3a1aa7bc2490c2e64f7a1d14159e03b1c252ed253e0ca712c1952d51ca2f"}, + {file = "tiktoken-0.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd88e25af7463d5b15812262f66d6f3096a6e4b60e0c0712b917a51175f57f0d"}, + {file = "tiktoken-0.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f84d9bc41da27961516b8e56254ad886e16ac64a3f5b5fb7c4335678e23579b0"}, + {file = "tiktoken-0.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2f1c85626ed1bf68104715893dad9dce8153cab44cfeeeab5c56bb5ec92b813c"}, + {file = "tiktoken-0.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:0f994aefe5f2c69dc6ee822c5323c5bef2cb4c84533af813c4b867419701cc2d"}, + {file = "tiktoken-0.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9635a6a5aadeb116c950b285d97987199cada3e28531a84f32b38941dead759f"}, + {file = "tiktoken-0.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dda8f19ce057d7aa5f8015ffea3beacc20661a42afbc81f7a599534716214a8e"}, + {file = "tiktoken-0.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbb94200722590d2cae477967f0470f17846e5cada4fc1e4754ec8701cdd6494"}, + {file = "tiktoken-0.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbf1af57d68340447ca1827a93ae0a788f97787b3a17851709156636df47d815"}, + {file = "tiktoken-0.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8d45410f89b7efb26b53eb8993f1122e0442abe0bb913ccb0f5d28be7e36e2a9"}, + {file = "tiktoken-0.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2a9324122f8e33475b575f7bfff035c15a789dc50edeca907ecaa9c8d72cc168"}, + {file = "tiktoken-0.5.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:02c35c78849ead31b69fd42b021031554fd1f3813fd1bf064185f6404768eea6"}, + {file = "tiktoken-0.5.0.tar.gz", hash = "sha256:c8dfd3280f5fca0d8ed2ec18c0f11f7cba305af48faaf4b914c71b7d221f39ed"}, ] [package.dependencies] @@ -4713,13 +4737,13 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", [[package]] name = "virtualenv" -version = "20.24.4" +version = "20.24.5" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.24.4-py3-none-any.whl", hash = "sha256:29c70bb9b88510f6414ac3e55c8b413a1f96239b6b789ca123437d5e892190cb"}, - {file = "virtualenv-20.24.4.tar.gz", hash = "sha256:772b05bfda7ed3b8ecd16021ca9716273ad9f4467c801f27e83ac73430246dca"}, + {file = "virtualenv-20.24.5-py3-none-any.whl", hash = "sha256:b80039f280f4919c77b30f1c23294ae357c4c8701042086e3fc005963e4e537b"}, + {file = "virtualenv-20.24.5.tar.gz", hash = "sha256:e8361967f6da6fbdf1426483bfe9fca8287c242ac0bc30429905721cefbff752"}, ] [package.dependencies] @@ -5133,9 +5157,10 @@ multidict = ">=4.0" [extras] azure = ["pulumi", "pulumi-azure-native"] -bot = ["adapter-transformers", "datasets", "einops", "faiss-cpu", "gradio", "langchain", "llama-index", "nbconvert", "openai", "sentence-transformers", "slack-sdk", "tokenizers", "torch", "torch", "transformers"] +bot = ["adapter-transformers", "datasets", "einops", "faiss-cpu", "gradio", "langchain", "nbconvert", "openai", "sentence-transformers", "slack-sdk", "torch", "torch"] +huggingface-llm = ["accelerate"] [metadata] lock-version = "2.0" python-versions = "^3.11" -content-hash = "56c1ceefcb939dd090c641de32bf25cf5ba2718cceb2148bcf2a24a255b2e945" +content-hash = "9981e743e250cbb1ef7eb21016b9885b9dedeab9681d56c80d93535927bf1c44" diff --git a/pyproject.toml b/pyproject.toml index 2e84d427..826c83e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,7 @@ ipykernel = "^6.23.2" xformers = { version="^0.0.20", optional=true } ipywidgets = "^8.1.0" gitpython = "^3.1.36" +accelerate = { version="^0.22.0", optional=true } [tool.poetry.group.dev.dependencies] black = "^23.3.0" @@ -49,6 +50,9 @@ isort = "^5.12.0" pre-commit = "^3.3.2" [tool.poetry.extras] +huggingface_llm = [ + "accelerate" +] azure = [ "pulumi", "pulumi-azure-native" @@ -61,14 +65,11 @@ bot = [ "faiss-cpu", "gradio", "langchain", - "llama-index", "nbconvert", "openai", "sentence-transformers", "slack-sdk", - "tokenizers", "torch", - "transformers" ] [[tool.poetry.source]] diff --git a/slack_bot/run.py b/slack_bot/run.py index 30136d72..7b28b176 100755 --- a/slack_bot/run.py +++ b/slack_bot/run.py @@ -1,4 +1,3 @@ -# Standard library imports import argparse import logging import os @@ -6,21 +5,42 @@ import sys import threading -# Third-party imports from slack_sdk.socket_mode import SocketModeClient from slack_sdk.web import WebClient -# Local imports from slack_bot import MODELS, Bot if __name__ == "__main__": # Parse command line arguments parser = argparse.ArgumentParser() - parser.add_argument("--model", "-m", help="Select which model to use", default=None) + parser.add_argument( + "--model", "-m", help="Select which model to use", default=None, choices=MODELS + ) + parser.add_argument( + "--hf_model", + "-hf", + help="""Select which HuggingFace model to use + (ignored if not using llama-huggingface model)""", + default="StabilityAI/stablelm-tuned-alpha-3b", + ) + parser.add_argument( + "--max_input_size", + "-max", + help="""Select maximum input size for HuggingFace model + (ignored if not using 
llama-huggingface model)""",
+        default=4096,
+    )
+    parser.add_argument(
+        "--device",
+        "-dev",
+        help="""Select device for HuggingFace model
+        (ignored if not using llama-huggingface model)""",
+        default="auto",
+    )
     parser.add_argument(
         "--force-new-index",
         "-f",
-        help="Recreate the index or not",
+        help="Recreate the index vector store or not",
         action=argparse.BooleanOptionalAction,
         default=False,
     )
@@ -34,10 +54,12 @@
         "--which-index",
         "-w",
         help="""Specifies the directory name for looking up/writing indices.
-        Currently supports 'all_data' and 'handbook'. If regenerating index, 'all_data'
-        will use all .txt .md. and .csv files in the data directory, 'handbook' will
+        Currently supports 'all_data', 'public' and 'handbook'.
+        If regenerating index, 'all_data' will use all .txt, .md, and .csv
+        files in the data directory, 'handbook' will
         only use the 'handbook-scraped.csv' file.""",
         default="all_data",
+        choices=["all_data", "public", "handbook"],
     )

     args = parser.parse_args()
@@ -85,15 +107,32 @@
         logging.error(f"Model {model_name} was not recognised")
         sys.exit(1)

-    logging.info(f"Initialising bot with model {model_name}")
+    logging.info(f"Initialising bot with model: {model_name}")

-    slack_bot = Bot(
-        model(
+    if model_name == "llama-index-hf":
+        response_model = model(
+            model_name=args.hf_model,
+            max_input_size=args.max_input_size,
+            device=args.device,
             force_new_index=force_new_index,
             data_dir=data_dir,
             which_index=which_index,
         )
-    )
+    else:
+        response_model = model(
+            force_new_index=force_new_index,
+            data_dir=data_dir,
+            which_index=which_index,
+        )
+
+    logging.info(f"Initialising bot with model: {response_model}")
+
+    slack_bot = Bot(response_model)
+
+    logging.info("Connecting to Slack...")
+    if os.environ.get("SLACK_APP_TOKEN") is None:
+        logging.error("SLACK_APP_TOKEN is not set")
+        sys.exit(1)

     # Initialize SocketModeClient with an app-level token + WebClient
     client = SocketModeClient(
diff --git a/slack_bot/slack_bot/bot/bot.py b/slack_bot/slack_bot/bot/bot.py
index 999839a6..4f7772bc 100644
--- a/slack_bot/slack_bot/bot/bot.py
+++ b/slack_bot/slack_bot/bot/bot.py
@@ -1,15 +1,11 @@
-# Standard library imports
 import logging
-from typing import Optional

-# Third-party imports
 from slack_sdk.socket_mode import SocketModeClient
 from slack_sdk.socket_mode.listeners import SocketModeRequestListener
 from slack_sdk.socket_mode.request import SocketModeRequest
 from slack_sdk.socket_mode.response import SocketModeResponse

-# Local imports
-from slack_bot.models import ResponseModel
+from ..models.base import ResponseModel


 class Bot(SocketModeRequestListener):
diff --git a/slack_bot/slack_bot/models/__init__.py b/slack_bot/slack_bot/models/__init__.py
index 4727958e..d22f0360 100644
--- a/slack_bot/slack_bot/models/__init__.py
+++ b/slack_bot/slack_bot/models/__init__.py
@@ -1,7 +1,7 @@
 from .base import ResponseModel
 from .chat_completion import ChatCompletionAzure, ChatCompletionOpenAI
 from .hello import Hello
-from .llama import LlamaDistilGPT2, LlamaGPT35TurboAzure, LlamaGPT35TurboOpenAI
+from .llama import LlamaIndexGPTAzure, LlamaIndexGPTOpenAI, LlamaIndexHF

 # Please ensure that any models needing OPENAI_API_KEY are named *openai*
 # Please ensure that any models needing OPENAI_AZURE_API_BASE and OPENAI_AZURE_API_KEY are named *azure*
@@ -9,9 +9,9 @@
     "chat-completion-azure": ChatCompletionAzure,
     "chat-completion-openai": ChatCompletionOpenAI,
     "hello": Hello,
-    "llama-distilgpt2": LlamaDistilGPT2,
-    "llama-gpt-3.5-turbo-azure": LlamaGPT35TurboAzure,
-    "llama-gpt-3.5-turbo-openai":
LlamaGPT35TurboOpenAI,
+    "llama-index-hf": LlamaIndexHF,
+    "llama-index-gpt-azure": LlamaIndexGPTAzure,
+    "llama-index-gpt-openai": LlamaIndexGPTOpenAI,
 }

 __all__ = ["MODELS", "ResponseModel"]
diff --git a/slack_bot/slack_bot/models/llama.py b/slack_bot/slack_bot/models/llama.py
index c38df356..43dd322c 100644
--- a/slack_bot/slack_bot/models/llama.py
+++ b/slack_bot/slack_bot/models/llama.py
@@ -1,6 +1,5 @@
 from __future__ import annotations

-# Standard library imports
 import logging
 import math
 import os
@@ -8,99 +7,115 @@
 import re
 from typing import Any, List, Optional

-# Third-party imports
 import pandas as pd
-import transformers
-from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
-from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-from langchain.llms.base import LLM
+from langchain.embeddings import HuggingFaceEmbeddings
 from llama_index import (
     Document,
-    LangchainEmbedding,
-    LLMPredictor,
     PromptHelper,
     ServiceContext,
     StorageContext,
     load_index_from_storage,
 )
-from llama_index.indices.vector_store.base import GPTVectorStoreIndex
+from llama_index.indices.vector_store.base import VectorStoreIndex
+from llama_index.llms import AzureOpenAI, HuggingFaceLLM, OpenAI
+from llama_index.llms.base import LLM
+from llama_index.prompts import PromptTemplate
 from llama_index.response.schema import RESPONSE_TYPE
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

-# Local imports
 from .base import MessageResponse, ResponseModel

 LLAMA_INDEX_DIR = "llama_index_indices"
 PUBLIC_DATA_DIR = "public"
+INTERNAL_DATA_DIR = "turing_internal"


-class CustomLLM(LLM):
-    model_name: str
-    pipeline: transformers.pipelines.text_generation.TextGenerationPipeline
-
-    @property
-    def _llm_type(self) -> str:
-        return "custom"
-
-    def _call(
-        self, prompt: str, stop: Optional[List[str]] = None
-    ) -> transformers.pipelines.text_generation.TextGenerationPipeline:
-        return self.pipeline(prompt, max_new_tokens=9999)[0]["generated_text"]
-
-    @property
-    def _identifying_params(self) -> dict:
-        """Get the identifying parameters."""
-        return {"model_name": self.model_name}
-
-
-class Llama(ResponseModel):
+class LlamaIndex(ResponseModel):
     def __init__(
         self,
         model_name: str,
         max_input_size: int,
         data_dir: pathlib.Path,
         which_index: str,
-        chunk_size_limit: Optional[int] = None,
+        device: str | None = None,
+        chunk_size: Optional[int] = None,
         k: int = 3,
         chunk_overlap_ratio: float = 0.1,
         force_new_index: bool = False,
-        num_output: int = 256,
+        num_output: int = 512,
     ) -> None:
+        """
+        Base class for models using llama-index.
+        This class is not intended to be used directly, but rather subclassed
+        to implement the `_prep_llm` method which constructs the LLM to be used.
+
+        Parameters
+        ----------
+        model_name : str
+            Model name to specify which LLM to use.
+        max_input_size : int
+            Context window size for the LLM.
+        data_dir : pathlib.Path
+            Path to the data directory.
+        which_index : str
+            Which index to construct (if force_new_index is True) or use.
+            Options are "handbook", "public", or "all_data".
+        device : str, optional
+            Device to use for the LLM, by default None.
+            This is ignored if the LLM is a model from OpenAI or Azure.
+        chunk_size : Optional[int], optional
+            Maximum size of chunks to use, by default None.
+            If None, this is computed as `ceil(max_input_size / k)`.
+ k : int, optional + `similarity_top_k` to use in query engine, by default 3 + chunk_overlap_ratio : float, optional + Chunk overlap as a ratio of chunk size, by default 0.1 + force_new_index : bool, optional + Whether or not to recreate the index vector store, + by default False + num_output : int, optional + Number of outputs for the LLM, by default 512 + """ super().__init__(emoji="llama") logging.info("Setting up Huggingface backend.") self.max_input_size = max_input_size self.model_name = model_name self.num_output = num_output - if chunk_size_limit is None: - chunk_size_limit = math.ceil(max_input_size / k) - self.chunk_size_limit = chunk_size_limit + self.device = device + if chunk_size is None: + chunk_size = math.ceil(max_input_size / k) + self.chunk_size = chunk_size self.chunk_overlap_ratio = chunk_overlap_ratio self.data_dir = data_dir self.which_index = which_index - llm_predictor = self._prep_llm_predictor() + # set up LLM + llm = self._prep_llm() - hfemb = HuggingFaceEmbeddings() - embed_model = LangchainEmbedding(hfemb) + # initialise embedding model to use to create the index vectors + embed_model = HuggingFaceEmbeddings( + model_name="sentence-transformers/all-mpnet-base-v2" + ) + # construct the prompt helper prompt_helper = PromptHelper( context_window=self.max_input_size, num_output=self.num_output, - chunk_size_limit=self.chunk_size_limit, + chunk_size_limit=self.chunk_size, chunk_overlap_ratio=self.chunk_overlap_ratio, ) + # construct the service context service_context = ServiceContext.from_defaults( - llm_predictor=llm_predictor, + llm=llm, embed_model=embed_model, prompt_helper=prompt_helper, - chunk_size_limit=chunk_size_limit, + chunk_size=chunk_size, ) if force_new_index: logging.info("Generating the index from scratch...") documents = self._prep_documents() - self.index = GPTVectorStoreIndex.from_documents( + self.index = VectorStoreIndex.from_documents( documents, service_context=service_context ) @@ -111,7 +126,7 @@ def __init__( ) else: - logging.info("Generating the storage context") + logging.info("Loading the storage context") storage_context = StorageContext.from_defaults( persist_dir=self.data_dir / LLAMA_INDEX_DIR / which_index ) @@ -121,8 +136,8 @@ def __init__( storage_context=storage_context, service_context=service_context ) - self.query_engine = self.index.as_query_engine(similarity_top_k=3) - logging.info("Done setting up Huggingface backend.") + self.query_engine = self.index.as_query_engine(similarity_top_k=k) + logging.info("Done setting up Huggingface backend for query engine.") self.error_response_template = ( "Oh no! When I tried to get a response to your prompt, " @@ -132,11 +147,25 @@ def __init__( @staticmethod def _format_sources(response: RESPONSE_TYPE) -> str: + """ + Method to format the sources used to compose the response. 
+
+        Parameters
+        ----------
+        response : RESPONSE_TYPE
+            response object from the query engine
+
+        Returns
+        -------
+        str
+            String containing the formatted sources that
+            were used to compose the response
+        """
         texts = []
         for source_node in response.source_nodes:
             source_text = (
                 source_node.node.extra_info["filename"]
-                + f" (similarity: {round(source_node.score,3)})"
+                + f" (similarity: {round(source_node.score, 3)})"
             )
             texts.append(source_text)
         result = "I read the following documents to compose this answer:\n"
@@ -144,6 +173,22 @@ def _format_sources(response: RESPONSE_TYPE) -> str:
         return result

     def _get_response(self, msg_in: str, user_id: str) -> str:
+        """
+        Method to obtain a response from the query engine given
+        a message and a user id.
+
+        Parameters
+        ----------
+        msg_in : str
+            Message from user
+        user_id : str
+            User ID
+
+        Returns
+        -------
+        str
+            String containing the response from the query engine.
+        """
         try:
             query_response = self.query_engine.query(msg_in)
             # concatenate the response with the resources that it used
@@ -172,6 +217,14 @@ def _get_response(self, msg_in: str, user_id: str) -> str:
         return answer

     def _prep_documents(self) -> List[Document]:
+        """
+        Method to prepare the documents for the index vector store.
+
+        Returns
+        -------
+        List[Document]
+            List of `llama_index.Document` objects used to construct the index vector store.
+        """
         # Prep the contextual documents
         documents = []

@@ -180,13 +233,24 @@ def _prep_documents(self) -> List[Document]:
             data_files = [self.data_dir / PUBLIC_DATA_DIR / "handbook-scraped.csv"]

+        elif self.which_index == "public":
+            logging.info("Regenerating index for all PUBLIC data. Will take a long time...")
+
+            # pull out public data
+            data_files = list((self.data_dir / PUBLIC_DATA_DIR).glob("**/*.md"))
+            data_files += list((self.data_dir / PUBLIC_DATA_DIR).glob("**/*.csv"))
+            data_files += list((self.data_dir / PUBLIC_DATA_DIR).glob("**/*.txt"))
         elif self.which_index == "all_data":
             logging.info("Regenerating index for ALL DATA. Will take a long time...")
-            # TODO Leaving out the Turing internal data for now while we figure out if
-            # we are okay sending it to OpenAI.
+
+            # pull out public data
             data_files = list((self.data_dir / PUBLIC_DATA_DIR).glob("**/*.md"))
             data_files += list((self.data_dir / PUBLIC_DATA_DIR).glob("**/*.csv"))
             data_files += list((self.data_dir / PUBLIC_DATA_DIR).glob("**/*.txt"))
+            # include private internal data
+            data_files += list((self.data_dir / INTERNAL_DATA_DIR).glob("**/*.md"))
+            data_files += list((self.data_dir / INTERNAL_DATA_DIR).glob("**/*.csv"))
+            data_files += list((self.data_dir / INTERNAL_DATA_DIR).glob("**/*.txt"))
         else:
             logging.info("The data_files directory is unrecognized")
@@ -196,7 +260,9 @@ def _prep_documents(self) -> List[Document]:
             df = pd.read_csv(data_file)
             df = df[~df.loc[:, "body"].isna()]
             documents += [
-                Document(row[1]["body"], extra_info={"filename": row[1]["url"]})
+                Document(
+                    text=row[1]["body"], extra_info={"filename": row[1]["url"]}
+                )
                 for row in df.iterrows()
             ]
         elif data_file.suffix in (".md", ".txt"):
@@ -207,81 +273,168 @@
         )
         return documents

-    def _prep_llm_predictor(self) -> LLMPredictor:
+    def _prep_llm(self) -> LLM:
+        """
+        Method to prepare the LLM to be used.
+
+        Returns
+        -------
+        LLM
+            LLM to be used.
+
+        Raises
+        ------
+        NotImplementedError
+            This must be implemented by a subclass of LlamaIndex.
+        """
         raise NotImplementedError(
-            "_prep_llm_predictor needs to be implemented by a subclass of Llama."
+ "_prep_llm needs to be implemented by a subclass of LlamaIndex." ) def direct_message(self, message: str, user_id: str) -> MessageResponse: + """ + Method to respond to a direct message in Slack. + + Parameters + ---------- + msg_in : str + Message from user + user_id : str + User ID + + Returns + ------- + MessageResponse + Response from the query engine. + """ backend_response = self._get_response(message, user_id) + return MessageResponse(backend_response) def channel_mention(self, message: str, user_id: str) -> MessageResponse: + """ + Method to respond to a channel mention in Slack. + + Parameters + ---------- + msg_in : str + Message from user + user_id : str + User ID + + Returns + ------- + MessageResponse + Response from the query engine. + """ backend_response = self._get_response(message, user_id) - return MessageResponse(backend_response) + return MessageResponse(backend_response) -class LlamaDistilGPT2(Llama): - def __init__(self, *args: Any, **kwargs: Any) -> LLMPredictor: - super().__init__(*args, model_name="distilgpt2", max_input_size=1024, **kwargs) - def _prep_llm_predictor(self) -> LLMPredictor: - # Create the model object - tokenizer = AutoTokenizer.from_pretrained(self.model_name) - model = AutoModelForCausalLM.from_pretrained( - self.model_name, - trust_remote_code=True, +class LlamaIndexHF(LlamaIndex): + def __init__( + self, + model_name: str = "StabilityAI/stablelm-tuned-alpha-3b", + *args: Any, + **kwargs: Any, + ) -> None: + """ + `LlamaIndexHF` is a subclass of `LlamaIndex` that uses HuggingFace's + `transformers` library to implement the LLM. + + Parameters + ---------- + model_name : str, optional + Model name from Huggingface's model hub, + by default "StabilityAI/stablelm-tuned-alpha-3b". + """ + super().__init__(*args, model_name=model_name, **kwargs) + + def _prep_llm(self) -> LLM: + dev = self.device or "auto" + logging.info( + f"Setting up Huggingface LLM (model {self.model_name}) on device {dev}" ) - model_pipeline = pipeline( - "text-generation", - model=model, - tokenizer=tokenizer, + logging.info( + f"HF-args: (context_window: {self.max_input_size}, num_output: {self.num_output})" ) - return LLMPredictor( - llm=CustomLLM(model_name=self.model_name, pipeline=model_pipeline) + return HuggingFaceLLM( + context_window=self.max_input_size, + max_new_tokens=self.num_output, + # TODO: allow user to specify the query wrapper prompt for their model + query_wrapper_prompt=PromptTemplate("<|USER|>{query_str}<|ASSISTANT|>"), + generate_kwargs={"temperature": 0.25, "do_sample": False}, + tokenizer_name=self.model_name, + model_name=self.model_name, + device_map=self.device or "auto", ) -class LlamaGPT35TurboOpenAI(Llama): +class LlamaIndexGPTOpenAI(LlamaIndex): def __init__(self, *args: Any, **kwargs: Any) -> None: + """ + `LlamaIndexGPTOpenAI` is a subclass of `LlamaIndex` that uses OpenAI's + API to implement the LLM. + + Must have `OPENAI_API_KEY` set as an environment variable. 
+ """ + if os.getenv("OPENAI_API_KEY") is None: + raise ValueError("You must set OPENAI_API_KEY for OpenAI.") + self.openai_api_key = os.getenv("OPENAI_API_KEY") + self.temperature = 0.7 super().__init__( - *args, model_name="gpt-3.5-turbo-16k", max_input_size=16384, **kwargs + *args, model_name="gpt-3.5-turbo", max_input_size=4096, **kwargs ) - def _prep_llm_predictor(self) -> LLMPredictor: - return LLMPredictor( - llm=ChatOpenAI( - max_tokens=self.num_output, - model=self.model_name, - openai_api_key=self.openai_api_key, - temperature=0.7, - ) + def _prep_llm(self) -> LLM: + return OpenAI( + model=self.model_name, + temperature=self.temperature, + max_tokens=self.num_output, + api_key=self.openai_api_key, ) -class LlamaGPT35TurboAzure(Llama): +class LlamaIndexGPTAzure(LlamaIndex): def __init__(self, *args: Any, **kwargs: Any) -> None: + """ + `LlamaIndexGPTAzure` is a subclass of `LlamaIndex` that uses Azure's + instance of OpenAI's LLMs to implement the LLM. + + Must have the following environment variables set: + - `OPENAI_API_BASE`: Azure endpoint which looks + like https://YOUR_RESOURCE_NAME.openai.azure.com/ + - `OPENAI_API_KEY`: Azure API key + """ + if os.getenv("OPENAI_AZURE_API_BASE") is None: + raise ValueError( + "You must set OPENAI_AZURE_API_BASE to your Azure endpoint. " + "It should look like https://YOUR_RESOURCE_NAME.openai.azure.com/" + ) + if os.getenv("OPENAI_AZURE_API_KEY") is None: + raise ValueError("You must set OPENAI_AZURE_API_KEY for Azure OpenAI.") + + # deployment name can be found in the Azure AI Studio portal self.deployment_name = "reginald-gpt35-turbo" self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE") self.openai_api_key = os.getenv("OPENAI_AZURE_API_KEY") self.openai_api_version = "2023-03-15-preview" self.temperature = 0.7 super().__init__( - *args, model_name="gpt-3.5-turbo-16k", max_input_size=16384, **kwargs + *args, model_name="gpt-3.5-turbo", max_input_size=4096, **kwargs ) - def _prep_llm_predictor(self) -> LLMPredictor: - return LLMPredictor( - llm=AzureChatOpenAI( - deployment_name=self.deployment_name, - temperature=self.temperature, - model=self.model_name, - max_tokens=self.num_output, - openai_api_key=self.openai_api_key, - openai_api_base=self.openai_api_base, - openai_api_version=self.openai_api_version, - openai_api_type="azure", - ) + def _prep_llm(self) -> LLM: + return AzureOpenAI( + model=self.model_name, + engine=self.deployment_name, + temperature=self.temperature, + max_tokens=self.num_output, + api_key=self.openai_api_key, + api_base=self.openai_api_base, + api_type="azure", + api_version=self.openai_api_version, )