diff --git a/.github/workflows/_scheduled_test.yml b/.github/workflows/_scheduled_test.yml index 849b8042..2572e940 100644 --- a/.github/workflows/_scheduled_test.yml +++ b/.github/workflows/_scheduled_test.yml @@ -4,7 +4,7 @@ run-name: langchain-nvidia Scheduled tests on: workflow_dispatch: schedule: - - cron: '0 13 * * *' + - cron: '0 8 * * *' env: POETRY_VERSION: "1.7.1" diff --git a/cookbook/nvidia_nim_agents_llama3.1.ipynb b/cookbook/nvidia_nim_agents_llama3.1.ipynb index e3e13dce..dcfb24dd 100644 --- a/cookbook/nvidia_nim_agents_llama3.1.ipynb +++ b/cookbook/nvidia_nim_agents_llama3.1.ipynb @@ -38,7 +38,9 @@ "\n", "Next, the model is packaged as a NIM, meaning it's optimized to deliver best performance on NVIDIA accelerated infrastructure and easy to deploy as well as use. This microservice packaging also uses OpenAI compatible APIs, so developers can build world-class generative AI agents with ease.\n", "\n", - "Let's see how to use tools in a couple of examples." + "Let's see how to use tools for agentic applications with LangGraph. \n", + "\n", + "*Note: lots of the educational content is adapted from https://langchain-ai.github.io/langgraph/concepts/high_level/.*" ] }, { @@ -81,7 +83,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "aaeb35a9", "metadata": {}, "outputs": [], @@ -89,7 +91,7 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")" + "os.environ[\"NVIDIA_API_KEY\"] = \"nvapi-xxx\"" ] }, { @@ -102,7 +104,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "579881ca", "metadata": {}, "outputs": [], @@ -126,7 +128,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "c8832545-d3c1-404f-afdb-6a00891f84c9", "metadata": {}, "outputs": [], @@ -134,12 +136,12 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"TAVILY_API_KEY\"] = getpass.getpass(\"Enter your Tavily API key: \")" + "os.environ[\"TAVILY_API_KEY\"] = \"tvly-xxx\"" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "e1d1511d", "metadata": {}, "outputs": [], @@ -152,48 +154,146 @@ }, { "cell_type": "markdown", - "id": "cd230847", + "id": "8f63dd76-d8c7-429e-bf7c-d2f575ef8340", "metadata": {}, "source": [ - "Create [ReAct agent](https://python.langchain.com/v0.2/docs/concepts/#react-agents), prebuilt in [LangGraph](https://langchain-ai.github.io/langgraph/#overview). " + "We will wrap the tools as a `ToolNode` which will be beneficial to use in LangGraph later." ] }, { "cell_type": "code", - "execution_count": null, - "id": "da73ae35", + "execution_count": 5, + "id": "75437d15-2e38-4673-850c-3272274aa917", "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", - "from langchain.callbacks.tracers import ConsoleCallbackHandler\n", + "from langgraph.prebuilt import ToolNode\n", "\n", - "app = create_react_agent(llm, tools)" + "tool_node = ToolNode(tools)" ] }, { "cell_type": "markdown", - "id": "be70d7ee", + "id": "d69d35aa-09d6-4484-a230-c0fe4c2b6bcb", "metadata": {}, "source": [ - "Run agent; a callback is passed to provide more verbose output." + "Let's invoke the tool manually to see the result." 
] }, { "cell_type": "code", - "execution_count": null, - "id": "02a109cc", + "execution_count": 6, + "id": "a433c5c5-c69e-410c-bfbd-df9b3f9bcf3b", "metadata": { "scrolled": true }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'messages': [ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1723495766, \\'localtime\\': \\'2024-08-12 13:49\\'}, \\'current\\': {\\'last_updated_epoch\\': 1723495500, \\'last_updated\\': \\'2024-08-12 13:45\\', \\'temp_c\\': 16.0, \\'temp_f\\': 60.8, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Sunny\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/113.png\\', \\'code\\': 1000}, \\'wind_mph\\': 12.3, \\'wind_kph\\': 19.8, \\'wind_degree\\': 250, \\'wind_dir\\': \\'WSW\\', \\'pressure_mb\\': 1015.0, \\'pressure_in\\': 29.98, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 75, \\'cloud\\': 0, \\'feelslike_c\\': 15.9, \\'feelslike_f\\': 60.7, \\'windchill_c\\': 15.9, \\'windchill_f\\': 60.7, \\'heatindex_c\\': 16.0, \\'heatindex_f\\': 60.8, \\'dewpoint_c\\': 11.5, \\'dewpoint_f\\': 52.8, \\'vis_km\\': 10.0, \\'vis_miles\\': 6.0, \\'uv\\': 5.0, \\'gust_mph\\': 14.5, \\'gust_kph\\': 23.4}}\"}]', name='tavily_search_results_json', tool_call_id='tool_call_id', artifact={'query': \"What's the weather in San Francisco?\", 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'title': 'Weather in San Francisco', 'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1723495766, 'localtime': '2024-08-12 13:49'}, 'current': {'last_updated_epoch': 1723495500, 'last_updated': '2024-08-12 13:45', 'temp_c': 16.0, 'temp_f': 60.8, 'is_day': 1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png', 'code': 1000}, 'wind_mph': 12.3, 'wind_kph': 19.8, 'wind_degree': 250, 'wind_dir': 'WSW', 'pressure_mb': 1015.0, 'pressure_in': 29.98, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 75, 'cloud': 0, 'feelslike_c': 15.9, 'feelslike_f': 60.7, 'windchill_c': 15.9, 'windchill_f': 60.7, 'heatindex_c': 16.0, 'heatindex_f': 60.8, 'dewpoint_c': 11.5, 'dewpoint_f': 52.8, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 5.0, 'gust_mph': 14.5, 'gust_kph': 23.4}}\", 'score': 0.9999223, 'raw_content': None}], 'response_time': 2.19})]}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from langchain_core.messages import AIMessage\n", + "\n", + "message_with_single_tool_call = AIMessage(\n", + " content=\"\",\n", + " tool_calls=[\n", + " {\n", + " \"name\": \"tavily_search_results_json\",\n", + " \"args\": {\"query\": \"What's the weather in San Francisco?\"},\n", + " \"id\": \"tool_call_id\",\n", + " \"type\": \"tool_call\",\n", + " }\n", + " ],\n", + ")\n", + "\n", + "tool_node.invoke({\"messages\": [message_with_single_tool_call]})" + ] + }, + { + "cell_type": "markdown", + "id": "6ae74552-a8d9-4a05-ab55-47d6b3cfea5d", + "metadata": {}, + "source": [ + "Now, let's see how to use the tool with a chat model. This requires binding the tool to the LLM. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "771400bb-3a7d-4c87-b7fe-30e2f9c92f5a", + "metadata": {}, "outputs": [], "source": [ - "query = \"What is LangChain?\"\n", - "messages = app.invoke({\"messages\": [(\"human\", query)]}, config={'callbacks': [ConsoleCallbackHandler()]})\n", - "{\n", - " \"input\": query,\n", - " \"output\": messages[\"messages\"][-1].content,\n", - "}" + "llm_with_tools = llm.bind_tools(tools)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "5a15de17-dc4b-467a-bbf6-7526b2adb069", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'name': 'tavily_search_results_json',\n", + " 'args': {'query': 'San Francisco weather today'},\n", + " 'id': 'chatcmpl-tool-99f68eb818504801aec450e1cee73b6f',\n", + " 'type': 'tool_call'}]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "llm_with_tools.invoke(\"What's the weather in San Francisco?\").tool_calls" + ] + }, + { + "cell_type": "markdown", + "id": "ef07259b-2baf-4407-a3b3-3e48d060adfa", + "metadata": {}, + "source": [ + "As you can see, the LLM decides that it is best to use the `tavily_search_results_json` tool and that the query is \"San Francisco Weather today\". Output is structured accordingly." + ] + }, + { + "cell_type": "markdown", + "id": "6909a295-e9d4-416f-a57d-c08a2bc5f1c5", + "metadata": {}, + "source": [ + "Let's send this as a message to the ToolNode -- more on this in the next section :) " + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "335a3176-2f52-4001-afe7-d6536498493c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'messages': [ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1723495766, \\'localtime\\': \\'2024-08-12 13:49\\'}, \\'current\\': {\\'last_updated_epoch\\': 1723495500, \\'last_updated\\': \\'2024-08-12 13:45\\', \\'temp_c\\': 16.0, \\'temp_f\\': 60.8, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Sunny\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/113.png\\', \\'code\\': 1000}, \\'wind_mph\\': 12.3, \\'wind_kph\\': 19.8, \\'wind_degree\\': 250, \\'wind_dir\\': \\'WSW\\', \\'pressure_mb\\': 1015.0, \\'pressure_in\\': 29.98, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 75, \\'cloud\\': 0, \\'feelslike_c\\': 15.9, \\'feelslike_f\\': 60.7, \\'windchill_c\\': 15.9, \\'windchill_f\\': 60.7, \\'heatindex_c\\': 16.0, \\'heatindex_f\\': 60.8, \\'dewpoint_c\\': 11.5, \\'dewpoint_f\\': 52.8, \\'vis_km\\': 10.0, \\'vis_miles\\': 6.0, \\'uv\\': 5.0, \\'gust_mph\\': 14.5, \\'gust_kph\\': 23.4}}\"}]', name='tavily_search_results_json', tool_call_id='chatcmpl-tool-d4fbc9ca41ec4728b6df56ab2c41fa97', artifact={'query': 'San Francisco weather today', 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'title': 'Weather in San Francisco', 'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1723495766, 'localtime': '2024-08-12 13:49'}, 'current': {'last_updated_epoch': 1723495500, 'last_updated': '2024-08-12 13:45', 'temp_c': 16.0, 'temp_f': 60.8, 
'is_day': 1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png', 'code': 1000}, 'wind_mph': 12.3, 'wind_kph': 19.8, 'wind_degree': 250, 'wind_dir': 'WSW', 'pressure_mb': 1015.0, 'pressure_in': 29.98, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 75, 'cloud': 0, 'feelslike_c': 15.9, 'feelslike_f': 60.7, 'windchill_c': 15.9, 'windchill_f': 60.7, 'heatindex_c': 16.0, 'heatindex_f': 60.8, 'dewpoint_c': 11.5, 'dewpoint_f': 52.8, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 5.0, 'gust_mph': 14.5, 'gust_kph': 23.4}}\", 'score': 0.9988695, 'raw_content': None}], 'response_time': 2.36})]}" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tool_node.invoke({\"messages\": [llm_with_tools.invoke(\"What's the weather in San Francisco?\")]})" ] }, { @@ -201,11 +301,13 @@ "id": "b5e9bbb9", "metadata": {}, "source": [
- "## 🔨 Tool Usage -- Adding on a Custom Tool\n", + "## 🔨 Tool Usage -- Adding a Custom Tool and Using LangGraph\n", "\n", "Let's see how to [define a custom tool](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for your NIM agent and how it handles multiple tools. \n", "\n",
- "We'll enhance the NIM with Tavily search with some custom tools to determine a user's current location (based on IP address) and return a latitude and longitude. We will use these tools to have Tavily look up the weather in the user's current location."
+ "We'll enhance the NIM, which already uses Tavily search, with custom tools that determine a user's current location (based on IP address) and return a latitude and longitude. We will use these tools to have Tavily look up the weather in the user's current location.\n", + "\n", + "In addition, we'll see how to use the `ToolNode` we declared earlier in a graph built with LangGraph. We'll use an agent that repeatedly calls an LLM to decide which tools to call and with what inputs, executes those tools, and then feeds the outputs back to the LLM as observations. When no more tools are needed, the loop ends. " ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "e9d8ed5f-b6e9-495f-85ff-e431d39475c4", "metadata": {}, "outputs": [], @@ -239,81 +341,215 @@ "id": "089e3223-50f3-4e8e-9043-24c792ca7daf", "metadata": {}, "source": [
- "Let's update the tools to use the Tavily tool delcared earlier and also add the `get_current_location` tool."
+ "Let's update the tools and the `ToolNode` to use the Tavily tool declared earlier and also add the `get_current_location` tool." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "b71d7d05-d3ec-4005-911c-3e44df8102b4", "metadata": {}, "outputs": [], "source": [ "# Declare two tools: Tavily and custom get_current_location tool.\n", - "tools = [TavilySearchResults(max_results=1), get_current_location]" + "tools = [TavilySearchResults(max_results=1), get_current_location]\n", + "tool_node = ToolNode(tools)\n", + "\n", + "# be sure to bind the updated tools to the LLM!\n", + "llm_with_tools = llm.bind_tools(tools)" ] }, { "cell_type": "markdown", - "id": "cd04f130-3f9b-4a0d-a018-d954dc41ad4b", + "id": "5e0a6ed4-d6a8-4e0b-a094-40adf98f77d4", "metadata": {}, "source": [ - "We already declared our LLM, so we don't need to redeclare it. However, we do want to update the agent to have the updated tools." + "Let's create a graph! 
LangGraph models agent workflows as graphs, and the behavior of the agent is defined by three key pieces:\n", + "1) `State`: a shared data structure that represents a snapshot of the application. In this example, the state consists of messages.\n", + "2) `Nodes`: Python functions that encode the logic of the agent. They receive the state as input, perform some actions, and return an updated state. In this example, the nodes are the agent and its tools. \n", + "3) `Edges`: Python functions that determine which node to execute next based on the current state.\n", + "\n", + "A `StateGraph` is the main graph class; here it is parameterized with `MessagesState` as the graph state." ] }, { "cell_type": "code", - "execution_count": null, - "id": "64a0eead-ee86-4b0b-8ae3-fb194ea69186", - "metadata": { "scrolled": true }, + "execution_count": 12, + "id": "9dc8754b-0734-4eec-98e3-0234d3c111f3", + "metadata": {}, "outputs": [], "source": [
- "from langgraph.prebuilt import create_react_agent\n", - "from langchain.globals import set_verbose\n", - "from langchain.callbacks.tracers import ConsoleCallbackHandler\n", + "from typing import Literal\n", + "\n", + "from langgraph.graph import StateGraph, MessagesState\n", + "\n", + "# in this graph, continue until the last message contains no more tool calls\n", + "def should_continue(state: MessagesState) -> Literal[\"tools\", \"__end__\"]:\n", + " messages = state[\"messages\"]\n", + " last_message = messages[-1]\n", + " if last_message.tool_calls:\n", + " return \"tools\"\n", + " return \"__end__\"\n", + "\n", + "# call the model on the current messages\n", + "def call_model(state: MessagesState):\n", + " messages = state[\"messages\"]\n", + " response = llm_with_tools.invoke(messages)\n", + " return {\"messages\": [response]}\n", + "\n", + "\n", + "workflow = StateGraph(MessagesState)\n", "\n",
- "set_verbose(True) # verbose output to follow function calling\n", + "# Define the two nodes we will cycle between\n", + "workflow.add_node(\"agent\", call_model)\n", + "workflow.add_node(\"tools\", tool_node)\n", "\n", - "query = \"What is the current weather where I am?\"\n", - "app = create_react_agent(llm, tools)\n", + "# Define edges of the graph\n", + "workflow.add_edge(\"__start__\", \"agent\")\n", + "workflow.add_conditional_edges(\n", + " \"agent\",\n", + " should_continue,\n", + ")\n", "\n", "\n",
- "messages = app.invoke({\"messages\": [(\"human\", query)]}, config={'callbacks': [ConsoleCallbackHandler()]})\n", - "{\n", - " \"input\": query,\n", - " \"output\": messages[\"messages\"][-1].content,\n", - "}" + "workflow.add_edge(\"tools\", \"agent\")\n", + "\n", + "# check the structure of the graph by compiling it\n", + "app = workflow.compile()" ] }, { "cell_type": "markdown", - "id": "147c4727", + "id": "f84c68ce-bb8d-4e95-94fa-fbb9c40c01e5", + "metadata": {}, + "source": [ + "Let's see a visual representation of the graph. As you can see, the agent will keep calling tools until it's finished." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "0deb52d9-51a9-4d90-88e0-402d7b77e6e7", "metadata": {}, + "outputs": [ + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCADbAMcDASIAAhEBAxEB/8QAHQABAAEFAQEBAAAAAAAAAAAAAAYDBAUHCAkBAv/EAFkQAAEDBAADAgcICwoKCwAAAAECAwQABQYRBxIhEzEIFBYiQVGUFRcyVVZh0dMJI0JxdIGRk5W00jU2OFJTdZKyw9QYJDdUYmNyobHBMzRDRVdkgoOE4fH/xAAaAQEBAAMBAQAAAAAAAAAAAAAAAQIDBQQH/8QAMxEBAAEDAAYIBAYDAAAAAAAAAAECAxEEEiExUaEUQVJhcZGxwRMVI9EiM1OB4fAFMkL/2gAMAwEAAhEDEQA/APVOlKUClKUClWl0ucezW9+bKUUsMp5jypKlKPcEpSOqlE6ASOpJAHU1g/J6Xk32+/OOsxVbLdnjulCEJ9HbKSduL9YB5BvQCtc6ttNETGtVOI/u5cMzJvtthOFEi4RWFjoUuvpSR+ImqPlVZfjiB7Sj6apR8Lx+I2EMWK2tIAA0iI2O7oPRVXyVsvxPA9mR9FZ/R7+RsPKqy/HED2lH008qrL8cQPaUfTTyVsvxPA9mR9FPJWy/E8D2ZH0U+j38l2HlVZfjiB7Sj6aeVVl+OIHtKPpp5K2X4ngezI+inkrZfieB7Mj6KfR7+RsPKqy/HED2lH019Rk1ncUEou0FSj6EyUE/8a+eStl+J4HsyPor4vE7G4gpVZrepJ6EGKgg/wC6n0e/kbGUSoLSFJIUkjYIOwRX2owvAoMFan7ApWOyyeb/ABIajrP+sY+AoH0kAK79KBO6yNjvLk9b8OYx4pc4ug8yDtCwe5xs+lCtHR7wQQeorGqiMa1E5jylMcGWpSlaUKUpQKUpQKUpQKUpQKUpQKUpQRe7au2cWm3L0qNBYXcnEH7p3mDbP3wNuq6+kIPeNiUVGHR4nxJYcXsIn2tTSFa6czLvNrfrIeJH+yfVUnr0Xd1ERux9881kpSledEAhceMHuWUXLHYd4cmXa3KfRIajQJLiA4ykqdbS6lsoW4kA7QlRVsa1vpUZ4U+E9jfEPhnMzC4NS7AxAK1TUPwJXZtI7dxprkcUykPKIQNhvmKSrRAPSojhwvGOeEAYOF2TLbZityudwkZNBvluKLU25yqUmZCkK9LroSezQpQIWSUoIqOYvc86w7wd7hhFnx3J7VllinuplzI1rUrtITlzUp12A4oFt93xdwqSkbOwemwKDeVq8ILAbziGQZPFv27Rj6Su6qdhyGn4aeXm2thbYdGx1HmddHW9VFM78LHFMYtNjuNrbn3yHcb3GtSpLNrm9kG3DtbzSgwQ/pPVIbJ5yfNJ1qtG3bDbxLsvH1NmxvO5MPIcQiItb2RsSpEue8yZCXEjtOZxKtup5WlBKtbKU8vWt7cfrDcU8PcHm2myzLonGshtN1k262sFyT4swsBwNNDqtSQd8o69DQbfs92j320w7lE7bxWWyl9rxhhbDnKobHM24ErQdHqlQBHcQKvKxuOXxvJbJEubUSbAbkp50x7lGXGkIGyNLbWApJ6b0R6RWSoFRjLtWu52G8o0lbcxEB49fPZkKDYT+dLKvxH11J6jGeJ8bi2e3pBLsu6xCkAb6MuiQon1DlZV1+cV6LH5kRO7r8Ovksb0npSledClKUClKUClKUClKUClKUClKUGKyKzKvERosOJYuER0SYb6wSG3QCOoBBKVJUpCgD1StQBHfVO13yNfA/b5TQjXFCSmTbnjs8vcVJ2BztnfRYGj3HRBSMzWOvOPW7IWm27hEbk9kSppw7S40ojRUhY0pB102kg1upqpmNWvd6f3+998UIHg2cJ0kEcN8WBHcRaGP2a+f4NfCf8A8NsV/RDH7NSE4MW+kfIr7HR0AR44HdD77iVKP4zunkTI+VV+/PM/VVlqW+3ykxHFJI8dqJHaYZbS0y0kIQ2gaSlIGgAPQAKqVF/ImR8qr9+eZ+qp5EyPlVfvzzP1VPh2+3ykxHFKKVz74LV6yHjHwXtOVX7KLqi5ypMtpwQ1NNt8rUlxtOgWyfgoG+vfW2vImR8qr9+eZ+qp8O32+UmI4rDIuB3DzLrzIu17wiwXe6SeXtpk23NOuucqQlPMpSSTpKQPvAVj1eDfwpWlAVw4xdQQOVINpYPKNk6Hm+sk/jrP+RMj5VX788z9VQYS8QQrJ78tJ6a7dof7w2DT4dvt8pMRxVrZacX4W46ItuhW7GrM2sqTHiNJYa7RR7koSBtSj6ANk92zX2zwpF1uwvs9gxilpTMGKv4bTaiCpax6Fq5U9PuQAO8qqpa8LtVqmiaGnZlwAIEyc+uQ6nfeEqWTyA+pOh81Z2pNVNETFvr6/sbI3FKUrQhSlKBSlKBSlKBSlKBSlKBSlKBSlKBSlKBSlKDnfwA/4MOPfhtx/XXq6IrnfwA/4MOPfhtx/XXq6IoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoOd/AD/gw49+G3H9deroiud/AD/gw49+G3H9deroigUpSgUpSgUpSgUpSgUpSgUpSgUpSgUpSgUpSgUpSgUrHX6+M2C3mS6h
by1LS0zHaAK3nFdEoTvps+s6AAJJABNRtd+y1StottlbSe5K5rqiPxhob/ACVvosV3IzG7vnC4TWuG/sn/AALVlOE2ziTbI5cuNgAh3HkGyqEtZKFf+24o93odUT0TXVvu7mH+YWP2t76usfkKciyqw3Gy3Sz2GXbbhHciSY65b2nGlpKVJP2v0gmtvRa+MecGHmP9jy4KOcU+O8K9yW1CyYkpu6vuDYCpIVuM3sdx508/qIaUPTXr/XOvg6cGLp4OGCu45ZmLTcFSJbkyTPkSHEuPKVoJBAb0AlASnQ6b2enMa2n7u5h/mFj9re+rp0WvjHnBhN6VCRfcw2NwLJr8Le+rrLY/kr0+Yu33KIiBc0t9slDLpdaebBAKkLKUnoVAEEAjY7wQawq0euiNbZPhMGEgpSleZClKUClKUClKUClKUClKUClKUClKUClKUEO4gHVxw4dNKvCgQR/5KUf+IFX1WPEH908M/nhX6jLqOcXsnbw/h7dbkq9rx9xIbaZnMwvHXUurcShCG2P+0WoqCUp9agT0FdONlqjwn1lZ6kxpXKELjvxAxPEuLTN0RPn3XGrfAuFuk3+2x40lDclTiFreairLakN9mXOmiQFBQGqp3XjdlXDeRxBltZmjidbLPj8CTDltxorbDM6VKLKW3CwEhWhyOAc6fMJB2dKGvXhHWdK5ot+X8YMeavT9yj36TaEWKfJcuN/t9rjKgS22StlTKYr7nOhRCgUOJJGknmPWr/DcxzqLfOEy7xlhvEXPbY+uRGFuYYTAfTCElC2ClPMR0UkhwrB3sa7qusOhkOIdBKFJWASklJ3og6I/LWKUdcQ8e+eHNG/m2z/9fkrTfgc2K6W/hm7LmZJNu0V653RtuDIYjobYWm4SAtxKm20rJWQVEKUQCfNAGhW41f5Q8d/BJv8AY1ttzmJnun0lYTulKVykKUpQKUpQKUpQKUpQKUpQKUpQKUpQKUpQQ7iD+6eGfzwr9Rl1iuIOB23iTjD9kui5LDK3WpDUmE72b8d5pxLjTratHSkrSkjYI6dQR0rLcUnmLTjByGTJYisY+tV0cXJWEIKEtLQ4Co/BPI4vR9eh0B3UaxnivZMwx+Be7Oxd7hbJzfasSY1pkuoWnZHRSUEd4I7/AEV1LcTct0xTtxs5zPuyxnchN44At2ez5ncbbdslyLIr9aBbpXjd2bYcmFBUWyHOx5WVALUkcqQjSjtBJJqI8IeD2TyGr/jOW2ibD4a3C1qjO2S+Sbc8+uUpaftjSoDTaUICAr4R5ubkIA1W9/LON8WX79CS/qqeWcYf92X79CS/qqy6PX2ZNWeCK2HggxZ7Rd7bLzDLL/EuFuctYbu9wQ8I7K08pLYDaQVgdy1hSvnPWslH4SWeM9w/dTJnFWFMqYtwLiNOpVFMY9t5nnHkO/N5fO+bpVfH+K+P5Za27nYzcbzbXSpKJlvtkl9lZSopUAtDZBIUCD16EEVkfLON8WX79CS/qqvwK+zJqzwYXBOEkDh1e7tMtN4vBt1wefk+4ciQhcGM6852ji2U8gWklXMdFZA51aA3UhV/lDx38Em/2NURmUZRA9zL719dklj+zrAZDxKsGDZDjt7y6Q9jcCc8u0Wzx1hXO/Ie5VFSwkEtIAaCQV62VnYACSWrNqJmuMRiecYMTG9uClKVyGJSlKBSlKBSlKBSlKBSlKBSlKBSlfla0toUpSglKRsqJ0AKD9VCuLHFKHwkxyPdZVovF9clTGoEaBZIapMh15zfKNDoB0PUkegDZIBt7pxCvRz3ErTYMVev+M3eM5MmZSxLbESI0E/awnqS4paigjWvNO082lctfhVwrhcJ7RcoUW73i+O3Ge7cZM29TFSXluL0NAnoAEpSOg662dmgoQMBvb/EXJL5ecqfvGK3OCiDExJ6I2IsdOh2qnNjbilHmHXXmrIPNpOpyww3GZbZZbS002kIQ2hISlKQNAADuAqpSgVzj4eHHP3luBs9qBI7HI8i5rZb+U6W2lQ+3PD0jlQdAjuUtBro6tDeEH4HGIeEnkltvOT3zJIblvieKMRLXKYbYSOdS1L5XGVnnVzAEgjYQnp0oOTfsXfHX3HyO6cL7pICYl05rjai4r4MlKR2rQ/220hQHcOyV6VV6V159eBP4GGGZPiGG8VJF4yKPkUG7OyW48aUwmKoxpa0oSpJZKylQbAUOcb2rWt16C0CqMqIxNbS3IZbfbStLgS6gKAUlQUlWj6QQCD6CAarUoNaP4ZfcGyTOs0tN3vWWKuUIOxcNkyW0xky20aT2LiwOyCwlCSO7qpR5joJkOE50Mlx2wS7xbXsSvd2ZW6mwXV1AloKPhgJB87Q0dgbAUnYSToSqovlvDHFs6u9gut9skW43OwyhNtktxJDsZ0EHaVAg62EkpOwSlJI2BoJRStVP5Zk/COBnuT8RrvAueHQ5CZVqVZ7c745GjKUQpt5A2Fcm0AKG9gKUogdE7Fx6/wMqsVuvNrf8atlwjolRnwhSe0aWkKSrSgCNgg9QKDIUpSgUpSgUpSgUpSgUpSgxGVZdZcHsjt4yC5xrPa2lttuTJbgQ2hS1pQjaj0G1KSN/PUMuGP5JxJuec4vmdktsfh1LjIhQFw5zvj0zmTt1aynlDaeoSE9CCg/CSQakfE6x2jIsAvsK/WVOR2rxZT71qUN+Ndl9tSgdR1KkJ18+q+cMMyb4g8PbBkbVtkWdu4xEPiBLSQ7H2NFCtgdxGt6699BlcZxm14bj9vsdkhNW60wGUsRorI0ltA7gPSfvnqT1NZOlKBSlKBVGXLYgRXpUp5uNGYQpx155YShtAGypRPQAAEkmsflWV2fB8enX2/XFi1WiC2XZEuSrlQhP/Mk6AA6kkAAk1zExByfw3Z7cq5Nz8R4EsuBbEAkszsnIOwtzXVuNsAgDqrvGzooCU+AAoL8F7HFpIUlUy4lKgdgjx17qK6Kqystkt+N2mJa7VCYt1tiNpZjxYzYQ20gDQSlI6AVe0ClKUClKUHwgKBBGwehBqGXfhgxcuIuP5exfLzbHrTGchqtcOVywJjKgdJdZIIJSohQUNHzQOuhqaUoIHw7z+935me1mOMjCLk1cnYUKPIntPpuDYAUh1kpI3tJGxroQfUdTytU8X/Iny/4W+VHjvu17sOe4Hiu+z8Z7I83a6+55fX6a2tQKUpQKUpQKUpQKUr8rcQ2NrUEj/SOqDUnhDeEvj3g1Wyz3HJbLf7lAubrjCJNmitutsuJCVBDqnHEBKlgqKQNkhtf8WuK4X2TnPLlMXYcexy23a6Tr6tu2XC8pKdw3FlLDC47Kk6dG0bWHVDvGj0VXoPxIwTHOK+F3TFsjZam2q4NFtxJUOZtX3LiCfgrSdEH0EV5ocI/BQvPDHw4sUxi9N+N2WDKXeod2Sn7VJjsJU40vv6K7RLaVJJ2kn0ggm4kerNKpeNM/wAs3/SFPGmf5Zv+kKYkVah/FXixjPBjDpeS5VcUwLex5qEDznZDhHmtNI71rOu775JABIwXHHj7j3AzGmJ1wS7drxcHPFrRYreOeVcZHQBttI3obUnatdNjo
SUpOueFXATIs9zCJxS41qZn5M159kxZs80CwIJ2PN6hb/dtR3ogHZISUwYnFeF2U+FLkMHOOLcByy4PEcEjH+Hzij9s/iyZ4+6UQejZ7t6IA5gvqdttDLaG20JQ2gBKUpGgAO4AV+qUClKUClKUClK/C3UN651pTvu5jqg/dWl2flxbVNet8VE6e2ytceK692KXnAklKCvlVyAnQ5tHW96PdVbxpn+Wb/pCnjTP8s3/AEhVxI86Mg+yoSBeIrcrg/FZk26QsOtzruVvNLG0kIJjAtLB6E6Pq1XXvgu8e5PhHcNnsufxheKte6DsNiOuZ40H0IQgl1K+zb6cylo1o9Wz19A4b8OjwW573hG2KbicdLkXP5QbIQPtcefsB5SyB5qVJIdJP+tPcmvRnhrhVm4W4FYsTs6m0W+0xURmzsAuEdVOK190tRUo/Oo0xIlVKpeNM/yzf9IV9EhpRADqCT3AKFMSKlKUqBSlKC1uk33NtkuXy83YMrd5fXypJ/5Vry14lar9bolyvNviXi5SmUPPSZzCXlbUASlPMPNQO4JGhoevZqc5V+9i8fgb39Q1Hsa/e5avwRr+oK6WjzNFuaqZxOWW6Fl732LfJqz+wNfs0977Fvk1Z/YGv2agvCvwirFxJGUlxqTZkWOZMQt6bDksseKsLCe2W860hCFHfMWiedA3sdCakGEcbcK4iz34VhvYlS2o/jZZfjPRlLY3rtm+1QntG9kDnRtPUdeorbF+5P8A3PmmZ4s1732LfJqz+wNfs0977Fvk1Z/YGv2awGJceMEzq/os1kyBubPdS4uOkx3mm5SW/hlh1aAh4J9JbUrp17qjWD+EPa18HsTy7Npce1zr4XG241uivvF1xK3BpplAccOko2e/XedU6Rc7c+ZmeLYZ4fYz0Ldgt0dwdUvRoyGXEH1pWgBST84IIqRYJdJF0sBMp0yJEaTIhqeOtuBp1SEqOgBzFKQToAb3rpVhZLzDyOzwrrbnvGIE1lEhh7lKedtQ2lWlAEbBHeK/XDP9xLh/O079YXWF6qblmZqnOJj3XOY2pdSlK5bEpSlAq1ul0i2W3yJ015MeIwgrccV3AD5h1J9QHUnoKuq1Bx1vLjs6zWNCtMFK50hO/hFJCWh842Vq++hNezQ9HnSr9Nrj6LCOZVxFvOWPuJZkSLPatkNxY6+zecT6C44nzgT/ABUkAb0ebW6hqrDbXFqW5AjuuK1zLdaC1K++T1NX1K+j2bVGj06lqMQx1pY/yetXxZD9nR9FPJ61fFkP2dH0VkKiF54uYlj95ctc+8IYlNKSh49i4pphStcqXXUpKGydjopQ7xWyq7FEZqqx+5meLP8Ak9aviyH7Oj6KeT1q+LIfs6PoqO3zjDiOOXOdb7hdizLgKQJaERXnBHCkJWlTikoISgpWnzyQnvG9ggXeUcTMaw5+Gzdboll+WgustMtOPrU2O9zlbSohH+kdD56x+PRGfx7t+0zPFl/J61fFkP2dH0UOO2ogj3Mh6PT/AKuj6KwXCfLpeecO7Jf5zbDUqcyXHERklLYPMoeaCSe4DvJqW1lRc16YqidkmZ4q9kuNwxdxK7NPft4SR9oSoqYUPUWj5v4wAfURW8eH2fM5nDW28hMW7RwPGIyTtJB6BxBPek6++D0PoJ0PV3Y7w5jeS2m6tq5Q1IQy91+Ew4oIcB9ethWvWgVytP0GjSrc1RH443T7SsTnZLpulKV89GLyr97F4/A3v6hqPY1+9y1fgjX9QVJMjZXIx66NNpKnFxXUpSPSSggVGsXWlzGrSpJ2lURkg+scgroWfyZ8fZepzNdMTyK8cPuNXDVrH7uxe7vd7pdrdMXEWm3zGXXUvNoEn4AUsbbKSQQd70KyGXW+9+EDlNp9xMYvmHxrVjd5hSJl9gqg8r8yMllqO0D1cCFDnKkgoHInRJNdOUpqo5hx5F7zd3gtjkfCr5jMjDJDMq8TblBMeNHSxDcjqYYdPmvBxSxotkjlGzqsNj9gVaeB+H2u/Y1nVnyvFbjMjQ7rjlrVIkQ39rPbISOYPR3UOhJPKpKuoOtbHW9KaoiPCS45NduGuOzMyiJg5O9EQqewlITyufOkEhKiNEpHcSR6KkfDP9xLh/O079YXV3Vtw1QU2GYv7ly6TlJOu8eMuDf+4/8A5WVeyxV4x7r1JZSlK5qFKUoFaQ43RVR81tUpX/RyoC2UnX3TbnMR+R0fkPqrd9RniBhyc0sJioWlmcwsPxHl70hwAjStfcqBKT8x33gV0v8AH6RTo2k0117t0/usOf6UlxnI8iRb58ZUeU1tD8V4dR6P/Uk+gjoRUNHBjAgdjDbGD/N7X7NfQpqqmImjEx4/xLBMq5yiYWzbrplFhyex5ncvdS7yX2nbPLl+58uNIXsFwNuJbQQFELCwOifTW2veXwH5GWL9Htfs1MWWUR2kNNIS22hISlCRoJA6ACtFdmb2NeIjH7+sDTj2LzWPfrjtW2UWJkFlmCCytXjITbUt6bJH2w8w5em+vTvqwxNVz4eZYzc7njt5uke7Y7bIrL8CEp9yI6whQcYcSOrfMVhWzobB2enTelKnRozFUTiYzPnMz7iAcBLbMtHCDGYc+I/AmNR1ByNJbLbjZ7RR0pJ6g9an9R2/cOsWyid47eMdtl0l8gb7eXFQ4vlHcNkb11NY73lsB+Rli/R7X7NbKKa7dMUUxExGzf8AwJnVJ+Kq4uRILfV2XKZjoGt9VOJG/wAQ2fxVjrFjNkw2E8zaLbCs0Ra+1cRFaSygq0BzHQA3oAb+atu8JcEffnsZJcWVMstJV4hHcSQslQ5S8oHu83YSPUpR9IrXpOkxotmble/q8Vp35bfpSlfM1Kicrh8nt3F2y93KxsrUVmLDDC2Qo9SUpdaXy7PXSSBsk661LKVsouVW/wDWVzhDfIC4fLO9/mIX93p5AXD5Z3v8xC/u9TKlbuk3O7yj7GUN8gLh8s73+Yhf3enkBcPlne/zEL+71MqU6Tc7vKPsZRBHD+QvzZWVXqUyfhNf4szzD0jnaZSsffSoH1EVKYcNi3RGYsVlEeMygNttNJCUoSBoAAdwqtStdd2u5sqn29DOSlKVpQpSlApSlBhckw2zZc0hF1gokLbBDbwJQ63vv5XEkKT+I9ahT3AO1qWSzfb1HQe5AWwsD7xU0T+Umtn0r2WtM0ixGrbrmIXLVnvAwflLe/yRfqKe8DB+Ut7/ACRfqK2nSt/zPS/1PT7GWrPeBg/KW9/ki/UU94GD8pb3+SL9RW06U+Z6X+p6fYy1Z7wMH5S3v8kX6ivo4AwN9ckvZH/xR/YVtKlPmel/qehlCrBwgxywyG5KmHrpLbIUh64udrykdxCNBAPzhINTWlK8V29cvVa1yqZnvMlKUrSj/9k=", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ - "In order to execute this query, first a tool to get the 
current location needs to be called. Then a tool to get the current weather at that location needs to be called. \n", - "Finally, the result is returned to the user." + "from IPython.display import Image, display\n", + "\n", + "try:\n", + " display(Image(app.get_graph().draw_mermaid_png()))\n", + "except Exception:\n", + " # This requires some extra dependencies and is optional\n", + " pass" ] }, { "cell_type": "markdown", - "id": "8ace95bd-f2f7-469e-9d9e-ea7b4c57e8f4", + "id": "3365e44f-cbb2-4822-a910-05d8aeaf4982", "metadata": {}, "source": [ - "Below, you can see a diagram of the application's graph. The agent continues to use tools until the query is resolved." + "And now let's run the graph in 2 examples! First, we'll try a query that only requires one tool call. Then we'll try a query that requires multiple tool calls." ] }, { "cell_type": "code", - "execution_count": null, - "id": "128b55cf-5ee3-42d2-897b-173a6d696921", + "execution_count": 14, + "id": "1d4ca572-4016-4ab6-8791-ffaa791d86ac", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "What's the weather in San Francisco?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "Tool Calls:\n", + " tavily_search_results_json (chatcmpl-tool-a266d125e555420f976689051e8d3f5c)\n", + " Call ID: chatcmpl-tool-a266d125e555420f976689051e8d3f5c\n", + " Args:\n", + " query: San Francisco weather\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", + "Name: tavily_search_results_json\n", + "\n", + "[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1723495766, 'localtime': '2024-08-12 13:49'}, 'current': {'last_updated_epoch': 1723495500, 'last_updated': '2024-08-12 13:45', 'temp_c': 16.0, 'temp_f': 60.8, 'is_day': 1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png', 'code': 1000}, 'wind_mph': 12.3, 'wind_kph': 19.8, 'wind_degree': 250, 'wind_dir': 'WSW', 'pressure_mb': 1015.0, 'pressure_in': 29.98, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 75, 'cloud': 0, 'feelslike_c': 15.9, 'feelslike_f': 60.7, 'windchill_c': 15.9, 'windchill_f': 60.7, 'heatindex_c': 16.0, 'heatindex_f': 60.8, 'dewpoint_c': 11.5, 'dewpoint_f': 52.8, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 5.0, 'gust_mph': 14.5, 'gust_kph': 23.4}}\"}]\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "The current weather in San Francisco is sunny with a temperature of 16.0 degrees Celsius (60.8 degrees Fahrenheit) and a wind speed of 19.8 kilometers per hour (12.3 miles per hour) from the west-southwest. 
The humidity is 75% and the pressure is 1015 millibars.\n" ] } ], "source": [ "# example with a single tool call\n", "for chunk in app.stream(\n", " {\"messages\": [(\"human\", \"What's the weather in San Francisco?\")]}, stream_mode=\"values\"\n", "):\n", " chunk[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "code", "execution_count": 16, "id": "6c297224-9c76-4a3c-a085-61546239dab8", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "================================\u001b[1m Human Message \u001b[0m=================================\n", "\n", "What's the weather where I currently am?\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "Tool Calls:\n", " get_current_location (chatcmpl-tool-3de300004d9d42179f8cac2f00a85752)\n", " Call ID: chatcmpl-tool-3de300004d9d42179f8cac2f00a85752\n", " Args:\n", "=================================\u001b[1m Tool Message \u001b[0m=================================\n", "Name: get_current_location\n", "\n", "[43.7064, -79.3986]\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "Tool Calls:\n", " tavily_search_results_json (chatcmpl-tool-9b6c5f77fe174bbf8308e0dffa57545c)\n", " Call ID: chatcmpl-tool-9b6c5f77fe174bbf8308e0dffa57545c\n", " Args:\n", " query: weather [43.7064, -79.3986]\n", "=================================\u001b[1m Tool Message \u001b[0m=================================\n", "Name: tavily_search_results_json\n", "\n", "[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{'location': {'name': 'Toronto', 'region': 'Ontario', 'country': 'Canada', 'lat': 43.67, 'lon': -79.42, 'tz_id': 'America/Toronto', 'localtime_epoch': 1723495822, 'localtime': '2024-08-12 16:50'}, 'current': {'last_updated_epoch': 1723495500, 'last_updated': '2024-08-12 16:45', 'temp_c': 24.5, 'temp_f': 76.1, 'is_day': 1, 'condition': {'text': 'Partly Cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 11.2, 'wind_kph': 18.0, 'wind_degree': 308, 'wind_dir': 'NW', 'pressure_mb': 1015.0, 'pressure_in': 29.97, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 45, 'cloud': 37, 'feelslike_c': 25.3, 'feelslike_f': 77.6, 'windchill_c': 24.5, 'windchill_f': 76.1, 'heatindex_c': 25.3, 'heatindex_f': 77.6, 'dewpoint_c': 11.9, 'dewpoint_f': 53.5, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 6.0, 'gust_mph': 12.9, 'gust_kph': 20.7}}\"}]\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", "The current weather in Toronto, Ontario, Canada is Partly Cloudy with a temperature of 24.5 degrees Celsius and a wind speed of 18.0 km/h. The humidity is 45% and the UV index is 6.0.\n" ] } ], "source": [ "# example with multiple tool calls\n", "for chunk in app.stream(\n", " {\"messages\": [(\"human\", \"What's the weather where I currently am?\")]}, stream_mode=\"values\"\n", "):\n", " chunk[\"messages\"][-1].pretty_print()" ] }, { "cell_type": "markdown", "id": "cd04f130-3f9b-4a0d-a018-d954dc41ad4b", "metadata": {}, "source": [ "We already declared our LLM, so we don't need to redeclare it. However, we do want to update the agent to have the updated tools." 
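If you would rather keep the prebuilt ReAct agent used in the original version of this notebook (see the removed cells above), a minimal sketch reusing the same `llm` and the updated `tools` list would be:

```python
from langgraph.prebuilt import create_react_agent

# prebuilt agent that wires up the same LLM-then-tools loop shown above
app = create_react_agent(llm, tools)
```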
] }, { diff --git a/libs/ai-endpoints/Makefile b/libs/ai-endpoints/Makefile index dd4ffcbb..52863145 100644 --- a/libs/ai-endpoints/Makefile +++ b/libs/ai-endpoints/Makefile @@ -7,16 +7,16 @@ all: help TEST_FILE ?= tests/unit_tests/ test: - poetry run pytest $(TEST_FILE) + poetry run pytest $(PYTEST_ARGS) $(TEST_FILE) tests: - poetry run pytest $(TEST_FILE) + poetry run pytest $(PYTEST_ARGS) $(TEST_FILE) check_imports: $(shell find langchain_nvidia_ai_endpoints -name '*.py') poetry run python ./scripts/check_imports.py $^ integration_tests: - poetry run pytest tests/integration_tests + poetry run pytest tests/integration_tests $(PYTEST_ARGS) ###################### diff --git a/libs/ai-endpoints/README.md b/libs/ai-endpoints/README.md index 7171c9a0..68f06985 100644 --- a/libs/ai-endpoints/README.md +++ b/libs/ai-endpoints/README.md @@ -225,6 +225,29 @@ llm.invoke( ) ``` +## Completions + +You can also work with models that support the Completions API. These models accept a `prompt` instead of `messages`. + +```python +completions_llm = NVIDIA().bind(max_tokens=512) +[model.id for model in completions_llm.get_available_models()] + +# [ +# ... +# 'bigcode/starcoder2-7b', +# 'bigcode/starcoder2-15b', +# ... +# ] +``` + +```python +prompt = "# Function that does quicksort written in Rust without comments:" +for chunk in completions_llm.stream(prompt): + print(chunk, end="", flush=True) +``` + + ## Embeddings You can also connect to embeddings models through this package. Below is an example: diff --git a/libs/ai-endpoints/docs/llms/nvidia_ai_endpoints.ipynb b/libs/ai-endpoints/docs/llms/nvidia_ai_endpoints.ipynb new file mode 100644 index 00000000..a4a41f76 --- /dev/null +++ b/libs/ai-endpoints/docs/llms/nvidia_ai_endpoints.ipynb @@ -0,0 +1,250 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# NVIDIA NIMs\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of models as [text completion models](/docs/concepts/#llms).\n", + "Many popular models are [chat completion models](/docs/concepts/#chat-models).\n", + "\n", + "To use chat completion models, use [ChatNVIDIA](/docs/integrations/chat/nvidia_ai_endpoints/) instead.\n", + ":::\n", + "\n", + "The `langchain-nvidia-ai-endpoints` package contains LangChain integrations for building applications with models on \n", + "NVIDIA NIM inference microservice. NIM supports models across domains like chat, completion, embedding, and re-ranking, \n", + "from the community as well as NVIDIA. These models are optimized by NVIDIA to deliver the best performance on NVIDIA \n", + "accelerated infrastructure and deployed as a NIM, an easy-to-use, prebuilt container that deploys anywhere using a single \n", + "command on NVIDIA accelerated infrastructure.\n", + "\n", + "NVIDIA hosted deployments of NIMs are available to test on the [NVIDIA API catalog](https://build.nvidia.com/). After testing, \n", + "NIMs can be exported from NVIDIA’s API catalog using the NVIDIA AI Enterprise license and run on-premises or in the cloud, \n", + "giving enterprises ownership and full control of their IP and AI application.\n", + "\n", + "NIMs are packaged as container images on a per model basis and are distributed as NGC container images through the NVIDIA NGC Catalog. 
\n", + "At their core, NIMs provide easy, consistent, and familiar APIs for running inference on an AI model.\n", + "\n", + "This example goes over how to use LangChain to interact with NVIDIA supported via the `NVIDIA` class.\n", + "\n", + "For more information on accessing the completion models through this api, check out the [NVIDIA](https://python.langchain.com/docs/integrations/llms/nvidia_ai_endpoints/) documentation.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Installation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#%pip install -qU langchain-nvidia-ai-endpoints" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "**To get started:**\n", + "\n", + "1. Create a free account with [NVIDIA](https://build.nvidia.com/), which hosts NVIDIA AI Foundation models.\n", + "\n", + "2. Click on your model of choice.\n", + "\n", + "3. Under `Input` select the `Python` tab, and click `Get API Key`. Then click `Generate Key`.\n", + "\n", + "4. Copy and save the generated key as `NVIDIA_API_KEY`. From there, you should have access to the endpoints." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from getpass import getpass\n", + "\n", + "# del os.environ['NVIDIA_API_KEY'] ## delete key and reset\n", + "if os.environ.get(\"NVIDIA_API_KEY\", \"\").startswith(\"nvapi-\"):\n", + " print(\"Valid NVIDIA_API_KEY already in environment. Delete to reset\")\n", + "else:\n", + " candidate_api_key = getpass(\"NVAPI Key (starts with nvapi-): \")\n", + " assert candidate_api_key.startswith(\"nvapi-\"), f\"{candidate_api_key[:5]}... is not a valid key\"\n", + " os.environ[\"NVIDIA_API_KEY\"] = candidate_api_key" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Usage\n", + "\n", + "See [LLM](/docs/how_to#llms) for full functionality." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_nvidia_ai_endpoints import NVIDIA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "llm = NVIDIA().bind(max_tokens=256)\n", + "llm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prompt = \"# Function that does quicksort written in Rust without comments:\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(llm.invoke(prompt))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Stream, Batch, and Async\n", + "\n", + "These models natively support streaming, and as is the case with all LangChain LLMs they expose a batch method to handle concurrent requests, as well as async methods for invoke, stream, and batch. Below are a few examples." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for chunk in llm.stream(prompt):\n", + " print(chunk, end=\"\", flush=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "llm.batch([prompt])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "await llm.ainvoke(prompt)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "async for chunk in llm.astream(prompt):\n", + " print(chunk, end=\"\", flush=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "await llm.abatch([prompt])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "async for chunk in llm.astream_log(prompt):\n", + " print(chunk)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = llm.invoke(\n", + " \"X_train, y_train, X_test, y_test = train_test_split(X, y, test_size=0.1) #Train a logistic regression model, predict the labels on the test set and compute the accuracy score\"\n", + ")\n", + "print(response)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Supported models\n", + "\n", + "Querying `available_models` will still give you all of the other models offered by your API credentials." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "NVIDIA.get_available_models()\n", + "# llm.get_available_models()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "langchain-nvidia-ai-endpoints-m0-Y4aGr-py3.10", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/__init__.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/__init__.py index d5796e3c..bfda8d36 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/__init__.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/__init__.py @@ -42,6 +42,14 @@ from langchain_nvidia_ai_endpoints._statics import Model, register_model from langchain_nvidia_ai_endpoints.chat_models import ChatNVIDIA from langchain_nvidia_ai_endpoints.embeddings import NVIDIAEmbeddings +from langchain_nvidia_ai_endpoints.llm import NVIDIA from langchain_nvidia_ai_endpoints.reranking import NVIDIARerank -__all__ = ["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank", "register_model", "Model"] +__all__ = [ + "ChatNVIDIA", + "NVIDIA", + "NVIDIAEmbeddings", + "NVIDIARerank", + "register_model", + "Model", +] diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index 5393e160..2bde648a 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -17,7 +17,7 @@ Tuple, Union, ) -from urllib.parse import urlparse, urlunparse +from urllib.parse import urlparse import requests from langchain_core.pydantic_v1 import ( @@ -124,7 +124,7 @@ 
def _preprocess_args(cls, values: Dict[str, Any]) -> Dict[str, Any]: ## Making sure /v1 in added to the url, followed by infer_path if "base_url" in values: - base_url = values["base_url"] + base_url = values["base_url"].strip("/") parsed = urlparse(base_url) expected_format = "Expected format is: http://host:port" @@ -133,24 +133,11 @@ def _preprocess_args(cls, values: Dict[str, Any]) -> Dict[str, Any]: f"Invalid base_url format. {expected_format} Got: {base_url}" ) - if parsed.path: - normalized_path = parsed.path.strip("/") - if normalized_path == "v1": - pass - elif normalized_path in [ - "v1/embeddings", - "v1/completions", - "v1/rankings", - ]: - warnings.warn(f"Using {base_url}, ignoring the rest") - else: - raise ValueError( - f"Base URL path is not recognized. {expected_format}" - ) + if base_url.endswith( + ("/embeddings", "/completions", "/rankings", "/reranking") + ): + warnings.warn(f"Using {base_url}, ignoring the rest") - base_url = urlunparse( - (parsed.scheme, parsed.netloc, "v1", None, None, None) - ) values["base_url"] = base_url values["infer_path"] = values["infer_path"].format(base_url=base_url) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index 075a470f..e1970872 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -10,8 +10,8 @@ class Model(BaseModel): Model information. id: unique identifier for the model, passed as model parameter for requests - model_type: API type (chat, vlm, embedding, ranking, completion) - client: client name, e.g. ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank + model_type: API type (chat, vlm, embedding, ranking, completions) + client: client name, e.g. ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank, NVIDIA endpoint: custom endpoint for the model aliases: list of aliases for the model supports_tools: whether the model supports tool calling @@ -23,9 +23,11 @@ class Model(BaseModel): id: str # why do we have a model_type? because ChatNVIDIA can speak both chat and vlm. 
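    # (a client must also agree with the model_type: see validate_client below,
    # where e.g. the new "NVIDIA" client maps to ("completions",).)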
model_type: Optional[ - Literal["chat", "vlm", "embedding", "ranking", "completion", "qa"] + Literal["chat", "vlm", "embedding", "ranking", "completions", "qa"] + ] = None + client: Optional[ + Literal["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank", "NVIDIA"] ] = None - client: Optional[Literal["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank"]] = None endpoint: Optional[str] = None aliases: Optional[list] = None supports_tools: Optional[bool] = False @@ -42,6 +44,7 @@ def validate_client(cls, client: str, values: dict) -> str: "ChatNVIDIA": ("chat", "vlm", "qa"), "NVIDIAEmbeddings": ("embedding",), "NVIDIARerank": ("ranking",), + "NVIDIA": ("completions",), } model_type = values.get("model_type") if model_type not in supported[client]: @@ -257,6 +260,7 @@ def validate_client(cls, client: str, values: dict) -> str: model_type="chat", client="ChatNVIDIA", aliases=["ai-codestral-22b-instruct-v01"], + supports_structured_output=True, ), "google/gemma-2-9b-it": Model( id="google/gemma-2-9b-it", @@ -286,6 +290,7 @@ def validate_client(cls, client: str, values: dict) -> str: id="nv-mistralai/mistral-nemo-12b-instruct", model_type="chat", client="ChatNVIDIA", + supports_tools=True, supports_structured_output=True, ), "meta/llama-3.1-8b-instruct": Model( @@ -330,6 +335,73 @@ def validate_client(cls, client: str, values: dict) -> str: model_type="chat", client="ChatNVIDIA", ), + "mistralai/mistral-large-2-instruct": Model( + id="mistralai/mistral-large-2-instruct", + model_type="chat", + client="ChatNVIDIA", + supports_tools=True, + supports_structured_output=True, + ), + "mistralai/mathstral-7b-v0.1": Model( + id="mistralai/mathstral-7b-v0.1", + model_type="chat", + client="ChatNVIDIA", + ), + "rakuten/rakutenai-7b-instruct": Model( + id="rakuten/rakutenai-7b-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "rakuten/rakutenai-7b-chat": Model( + id="rakuten/rakutenai-7b-chat", + model_type="chat", + client="ChatNVIDIA", + ), + "baichuan-inc/baichuan2-13b-chat": Model( + id="baichuan-inc/baichuan2-13b-chat", + model_type="chat", + client="ChatNVIDIA", + ), + "thudm/chatglm3-6b": Model( + id="thudm/chatglm3-6b", + model_type="chat", + client="ChatNVIDIA", + ), + "microsoft/phi-3.5-mini-instruct": Model( + id="microsoft/phi-3.5-mini-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "microsoft/phi-3.5-moe-instruct": Model( + id="microsoft/phi-3.5-moe-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "nvidia/nemotron-mini-4b-instruct": Model( + id="nvidia/nemotron-mini-4b-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "ai21labs/jamba-1.5-large-instruct": Model( + id="ai21labs/jamba-1.5-large-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "ai21labs/jamba-1.5-mini-instruct": Model( + id="ai21labs/jamba-1.5-mini-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "yentinglin/llama-3-taiwan-70b-instruct": Model( + id="yentinglin/llama-3-taiwan-70b-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "tokyotech-llm/llama-3-swallow-70b-instruct-v0.1": Model( + id="tokyotech-llm/llama-3-swallow-70b-instruct-v0.1", + model_type="chat", + client="ChatNVIDIA", + ), } QA_MODEL_TABLE = { @@ -467,14 +539,23 @@ def validate_client(cls, client: str, values: dict) -> str: ), } -# COMPLETION_MODEL_TABLE = { -# "mistralai/mixtral-8x22b-v0.1": Model( -# id="mistralai/mixtral-8x22b-v0.1", -# model_type="completion", -# client="NVIDIA", -# aliases=["ai-mixtral-8x22b"], -# ), -# } +COMPLETION_MODEL_TABLE = { + "bigcode/starcoder2-7b": 
Model( + id="bigcode/starcoder2-7b", + model_type="completions", + client="NVIDIA", + ), + "bigcode/starcoder2-15b": Model( + id="bigcode/starcoder2-15b", + model_type="completions", + client="NVIDIA", + ), + "nvidia/mistral-nemo-minitron-8b-base": Model( + id="nvidia/mistral-nemo-minitron-8b-base", + model_type="completions", + client="NVIDIA", + ), +} OPENAI_MODEL_TABLE = { @@ -494,6 +575,7 @@ def validate_client(cls, client: str, values: dict) -> str: **VLM_MODEL_TABLE, **EMBEDDING_MODEL_TABLE, **RANKING_MODEL_TABLE, + **COMPLETION_MODEL_TABLE, } if "_INCLUDE_OPENAI" in os.environ: diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 6d0cf965..df5245e7 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -51,6 +51,7 @@ from langchain_core.runnables import Runnable from langchain_core.tools import BaseTool from langchain_core.utils.function_calling import convert_to_openai_tool +from langchain_core.utils.pydantic import is_basemodel_subclass from langchain_nvidia_ai_endpoints._common import _NVIDIAClient from langchain_nvidia_ai_endpoints._statics import Model @@ -698,24 +699,6 @@ class Choices(enum.Enum): output_parser: BaseOutputParser = JsonOutputParser() nvext_param: Dict[str, Any] = {"guided_json": schema} - elif issubclass(schema, BaseModel): - # PydanticOutputParser does not support streaming. what we do - # instead is ignore all inputs that are incomplete wrt the - # underlying Pydantic schema. if the entire input is invalid, - # we return None. - class ForgivingPydanticOutputParser(PydanticOutputParser): - def parse_result( - self, result: List[Generation], *, partial: bool = False - ) -> Any: - try: - return super().parse_result(result, partial=partial) - except OutputParserException: - pass - return None - - output_parser = ForgivingPydanticOutputParser(pydantic_object=schema) - nvext_param = {"guided_json": schema.schema()} - elif issubclass(schema, enum.Enum): # langchain's EnumOutputParser is not in langchain_core # and doesn't support streaming. this is a simple implementation @@ -743,6 +726,25 @@ def parse(self, response: str) -> Any: ) output_parser = EnumOutputParser(enum=schema) nvext_param = {"guided_choice": choices} + + elif is_basemodel_subclass(schema): + # PydanticOutputParser does not support streaming. what we do + # instead is ignore all inputs that are incomplete wrt the + # underlying Pydantic schema. if the entire input is invalid, + # we return None. 
+ class ForgivingPydanticOutputParser(PydanticOutputParser): + def parse_result( + self, result: List[Generation], *, partial: bool = False + ) -> Any: + try: + return super().parse_result(result, partial=partial) + except OutputParserException: + pass + return None + + output_parser = ForgivingPydanticOutputParser(pydantic_object=schema) + nvext_param = {"guided_json": schema.schema()} + else: raise ValueError( "Schema must be a Pydantic object, a dictionary " diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/embeddings.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/embeddings.py index cd32b4ed..f8940f84 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/embeddings.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/embeddings.py @@ -34,7 +34,7 @@ class Config: validate_assignment = True _client: _NVIDIAClient = PrivateAttr(_NVIDIAClient) - _default_model_name: str = "NV-Embed-QA" + _default_model_name: str = "nvidia/nv-embedqa-e5-v5" _default_max_batch_size: int = 50 _default_base_url: str = "https://integrate.api.nvidia.com/v1" base_url: str = Field( description="Base url for model listing and invocation", ) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/llm.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/llm.py new file mode 100644 index 00000000..12f364a5 --- /dev/null +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/llm.py @@ -0,0 +1,233 @@ +from __future__ import annotations + +import os +import warnings +from typing import Any, Dict, Iterator, List, Optional + +from langchain_core.callbacks.manager import CallbackManagerForLLMRun +from langchain_core.language_models.llms import LLM +from langchain_core.outputs import GenerationChunk +from langchain_core.pydantic_v1 import Field, PrivateAttr, root_validator + +from langchain_nvidia_ai_endpoints._common import _NVIDIAClient +from langchain_nvidia_ai_endpoints._statics import Model + + +class NVIDIA(LLM): + """ + LangChain LLM that uses the Completions API with NVIDIA NIMs. + """ + + class Config: + validate_assignment = True + + _client: _NVIDIAClient = PrivateAttr(_NVIDIAClient) + _default_model_name: str = "nvidia/mistral-nemo-minitron-8b-base" + _default_base_url: str = "https://integrate.api.nvidia.com/v1" + base_url: str = Field( + description="Base url for model listing and invocation", + ) + model: Optional[str] = Field(description="The model to use for completions.") + + _base_url_var = "NVIDIA_BASE_URL" + + _init_args: Dict[str, Any] = PrivateAttr() + """Stashed arguments given to the constructor that can be passed to + the Completions API endpoint.""" + + @root_validator(pre=True) + def _validate_base_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: + values["base_url"] = ( + values.get(cls._base_url_var.lower()) + or values.get("base_url") + or os.getenv(cls._base_url_var) + or cls._default_base_url + ) + return values + + def __check_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: + """ + Check kwargs, warn for unknown keys, and return a copy of the recognized keys.
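+        For example (hypothetical values): __check_kwargs({"max_tokens": 512,
+        "bogus": 1}) warns that "bogus" is unrecognized and returns
+        {"max_tokens": 512}.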
+ """ + completions_arguments = { + "frequency_penalty", + "max_tokens", + "presence_penalty", + "seed", + "stop", + "temperature", + "top_p", + "best_of", + "echo", + "logit_bias", + "logprobs", + "n", + "suffix", + "user", + "stream", + } + + recognized_kwargs = { + k: v for k, v in kwargs.items() if k in completions_arguments + } + unrecognized_kwargs = set(kwargs) - completions_arguments + if len(unrecognized_kwargs) > 0: + warnings.warn(f"Unrecognized, ignored arguments: {unrecognized_kwargs}") + + return recognized_kwargs + + def __init__(self, **kwargs: Any): + """ + Create a new NVIDIA LLM for Completions APIs. + + This class provides access to a NVIDIA NIM for completions. By default, it + connects to a hosted NIM, but can be configured to connect to a local NIM + using the `base_url` parameter. An API key is required to connect to the + hosted NIM. + + Args: + model (str): The model to use for completions. + nvidia_api_key (str): The API key to use for connecting to the hosted NIM. + api_key (str): Alternative to nvidia_api_key. + base_url (str): The base URL of the NIM to connect to. + + API Key: + - The recommended way to provide the API key is through the `NVIDIA_API_KEY` + environment variable. + + Additional arguments that can be passed to the Completions API: + - max_tokens (int): The maximum number of tokens to generate. + - stop (str or List[str]): The stop sequence to use for generating completions. + - temperature (float): The temperature to use for generating completions. + - top_p (float): The top-p value to use for generating completions. + - frequency_penalty (float): The frequency penalty to apply to the completion. + - presence_penalty (float): The presence penalty to apply to the completion. + - seed (int): The seed to use for generating completions. + + These additional arguments can also be passed with `bind()`, e.g. + `NVIDIA().bind(max_tokens=512)`, or pass directly to `invoke()` or `stream()`, + e.g. `NVIDIA().invoke("prompt", max_tokens=512)`. + """ + super().__init__(**kwargs) + self._client = _NVIDIAClient( + base_url=self.base_url, + model_name=self.model, + default_hosted_model_name=self._default_model_name, + api_key=kwargs.pop("nvidia_api_key", kwargs.pop("api_key", None)), + infer_path="{base_url}/completions", + cls=self.__class__.__name__, + ) + # todo: only store the model in one place + # the model may be updated to a newer name during initialization + self.model = self._client.model_name + + # stash all additional args that can be passed to the Completions API, + # but first make sure we pull out any args that are processed elsewhere. + for key in [ + "model", + "nvidia_base_url", + "base_url", + ]: + if key in kwargs: + del kwargs[key] + self._init_args = self.__check_kwargs(kwargs) + + @property + def available_models(self) -> List[Model]: + """ + Get a list of available models that work with NVIDIA. + """ + return self._client.get_available_models(self.__class__.__name__) + + @classmethod + def get_available_models( + cls, + **kwargs: Any, + ) -> List[Model]: + """ + Get a list of available models that work with the Completions API. + """ + return cls(**kwargs).available_models + + @property + def _llm_type(self) -> str: + """ + Get the type of language model used by this chat model. + Used for logging purposes only. + """ + return "NVIDIA" + + @property + def _identifying_params(self) -> Dict[str, Any]: + """ + Get parameters used to help identify the LLM. 
+ """ + return { + "model": self.model, + "base_url": self.base_url, + } + + def _call( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + payload: Dict[str, Any] = { + "model": self.model, + "prompt": prompt, + **self._init_args, + **self.__check_kwargs(kwargs), + } + if stop: + payload["stop"] = stop + + if payload.get("stream", False): + warnings.warn("stream set to true for non-streaming call, ignoring") + del payload["stream"] + + response = self._client.get_req(payload=payload) + response.raise_for_status() + + # todo: handle response's usage and system_fingerprint + + choices = response.json()["choices"] + # todo: write a test for this by setting n > 1 on the request + # aug 2024: n > 1 is not supported by endpoints + if len(choices) > 1: + warnings.warn( + f"Multiple choices in response, returning only the first: {choices}" + ) + + return choices[0]["text"] + + def _stream( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[GenerationChunk]: + payload: Dict[str, Any] = { + "model": self.model, + "prompt": prompt, + "stream": True, + **self._init_args, + **self.__check_kwargs(kwargs), + } + if stop: + payload["stop"] = stop + + # we construct payload w/ **kwargs positioned to override stream=True, + # this lets us know if a user passed stream=False + if not payload.get("stream", True): + warnings.warn("stream set to false for streaming call, ignoring") + payload["stream"] = True + + for chunk in self._client.get_req_stream(payload=payload): + content = chunk["content"] + generation = GenerationChunk(text=content) + if run_manager: # todo: add tests for run_manager + run_manager.on_llm_new_token(content, chunk=generation) + yield generation diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py index 62a425b0..2c2047fa 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py @@ -28,7 +28,7 @@ class Config: _client: _NVIDIAClient = PrivateAttr(_NVIDIAClient) _default_batch_size: int = 32 - _default_model_name: str = "nv-rerank-qa-mistral-4b:1" + _default_model_name: str = "nvidia/nv-rerankqa-mistral-4b-v3" _default_base_url: str = "https://integrate.api.nvidia.com/v1" base_url: str = Field( description="Base url for model listing an invocation", diff --git a/libs/ai-endpoints/poetry.lock b/libs/ai-endpoints/poetry.lock index b976f6c4..3cdd919f 100644 --- a/libs/ai-endpoints/poetry.lock +++ b/libs/ai-endpoints/poetry.lock @@ -2,98 +2,113 @@ [[package]] name = "aiohappyeyeballs" -version = "2.3.4" +version = "2.4.0" description = "Happy Eyeballs for asyncio" optional = false -python-versions = "<4.0,>=3.8" +python-versions = ">=3.8" files = [ - {file = "aiohappyeyeballs-2.3.4-py3-none-any.whl", hash = "sha256:40a16ceffcf1fc9e142fd488123b2e218abc4188cf12ac20c67200e1579baa42"}, - {file = "aiohappyeyeballs-2.3.4.tar.gz", hash = "sha256:7e1ae8399c320a8adec76f6c919ed5ceae6edd4c3672f4d9eae2b27e37c80ff6"}, + {file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"}, + {file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"}, ] [[package]] name = "aiohttp" -version = "3.10.0" +version = "3.10.5" 
description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.8" files = [ - {file = "aiohttp-3.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:68ab608118e212f56feef44d4785aa90b713042da301f26338f36497b481cd79"}, - {file = "aiohttp-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:64a117c16273ca9f18670f33fc7fd9604b9f46ddb453ce948262889a6be72868"}, - {file = "aiohttp-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:54076a25f32305e585a3abae1f0ad10646bec539e0e5ebcc62b54ee4982ec29f"}, - {file = "aiohttp-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71c76685773444d90ae83874433505ed800e1706c391fdf9e57cc7857611e2f4"}, - {file = "aiohttp-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bdda86ab376f9b3095a1079a16fbe44acb9ddde349634f1c9909d13631ff3bcf"}, - {file = "aiohttp-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d6dcd1d21da5ae1416f69aa03e883a51e84b6c803b8618cbab341ac89a85b9e"}, - {file = "aiohttp-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06ef0135d7ab7fb0284342fbbf8e8ddf73b7fee8ecc55f5c3a3d0a6b765e6d8b"}, - {file = "aiohttp-3.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccab9381f38c669bb9254d848f3b41a3284193b3e274a34687822f98412097e9"}, - {file = "aiohttp-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:947da3aee057010bc750b7b4bb65cbd01b0bdb7c4e1cf278489a1d4a1e9596b3"}, - {file = "aiohttp-3.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5268b35fee7eb754fb5b3d0f16a84a2e9ed21306f5377f3818596214ad2d7714"}, - {file = "aiohttp-3.10.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ff25d988fd6ce433b5c393094a5ca50df568bdccf90a8b340900e24e0d5fb45c"}, - {file = "aiohttp-3.10.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:594b4b4f1dfe8378b4a0342576dc87a930c960641159f5ae83843834016dbd59"}, - {file = "aiohttp-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c8820dad615cd2f296ed3fdea8402b12663ac9e5ea2aafc90ef5141eb10b50b8"}, - {file = "aiohttp-3.10.0-cp310-cp310-win32.whl", hash = "sha256:ab1d870403817c9a0486ca56ccbc0ebaf85d992277d48777faa5a95e40e5bcca"}, - {file = "aiohttp-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:563705a94ea3af43467167f3a21c665f3b847b2a0ae5544fa9e18df686a660da"}, - {file = "aiohttp-3.10.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:13679e11937d3f37600860de1f848e2e062e2b396d3aa79b38c89f9c8ab7e791"}, - {file = "aiohttp-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8c66a1aadafbc0bd7d648cb7fcb3860ec9beb1b436ce3357036a4d9284fcef9a"}, - {file = "aiohttp-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b7e3545b06aae925f90f06402e05cfb9c62c6409ce57041932163b09c48daad6"}, - {file = "aiohttp-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:effafe5144aa32f0388e8f99b1b2692cf094ea2f6b7ceca384b54338b77b1f50"}, - {file = "aiohttp-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a04f2c8d41821a2507b49b2694c40495a295b013afb0cc7355b337980b47c546"}, - {file = "aiohttp-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6dbfac556219d884d50edc6e1952a93545c2786193f00f5521ec0d9d464040ab"}, - {file = "aiohttp-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a65472256c5232681968deeea3cd5453aa091c44e8db09f22f1a1491d422c2d9"}, - {file = "aiohttp-3.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:941366a554e566efdd3f042e17a9e461a36202469e5fd2aee66fe3efe6412aef"}, - {file = "aiohttp-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:927b4aca6340301e7d8bb05278d0b6585b8633ea852b7022d604a5df920486bf"}, - {file = "aiohttp-3.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:34adb8412e736a5d0df6d1fccdf71599dfb07a63add241a94a189b6364e997f1"}, - {file = "aiohttp-3.10.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:43c60d9b332a01ee985f080f639f3e56abcfb95ec1320013c94083c3b6a2e143"}, - {file = "aiohttp-3.10.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3f49edf7c5cd2987634116e1b6a0ee2438fca17f7c4ee480ff41decb76cf6158"}, - {file = "aiohttp-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9784246431eaf9d651b3cc06f9c64f9a9f57299f4971c5ea778fa0b81074ef13"}, - {file = "aiohttp-3.10.0-cp311-cp311-win32.whl", hash = "sha256:bec91402df78b897a47b66b9c071f48051cea68d853d8bc1d4404896c6de41ae"}, - {file = "aiohttp-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:25a9924343bf91b0c5082cae32cfc5a1f8787ac0433966319ec07b0ed4570722"}, - {file = "aiohttp-3.10.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:21dab4a704c68dc7bc2a1219a4027158e8968e2079f1444eda2ba88bc9f2895f"}, - {file = "aiohttp-3.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:872c0dcaccebd5733d535868fe2356aa6939f5827dcea7a8b9355bb2eff6f56e"}, - {file = "aiohttp-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f381424dbce313bb5a666a215e7a9dcebbc533e9a2c467a1f0c95279d24d1fa7"}, - {file = "aiohttp-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ca48e9f092a417c6669ee8d3a19d40b3c66dde1a2ae0d57e66c34812819b671"}, - {file = "aiohttp-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbe2f6d0466f5c59c7258e0745c20d74806a1385fbb7963e5bbe2309a11cc69b"}, - {file = "aiohttp-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:03799a95402a7ed62671c4465e1eae51d749d5439dbc49edb6eee52ea165c50b"}, - {file = "aiohttp-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5549c71c35b5f057a4eebcc538c41299826f7813f28880722b60e41c861a57ec"}, - {file = "aiohttp-3.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6fa7a42b78d8698491dc4ad388169de54cca551aa9900f750547372de396277"}, - {file = "aiohttp-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:77bbf0a2f6fefac6c0db1792c234f577d80299a33ce7125467439097cf869198"}, - {file = "aiohttp-3.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:34eaf5cfcc979846d73571b1a4be22cad5e029d55cdbe77cdc7545caa4dcb925"}, - {file = "aiohttp-3.10.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4f1de31a585344a106db43a9c3af2e15bb82e053618ff759f1fdd31d82da38eb"}, - {file = "aiohttp-3.10.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f3a1ea61d96146e9b9e5597069466e2e4d9e01e09381c5dd51659f890d5e29e7"}, - {file = "aiohttp-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:73c01201219eb039a828bb58dcc13112eec2fed6eea718356316cd552df26e04"}, - {file = "aiohttp-3.10.0-cp312-cp312-win32.whl", hash = "sha256:33e915971eee6d2056d15470a1214e4e0f72b6aad10225548a7ab4c4f54e2db7"}, - {file = "aiohttp-3.10.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:2dc75da06c35a7b47a88ceadbf993a53d77d66423c2a78de8c6f9fb41ec35687"}, - {file = "aiohttp-3.10.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f1bc4d68b83966012813598fe39b35b4e6019b69d29385cf7ec1cb08e1ff829b"}, - {file = "aiohttp-3.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d9b8b31c057a0b7bb822a159c490af05cb11b8069097f3236746a78315998afa"}, - {file = "aiohttp-3.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10f0d7894ddc6ff8f369e3fdc082ef1f940dc1f5b9003cd40945d24845477220"}, - {file = "aiohttp-3.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72de8ffba4a27e3c6e83e58a379fc4fe5548f69f9b541fde895afb9be8c31658"}, - {file = "aiohttp-3.10.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd36d0f0afc2bd84f007cedd2d9a449c3cf04af471853a25eb71f28bc2e1a119"}, - {file = "aiohttp-3.10.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f64d503c661864866c09806ac360b95457f872d639ca61719115a9f389b2ec90"}, - {file = "aiohttp-3.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31616121369bc823791056c632f544c6c8f8d1ceecffd8bf3f72ef621eaabf49"}, - {file = "aiohttp-3.10.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f76c12abb88b7ee64b3f9ae72f0644af49ff139067b5add142836dab405d60d4"}, - {file = "aiohttp-3.10.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6c99eef30a7e98144bcf44d615bc0f445b3a3730495fcc16124cb61117e1f81e"}, - {file = "aiohttp-3.10.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:39e7ec718e7a1971a5d98357e3e8c0529477d45c711d32cd91999dc8d8404e1e"}, - {file = "aiohttp-3.10.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f1cef548ee4e84264b78879de0c754bbe223193c6313beb242ce862f82eab184"}, - {file = "aiohttp-3.10.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:f98f036eab11d2f90cdd01b9d1410de9d7eb520d070debeb2edadf158b758431"}, - {file = "aiohttp-3.10.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cc4376ff537f7d2c1e98f97f6d548e99e5d96078b0333c1d3177c11467b972de"}, - {file = "aiohttp-3.10.0-cp38-cp38-win32.whl", hash = "sha256:ebedc51ee6d39f9ea5e26e255fd56a7f4e79a56e77d960f9bae75ef4f95ed57f"}, - {file = "aiohttp-3.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:aad87626f31a85fd4af02ba7fd6cc424b39d4bff5c8677e612882649da572e47"}, - {file = "aiohttp-3.10.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1dc95c5e2a5e60095f1bb51822e3b504e6a7430c9b44bff2120c29bb876c5202"}, - {file = "aiohttp-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1c83977f7b6f4f4a96fab500f5a76d355f19f42675224a3002d375b3fb309174"}, - {file = "aiohttp-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8cedc48d36652dd3ac40e5c7c139d528202393e341a5e3475acedb5e8d5c4c75"}, - {file = "aiohttp-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b099fbb823efed3c1d736f343ac60d66531b13680ee9b2669e368280f41c2b8"}, - {file = "aiohttp-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d583755ddb9c97a2da1322f17fc7d26792f4e035f472d675e2761c766f94c2ff"}, - {file = "aiohttp-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a03a4407bdb9ae815f0d5a19df482b17df530cf7bf9c78771aa1c713c37ff1f"}, - {file = "aiohttp-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcb6e65f6ea7caa0188e36bebe9e72b259d3d525634758c91209afb5a6cbcba7"}, - {file = 
"aiohttp-3.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6612c6ed3147a4a2d6463454b94b877566b38215665be4c729cd8b7bdce15b4"}, - {file = "aiohttp-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0b0c0148d2a69b82ffe650c2ce235b431d49a90bde7dd2629bcb40314957acf6"}, - {file = "aiohttp-3.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0d85a173b4dbbaaad1900e197181ea0fafa617ca6656663f629a8a372fdc7d06"}, - {file = "aiohttp-3.10.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:12c43dace645023583f3dd2337dfc3aa92c99fb943b64dcf2bc15c7aa0fb4a95"}, - {file = "aiohttp-3.10.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:33acb0d9bf12cdc80ceec6f5fda83ea7990ce0321c54234d629529ca2c54e33d"}, - {file = "aiohttp-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:91e0b76502205484a4d1d6f25f461fa60fe81a7987b90e57f7b941b0753c3ec8"}, - {file = "aiohttp-3.10.0-cp39-cp39-win32.whl", hash = "sha256:1ebd8ed91428ffbe8b33a5bd6f50174e11882d5b8e2fe28670406ab5ee045ede"}, - {file = "aiohttp-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:0433795c4a8bafc03deb3e662192250ba5db347c41231b0273380d2f53c9ea0b"}, - {file = "aiohttp-3.10.0.tar.gz", hash = "sha256:e8dd7da2609303e3574c95b0ec9f1fd49647ef29b94701a2862cceae76382e1d"}, + {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:18a01eba2574fb9edd5f6e5fb25f66e6ce061da5dab5db75e13fe1558142e0a3"}, + {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:94fac7c6e77ccb1ca91e9eb4cb0ac0270b9fb9b289738654120ba8cebb1189c6"}, + {file = "aiohttp-3.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f1f1c75c395991ce9c94d3e4aa96e5c59c8356a15b1c9231e783865e2772699"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f7acae3cf1a2a2361ec4c8e787eaaa86a94171d2417aae53c0cca6ca3118ff6"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94c4381ffba9cc508b37d2e536b418d5ea9cfdc2848b9a7fea6aebad4ec6aac1"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c31ad0c0c507894e3eaa843415841995bf8de4d6b2d24c6e33099f4bc9fc0d4f"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0912b8a8fadeb32ff67a3ed44249448c20148397c1ed905d5dac185b4ca547bb"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d93400c18596b7dc4794d48a63fb361b01a0d8eb39f28800dc900c8fbdaca91"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d00f3c5e0d764a5c9aa5a62d99728c56d455310bcc288a79cab10157b3af426f"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d742c36ed44f2798c8d3f4bc511f479b9ceef2b93f348671184139e7d708042c"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:814375093edae5f1cb31e3407997cf3eacefb9010f96df10d64829362ae2df69"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8224f98be68a84b19f48e0bdc14224b5a71339aff3a27df69989fa47d01296f3"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d9a487ef090aea982d748b1b0d74fe7c3950b109df967630a20584f9a99c0683"}, + {file = "aiohttp-3.10.5-cp310-cp310-win32.whl", hash = "sha256:d9ef084e3dc690ad50137cc05831c52b6ca428096e6deb3c43e95827f531d5ef"}, + {file = "aiohttp-3.10.5-cp310-cp310-win_amd64.whl", 
hash = "sha256:66bf9234e08fe561dccd62083bf67400bdbf1c67ba9efdc3dac03650e97c6088"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c6a4e5e40156d72a40241a25cc226051c0a8d816610097a8e8f517aeacd59a2"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c634a3207a5445be65536d38c13791904fda0748b9eabf908d3fe86a52941cf"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4aff049b5e629ef9b3e9e617fa6e2dfeda1bf87e01bcfecaf3949af9e210105e"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1942244f00baaacaa8155eca94dbd9e8cc7017deb69b75ef67c78e89fdad3c77"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04a1f2a65ad2f93aa20f9ff9f1b672bf912413e5547f60749fa2ef8a644e061"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f2bfc0032a00405d4af2ba27f3c429e851d04fad1e5ceee4080a1c570476697"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:424ae21498790e12eb759040bbb504e5e280cab64693d14775c54269fd1d2bb7"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:975218eee0e6d24eb336d0328c768ebc5d617609affaca5dbbd6dd1984f16ed0"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4120d7fefa1e2d8fb6f650b11489710091788de554e2b6f8347c7a20ceb003f5"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b90078989ef3fc45cf9221d3859acd1108af7560c52397ff4ace8ad7052a132e"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ba5a8b74c2a8af7d862399cdedce1533642fa727def0b8c3e3e02fcb52dca1b1"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:02594361128f780eecc2a29939d9dfc870e17b45178a867bf61a11b2a4367277"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8fb4fc029e135859f533025bc82047334e24b0d489e75513144f25408ecaf058"}, + {file = "aiohttp-3.10.5-cp311-cp311-win32.whl", hash = "sha256:e1ca1ef5ba129718a8fc827b0867f6aa4e893c56eb00003b7367f8a733a9b072"}, + {file = "aiohttp-3.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:349ef8a73a7c5665cca65c88ab24abe75447e28aa3bc4c93ea5093474dfdf0ff"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:305be5ff2081fa1d283a76113b8df7a14c10d75602a38d9f012935df20731487"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3a1c32a19ee6bbde02f1cb189e13a71b321256cc1d431196a9f824050b160d5a"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:61645818edd40cc6f455b851277a21bf420ce347baa0b86eaa41d51ef58ba23d"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c225286f2b13bab5987425558baa5cbdb2bc925b2998038fa028245ef421e75"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ba01ebc6175e1e6b7275c907a3a36be48a2d487549b656aa90c8a910d9f3178"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8eaf44ccbc4e35762683078b72bf293f476561d8b68ec8a64f98cf32811c323e"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c43eb1ab7cbf411b8e387dc169acb31f0ca0d8c09ba63f9eac67829585b44f"}, + {file = 
"aiohttp-3.10.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de7a5299827253023c55ea549444e058c0eb496931fa05d693b95140a947cb73"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4790f0e15f00058f7599dab2b206d3049d7ac464dc2e5eae0e93fa18aee9e7bf"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44b324a6b8376a23e6ba25d368726ee3bc281e6ab306db80b5819999c737d820"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0d277cfb304118079e7044aad0b76685d30ecb86f83a0711fc5fb257ffe832ca"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:54d9ddea424cd19d3ff6128601a4a4d23d54a421f9b4c0fff740505813739a91"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4f1c9866ccf48a6df2b06823e6ae80573529f2af3a0992ec4fe75b1a510df8a6"}, + {file = "aiohttp-3.10.5-cp312-cp312-win32.whl", hash = "sha256:dc4826823121783dccc0871e3f405417ac116055bf184ac04c36f98b75aacd12"}, + {file = "aiohttp-3.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:22c0a23a3b3138a6bf76fc553789cb1a703836da86b0f306b6f0dc1617398abc"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7f6b639c36734eaa80a6c152a238242bedcee9b953f23bb887e9102976343092"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f29930bc2921cef955ba39a3ff87d2c4398a0394ae217f41cb02d5c26c8b1b77"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f489a2c9e6455d87eabf907ac0b7d230a9786be43fbe884ad184ddf9e9c1e385"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:123dd5b16b75b2962d0fff566effb7a065e33cd4538c1692fb31c3bda2bfb972"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b98e698dc34966e5976e10bbca6d26d6724e6bdea853c7c10162a3235aba6e16"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3b9162bab7e42f21243effc822652dc5bb5e8ff42a4eb62fe7782bcbcdfacf6"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1923a5c44061bffd5eebeef58cecf68096e35003907d8201a4d0d6f6e387ccaa"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d55f011da0a843c3d3df2c2cf4e537b8070a419f891c930245f05d329c4b0689"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:afe16a84498441d05e9189a15900640a2d2b5e76cf4efe8cbb088ab4f112ee57"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8112fb501b1e0567a1251a2fd0747baae60a4ab325a871e975b7bb67e59221f"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1e72589da4c90337837fdfe2026ae1952c0f4a6e793adbbfbdd40efed7c63599"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4d46c7b4173415d8e583045fbc4daa48b40e31b19ce595b8d92cf639396c15d5"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33e6bc4bab477c772a541f76cd91e11ccb6d2efa2b8d7d7883591dfb523e5987"}, + {file = "aiohttp-3.10.5-cp313-cp313-win32.whl", hash = "sha256:c58c6837a2c2a7cf3133983e64173aec11f9c2cd8e87ec2fdc16ce727bcf1a04"}, + {file = "aiohttp-3.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:38172a70005252b6893088c0f5e8a47d173df7cc2b2bd88650957eb84fcf5022"}, + {file = 
"aiohttp-3.10.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f6f18898ace4bcd2d41a122916475344a87f1dfdec626ecde9ee802a711bc569"}, + {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5ede29d91a40ba22ac1b922ef510aab871652f6c88ef60b9dcdf773c6d32ad7a"}, + {file = "aiohttp-3.10.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:673f988370f5954df96cc31fd99c7312a3af0a97f09e407399f61583f30da9bc"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58718e181c56a3c02d25b09d4115eb02aafe1a732ce5714ab70326d9776457c3"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b38b1570242fbab8d86a84128fb5b5234a2f70c2e32f3070143a6d94bc854cf"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:074d1bff0163e107e97bd48cad9f928fa5a3eb4b9d33366137ffce08a63e37fe"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd31f176429cecbc1ba499d4aba31aaccfea488f418d60376b911269d3b883c5"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7384d0b87d4635ec38db9263e6a3f1eb609e2e06087f0aa7f63b76833737b471"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8989f46f3d7ef79585e98fa991e6ded55d2f48ae56d2c9fa5e491a6e4effb589"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c83f7a107abb89a227d6c454c613e7606c12a42b9a4ca9c5d7dad25d47c776ae"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cde98f323d6bf161041e7627a5fd763f9fd829bcfcd089804a5fdce7bb6e1b7d"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:676f94c5480d8eefd97c0c7e3953315e4d8c2b71f3b49539beb2aa676c58272f"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2d21ac12dc943c68135ff858c3a989f2194a709e6e10b4c8977d7fcd67dfd511"}, + {file = "aiohttp-3.10.5-cp38-cp38-win32.whl", hash = "sha256:17e997105bd1a260850272bfb50e2a328e029c941c2708170d9d978d5a30ad9a"}, + {file = "aiohttp-3.10.5-cp38-cp38-win_amd64.whl", hash = "sha256:1c19de68896747a2aa6257ae4cf6ef59d73917a36a35ee9d0a6f48cff0f94db8"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7e2fe37ac654032db1f3499fe56e77190282534810e2a8e833141a021faaab0e"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5bf3ead3cb66ab990ee2561373b009db5bc0e857549b6c9ba84b20bc462e172"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b2c16a919d936ca87a3c5f0e43af12a89a3ce7ccbce59a2d6784caba945b68b"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad146dae5977c4dd435eb31373b3fe9b0b1bf26858c6fc452bf6af394067e10b"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c5c6fa16412b35999320f5c9690c0f554392dc222c04e559217e0f9ae244b92"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95c4dc6f61d610bc0ee1edc6f29d993f10febfe5b76bb470b486d90bbece6b22"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da452c2c322e9ce0cfef392e469a26d63d42860f829026a63374fde6b5c5876f"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:898715cf566ec2869d5cb4d5fb4be408964704c46c96b4be267442d265390f32"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:391cc3a9c1527e424c6865e087897e766a917f15dddb360174a70467572ac6ce"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:380f926b51b92d02a34119d072f178d80bbda334d1a7e10fa22d467a66e494db"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce91db90dbf37bb6fa0997f26574107e1b9d5ff939315247b7e615baa8ec313b"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9093a81e18c45227eebe4c16124ebf3e0d893830c6aca7cc310bfca8fe59d857"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ee40b40aa753d844162dcc80d0fe256b87cba48ca0054f64e68000453caead11"}, + {file = "aiohttp-3.10.5-cp39-cp39-win32.whl", hash = "sha256:03f2645adbe17f274444953bdea69f8327e9d278d961d85657cb0d06864814c1"}, + {file = "aiohttp-3.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:d17920f18e6ee090bdd3d0bfffd769d9f2cb4c8ffde3eb203777a3895c128862"}, + {file = "aiohttp-3.10.5.tar.gz", hash = "sha256:f071854b47d39591ce9a17981c46790acb30518e2f83dfca8db2dfa091178691"}, ] [package.dependencies] @@ -171,22 +186,22 @@ files = [ [[package]] name = "attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "certifi" @@ -489,13 +504,13 @@ trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.27.0" +version = "0.27.2" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, - {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, ] [package.dependencies] @@ -510,16 +525,17 @@ brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" -version = "3.7" +version = "3.8" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, + {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, ] [[package]] @@ -560,7 +576,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.2.26" +version = "0.2.35" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -583,7 +599,7 @@ typing-extensions = ">=4.7" type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "7d1694040dcb017193c0c574de8ed751a9314268" +resolved_reference = "754f3c41f9dd3344b9d79cc9830791bd168422d6" subdirectory = "libs/core" [[package]] @@ -604,21 +620,22 @@ pytest = ">=7,<9" type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "210623b409bc185e4afe04f3978f5ebbc8c1d087" +resolved_reference = "754f3c41f9dd3344b9d79cc9830791bd168422d6" subdirectory = "libs/standard-tests" [[package]] name = "langsmith" -version = "0.1.94" +version = "0.1.106" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.94-py3-none-any.whl", hash = "sha256:0d01212086d58699f75814117b026784218042f7859877ce08a248a98d84aa8d"}, - {file = "langsmith-0.1.94.tar.gz", hash = "sha256:e44afcdc9eee6f238f6a87a02bba83111bd5fad376d881ae299834e06d39d712"}, + {file = "langsmith-0.1.106-py3-none-any.whl", hash = "sha256:a418161c98de72ee2c6eea6667c6217814b67db4b9a3a024788013384216ff35"}, + {file = "langsmith-0.1.106.tar.gz", hash = "sha256:64a890a05640d64692f5515ebb444b0457332a9cf9e7605c4651de6737a7d3a0"}, ] [package.dependencies] +httpx = ">=0.23.0,<1" orjson = ">=3.9.14,<4.0.0" pydantic = [ {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, @@ -788,62 +805,68 @@ files = [ [[package]] name = "orjson" -version = "3.10.6" +version = "3.10.7" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.6-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:fb0ee33124db6eaa517d00890fc1a55c3bfe1cf78ba4a8899d71a06f2d6ff5c7"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c1c4b53b24a4c06547ce43e5fee6ec4e0d8fe2d597f4647fc033fd205707365"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eadc8fd310edb4bdbd333374f2c8fec6794bbbae99b592f448d8214a5e4050c0"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61272a5aec2b2661f4fa2b37c907ce9701e821b2c1285d5c3ab0207ebd358d38"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57985ee7e91d6214c837936dc1608f40f330a6b88bb13f5a57ce5257807da143"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:633a3b31d9d7c9f02d49c4ab4d0a86065c4a6f6adc297d63d272e043472acab5"}, - {file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1c680b269d33ec444afe2bdc647c9eb73166fa47a16d9a75ee56a374f4a45f43"}, - {file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f759503a97a6ace19e55461395ab0d618b5a117e8d0fbb20e70cfd68a47327f2"}, - {file = "orjson-3.10.6-cp310-none-win32.whl", hash = "sha256:95a0cce17f969fb5391762e5719575217bd10ac5a189d1979442ee54456393f3"}, - {file = "orjson-3.10.6-cp310-none-win_amd64.whl", hash = "sha256:df25d9271270ba2133cc88ee83c318372bdc0f2cd6f32e7a450809a111efc45c"}, - {file = "orjson-3.10.6-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b1ec490e10d2a77c345def52599311849fc063ae0e67cf4f84528073152bb2ba"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55d43d3feb8f19d07e9f01e5b9be4f28801cf7c60d0fa0d279951b18fae1932b"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3045267e98fe749408eee1593a142e02357c5c99be0802185ef2170086a863"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c27bc6a28ae95923350ab382c57113abd38f3928af3c80be6f2ba7eb8d8db0b0"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d27456491ca79532d11e507cadca37fb8c9324a3976294f68fb1eff2dc6ced5a"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:05ac3d3916023745aa3b3b388e91b9166be1ca02b7c7e41045da6d12985685f0"}, - {file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1335d4ef59ab85cab66fe73fd7a4e881c298ee7f63ede918b7faa1b27cbe5212"}, - {file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4bbc6d0af24c1575edc79994c20e1b29e6fb3c6a570371306db0993ecf144dc5"}, - {file = "orjson-3.10.6-cp311-none-win32.whl", hash = "sha256:450e39ab1f7694465060a0550b3f6d328d20297bf2e06aa947b97c21e5241fbd"}, - {file = "orjson-3.10.6-cp311-none-win_amd64.whl", hash = "sha256:227df19441372610b20e05bdb906e1742ec2ad7a66ac8350dcfd29a63014a83b"}, - {file = "orjson-3.10.6-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ea2977b21f8d5d9b758bb3f344a75e55ca78e3ff85595d248eee813ae23ecdfb"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6f3d167d13a16ed263b52dbfedff52c962bfd3d270b46b7518365bcc2121eed"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f710f346e4c44a4e8bdf23daa974faede58f83334289df80bc9cd12fe82573c7"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7275664f84e027dcb1ad5200b8b18373e9c669b2a9ec33d410c40f5ccf4b257e"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0943e4c701196b23c240b3d10ed8ecd674f03089198cf503105b474a4f77f21f"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:446dee5a491b5bc7d8f825d80d9637e7af43f86a331207b9c9610e2f93fee22a"}, - {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:64c81456d2a050d380786413786b057983892db105516639cb5d3ee3c7fd5148"}, - {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:960db0e31c4e52fa0fc3ecbaea5b2d3b58f379e32a95ae6b0ebeaa25b93dfd34"}, - {file = "orjson-3.10.6-cp312-none-win32.whl", hash = "sha256:a6ea7afb5b30b2317e0bee03c8d34c8181bc5a36f2afd4d0952f378972c4efd5"}, - {file = "orjson-3.10.6-cp312-none-win_amd64.whl", hash = "sha256:874ce88264b7e655dde4aeaacdc8fd772a7962faadfb41abe63e2a4861abc3dc"}, - {file = "orjson-3.10.6-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:66680eae4c4e7fc193d91cfc1353ad6d01b4801ae9b5314f17e11ba55e934183"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caff75b425db5ef8e8f23af93c80f072f97b4fb3afd4af44482905c9f588da28"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3722fddb821b6036fd2a3c814f6bd9b57a89dc6337b9924ecd614ebce3271394"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2c116072a8533f2fec435fde4d134610f806bdac20188c7bd2081f3e9e0133f"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6eeb13218c8cf34c61912e9df2de2853f1d009de0e46ea09ccdf3d757896af0a"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965a916373382674e323c957d560b953d81d7a8603fbeee26f7b8248638bd48b"}, - {file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03c95484d53ed8e479cade8628c9cea00fd9d67f5554764a1110e0d5aa2de96e"}, - {file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e060748a04cccf1e0a6f2358dffea9c080b849a4a68c28b1b907f272b5127e9b"}, - {file 
= "orjson-3.10.6-cp38-none-win32.whl", hash = "sha256:738dbe3ef909c4b019d69afc19caf6b5ed0e2f1c786b5d6215fbb7539246e4c6"}, - {file = "orjson-3.10.6-cp38-none-win_amd64.whl", hash = "sha256:d40f839dddf6a7d77114fe6b8a70218556408c71d4d6e29413bb5f150a692ff7"}, - {file = "orjson-3.10.6-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:697a35a083c4f834807a6232b3e62c8b280f7a44ad0b759fd4dce748951e70db"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd502f96bf5ea9a61cbc0b2b5900d0dd68aa0da197179042bdd2be67e51a1e4b"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f215789fb1667cdc874c1b8af6a84dc939fd802bf293a8334fce185c79cd359b"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2debd8ddce948a8c0938c8c93ade191d2f4ba4649a54302a7da905a81f00b56"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5410111d7b6681d4b0d65e0f58a13be588d01b473822483f77f513c7f93bd3b2"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb1f28a137337fdc18384079fa5726810681055b32b92253fa15ae5656e1dddb"}, - {file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bf2fbbce5fe7cd1aa177ea3eab2b8e6a6bc6e8592e4279ed3db2d62e57c0e1b2"}, - {file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:79b9b9e33bd4c517445a62b90ca0cc279b0f1f3970655c3df9e608bc3f91741a"}, - {file = "orjson-3.10.6-cp39-none-win32.whl", hash = "sha256:30b0a09a2014e621b1adf66a4f705f0809358350a757508ee80209b2d8dae219"}, - {file = "orjson-3.10.6-cp39-none-win_amd64.whl", hash = "sha256:49e3bc615652617d463069f91b867a4458114c5b104e13b7ae6872e5f79d0844"}, - {file = "orjson-3.10.6.tar.gz", hash = "sha256:e54b63d0a7c6c54a5f5f726bc93a2078111ef060fec4ecbf34c5db800ca3b3a7"}, + {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, + {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, + {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, + {file 
= "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, + {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, + {file = "orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, + {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, + {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, + {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, + {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, + {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, + {file = 
"orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, + {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, + {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, + {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, + {file = "orjson-3.10.7-cp38-none-win32.whl", hash = "sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, + {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, + {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, + {file = "orjson-3.10.7-cp39-none-win32.whl", hash = 
"sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, + {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, + {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, ] [[package]] @@ -1180,62 +1203,64 @@ six = ">=1.5" [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash 
= "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] @@ -1326,13 +1351,13 @@ files = [ [[package]] name = "syrupy" -version = "4.6.1" +version = "4.7.1" description = "Pytest Snapshot Test Utility" optional = false -python-versions = ">=3.8.1,<4" +python-versions = ">=3.8.1" files = [ - {file = "syrupy-4.6.1-py3-none-any.whl", hash = "sha256:203e52f9cb9fa749cf683f29bd68f02c16c3bc7e7e5fe8f2fc59bdfe488ce133"}, - {file = "syrupy-4.6.1.tar.gz", hash = "sha256:37a835c9ce7857eeef86d62145885e10b3cb9615bc6abeb4ce404b3f18e1bb36"}, + {file = "syrupy-4.7.1-py3-none-any.whl", hash = "sha256:be002267a512a4bedddfae2e026c93df1ea928ae10baadc09640516923376d41"}, + {file = "syrupy-4.7.1.tar.gz", hash = "sha256:f9d4485f3f27d0e5df6ed299cac6fa32eb40a441915d988e82be5a4bdda335c8"}, ] [package.dependencies] @@ -1366,13 +1391,13 @@ files = [ [[package]] name = "types-pillow" -version = "10.2.0.20240520" +version = "10.2.0.20240822" description = "Typing stubs for Pillow" optional = false python-versions = ">=3.8" files = [ - {file = "types-Pillow-10.2.0.20240520.tar.gz", hash = "sha256:130b979195465fa1e1676d8e81c9c7c30319e8e95b12fae945e8f0d525213107"}, - {file = "types_Pillow-10.2.0.20240520-py3-none-any.whl", hash = "sha256:33c36494b380e2a269bb742181bea5d9b00820367822dbd3760f07210a1da23d"}, + {file = "types-Pillow-10.2.0.20240822.tar.gz", hash = "sha256:559fb52a2ef991c326e4a0d20accb3bb63a7ba8d40eb493e0ecb0310ba52f0d3"}, + {file = "types_Pillow-10.2.0.20240822-py3-none-any.whl", hash = "sha256:d9dab025aba07aeb12fd50a6799d4eac52a9603488eca09d7662543983f16c5d"}, ] [[package]] @@ -1419,43 +1444,46 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "watchdog" 
-version = "4.0.1" +version = "4.0.2" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, - {file = 
"watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, - {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, - {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, - {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, - {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"}, + {file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"}, + {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"}, + {file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"}, + {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"}, ] [package.extras] @@ -1567,4 +1595,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "9eea42719e4bc3e019b55d1b4cb08ddccbd0fe85b076d36ba6d4b6b346beba47" +content-hash = "3dd3831808dfcf8e57896a3c72a9aaec5718c6863e13773f9873597ea44aff7a" diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml index d03d406b..5e86d8c7 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -12,7 +12,7 @@ license = "MIT" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -langchain-core = ">=0.1.47,<0.3" +langchain-core = ">=0.2.22,<0.3" aiohttp = "^3.9.1" pillow = ">=10.0.0,<11.0.0" diff --git a/libs/ai-endpoints/scripts/check_pydantic.sh 
b/libs/ai-endpoints/scripts/check_pydantic.sh index 06b5bb81..d0fa31d6 100755 --- a/libs/ai-endpoints/scripts/check_pydantic.sh +++ b/libs/ai-endpoints/scripts/check_pydantic.sh @@ -14,7 +14,7 @@ fi repository_path="$1" # Search for lines matching the pattern within the specified repository -result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic') +result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic' | grep -v "# ignore: check_pydantic") # Check if any matching lines were found if [ -n "$result" ]; then diff --git a/libs/ai-endpoints/tests/integration_tests/conftest.py b/libs/ai-endpoints/tests/integration_tests/conftest.py index f1dd58f6..15671c7f 100644 --- a/libs/ai-endpoints/tests/integration_tests/conftest.py +++ b/libs/ai-endpoints/tests/integration_tests/conftest.py @@ -3,7 +3,12 @@ import pytest from langchain_core.documents import Document -from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank +from langchain_nvidia_ai_endpoints import ( + NVIDIA, + ChatNVIDIA, + NVIDIAEmbeddings, + NVIDIARerank, +) from langchain_nvidia_ai_endpoints._statics import MODEL_TABLE, Model @@ -39,6 +44,12 @@ def pytest_addoption(parser: pytest.Parser) -> None: nargs="+", help="Run tests for a specific qa model or list of models", ) + parser.addoption( + "--completions-model-id", + action="store", + nargs="+", + help="Run tests for a specific completions model or list of models", + ) parser.addoption( "--embedding-model-id", action="store", @@ -54,7 +65,8 @@ def pytest_addoption(parser: pytest.Parser) -> None: parser.addoption( "--vlm-model-id", action="store", - help="Run tests for a specific vlm model", + nargs="+", + help="Run tests for a specific vlm model or list of models", ) parser.addoption( "--all-models", @@ -98,6 +110,18 @@ def get_all_known_models() -> List[Model]: ] metafunc.parametrize("tool_model", models, ids=models) + if "completions_model" in metafunc.fixturenames: + models = [NVIDIA._default_model_name] + if model_list := metafunc.config.getoption("completions_model_id"): + models = model_list + if metafunc.config.getoption("all_models"): + models = [ + model.id + for model in NVIDIA(**mode).available_models + if model.model_type == "completions" + ] + metafunc.parametrize("completions_model", models, ids=models) + if "structured_model" in metafunc.fixturenames: models = ["meta/llama-3.1-8b-instruct"] if model_list := metafunc.config.getoption("structured_model_id"): @@ -120,8 +144,8 @@ def get_all_known_models() -> List[Model]: if "vlm_model" in metafunc.fixturenames: models = ["nvidia/neva-22b"] - if model := metafunc.config.getoption("vlm_model_id"): - models = [model] + if model_list := metafunc.config.getoption("vlm_model_id"): + models = model_list if metafunc.config.getoption("all_models"): models = [ model.id @@ -163,6 +187,7 @@ def mode(request: pytest.FixtureRequest) -> dict: ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank, + NVIDIA, ] ) def public_class(request: pytest.FixtureRequest) -> type: @@ -180,5 +205,7 @@ def _contact_service(instance: Any) -> None: instance.compress_documents( documents=[Document(page_content="World")], query="Hello" ) + elif isinstance(instance, NVIDIA): + instance.invoke("Hello") return _contact_service diff --git a/libs/ai-endpoints/tests/integration_tests/test_base_url.py b/libs/ai-endpoints/tests/integration_tests/test_base_url.py index 3c821623..7c522c92 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_base_url.py +++ 
b/libs/ai-endpoints/tests/integration_tests/test_base_url.py @@ -13,6 +13,7 @@ def mock_endpoints(requests_mock: Mocker) -> None: "/v1/embeddings", "/v1/chat/completions", "/v1/ranking", + "/v1/completions", ]: requests_mock.post( re.compile(f".*{endpoint}"), diff --git a/libs/ai-endpoints/tests/integration_tests/test_completions_models.py b/libs/ai-endpoints/tests/integration_tests/test_completions_models.py new file mode 100644 index 00000000..74a7927e --- /dev/null +++ b/libs/ai-endpoints/tests/integration_tests/test_completions_models.py @@ -0,0 +1,175 @@ +# https://platform.openai.com/docs/api-reference/completions/create +# POST https://.../v1/completions +# model: str -- The ID of the model to use for completion. +# prompt: str | Array[str] -- The prompt(s) to generate completions for. +# best_of: Optional[int] (default: 1) -- An integer representing the number +# of completions to generate and score. +# The API will return the best completion +# of the group. +# echo: Optional[bool] (default: False) -- Whether to echo the prompt in addition +# to the completion. +# frequency_penalty: Optional[float] (default: 0.0) -- Float that penalizes new +# tokens. Range -2.0 to 2.0. +# logit_bias: Optional[Dict[str, float]] -- Dict containing token to logit bias. +# logprobs: Optional[int] (default: None) -- Integer representing the number of +# logprobs to return. 0 means no logprobs. +# Max value is 5. +# max_tokens: Optional[int] (default: 16) -- Integer representing the maximum number +# of tokens to generate. +# n: Optional[int] (default: 1) -- Integer representing the number of completions +# to generate. +# presence_penalty: Optional[float] (default: 0.0) -- Float that penalizes new tokens +# based on whether they appear in +# the text so far. Range -2.0 to +# 2.0. +# seed: Optional[int] (default: None) -- Integer seed that attempts to make the +# completions deterministic. +# stop: Optional[str|Array[str]] -- Token at which to stop generating completions. +# Up to 4 sequences. +# stream: Optional[bool] (default: False) -- Whether to stream back partial progress. +# stream_options: Optional[Dict["include_usage": bool]] -- Dict containing stream +# options. +# suffix: Optional[str] -- Suffix to add to the completion. +# temperature: Optional[float] (default: 1.0) -- Sampling temperature, between 0 and 2. +# top_p: Optional[float] (default: 1.0) -- Alternative to temperature sampling. +# user: Optional[str] -- User ID to associate with the request. +# +# Returns: +# id: str -- The ID of the completion. +# object: str -- Always "text_completion". +# created: int -- Unix timestamp of when the completion was created. +# model: str -- The ID of the model used to generate the completion. +# choices: List[{"finish_reason": "stop"|"length"|"content_filter", +# "index": int, +# "text": str, +# "logprobs": Optional[{"text_offset": array, +# "token_logprobs": array, +# "tokens": array, +# "top_logprobs": array}]}] -- +# List of completions generated by the model. +# usage: {"completion_tokens": int, +# "prompt_tokens": int, +# "total_tokens": int} -- Usage statistics for the model. +# system_fingerprint: str -- System fingerprint of the model used to generate +# the completion. 
+ + +from typing import Any, Callable, Tuple + +import pytest + +from langchain_nvidia_ai_endpoints import NVIDIA + + +def invoke(llm: NVIDIA, prompt: str, **kwargs: Any) -> Tuple[str, int]: + return llm.invoke(prompt, **kwargs), 1 + + +def stream(llm: NVIDIA, prompt: str, **kwargs: Any) -> Tuple[str, int]: + response = "" + count = 0 + for chunk in llm.stream(prompt, **kwargs): + response += chunk + count += 1 + return response, count + + +async def ainvoke(llm: NVIDIA, prompt: str, **kwargs: Any) -> Tuple[str, int]: + return await llm.ainvoke(prompt, **kwargs), 1 + + +async def astream(llm: NVIDIA, prompt: str, **kwargs: Any) -> Tuple[str, int]: + response = "" + count = 0 + async for chunk in llm.astream(prompt, **kwargs): + response += chunk + count += 1 + return response, count + + +@pytest.mark.parametrize( + "func, count", [(invoke, 0), (stream, 1)], ids=["invoke", "stream"] +) +def test_basic(completions_model: str, mode: dict, func: Callable, count: int) -> None: + llm = NVIDIA(model=completions_model, **mode) + response, cnt = func(llm, "Hello, my name is") + assert isinstance(response, str) + assert cnt > count, "Should have received more chunks" + + +@pytest.mark.parametrize( + "func, count", [(ainvoke, 0), (astream, 1)], ids=["ainvoke", "astream"] +) +async def test_abasic( + completions_model: str, mode: dict, func: Callable, count: int +) -> None: + llm = NVIDIA(model=completions_model, **mode) + response, cnt = await func(llm, "Hello, my name is") + assert isinstance(response, str) + assert cnt > count, "Should have received more chunks" + + +@pytest.mark.parametrize( + "param, value", + [ + ("frequency_penalty", 0.5), + ("max_tokens", 32), + ("presence_penalty", 0.5), + ("seed", 1234), + ("stop", "Hello"), + ("temperature", 0.5), + ("top_p", 0.5), + ], +) +@pytest.mark.parametrize("func", [invoke, stream], ids=["invoke", "stream"]) +def test_params( + completions_model: str, mode: dict, param: str, value: Any, func: Callable +) -> None: + llm = NVIDIA(model=completions_model, **mode) + response, _ = func(llm, "Hello, my name is", **{param: value}) + assert isinstance(response, str) + + +@pytest.mark.parametrize( + "param, value", + [ + ("best_of", 5), + ("echo", True), + ("logit_bias", {"hello": 1.0}), + ("logprobs", 2), + ("n", 2), + ("suffix", "Hello"), + ("user", "1234"), + ], +) +@pytest.mark.parametrize("func", [invoke, stream], ids=["invoke", "stream"]) +@pytest.mark.xfail(reason="Not consistently implemented") +def test_params_incomplete( + completions_model: str, mode: dict, param: str, value: Any, func: Callable +) -> None: + llm = NVIDIA(model=completions_model, **mode) + response, _ = func(llm, "Hello, my name is", **{param: value}) + assert isinstance(response, str) + + +def test_invoke_with_stream_true(completions_model: str, mode: dict) -> None: + llm = NVIDIA(model=completions_model, **mode) + with pytest.warns(UserWarning) as record: + response = llm.invoke("Hello, my name is", stream=True) + assert isinstance(response, str) + assert len(record) == 1 + assert "stream set to true" in str(record[0].message) + assert "ignoring" in str(record[0].message) + + +def test_stream_with_stream_false(completions_model: str, mode: dict) -> None: + llm = NVIDIA(model=completions_model, **mode) + with pytest.warns(UserWarning) as record: + response = next(llm.stream("Hello, my name is", stream=False)) + assert isinstance(response, str) + assert len(record) == 1 + assert "stream set to false" in str(record[0].message) + assert "ignoring" in str(record[0].message) + + +# 
todo: check stream_options diff --git a/libs/ai-endpoints/tests/integration_tests/test_ranking.py b/libs/ai-endpoints/tests/integration_tests/test_ranking.py index 867aab64..06f9444b 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_ranking.py +++ b/libs/ai-endpoints/tests/integration_tests/test_ranking.py @@ -196,14 +196,9 @@ def test_truncate_positive(rerank_model: str, mode: dict, truncate: str) -> None @pytest.mark.parametrize("truncate", [None, "NONE"]) -@pytest.mark.xfail( - reason=( - "truncation is inconsistent across models, " - "nv-rerank-qa-mistral-4b:1 truncates by default " - "while others do not" - ) -) def test_truncate_negative(rerank_model: str, mode: dict, truncate: str) -> None: + if rerank_model == "nv-rerank-qa-mistral-4b:1": + pytest.skip("nv-rerank-qa-mistral-4b:1 truncates by default") query = "What is acceleration?" documents = [ Document(page_content="NVIDIA " * length) diff --git a/libs/ai-endpoints/tests/integration_tests/test_register_model.py b/libs/ai-endpoints/tests/integration_tests/test_register_model.py index 6488aee3..238f2cb5 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_register_model.py +++ b/libs/ai-endpoints/tests/integration_tests/test_register_model.py @@ -4,6 +4,7 @@ import pytest from langchain_nvidia_ai_endpoints import ( + NVIDIA, ChatNVIDIA, Model, NVIDIAEmbeddings, @@ -34,6 +35,11 @@ "nv-rerank-qa-mistral-4b:1", "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/0bf77f50-5c35-4488-8e7a-f49bb1974af6", ), + ( + NVIDIA, + "bigcode/starcoder2-7b", + "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/dd7b01e7-732d-4da5-8e8d-315f79165a23", + ), ], ) def test_registered_model_functional( diff --git a/libs/ai-endpoints/tests/unit_tests/conftest.py b/libs/ai-endpoints/tests/unit_tests/conftest.py index f0790214..4288819e 100644 --- a/libs/ai-endpoints/tests/unit_tests/conftest.py +++ b/libs/ai-endpoints/tests/unit_tests/conftest.py @@ -3,7 +3,12 @@ import pytest import requests_mock -from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank +from langchain_nvidia_ai_endpoints import ( + NVIDIA, + ChatNVIDIA, + NVIDIAEmbeddings, + NVIDIARerank, +) @pytest.fixture( @@ -11,6 +16,7 @@ ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank, + NVIDIA, ] ) def public_class(request: pytest.FixtureRequest) -> type: diff --git a/libs/ai-endpoints/tests/unit_tests/test_base_url.py b/libs/ai-endpoints/tests/unit_tests/test_base_url.py index b5c3a8ef..0baaca6c 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/unit_tests/test_base_url.py @@ -94,6 +94,8 @@ def test_param_base_url_hosted(public_class: type, base_url: str) -> None: "https://localhost", "http://localhost:8888", "http://0.0.0.0:8888/v1", + "http://0.0.0.0:8888/v1/", + "http://blah/some/other/path/v1", ], ) def test_param_base_url_not_hosted(public_class: type, base_url: str) -> None: @@ -107,18 +109,9 @@ def test_param_base_url_not_hosted(public_class: type, base_url: str) -> None: [ "http://localhost:8888/embeddings", "http://0.0.0.0:8888/rankings", + "http://localhost:8888/embeddings/", + "http://0.0.0.0:8888/rankings/", "http://localhost:8888/chat/completions", - ], -) -def test_expect_error(public_class: type, base_url: str) -> None: - with pytest.raises(ValueError) as e: - public_class(model="model1", base_url=base_url) - assert "Expected format is" in str(e.value) - - -@pytest.mark.parametrize( - "base_url", - [ "http://localhost:8080/v1/embeddings", "http://0.0.0.0:8888/v1/rankings", ], diff --git 
a/libs/ai-endpoints/tests/unit_tests/test_bind_tools.py b/libs/ai-endpoints/tests/unit_tests/test_bind_tools.py index 4876f5f1..a8f044f9 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_bind_tools.py +++ b/libs/ai-endpoints/tests/unit_tests/test_bind_tools.py @@ -1,7 +1,18 @@ +import json import warnings -from typing import Any +from functools import reduce +from operator import add +from typing import Any, List import pytest +import requests_mock +from langchain_core.messages import ( + AIMessage, + AIMessageChunk, + BaseMessage, + HumanMessage, + ToolMessage, +) from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.tools import tool @@ -62,3 +73,190 @@ def test_bind_tool_and_select_negative(tools: Any, choice: str) -> None: with pytest.raises(ValueError) as e: ChatNVIDIA(api_key="BOGUS").bind_tools(tools=tools, tool_choice=choice) assert "not found in the tools list" in str(e.value) + + +@pytest.fixture +def mock_v1_models(requests_mock: requests_mock.Mocker) -> None: + requests_mock.get("https://integrate.api.nvidia.com/v1/models", json={"data": []}) + + +### +# the invoke/stream response_parsing tests are here because of a bug in the +# server response where "" was returned as the arguments for the tool call. +# we're verifying expected results parse correctly. +### + + +@pytest.mark.parametrize( + "arguments", + [ + r"{}", + # r"", + r'{"input": 3}', + ], + ids=[ + "no-args-oai", + # "no-args-nim", + "one-arg-int", + ], +) +def test_invoke_response_parsing( + requests_mock: requests_mock.Mocker, + mock_v1_models: None, + arguments: str, +) -> None: + requests_mock.post( + "https://integrate.api.nvidia.com/v1/chat/completions", + json={ + "id": "chatcmpl-ID", + "object": "chat.completion", + "created": 1234567890, + "model": "BOGUS", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "tool-ID", + "type": "function", + "function": { + "name": "magic", + "arguments": arguments, + }, + } + ], + }, + "logprobs": None, + "finish_reason": "tool_calls", + } + ], + "usage": { + "prompt_tokens": 22, + "completion_tokens": 20, + "total_tokens": 42, + }, + "system_fingerprint": None, + }, + ) + + llm = ChatNVIDIA(api_key="BOGUS") + response = llm.invoke("What's the magic?") + assert isinstance(response, AIMessage) + assert response.tool_calls + assert response.tool_calls[0]["name"] == "magic" + assert response.tool_calls[0]["args"] == json.loads(arguments) + + +@pytest.mark.parametrize( + "argument_chunks", + [ + [ + r'"{}"', + ], + [ + r'""', + ], + [ + r'"{\""', + r'"input\""', + r'"\":"', + r"3", + r'"}"', + ], + [r'"{\"input\": 3}"'], + ], + ids=["no-args-oai", "no-args-nim", "one-arg-int-oai", "one-arg-int-nim"], +) +def test_stream_response_parsing( + requests_mock: requests_mock.Mocker, + mock_v1_models: None, + argument_chunks: List[str], +) -> None: + requests_mock.post( + "https://integrate.api.nvidia.com/v1/chat/completions", + text="\n\n".join( + [ + 'data: {"id":"ID0","object":"chat.completion.chunk","created":1234567890,"model":"BOGUS","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"ID1","type":"function","function":{"name":"magic","arguments":""}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + *[ + f'data: 
{{"id":"ID0","object":"chat.completion.chunk","created":1234567890,"model":"BOGUS","system_fingerprint":null,"choices":[{{"index":0,"delta":{{"tool_calls":[{{"index":0,"function":{{"arguments":{argument}}}}}]}},"logprobs":null,"finish_reason":null}}]}}' # noqa: E501 + for argument in argument_chunks + ], + 'data: {"id":"ID0","object":"chat.completion.chunk","created":1234567890,"model":"BOGUS","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}', # noqa: E501 + "data: [DONE]", + ] + ), + ) + + llm = ChatNVIDIA(api_key="BOGUS") + response = reduce(add, llm.stream("What's the magic?")) + assert isinstance(response, AIMessageChunk) + assert response.tool_calls + assert response.tool_calls[0]["name"] == "magic" + + +def test_regression_parsing_human_ai_tool_invoke( + requests_mock: requests_mock.Mocker, +) -> None: + """ + a bug existed in the inference for sequence - + 0. messages = [human message] + 1. messages.append(llm.invoke(messages)) + 2. llm.invoke(messages) <- raised ValueError: Message ... has no content + """ + requests_mock.post( + "https://integrate.api.nvidia.com/v1/chat/completions", + json={ + "id": "chatcmpl-ID", + "object": "chat.completion", + "created": 1234567890, + "model": "BOGUS", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "tool-ID", + "type": "function", + "function": { + "name": "magic", + "arguments": "{}", + }, + } + ], + }, + "logprobs": None, + "finish_reason": "tool_calls", + } + ], + "usage": { + "prompt_tokens": 22, + "completion_tokens": 20, + "total_tokens": 42, + }, + "system_fingerprint": None, + }, + ) + + llm = ChatNVIDIA(api_key="BOGUS") + messages: List[BaseMessage] = [HumanMessage("THIS IS IGNORED")] + response0 = llm.invoke(messages) + messages.append(response0) + messages.append(ToolMessage(content="SO IS THIS", tool_call_id="BOGUS")) + llm.invoke(messages) + + +def test_regression_ai_null_content( + requests_mock: requests_mock.Mocker, +) -> None: + requests_mock.post("https://integrate.api.nvidia.com/v1/chat/completions", json={}) + llm = ChatNVIDIA(api_key="BOGUS") + assistant = AIMessage(content="SKIPPED") + assistant.content = None # type: ignore + llm.invoke([assistant]) + llm.stream([assistant]) diff --git a/libs/ai-endpoints/tests/unit_tests/test_completions_models.py b/libs/ai-endpoints/tests/unit_tests/test_completions_models.py new file mode 100644 index 00000000..24239bb7 --- /dev/null +++ b/libs/ai-endpoints/tests/unit_tests/test_completions_models.py @@ -0,0 +1,154 @@ +import json +from functools import reduce +from operator import add +from typing import Any, Callable, List + +import pytest +import requests_mock + +from langchain_nvidia_ai_endpoints import NVIDIA + + +def invoke(llm: NVIDIA, prompt: str, **kwargs: Any) -> str: + return llm.invoke(prompt, **kwargs) + + +def stream(llm: NVIDIA, prompt: str, **kwargs: Any) -> str: + return reduce(add, llm.stream(prompt, **kwargs)) + + +mock_response = { + "id": "ID", + "object": "text_completion", + "created": 1234567890, + "model": "BOGUS", + "choices": [ + { + "index": 0, + "text": "COMPLETION", + } + ], + "usage": {"prompt_tokens": 7, "total_tokens": 207, "completion_tokens": 200}, +} + + +@pytest.fixture(scope="function") +def mock_v1_completions_invoke( + requests_mock: requests_mock.Mocker, +) -> requests_mock.Mocker: + requests_mock.post( + "https://integrate.api.nvidia.com/v1/completions", + json=mock_response, + ) + return requests_mock + + 
+@pytest.fixture(scope="function") +def mock_v1_completions_stream( + requests_mock: requests_mock.Mocker, +) -> requests_mock.Mocker: + requests_mock.post( + "https://integrate.api.nvidia.com/v1/completions", + text="\n\n".join( + [ + f"data: {json.dumps(mock_response)}", + "data: [DONE]", + ] + ), + ) + return requests_mock + + +@pytest.mark.parametrize( + "param, value", + [ + ("frequency_penalty", [0.25, 0.5, 0.75]), + ("max_tokens", [2, 32, 512]), + ("presence_penalty", [0.25, 0.5, 0.75]), + ("seed", [1, 1234, 4321]), + ("stop", ["Hello", "There", "World"]), + ("temperature", [0, 0.5, 1]), + ("top_p", [0, 0.5, 1]), + ("best_of", [1, 5, 10]), + ("echo", [True, False, True]), + ("logit_bias", [{"hello": 1.0}, {"there": 1.0}, {"world": 1.0}]), + ("logprobs", [1, 2, 3]), + ("n", [1, 2, 3]), + ("suffix", ["Hello", "There", "World"]), + ("user", ["Bob", "Alice", "Eve"]), + ], +) +@pytest.mark.parametrize( + "func, mock_name", + [(invoke, "mock_v1_completions_invoke"), (stream, "mock_v1_completions_stream")], + ids=["invoke", "stream"], +) +def test_params( + param: str, + value: List[Any], + func: Callable, + mock_name: str, + request: pytest.FixtureRequest, +) -> None: + """ + This tests the following... + - priority order (init -> bind -> infer) + - param passed to init, bind, invoke / stream + ...for each known Completion API param. + """ + + mock = request.getfixturevalue(mock_name) + + init, bind, infer = value + + llm = NVIDIA(api_key="BOGUS", **{param: init}) + func(llm, "IGNORED") + request_payload = mock.last_request.json() + assert param in request_payload + assert request_payload[param] == init + + bound_llm = llm.bind(**{param: bind}) + func(bound_llm, "IGNORED") + request_payload = mock.last_request.json() + assert param in request_payload + assert request_payload[param] == bind + + func(bound_llm, "IGNORED", **{param: infer}) + request_payload = mock.last_request.json() + assert param in request_payload + assert request_payload[param] == infer + + +@pytest.mark.parametrize( + "func, mock_name", + [(invoke, "mock_v1_completions_invoke"), (stream, "mock_v1_completions_stream")], + ids=["invoke", "stream"], +) +def test_params_unknown( + func: Callable, + mock_name: str, + request: pytest.FixtureRequest, +) -> None: + request.getfixturevalue(mock_name) + + with pytest.warns(UserWarning) as record: + llm = NVIDIA(api_key="BOGUS", init_unknown="INIT") + assert len(record) == 1 + assert "Unrecognized, ignored arguments: {'init_unknown'}" in str(record[0].message) + + with pytest.warns(UserWarning) as record: + func(llm, "IGNORED", arg_unknown="ARG") + assert len(record) == 1 + assert "Unrecognized, ignored arguments: {'arg_unknown'}" in str(record[0].message) + + bound_llm = llm.bind(bind_unknown="BIND") + + with pytest.warns(UserWarning) as record: + func(bound_llm, "IGNORED") + assert len(record) == 1 + assert "Unrecognized, ignored arguments: {'bind_unknown'}" in str(record[0].message) + + +def test_identifying_params() -> None: + llm = NVIDIA(api_key="BOGUS") + assert set(llm._identifying_params.keys()) == {"model", "base_url"} diff --git a/libs/ai-endpoints/tests/unit_tests/test_imports.py b/libs/ai-endpoints/tests/unit_tests/test_imports.py index e72c2c6c..200bbea4 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_imports.py +++ b/libs/ai-endpoints/tests/unit_tests/test_imports.py @@ -4,6 +4,7 @@ "ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank", + "NVIDIA", "register_model", "Model", ] diff --git a/libs/ai-endpoints/tests/unit_tests/test_register_model.py 
b/libs/ai-endpoints/tests/unit_tests/test_register_model.py index d42bdee5..482d40dc 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_register_model.py +++ b/libs/ai-endpoints/tests/unit_tests/test_register_model.py @@ -3,6 +3,7 @@ import pytest from langchain_nvidia_ai_endpoints import ( + NVIDIA, ChatNVIDIA, Model, NVIDIAEmbeddings, @@ -16,12 +17,19 @@ [ ("chat", "NVIDIAEmbeddings"), ("chat", "NVIDIARerank"), + ("chat", "NVIDIA"), ("vlm", "NVIDIAEmbeddings"), ("vlm", "NVIDIARerank"), + ("vlm", "NVIDIA"), ("embeddings", "ChatNVIDIA"), ("embeddings", "NVIDIARerank"), + ("embeddings", "NVIDIA"), ("ranking", "ChatNVIDIA"), ("ranking", "NVIDIAEmbeddings"), + ("ranking", "NVIDIA"), + ("completions", "ChatNVIDIA"), + ("completions", "NVIDIAEmbeddings"), + ("completions", "NVIDIARerank"), ], ) def test_mismatched_type_client(model_type: str, client: str) -> None: @@ -53,6 +61,7 @@ def test_registered_model_usable(public_class: type) -> None: "ChatNVIDIA": "chat", "NVIDIAEmbeddings": "embedding", "NVIDIARerank": "ranking", + "NVIDIA": "completions", }[public_class.__name__] with warnings.catch_warnings(): warnings.simplefilter("error") @@ -112,21 +121,38 @@ def test_registered_model_is_available() -> None: endpoint="BOGUS", ) ) + register_model( + Model( + id="test/completions", + model_type="completions", + client="NVIDIA", + endpoint="BOGUS", + ) + ) chat_models = ChatNVIDIA.get_available_models(api_key="BOGUS") embedding_models = NVIDIAEmbeddings.get_available_models(api_key="BOGUS") ranking_models = NVIDIARerank.get_available_models(api_key="BOGUS") + completions_models = NVIDIA.get_available_models(api_key="BOGUS") assert "test/chat" in [model.id for model in chat_models] assert "test/chat" not in [model.id for model in embedding_models] assert "test/chat" not in [model.id for model in ranking_models] + assert "test/chat" not in [model.id for model in completions_models] assert "test/embedding" not in [model.id for model in chat_models] assert "test/embedding" in [model.id for model in embedding_models] assert "test/embedding" not in [model.id for model in ranking_models] + assert "test/embedding" not in [model.id for model in completions_models] assert "test/rerank" not in [model.id for model in chat_models] assert "test/rerank" not in [model.id for model in embedding_models] assert "test/rerank" in [model.id for model in ranking_models] + assert "test/rerank" not in [model.id for model in completions_models] + + assert "test/completions" not in [model.id for model in chat_models] + assert "test/completions" not in [model.id for model in embedding_models] + assert "test/completions" not in [model.id for model in ranking_models] + assert "test/completions" in [model.id for model in completions_models] def test_registered_model_without_client_is_not_listed(public_class: type) -> None: diff --git a/libs/ai-endpoints/tests/unit_tests/test_structured_output.py b/libs/ai-endpoints/tests/unit_tests/test_structured_output.py index 0c8aa626..053b10b3 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_structured_output.py +++ b/libs/ai-endpoints/tests/unit_tests/test_structured_output.py @@ -1,14 +1,18 @@ import enum import warnings -from typing import Callable, List, Optional +from typing import Callable, List, Optional, Type import pytest -from langchain_core.pydantic_v1 import BaseModel, Field +import requests_mock +from langchain_core.pydantic_v1 import BaseModel as lc_pydanticV1BaseModel +from langchain_core.pydantic_v1 import Field +from pydantic import BaseModel as pydanticV2BaseModel # 
ignore: check_pydantic +from pydantic.v1 import BaseModel as pydanticV1BaseModel # ignore: check_pydantic from langchain_nvidia_ai_endpoints import ChatNVIDIA -class Joke(BaseModel): +class Joke(lc_pydanticV1BaseModel): """Joke to tell user.""" setup: str = Field(description="The setup of the joke") @@ -136,3 +140,52 @@ def test_stream_enum_incomplete( for chunk in structured_llm.stream("This is ignored."): response = chunk assert response is None + + +@pytest.mark.parametrize( + "pydanticBaseModel", + [ + lc_pydanticV1BaseModel, + pydanticV1BaseModel, + pydanticV2BaseModel, + ], + ids=["lc-pydantic-v1", "pydantic-v1", "pydantic-v2"], +) +def test_pydantic_version( + requests_mock: requests_mock.Mocker, + pydanticBaseModel: Type, +) -> None: + requests_mock.post( + "https://integrate.api.nvidia.com/v1/chat/completions", + json={ + "id": "chatcmpl-ID", + "object": "chat.completion", + "created": 1234567890, + "model": "BOGUS", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": '{"name": "Sam Doe"}', + }, + "logprobs": None, + "finish_reason": "stop", + } + ], + "usage": { + "prompt_tokens": 22, + "completion_tokens": 20, + "total_tokens": 42, + }, + "system_fingerprint": None, + }, + ) + + class Person(pydanticBaseModel): # type: ignore + name: str + + llm = ChatNVIDIA(api_key="BOGUS").with_structured_output(Person) + response = llm.invoke("This is ignored.") + assert isinstance(response, Person) + assert response.name == "Sam Doe"
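+
+
+# A minimal usage sketch of the behavior exercised above (illustrative only:
+# the model id is an assumption and a real API key is required):
+#
+#   from pydantic import BaseModel
+#
+#   from langchain_nvidia_ai_endpoints import ChatNVIDIA
+#
+#   class Person(BaseModel):
+#       name: str
+#
+#   llm = ChatNVIDIA(model="meta/llama-3.1-8b-instruct")
+#   person = llm.with_structured_output(Person).invoke("My name is Sam Doe.")
+#   assert person.name == "Sam Doe"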