From e8833eb4b92d84053e6bcdf475195c9b955c8a37 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Mon, 24 Jun 2024 11:33:28 -0500 Subject: [PATCH 01/60] add register_model for users to add their own custom models with endpoints from langchain_nvidia_ai_endpoints import register_model, Model register_model(Model(id="my-custom-model-name", model_type="chat", client="ChatNVIDIA", endpoint="http://host:port/path-to-my-model")) llm = ChatNVIDIA(model="my-custom-model-name") --- .../langchain_nvidia_ai_endpoints/__init__.py | 3 +- .../langchain_nvidia_ai_endpoints/_statics.py | 100 +++++++++++++---- .../tests/integration_tests/conftest.py | 25 ++++- .../tests/integration_tests/test_api_key.py | 20 +--- .../tests/integration_tests/test_base_url.py | 5 +- .../integration_tests/test_register_model.py | 103 ++++++++++++++++++ .../ai-endpoints/tests/unit_tests/conftest.py | 9 +- .../tests/unit_tests/test_imports.py | 8 +- .../tests/unit_tests/test_register_model.py | 79 ++++++++++++++ 9 files changed, 300 insertions(+), 52 deletions(-) create mode 100644 libs/ai-endpoints/tests/integration_tests/test_register_model.py create mode 100644 libs/ai-endpoints/tests/unit_tests/test_register_model.py diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/__init__.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/__init__.py index aefdf7bd..d5796e3c 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/__init__.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/__init__.py @@ -39,8 +39,9 @@ ``` """ # noqa: E501 +from langchain_nvidia_ai_endpoints._statics import Model, register_model from langchain_nvidia_ai_endpoints.chat_models import ChatNVIDIA from langchain_nvidia_ai_endpoints.embeddings import NVIDIAEmbeddings from langchain_nvidia_ai_endpoints.reranking import NVIDIARerank -__all__ = ["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank"] +__all__ = ["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank", "register_model", "Model"] diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index 610c6d12..17746c27 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -1,23 +1,28 @@ import warnings -from typing import Optional +from typing import Literal, Optional -from langchain_core.pydantic_v1 import BaseModel +from langchain_core.pydantic_v1 import BaseModel, validator -# -# Model information -# - id: unique identifier for the model, passed as model parameter for requests -# - model_type: API type (chat, vlm, embedding, ranking, completion) -# - client: client name -# - endpoint: custom endpoint for the model -# - aliases: list of aliases for the model -# -# All aliases are deprecated and will trigger a warning when used. -# class Model(BaseModel): + """ + Model information. + + id: unique identifier for the model, passed as model parameter for requests + model_type: API type (chat, vlm, embedding, ranking, completion) + client: client name, e.g. ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank + endpoint: custom endpoint for the model + aliases: list of aliases for the model + + All aliases are deprecated and will trigger a warning when used. + """ + id: str - model_type: Optional[str] = None - client: Optional[str] = None + # why do we have a model_type? because ChatNVIDIA can speak both chat and vlm. 
+ model_type: Optional[ + Literal["chat", "vlm", "embedding", "ranking", "completion"] + ] = None + client: Optional[Literal["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank"]] = None endpoint: Optional[str] = None aliases: Optional[list] = None base_model: Optional[str] = None @@ -25,6 +30,21 @@ class Model(BaseModel): def __hash__(self) -> int: return hash(self.id) + @validator("client", always=True) + def validate_client(cls, client: str, values: dict) -> str: + if client: + supported = { + "ChatNVIDIA": ("chat", "vlm"), + "NVIDIAEmbeddings": ("embedding",), + "NVIDIARerank": ("ranking",), + } + model_type = values.get("model_type") + if model_type not in supported[client]: + raise ValueError( + f"Model type '{model_type}' not supported by client '{client}'" + ) + return client + CHAT_MODEL_TABLE = { "meta/codellama-70b": Model( @@ -361,14 +381,14 @@ def __hash__(self) -> int: ), } -COMPLETION_MODEL_TABLE = { - "mistralai/mixtral-8x22b-v0.1": Model( - id="mistralai/mixtral-8x22b-v0.1", - model_type="completion", - client="NVIDIA", - aliases=["ai-mixtral-8x22b"], - ), -} +# COMPLETION_MODEL_TABLE = { +# "mistralai/mixtral-8x22b-v0.1": Model( +# id="mistralai/mixtral-8x22b-v0.1", +# model_type="completion", +# client="NVIDIA", +# aliases=["ai-mixtral-8x22b"], +# ), +# } MODEL_TABLE = { **CHAT_MODEL_TABLE, @@ -379,6 +399,42 @@ def __hash__(self) -> int: } +def register_model(model: Model) -> None: + """ + Register a model as a known model. This must be done at the + beginning of a program, at least before the model is used or + available models are listed. + + For instance - + ``` + from langchain_nvidia_ai_endpoints import register_model, Model + register_model(Model(id="my-custom-model-name", + model_type="chat", + client="ChatNVIDIA", + endpoint="http://host:port/path-to-my-model")) + llm = ChatNVIDIA(model="my-custom-model-name") + ``` + + Be sure that the `id` matches the model parameter the endpoint expects. + + Supported model types are: + - chat models must accept and produce chat completion payloads + Supported model clients are: + - ChatNVIDIA for chat models + + Endpoint is required. + """ + if model.id in MODEL_TABLE: + warnings.warn( + f"Model {model.id} is already registered. " + f"Overriding {MODEL_TABLE[model.id]}", + UserWarning, + ) + if not model.endpoint: + raise ValueError(f"Model {model.id} does not have an endpoint.") + MODEL_TABLE[model.id] = model + + def lookup_model(name: str) -> Optional[Model]: """ Lookup a model by name, using only the table of known models. 
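To see the pieces above end to end, a minimal usage sketch — the model id and endpoint URL are placeholders (taken from the commit message) for a locally hosted, chat-completion-compatible service, and registration must happen before the model is used or listed:

```python
from langchain_nvidia_ai_endpoints import ChatNVIDIA, Model, register_model

# register before use; `id` must match the model name the endpoint expects
register_model(
    Model(
        id="my-custom-model-name",                     # placeholder id
        model_type="chat",                             # ChatNVIDIA handles chat and vlm
        client="ChatNVIDIA",
        endpoint="http://host:port/path-to-my-model",  # placeholder endpoint
    )
)

# the registered model can now be used like any known model
llm = ChatNVIDIA(model="my-custom-model-name")
print(llm.invoke("Hello").content)
```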
diff --git a/libs/ai-endpoints/tests/integration_tests/conftest.py b/libs/ai-endpoints/tests/integration_tests/conftest.py index 9fc7b77a..30388a96 100644 --- a/libs/ai-endpoints/tests/integration_tests/conftest.py +++ b/libs/ai-endpoints/tests/integration_tests/conftest.py @@ -1,9 +1,8 @@ -import inspect -from typing import List +from typing import Any, List import pytest +from langchain_core.documents import Document -import langchain_nvidia_ai_endpoints from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank from langchain_nvidia_ai_endpoints._statics import MODEL_TABLE, Model @@ -129,9 +128,25 @@ def mode(request: pytest.FixtureRequest) -> dict: @pytest.fixture( params=[ - member[1] - for member in inspect.getmembers(langchain_nvidia_ai_endpoints, inspect.isclass) + ChatNVIDIA, + NVIDIAEmbeddings, + NVIDIARerank, ] ) def public_class(request: pytest.FixtureRequest) -> type: return request.param + + +@pytest.fixture +def contact_service() -> Any: + def _contact_service(instance: Any) -> None: + if isinstance(instance, ChatNVIDIA): + instance.invoke("Hello") + elif isinstance(instance, NVIDIAEmbeddings): + instance.embed_documents(["Hello"]) + elif isinstance(instance, NVIDIARerank): + instance.compress_documents( + documents=[Document(page_content="World")], query="Hello" + ) + + return _contact_service diff --git a/libs/ai-endpoints/tests/integration_tests/test_api_key.py b/libs/ai-endpoints/tests/integration_tests/test_api_key.py index b78ad1c3..07906f5c 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_api_key.py +++ b/libs/ai-endpoints/tests/integration_tests/test_api_key.py @@ -2,26 +2,14 @@ from typing import Any import pytest -from langchain_core.documents import Document from langchain_core.messages import HumanMessage -from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank +from langchain_nvidia_ai_endpoints import ChatNVIDIA from ..unit_tests.test_api_key import no_env_var -def contact_service(instance: Any) -> None: - if isinstance(instance, ChatNVIDIA): - instance.invoke("Hello") - elif isinstance(instance, NVIDIAEmbeddings): - instance.embed_documents(["Hello"]) - elif isinstance(instance, NVIDIARerank): - instance.compress_documents( - documents=[Document(page_content="World")], query="Hello" - ) - - -def test_missing_api_key_error(public_class: type) -> None: +def test_missing_api_key_error(public_class: type, contact_service: Any) -> None: with no_env_var("NVIDIA_API_KEY"): with pytest.warns(UserWarning): client = public_class() @@ -33,7 +21,7 @@ def test_missing_api_key_error(public_class: type) -> None: assert "API key" in message -def test_bogus_api_key_error(public_class: type) -> None: +def test_bogus_api_key_error(public_class: type, contact_service: Any) -> None: with no_env_var("NVIDIA_API_KEY"): client = public_class(nvidia_api_key="BOGUS") with pytest.raises(Exception) as exc_info: @@ -45,7 +33,7 @@ def test_bogus_api_key_error(public_class: type) -> None: @pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"]) -def test_api_key(public_class: type, param: str) -> None: +def test_api_key(public_class: type, param: str, contact_service: Any) -> None: api_key = os.environ.get("NVIDIA_API_KEY") with no_env_var("NVIDIA_API_KEY"): client = public_class(**{param: api_key}) diff --git a/libs/ai-endpoints/tests/integration_tests/test_base_url.py b/libs/ai-endpoints/tests/integration_tests/test_base_url.py index ab49accc..06389a5b 100644 --- 
a/libs/ai-endpoints/tests/integration_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/integration_tests/test_base_url.py @@ -1,8 +1,8 @@ +from typing import Any + import pytest from requests.exceptions import ConnectionError -from .test_api_key import contact_service - @pytest.mark.parametrize( "base_url", @@ -13,6 +13,7 @@ def test_endpoint_unavailable( public_class: type, base_url: str, + contact_service: Any, ) -> None: # we test this with a bogus model because users should supply # a model when using their own base_url diff --git a/libs/ai-endpoints/tests/integration_tests/test_register_model.py b/libs/ai-endpoints/tests/integration_tests/test_register_model.py new file mode 100644 index 00000000..6a08c2fe --- /dev/null +++ b/libs/ai-endpoints/tests/integration_tests/test_register_model.py @@ -0,0 +1,103 @@ +from typing import Any + +import pytest + +from langchain_nvidia_ai_endpoints import ( + ChatNVIDIA, + Model, + NVIDIAEmbeddings, + NVIDIARerank, + register_model, +) + + +# +# if this test is failing it may be because the function uuids have changed. +# you will have to find the new ones from https://api.nvcf.nvidia.com/v2/nvcf/functions +# +@pytest.mark.parametrize( + "client, id, endpoint", + [ + ( + ChatNVIDIA, + "meta/llama3-8b-instruct", + "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/a5a3ad64-ec2c-4bfc-8ef7-5636f26630fe", + ), + ( + NVIDIAEmbeddings, + "NV-Embed-QA", + "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/09c64e32-2b65-4892-a285-2f585408d118", + ), + ( + NVIDIARerank, + "nv-rerank-qa-mistral-4b:1", + "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/0bf77f50-5c35-4488-8e7a-f49bb1974af6", + ), + ], +) +def test_registered_model_functional( + client: type, id: str, endpoint: str, contact_service: Any +) -> None: + model = Model(id=id, endpoint=endpoint) + with pytest.warns( + UserWarning + ) as record: # warns because we're overriding known models + register_model(model) + contact_service(client(model=id)) + assert len(record) == 1 + assert isinstance(record[0].message, UserWarning) + assert "already registered" in str(record[0].message) + assert "Overriding" in str(record[0].message) + + +def test_registered_model_is_available() -> None: + register_model( + Model( + id="test/chat", + model_type="chat", + client="ChatNVIDIA", + endpoint="BOGUS", + ) + ) + register_model( + Model( + id="test/embedding", + model_type="embedding", + client="NVIDIAEmbeddings", + endpoint="BOGUS", + ) + ) + register_model( + Model( + id="test/rerank", + model_type="ranking", + client="NVIDIARerank", + endpoint="BOGUS", + ) + ) + chat_models = ChatNVIDIA.get_available_models() + embedding_models = NVIDIAEmbeddings.get_available_models() + ranking_models = NVIDIARerank.get_available_models() + + assert "test/chat" in [model.id for model in chat_models] + assert "test/chat" not in [model.id for model in embedding_models] + assert "test/chat" not in [model.id for model in ranking_models] + + assert "test/embedding" not in [model.id for model in chat_models] + assert "test/embedding" in [model.id for model in embedding_models] + assert "test/embedding" not in [model.id for model in ranking_models] + + assert "test/rerank" not in [model.id for model in chat_models] + assert "test/rerank" not in [model.id for model in embedding_models] + assert "test/rerank" in [model.id for model in ranking_models] + + +def test_registered_model_without_client_is_not_listed() -> None: + register_model(Model(id="test/no_client", endpoint="BOGUS")) + chat_models = 
ChatNVIDIA.get_available_models() + embedding_models = NVIDIAEmbeddings.get_available_models() + ranking_models = NVIDIARerank.get_available_models() + + assert "test/no_client" not in [model.id for model in chat_models] + assert "test/no_client" not in [model.id for model in embedding_models] + assert "test/no_client" not in [model.id for model in ranking_models] diff --git a/libs/ai-endpoints/tests/unit_tests/conftest.py b/libs/ai-endpoints/tests/unit_tests/conftest.py index f1258ad7..9abdd6b0 100644 --- a/libs/ai-endpoints/tests/unit_tests/conftest.py +++ b/libs/ai-endpoints/tests/unit_tests/conftest.py @@ -1,14 +1,13 @@ -import inspect - import pytest -import langchain_nvidia_ai_endpoints +from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank @pytest.fixture( params=[ - member[1] - for member in inspect.getmembers(langchain_nvidia_ai_endpoints, inspect.isclass) + ChatNVIDIA, + NVIDIAEmbeddings, + NVIDIARerank, ] ) def public_class(request: pytest.FixtureRequest) -> type: diff --git a/libs/ai-endpoints/tests/unit_tests/test_imports.py b/libs/ai-endpoints/tests/unit_tests/test_imports.py index 97e2c7c4..e72c2c6c 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_imports.py +++ b/libs/ai-endpoints/tests/unit_tests/test_imports.py @@ -1,6 +1,12 @@ from langchain_nvidia_ai_endpoints import __all__ -EXPECTED_ALL = ["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank"] +EXPECTED_ALL = [ + "ChatNVIDIA", + "NVIDIAEmbeddings", + "NVIDIARerank", + "register_model", + "Model", +] def test_all_imports() -> None: diff --git a/libs/ai-endpoints/tests/unit_tests/test_register_model.py b/libs/ai-endpoints/tests/unit_tests/test_register_model.py new file mode 100644 index 00000000..4c7c0d38 --- /dev/null +++ b/libs/ai-endpoints/tests/unit_tests/test_register_model.py @@ -0,0 +1,79 @@ +import warnings + +import pytest + +from langchain_nvidia_ai_endpoints import Model, register_model + + +@pytest.mark.parametrize( + "model_type, client", + [ + ("chat", "NVIDIAEmbeddings"), + ("chat", "NVIDIARerank"), + ("vlm", "NVIDIAEmbeddings"), + ("vlm", "NVIDIARerank"), + ("embeddings", "ChatNVIDIA"), + ("embeddings", "NVIDIARerank"), + ("ranking", "ChatNVIDIA"), + ("ranking", "NVIDIAEmbeddings"), + ], +) +def test_mismatched_type_client(model_type: str, client: str) -> None: + with pytest.raises(ValueError) as e: + register_model( + Model( + id=f"{model_type}-{client}", + model_type=model_type, + client=client, + endpoint="BOGUS", + ) + ) + assert "not supported" in str(e.value) + + +def test_duplicate_model_warns() -> None: + model = Model(id="registered-model", endpoint="BOGUS") + register_model(model) + with pytest.warns(UserWarning) as record: + register_model(model) + assert len(record) == 1 + assert isinstance(record[0].message, UserWarning) + assert "already registered" in str(record[0].message) + assert "Overriding" in str(record[0].message) + + +def test_registered_model_usable(public_class: type) -> None: + model_type = { + "ChatNVIDIA": "chat", + "NVIDIAEmbeddings": "embedding", + "NVIDIARerank": "ranking", + }[public_class.__name__] + with warnings.catch_warnings(): + warnings.simplefilter("error") + id = f"registered-model-{model_type}" + model = Model( + id=id, + model_type=model_type, + client=public_class.__name__, + endpoint="BOGUS", + ) + register_model(model) + x = public_class(model=id, nvidia_api_key="a-bogus-key") + assert x.model == id + + +def test_registered_model_without_client_usable(public_class: type) -> None: + id = f"test/no-client-{public_class.__name__}" 
+ model = Model(id=id, endpoint="BOGUS") + register_model(model) + # todo: this should warn that the model is known but type is not + # and therefore inference may not work + public_class(model=id, nvidia_api_key="a-bogus-key") + + +def test_missing_endpoint() -> None: + with pytest.raises(ValueError) as e: + register_model( + Model(id="missing-endpoint", model_type="chat", client="ChatNVIDIA") + ) + assert "does not have an endpoint" in str(e.value) From 029f030ddd5db08c82e8b5df171c820fc860bdb2 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Thu, 4 Jul 2024 07:16:07 -0400 Subject: [PATCH 02/60] add clarifying documentation about when to use register_model vs base_url --- .../langchain_nvidia_ai_endpoints/_statics.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index 17746c27..e3525eda 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -418,11 +418,17 @@ def register_model(model: Model) -> None: Be sure that the `id` matches the model parameter the endpoint expects. Supported model types are: - - chat models must accept and produce chat completion payloads + - chat models, which must accept and produce chat completion payloads Supported model clients are: - - ChatNVIDIA for chat models + - ChatNVIDIA, for chat models Endpoint is required. + + Use this instead of passing `base_url` to a client constructor + when the model's endpoint supports inference and not /v1/models + listing. Use `base_url` when the model's endpoint supports + /v1/models listing and inference on a known path, + e.g. /v1/chat/completions. """ if model.id in MODEL_TABLE: warnings.warn( From 2b0dc7f70dcda2dab72f6cab4aa4bdaa76fab15e Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Thu, 4 Jul 2024 07:25:22 -0400 Subject: [PATCH 03/60] add model_type=qa as supported by ChatNVIDIA --- libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index e3525eda..bc9900f7 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -20,7 +20,7 @@ class Model(BaseModel): id: str # why do we have a model_type? because ChatNVIDIA can speak both chat and vlm. 
model_type: Optional[ - Literal["chat", "vlm", "embedding", "ranking", "completion"] + Literal["chat", "vlm", "embedding", "ranking", "completion", "qa"] ] = None client: Optional[Literal["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank"]] = None endpoint: Optional[str] = None @@ -34,7 +34,7 @@ def __hash__(self) -> int: def validate_client(cls, client: str, values: dict) -> str: if client: supported = { - "ChatNVIDIA": ("chat", "vlm"), + "ChatNVIDIA": ("chat", "vlm", "qa"), "NVIDIAEmbeddings": ("embedding",), "NVIDIARerank": ("ranking",), } From dfa53adaece2737f930e51e620d962fda521620d Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Mon, 1 Jul 2024 21:28:39 -0500 Subject: [PATCH 04/60] remove broken stop word detection, rely on server side implementation --- .../langchain_nvidia_ai_endpoints/_common.py | 13 --- .../integration_tests/test_chat_models.py | 79 ++++++++++++++----- 2 files changed, 58 insertions(+), 34 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index 2dbdc7a2..aed48dd0 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -362,7 +362,6 @@ def postprocess( """ msg_list = self._process_response(response) msg, is_stopped = self._aggregate_msgs(msg_list) - msg, is_stopped = self._early_stop_msg(msg, is_stopped, stop=stop) return msg, is_stopped def _aggregate_msgs(self, msg_list: Sequence[dict]) -> Tuple[dict, bool]: @@ -396,18 +395,6 @@ def _aggregate_msgs(self, msg_list: Sequence[dict]) -> Tuple[dict, bool]: content_holder.update(token_usage=usage_holder) #### return content_holder, is_stopped - def _early_stop_msg( - self, msg: dict, is_stopped: bool, stop: Optional[Sequence[str]] = None - ) -> Tuple[dict, bool]: - """Try to early-terminate streaming or generation by iterating over stop list""" - content = msg.get("content", "") - if content and stop: - for stop_str in stop: - if stop_str and stop_str in content: - msg["content"] = content[: content.find(stop_str) + 1] - is_stopped = True - return msg, is_stopped - #################################################################################### ## Streaming interface to allow you to iterate through progressive generations diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py index 7f745545..4940c5b5 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py +++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py @@ -202,27 +202,6 @@ def test_ai_endpoints_invoke(chat_model: str, mode: dict) -> None: assert isinstance(result.content, str) -# todo: test that stop is cased and works with multiple words -@pytest.mark.xfail(reason="stop is not consistently implemented") -def test_invoke_stop(chat_model: str, mode: dict) -> None: - """Test invoke's stop words.""" - llm = ChatNVIDIA(model=chat_model, **mode, stop=["10"]) - result = llm.invoke("please count to 20 by 1s, e.g. 1 2 3 4") - assert isinstance(result.content, str) - assert "10" not in result.content - - -@pytest.mark.xfail(reason="stop is not consistently implemented") -def test_stream_stop(chat_model: str, mode: dict) -> None: - """Test stream's stop words.""" - llm = ChatNVIDIA(model=chat_model, **mode, stop=["10"]) - result = "" - for token in llm.stream("please count to 20 by 1s, e.g. 
1 2 3 4"): - assert isinstance(token.content, str) - result += f"{token.content}|" - assert "10" not in result - - # todo: max_tokens test for ainvoke, batch, abatch, stream, astream @@ -383,3 +362,61 @@ def test_serialize_chatnvidia(chat_model: str, mode: dict) -> None: model = loads(dumps(llm), valid_namespaces=["langchain_nvidia_ai_endpoints"]) result = model.invoke("What is there if there is nothing?") assert isinstance(result.content, str) + + +# todo: test that stop is cased and works with multiple words + + +@pytest.mark.parametrize( + "prop", + [ + False, + True, + ], + ids=["no_prop", "prop"], +) +@pytest.mark.parametrize( + "param", + [ + False, + True, + ], + ids=["no_param", "param"], +) +@pytest.mark.parametrize( + "targets", + [["5"], ["6", "100"], ["100", "7"]], + ids=["5", "6,100", "100,7"], +) +@pytest.mark.parametrize( + "func", + [ + "invoke", + "stream", + ], +) +@pytest.mark.xfail(reason="stop is not consistently implemented") +def test_stop( + chat_model: str, mode: dict, func: str, prop: bool, param: bool, targets: List[str] +) -> None: + if not prop and not param: + pytest.skip("Skipping test, no stop parameter") + llm = ChatNVIDIA( + model=chat_model, stop=targets if prop else None, max_tokens=512, **mode + ) + result = "" + if func == "invoke": + response = llm.invoke( + "please count to 20 by 1s, e.g. 1 2 3 4", + stop=targets if param else None, + ) # invoke returns Union[str, List[Union[str, Dict[Any, Any]]]] + assert isinstance(response.content, str) + result = response.content + elif func == "stream": + for token in llm.stream( + "please count to 20 by 1s, e.g. 1 2 3 4", + stop=targets if param else None, + ): + assert isinstance(token.content, str) + result += f"{token.content}|" + assert all(target not in result for target in targets) From f0c0b48ffd9ea5cdb5d2137d91f3cccd4b2b8e52 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 5 Jul 2024 11:17:51 -0400 Subject: [PATCH 05/60] remove extraneous stop params --- .../langchain_nvidia_ai_endpoints/_common.py | 10 ++++------ .../langchain_nvidia_ai_endpoints/chat_models.py | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index aed48dd0..dd65ad6f 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -355,14 +355,13 @@ def get_req( return self._wait(response, session) def postprocess( - self, response: Union[str, Response], stop: Optional[Sequence[str]] = None + self, + response: Union[str, Response], ) -> Tuple[dict, bool]: """Parses a response from the AI Foundation Model Function API. Strongly assumes that the API will return a single response. 
""" - msg_list = self._process_response(response) - msg, is_stopped = self._aggregate_msgs(msg_list) - return msg, is_stopped + return self._aggregate_msgs(self._process_response(response)) def _aggregate_msgs(self, msg_list: Sequence[dict]) -> Tuple[dict, bool]: """Dig out relevant details of aggregated message""" @@ -402,7 +401,6 @@ def get_req_stream( self, payload: dict = {}, invoke_url: Optional[str] = None, - stop: Optional[Sequence[str]] = None, ) -> Iterator: invoke_url = self._get_invoke_url(invoke_url) if payload.get("stream", True) is False: @@ -425,7 +423,7 @@ def out_gen() -> Generator[dict, Any, Any]: for line in response.iter_lines(): if line and line.strip() != b"data: [DONE]": line = line.decode("utf-8") - msg, final_line = call.postprocess(line, stop=stop) + msg, final_line = call.postprocess(line) yield msg if final_line: break diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 1808bddc..7cce280d 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -212,7 +212,7 @@ def _generate( inputs = self._custom_preprocess(messages) payload = self._get_payload(inputs=inputs, stop=stop, stream=False, **kwargs) response = self._client.client.get_req(payload=payload) - responses, _ = self._client.client.postprocess(response, stop=stop) + responses, _ = self._client.client.postprocess(response) self._set_callback_out(responses, run_manager) message = ChatMessage(**self._custom_postprocess(responses)) generation = ChatGeneration(message=message) From 753dfcd3af738e2ae947a2084f18cf2fe0471e6e Mon Sep 17 00:00:00 2001 From: Erick Friis Date: Mon, 8 Jul 2024 13:37:16 -0700 Subject: [PATCH 06/60] release permissions --- .github/workflows/_release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml index 33fee9f0..4cc28648 100644 --- a/.github/workflows/_release.yml +++ b/.github/workflows/_release.yml @@ -72,6 +72,7 @@ jobs: - build uses: ./.github/workflows/_test_release.yml + permissions: write-all with: working-directory: ${{ inputs.working-directory }} secrets: inherit From 7b588ec8365bd8a01241129856ce44338e126fd5 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Tue, 9 Jul 2024 07:30:00 -0400 Subject: [PATCH 07/60] bump version 0.1.3 --- libs/ai-endpoints/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml index ffa4bf03..d3bfb2a6 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-nvidia-ai-endpoints" -version = "0.1.2" +version = "0.1.3" description = "An integration package connecting NVIDIA AI Endpoints and LangChain" authors = [] readme = "README.md" From a579a1e66d0457498a84ffb1fa99623f4540d77f Mon Sep 17 00:00:00 2001 From: raspawar Date: Tue, 9 Jul 2024 19:12:41 +0530 Subject: [PATCH 08/60] initial code with embedding testing --- .../langchain_nvidia_ai_endpoints/_common.py | 23 ++++++++++++++++--- .../chat_models.py | 1 + .../embeddings.py | 1 + .../reranking.py | 1 + 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index dd65ad6f..97fdcf1c 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ 
b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -18,7 +18,7 @@ Tuple, Union, ) -from urllib.parse import urlparse +from urllib.parse import urljoin, urlparse import aiohttp import requests @@ -41,6 +41,17 @@ def default_payload_fn(payload: dict) -> dict: return payload +def check_endpoint_health(base_url: str) -> None: + try: + response = requests.get(urljoin(base_url, "v1/health/live")) + response.raise_for_status() # Raise an HTTPError for bad responses + except requests.exceptions.RequestException as e: + raise ValueError( + f"Unable to reach endpoint {base_url}. Error: {e}. \ + \n Make sure the NIM is running and healthy." + ) + + class NVEModel(BaseModel): """ @@ -133,9 +144,15 @@ def _validate_base_url(cls, v: str) -> str: # Ensure scheme and netloc (domain name) are present if not (result.scheme and result.netloc): raise ValueError( - f"Invalid base_url, minimally needs scheme and netloc: {v}" + f"Invalid base_url, Expected format is 'http://host:port'.: {v}" + ) + if result.path and result.path != "/": + raise ValueError( + f"Endpoint {v} ends with {result.path.rsplit('/', 1)[-1]}. \ + \n Expected format is 'http://host:port'" ) - return v + check_endpoint_health(v) + return urljoin(v, "v1") @root_validator(pre=True) def _validate_model(cls, values: Dict[str, Any]) -> Dict[str, Any]: diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 7cce280d..ad87fffa 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -158,6 +158,7 @@ def __init__(self, **kwargs: Any): nvidia_api_key (str): The API key to use for connecting to the hosted NIM. api_key (str): Alternative to nvidia_api_key. base_url (str): The base URL of the NIM to connect to. + Format for base URL is http://host:port temperature (float): Sampling temperature in [0, 1]. max_tokens (int): Maximum number of tokens to generate. top_p (float): Top-p for distribution sampling. diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/embeddings.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/embeddings.py index a151c5dd..46f8a440 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/embeddings.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/embeddings.py @@ -60,6 +60,7 @@ def __init__(self, **kwargs: Any): nvidia_api_key (str): The API key to use for connecting to the hosted NIM. api_key (str): Alternative to nvidia_api_key. base_url (str): The base URL of the NIM to connect to. + Format for base URL is http://host:port trucate (str): "NONE", "START", "END", truncate input text if it exceeds the model's context length. Default is "NONE", which raises an error if an input is too long. diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py index 17e5dd01..32d0e5a9 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py @@ -54,6 +54,7 @@ def __init__(self, **kwargs: Any): nvidia_api_key (str): The API key to use for connecting to the hosted NIM. api_key (str): Alternative to nvidia_api_key. base_url (str): The base URL of the NIM to connect to. 
+ Format for base URL is http://host:port API Key: - The recommended way to provide the API key is through the `NVIDIA_API_KEY` From 1bd65d4554dc6b790e706c8108ee450d3842c4de Mon Sep 17 00:00:00 2001 From: raspawar Date: Tue, 9 Jul 2024 19:22:25 +0530 Subject: [PATCH 09/60] changes to include /v1 in route and health check for nim --- .../langchain_nvidia_ai_endpoints/_common.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index 97fdcf1c..09a9004a 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -146,12 +146,11 @@ def _validate_base_url(cls, v: str) -> str: raise ValueError( f"Invalid base_url, Expected format is 'http://host:port'.: {v}" ) - if result.path and result.path != "/": + if result.path and result.path!="/v1" and result.path != "/": raise ValueError( f"Endpoint {v} ends with {result.path.rsplit('/', 1)[-1]}. \ \n Expected format is 'http://host:port'" ) - check_endpoint_health(v) return urljoin(v, "v1") @root_validator(pre=True) @@ -471,7 +470,11 @@ def _preprocess_args(cls, values: Any) -> Any: "integrate.api.nvidia.com", "ai.api.nvidia.com", ] - + + # check health for local NIM mode + if not values["is_hosted"]: + check_endpoint_health(values["base_url"]) + # set default model for hosted endpoint if values["is_hosted"] and not values["model"]: values["model"] = values["default_model"] From df7d25173b6257ecb57d4cbd52ecd682efebdf42 Mon Sep 17 00:00:00 2001 From: raspawar Date: Tue, 9 Jul 2024 19:36:11 +0530 Subject: [PATCH 10/60] test cases update for embedding changes --- .../langchain_nvidia_ai_endpoints/_common.py | 6 +++--- .../ai-endpoints/tests/unit_tests/test_api_key.py | 10 +++++++++- .../tests/unit_tests/test_base_url.py | 15 ++++++++++++--- libs/ai-endpoints/tests/unit_tests/test_model.py | 8 ++++++++ 4 files changed, 32 insertions(+), 7 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index 09a9004a..1fe8a783 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -146,7 +146,7 @@ def _validate_base_url(cls, v: str) -> str: raise ValueError( f"Invalid base_url, Expected format is 'http://host:port'.: {v}" ) - if result.path and result.path!="/v1" and result.path != "/": + if result.path and result.path != "/v1" and result.path != "/": raise ValueError( f"Endpoint {v} ends with {result.path.rsplit('/', 1)[-1]}. 
\
                    \n Expected format is 'http://host:port'"
                )
@@ -470,11 +470,11 @@ def _preprocess_args(cls, values: Any) -> Any:
             "integrate.api.nvidia.com",
             "ai.api.nvidia.com",
         ]
-        
+
         # check health for local NIM mode
         if not values["is_hosted"]:
             check_endpoint_health(values["base_url"])
-        
+
         # set default model for hosted endpoint
         if values["is_hosted"] and not values["model"]:
             values["model"] = values["default_model"]
diff --git a/libs/ai-endpoints/tests/unit_tests/test_api_key.py b/libs/ai-endpoints/tests/unit_tests/test_api_key.py
index c79b0017..02faa32d 100644
--- a/libs/ai-endpoints/tests/unit_tests/test_api_key.py
+++ b/libs/ai-endpoints/tests/unit_tests/test_api_key.py
@@ -36,6 +36,14 @@ def mock_v1_local_models(requests_mock: Mocker) -> None:
     )
 
 
+@pytest.fixture(autouse=True)
+def mock_local_health(requests_mock: Mocker) -> None:
+    requests_mock.get(
+        "https://test_url/v1/health/live",
+        json={"object": "health-response", "message": "Service is live."},
+    )
+
+
 def test_create_without_api_key(public_class: type) -> None:
     with no_env_var("NVIDIA_API_KEY"):
         with pytest.warns(UserWarning):
@@ -43,7 +51,7 @@ def test_create_without_api_key(public_class: type) -> None:
 
 
 def test_create_unknown_url_no_api_key(public_class: type) -> None:
-    with no_env_var("NVIDIA_API_KEY"):
+    with no_env_var("NVIDIA_API_KEY"), pytest.warns(UserWarning):
         public_class(base_url="https://test_url/v1")
 
 
diff --git a/libs/ai-endpoints/tests/unit_tests/test_base_url.py b/libs/ai-endpoints/tests/unit_tests/test_base_url.py
index ee48ec50..923cdb47 100644
--- a/libs/ai-endpoints/tests/unit_tests/test_base_url.py
+++ b/libs/ai-endpoints/tests/unit_tests/test_base_url.py
@@ -28,7 +28,7 @@ def test_param_base_url_hosted(public_class: type, base_url: str) -> None:
 @pytest.fixture(autouse=True)
 def mock_v1_local_models(requests_mock: Mocker, base_url: str) -> None:
     requests_mock.get(
-        f"{base_url}/models",
+        f"{base_url}/v1/models",
         json={
             "data": [
                 {
@@ -43,6 +43,14 @@ def mock_v1_local_models(requests_mock: Mocker, base_url: str) -> None:
     )
 
 
+@pytest.fixture(autouse=True)
+def mock_local_health(requests_mock: Mocker, base_url: str) -> None:
+    requests_mock.get(
+        f"{base_url}/v1/health/live",
+        json={"object": "health-response", "message": "Service is live."},
+    )
+
+
 @pytest.mark.parametrize(
     "base_url",
     [
@@ -52,5 +60,6 @@ def mock_v1_local_models(requests_mock: Mocker, base_url: str) -> None:
     ],
 )
 def test_param_base_url_not_hosted(public_class: type, base_url: str) -> None:
-    client = public_class(base_url=base_url)
-    assert not client._client.is_hosted
+    with pytest.warns(UserWarning):
+        client = public_class(base_url=base_url)
+        assert not client._client.is_hosted
diff --git a/libs/ai-endpoints/tests/unit_tests/test_model.py b/libs/ai-endpoints/tests/unit_tests/test_model.py
index 4f06417b..17147845 100644
--- a/libs/ai-endpoints/tests/unit_tests/test_model.py
+++ b/libs/ai-endpoints/tests/unit_tests/test_model.py
@@ -51,6 +51,14 @@ def mock_v1_local_models(requests_mock: Mocker, known_unknown: str) -> None:
     )
 
 
+@pytest.fixture(autouse=True)
+def mock_local_health(requests_mock: Mocker) -> None:
+    requests_mock.get(
+        "http://localhost:8000/v1/health/live",
+        json={"object": "health-response", "message": "Service is live."},
+    )
+
+
 @pytest.mark.parametrize(
     "alias",
     [

From 93b846c8b44cf2638e5efdf5c73b830394857432 Mon Sep 17 00:00:00 2001
From: raspawar
Date: Wed, 10 Jul 2024 10:14:03 +0000
Subject: [PATCH 11/60] test case updates

---
 .../langchain_nvidia_ai_endpoints/_common.py  | 46 +++++++++----------
.../tests/integration_tests/test_base_url.py | 24 +++++++++- .../integration_tests/test_chat_models.py | 12 ++++- 3 files changed, 55 insertions(+), 27 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index 1fe8a783..45935d52 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -146,7 +146,7 @@ def _validate_base_url(cls, v: str) -> str: raise ValueError( f"Invalid base_url, Expected format is 'http://host:port'.: {v}" ) - if result.path and result.path != "/v1" and result.path != "/": + if result.path and result.path not in ["/v1", "/"]: raise ValueError( f"Endpoint {v} ends with {result.path.rsplit('/', 1)[-1]}. \ \n Expected format is 'http://host:port'" @@ -511,28 +511,28 @@ def _postprocess_args(cls, values: Any) -> Any: raise ValueError( f"Model {name} is unknown, check `available_models`" ) - else: - # set default model - if not name: - if not (client := values.get("client")): - warnings.warn(f"Unable to determine validity of {name}") - else: - valid_models = [ - model.id - for model in client.available_models - if not model.base_model or model.base_model == model.id - ] - name = next(iter(valid_models), None) - if name: - warnings.warn( - f"Default model is set as: {name}. \n" - "Set model using model parameter. \n" - "To get available models use available_models property.", - UserWarning, - ) - values["model"] = name - else: - raise ValueError("No locally hosted model was found.") + # else: + # # set default model + # if not name: + # if not (client := values.get("client")): + # warnings.warn(f"Unable to determine validity of {name}") + # else: + # valid_models = [ + # model.id + # for model in client.available_models + # if not model.base_model or model.base_model == model.id + # ] + # name = next(iter(valid_models), None) + # if name: + # warnings.warn( + # f"Default model is set as: {name}. \n" + # "Set model using model parameter. 
\n" + # "To get available models use available_models property.", + # UserWarning, + # ) + # values["model"] = name + # else: + # raise ValueError("No locally hosted model was found.") return values @classmethod diff --git a/libs/ai-endpoints/tests/integration_tests/test_base_url.py b/libs/ai-endpoints/tests/integration_tests/test_base_url.py index 06389a5b..0614254f 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/integration_tests/test_base_url.py @@ -2,8 +2,26 @@ import pytest from requests.exceptions import ConnectionError +from requests_mock import Mocker +# Fixture setup (mocking /v1/health/live and /v1/chat/completions endpoints) +@pytest.fixture() +def mock_endpoints(requests_mock: Mocker, base_url: str) -> None: + # Mock the /v1/health/live endpoint + requests_mock.get( + f"{base_url}/v1/health/live", + json={"object": "health-response", "message": "Service is live."}, + ) + + for endpoint in ["/v1/embeddings", "/v1/chat/completions", "/v1/ranking"]: + requests_mock.post( + f"{base_url}{endpoint}", + exc=ConnectionError(f"Mocked ConnectionError for {endpoint}"), + ) + + +# Test function using the mock_endpoints fixture @pytest.mark.parametrize( "base_url", [ @@ -14,9 +32,11 @@ def test_endpoint_unavailable( public_class: type, base_url: str, contact_service: Any, + mock_endpoints: None, # Inject the mock_endpoints fixture ) -> None: - # we test this with a bogus model because users should supply - # a model when using their own base_url + # Create a client instance client = public_class(model="not-a-model", base_url=base_url) + + # Attempt to contact the service and expect a ConnectionError with pytest.raises(ConnectionError): contact_service(client) diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py index 4940c5b5..1c6bc64b 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py +++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py @@ -6,9 +6,11 @@ from langchain_core.load.dump import dumps from langchain_core.load.load import loads from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage +from requests_mock import Mocker from langchain_nvidia_ai_endpoints.chat_models import ChatNVIDIA + # # we setup an --all-models flag in conftest.py, when passed it configures chat_model # and image_in_model to be all available models of type chat or image_in @@ -16,6 +18,12 @@ # note: currently --all-models only works with the default mode because different # modes may have different available models # +@pytest.fixture +def mock_local_health(requests_mock: Mocker) -> None: + requests_mock.get( + "http://localhost:12321/v1/health/live", + json={"object": "health-response", "message": "Service is live."}, + ) def test_chat_ai_endpoints(chat_model: str, mode: dict) -> None: @@ -32,8 +40,8 @@ def test_unknown_model() -> None: ChatNVIDIA(model="unknown_model") -def test_base_url_unknown_model() -> None: - llm = ChatNVIDIA(model="unknown_model", base_url="http://localhost:88888/v1") +def test_base_url_unknown_model(mock_local_health: None) -> None: + llm = ChatNVIDIA(model="unknown_model", base_url="http://localhost:12321/v1") assert llm.model == "unknown_model" From a2e81d9cb0cb55c8f92eb1957ef9b110658edbc2 Mon Sep 17 00:00:00 2001 From: raspawar Date: Wed, 10 Jul 2024 10:19:40 +0000 Subject: [PATCH 12/60] doc fixes --- .../langchain_nvidia_ai_endpoints/_common.py | 44 +++++++++---------- 
.../tests/integration_tests/test_base_url.py | 5 +-- 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index 45935d52..6a26f2e6 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -511,28 +511,28 @@ def _postprocess_args(cls, values: Any) -> Any: raise ValueError( f"Model {name} is unknown, check `available_models`" ) - # else: - # # set default model - # if not name: - # if not (client := values.get("client")): - # warnings.warn(f"Unable to determine validity of {name}") - # else: - # valid_models = [ - # model.id - # for model in client.available_models - # if not model.base_model or model.base_model == model.id - # ] - # name = next(iter(valid_models), None) - # if name: - # warnings.warn( - # f"Default model is set as: {name}. \n" - # "Set model using model parameter. \n" - # "To get available models use available_models property.", - # UserWarning, - # ) - # values["model"] = name - # else: - # raise ValueError("No locally hosted model was found.") + else: + # set default model + if not name: + if not (client := values.get("client")): + warnings.warn(f"Unable to determine validity of {name}") + else: + valid_models = [ + model.id + for model in client.available_models + if not model.base_model or model.base_model == model.id + ] + name = next(iter(valid_models), None) + if name: + warnings.warn( + f"Default model is set as: {name}. \n" + "Set model using model parameter. \n" + "To get available models use available_models property.", + UserWarning, + ) + values["model"] = name + else: + raise ValueError("No locally hosted model was found.") return values @classmethod diff --git a/libs/ai-endpoints/tests/integration_tests/test_base_url.py b/libs/ai-endpoints/tests/integration_tests/test_base_url.py index 0614254f..66c235af 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/integration_tests/test_base_url.py @@ -34,9 +34,8 @@ def test_endpoint_unavailable( contact_service: Any, mock_endpoints: None, # Inject the mock_endpoints fixture ) -> None: - # Create a client instance + # we test this with a bogus model because users should supply + # a model when using their own base_url client = public_class(model="not-a-model", base_url=base_url) - - # Attempt to contact the service and expect a ConnectionError with pytest.raises(ConnectionError): contact_service(client) From e2bac415466e7ed74a04bb66225de02a354a65d0 Mon Sep 17 00:00:00 2001 From: raspawar Date: Wed, 10 Jul 2024 16:02:56 +0530 Subject: [PATCH 13/60] test cases for url validation --- .../tests/unit_tests/test_base_url.py | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/libs/ai-endpoints/tests/unit_tests/test_base_url.py b/libs/ai-endpoints/tests/unit_tests/test_base_url.py index 923cdb47..e54aa8de 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/unit_tests/test_base_url.py @@ -51,6 +51,11 @@ def mock_local_health(requests_mock: Mocker, base_url: str) -> None: ) +@pytest.fixture +def mock_local_unhealthy(requests_mock: Mocker, base_url: str) -> None: + requests_mock.get(f"{base_url}/v1/health/live", status_code=404) + + @pytest.mark.parametrize( "base_url", [ @@ -63,3 +68,31 @@ def test_param_base_url_not_hosted(public_class: type, base_url: str) -> None: with pytest.warns(UserWarning): client = 
public_class(base_url=base_url) assert not client._client.is_hosted + + +# test case for base_url warnings +@pytest.mark.parametrize( + "base_url", + [ + "localhost", + "http://localhost:8888/embeddings", + "http://0.0.0.0:8888/ranking", + "http://localhost:8888/v1/chat/completion", + ], +) +def test_base_url_warning_not_hosted(public_class: type, base_url: str) -> None: + with pytest.raises(ValueError): + public_class(base_url=base_url) + + +@pytest.mark.parametrize( + "base_url", + [ + "http://localhost:8888", + ], +) +def test_base_url_unhealthy_not_hosted( + public_class: type, base_url: str, mock_local_unhealthy: None +) -> None: + with pytest.raises(ValueError): + public_class(base_url=base_url) From 13821d6086b52aabbf07b1194658b3fd36ccdfa9 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Thu, 11 Jul 2024 13:18:32 -0400 Subject: [PATCH 14/60] update ranking nim support for nims w/ /v1/models and multiple names --- .../langchain_nvidia_ai_endpoints/reranking.py | 3 +-- .../ai-endpoints/tests/integration_tests/conftest.py | 12 ++---------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py index 17e5dd01..03e7862e 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py @@ -27,7 +27,6 @@ class Config: _client: _NVIDIAClient = PrivateAttr(_NVIDIAClient) _default_batch_size: int = 32 - _deprecated_model: str = "ai-rerank-qa-mistral-4b" _default_model_name: str = "nv-rerank-qa-mistral-4b:1" base_url: str = Field( @@ -92,7 +91,7 @@ def get_available_models( def _rank(self, documents: List[str], query: str) -> List[Ranking]: response = self._client.client.get_req( payload={ - "model": "nv-rerank-qa-mistral-4b:1", + "model": self.model, "query": {"text": query}, "passages": [{"text": passage} for passage in documents], }, diff --git a/libs/ai-endpoints/tests/integration_tests/conftest.py b/libs/ai-endpoints/tests/integration_tests/conftest.py index 30388a96..d05ae598 100644 --- a/libs/ai-endpoints/tests/integration_tests/conftest.py +++ b/libs/ai-endpoints/tests/integration_tests/conftest.py @@ -73,19 +73,11 @@ def get_all_known_models() -> List[Model]: metafunc.parametrize("chat_model", models, ids=models) if "rerank_model" in metafunc.fixturenames: - models = ["nv-rerank-qa-mistral-4b:1"] + models = [NVIDIARerank._default_model_name] if model := metafunc.config.getoption("rerank_model_id"): models = [model] - # nim-mode reranking does not support model listing via /v1/models endpoint if metafunc.config.getoption("all_models"): - if mode.get("mode", None) == "nim": - models = [model.id for model in NVIDIARerank(**mode).available_models] - else: - models = [ - model.id - for model in get_all_known_models() - if model.model_type == "ranking" - ] + models = [model.id for model in NVIDIARerank(**mode).available_models] metafunc.parametrize("rerank_model", models, ids=models) if "vlm_model" in metafunc.fixturenames: From 2084ff06ce1e2eab1983d4ba9175cc2052815808 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Thu, 11 Jul 2024 15:50:54 -0400 Subject: [PATCH 15/60] skip nvolveqa_40k compat test when run against local nim --- libs/ai-endpoints/tests/integration_tests/test_embeddings.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libs/ai-endpoints/tests/integration_tests/test_embeddings.py b/libs/ai-endpoints/tests/integration_tests/test_embeddings.py index 
b1eabd30..2316b1e4 100644
--- a/libs/ai-endpoints/tests/integration_tests/test_embeddings.py
+++ b/libs/ai-endpoints/tests/integration_tests/test_embeddings.py
@@ -102,6 +102,8 @@ def test_embed_documents_truncate(
 
 @pytest.mark.parametrize("nvolveqa_40k", ["playground_nvolveqa_40k", "nvolveqa_40k"])
 def test_embed_nvolveqa_40k_compat(nvolveqa_40k: str, mode: dict) -> None:
+    if mode:
+        pytest.skip("Test only relevant for API Catalog")
     with pytest.warns(UserWarning):
         embedding = NVIDIAEmbeddings(model=nvolveqa_40k, truncate="NONE", **mode)
     text = "nvidia " * 2048

From 8fdfeb7897bfa8a0806bfc1eef5b7cbad3f3ded0 Mon Sep 17 00:00:00 2001
From: raspawar
Date: Fri, 12 Jul 2024 19:37:05 +0530
Subject: [PATCH 16/60] skip test cases for mistralai

---
 .../tests/integration_tests/test_chat_models.py            | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
index 4940c5b5..e65341f8 100644
--- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
+++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
@@ -125,6 +125,12 @@ def test_messages(
 ) -> None:
     if not system and not exchange:
         pytest.skip("No messages to test")
+    if (
+        chat_model == "mistralai/mixtral-8x7b-instruct-v0.1"
+        and exchange
+        and isinstance(exchange[0], AIMessage)
+    ):
+        pytest.skip("mistralai does not support system=>AIMessage")
     chat = ChatNVIDIA(model=chat_model, max_tokens=36, **mode)
     response = chat.invoke(system + exchange)
     assert isinstance(response, BaseMessage)

From 4cd9b390bb9c25bcea0d4eeb16bf7d20643b08b3 Mon Sep 17 00:00:00 2001
From: raspawar
Date: Mon, 15 Jul 2024 16:17:26 +0530
Subject: [PATCH 17/60] mark test case as xfail

---
 libs/ai-endpoints/tests/integration_tests/test_chat_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
index e65341f8..f1053a80 100644
--- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
+++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
@@ -130,7 +130,7 @@ def test_messages(
         and exchange
         and isinstance(exchange[0], AIMessage)
     ):
-        pytest.skip("mistralai does not support system=>AIMessage")
+        pytest.xfail("mistralai does not support system=>AIMessage")
     chat = ChatNVIDIA(model=chat_model, max_tokens=36, **mode)
     response = chat.invoke(system + exchange)
     assert isinstance(response, BaseMessage)

From 2681caf33987e0d62f8765fbcd86ff813375c4ad Mon Sep 17 00:00:00 2001
From: raspawar
Date: Mon, 15 Jul 2024 18:37:24 +0530
Subject: [PATCH 18/60] mark test_message as xfail

---
 .../tests/integration_tests/test_chat_models.py          | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
index f1053a80..8cf145b8 100644
--- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
+++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
@@ -120,17 +120,14 @@ def test_chat_ai_endpoints_system_message(chat_model: str, mode: dict) -> None:
         ),
     ],
 )
+@pytest.mark.xfail(
+    reason="mistralai recent impl does not support SystemMessage followed by AIMessage"
+)
 def test_messages(
     chat_model: str, mode: dict, system: List, exchange: List[BaseMessage]
 ) -> None:
     if not system and not exchange:
         pytest.skip("No messages to test")
-    if (
-        chat_model == 
"mistralai/mixtral-8x7b-instruct-v0.1" - and exchange - and isinstance(exchange[0], AIMessage) - ): - pytest.xfail("mistralai does not support system=>AIMessage") chat = ChatNVIDIA(model=chat_model, max_tokens=36, **mode) response = chat.invoke(system + exchange) assert isinstance(response, BaseMessage) From 2d47c51c1b669dfb18fc7cab89bd7ee12a56ac47 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Mon, 15 Jul 2024 15:54:33 -0400 Subject: [PATCH 19/60] update default chat model to meta/llama3-8b-instruct (most features & deployment options) --- libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 7cce280d..09400aa3 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -130,7 +130,7 @@ class ChatNVIDIA(BaseChatModel): """ _client: _NVIDIAClient = PrivateAttr(_NVIDIAClient) - _default_model: str = "mistralai/mixtral-8x7b-instruct-v0.1" + _default_model: str = "meta/llama3-8b-instruct" base_url: str = Field( "https://integrate.api.nvidia.com/v1", description="Base url for model listing an invocation", From dd56826bd26b27fb675ec5c0cb2c2d7825af0964 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Tue, 16 Jul 2024 09:02:54 -0400 Subject: [PATCH 20/60] bump version to 0.1.4 --- libs/ai-endpoints/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml index d3bfb2a6..c1363672 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-nvidia-ai-endpoints" -version = "0.1.3" +version = "0.1.4" description = "An integration package connecting NVIDIA AI Endpoints and LangChain" authors = [] readme = "README.md" From 42230b3af30f5c3e3de60424d6ccb99572bdfbd5 Mon Sep 17 00:00:00 2001 From: raspawar Date: Thu, 18 Jul 2024 12:12:51 +0530 Subject: [PATCH 21/60] final code --- .../langchain_nvidia_ai_endpoints/_common.py | 64 +++++++++++-------- .../integration_tests/test_chat_models.py | 10 +-- .../integration_tests/test_register_model.py | 2 +- .../tests/unit_tests/test_base_url.py | 55 ++++++++++++---- .../tests/unit_tests/test_model.py | 31 ++++++--- 5 files changed, 101 insertions(+), 61 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index 6a26f2e6..7624c073 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -18,7 +18,7 @@ Tuple, Union, ) -from urllib.parse import urljoin, urlparse +from urllib.parse import urlparse, urlunparse import aiohttp import requests @@ -41,17 +41,6 @@ def default_payload_fn(payload: dict) -> dict: return payload -def check_endpoint_health(base_url: str) -> None: - try: - response = requests.get(urljoin(base_url, "v1/health/live")) - response.raise_for_status() # Raise an HTTPError for bad responses - except requests.exceptions.RequestException as e: - raise ValueError( - f"Unable to reach endpoint {base_url}. Error: {e}. \ - \n Make sure the NIM is running and healthy." 
-        )
-
-
 class NVEModel(BaseModel):
     """
@@ -141,17 +130,11 @@ def headers(self) -> dict:
     def _validate_base_url(cls, v: str) -> str:
         if v is not None:
             result = urlparse(v)
+            expected_format = "Expected format is 'http://host:port'."
             # Ensure scheme and netloc (domain name) are present
             if not (result.scheme and result.netloc):
-                raise ValueError(
-                    f"Invalid base_url, Expected format is 'http://host:port'.: {v}"
-                )
-            if result.path and result.path not in ["/v1", "/"]:
-                raise ValueError(
-                    f"Endpoint {v} ends with {result.path.rsplit('/', 1)[-1]}. \
-                    \n Expected format is 'http://host:port'"
-                )
-            return urljoin(v, "v1")
+                raise ValueError(f"Invalid base_url format. {expected_format} Got: {v}")
+            return v
 
     @root_validator(pre=True)
     def _validate_model(cls, values: Dict[str, Any]) -> Dict[str, Any]:
@@ -463,20 +446,45 @@ class _NVIDIAClient(BaseModel):
 
     @root_validator(pre=True)
     def _preprocess_args(cls, values: Any) -> Any:
-        values["client"] = NVEModel(**values)
-
         if "base_url" in values:
-            values["is_hosted"] = urlparse(values["base_url"]).netloc in [
+            is_hosted = urlparse(values["base_url"]).netloc in [
                 "integrate.api.nvidia.com",
                 "ai.api.nvidia.com",
             ]
-            # check health for local NIM mode
-            if not values["is_hosted"]:
-                check_endpoint_health(values["base_url"])
+
+        ## Make sure /v1 is added to the url, followed by infer_path
+        if "base_url" in values:
+            result = urlparse(values["base_url"])
+            expected_format = "Expected format is 'http://host:port'."
+
+            if result.path:
+                normalized_path = result.path.strip("/")
+                if normalized_path in [
+                    "v1",
+                    "v1/embeddings",
+                    "v1/completions",
+                    "v1/rankings",
+                ]:
+                    warnings.warn(f"{expected_format} Rest is ignored.")
+                else:
+                    raise ValueError(
+                        f"Base URL path is not recognized. {expected_format}"
+                    )
+
+            base_url = urlunparse((result.scheme, result.netloc, "v1", "", "", ""))
+            values["base_url"] = base_url
+            values["infer_path"] = values["infer_path"].format(base_url=base_url)
+
+        values["client"] = NVEModel(**values)
+        if "base_url" in values:
+            values["client"].listing_path = values["client"].listing_path.format(
+                base_url=values["base_url"]
+            )
+
+        values["is_hosted"] = is_hosted
 
         # set default model for hosted endpoint
-        if values["is_hosted"] and not values["model"]:
+        if is_hosted and not values["model"]:
             values["model"] = values["default_model"]
 
         return values
diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
index 1c6bc64b..21f2c455 100644
--- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
+++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py
@@ -6,11 +6,9 @@
 from langchain_core.load.dump import dumps
 from langchain_core.load.load import loads
 from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
-from requests_mock import Mocker
 
 from langchain_nvidia_ai_endpoints.chat_models import ChatNVIDIA
 
-
 #
 # we setup an --all-models flag in conftest.py, when passed it configures chat_model
 # and image_in_model to be all available models of type chat or image_in
@@ -18,12 +16,6 @@
 # note: currently --all-models only works with the default mode because different
 # modes may have different available models
 #
-@pytest.fixture
-def mock_local_health(requests_mock: Mocker) -> None:
-    requests_mock.get(
-        "http://localhost:12321/v1/health/live",
-        json={"object": "health-response", "message": "Service is live."},
-    )
 
 
 def test_chat_ai_endpoints(chat_model: str, mode: dict) -> None:
@@ -40,7 +32,7 @
def test_unknown_model() -> None: ChatNVIDIA(model="unknown_model") -def test_base_url_unknown_model(mock_local_health: None) -> None: +def test_base_url_unknown_model() -> None: llm = ChatNVIDIA(model="unknown_model", base_url="http://localhost:12321/v1") assert llm.model == "unknown_model" diff --git a/libs/ai-endpoints/tests/integration_tests/test_register_model.py b/libs/ai-endpoints/tests/integration_tests/test_register_model.py index 6a08c2fe..77e5aa50 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_register_model.py +++ b/libs/ai-endpoints/tests/integration_tests/test_register_model.py @@ -44,7 +44,7 @@ def test_registered_model_functional( ) as record: # warns because we're overriding known models register_model(model) contact_service(client(model=id)) - assert len(record) == 1 + assert len(record) == 2 assert isinstance(record[0].message, UserWarning) assert "already registered" in str(record[0].message) assert "Overriding" in str(record[0].message) diff --git a/libs/ai-endpoints/tests/unit_tests/test_base_url.py b/libs/ai-endpoints/tests/unit_tests/test_base_url.py index e54aa8de..c4c7df3f 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/unit_tests/test_base_url.py @@ -1,3 +1,5 @@ +from urllib.parse import urlparse, urlunparse + import pytest from requests_mock import Mocker @@ -25,7 +27,7 @@ def test_param_base_url_hosted(public_class: type, base_url: str) -> None: assert client._client.is_hosted -@pytest.fixture(autouse=True) +@pytest.fixture() def mock_v1_local_models(requests_mock: Mocker, base_url: str) -> None: requests_mock.get( f"{base_url}/v1/models", @@ -43,17 +45,41 @@ def mock_v1_local_models(requests_mock: Mocker, base_url: str) -> None: ) -@pytest.fixture(autouse=True) -def mock_local_health(requests_mock: Mocker, base_url: str) -> None: +@pytest.fixture() +def mock_v1_local_models2(requests_mock: Mocker, base_url: str) -> None: + result = urlparse(base_url) + base_url = urlunparse((result.scheme, result.netloc, "v1", "", "", "")) requests_mock.get( - f"{base_url}/v1/health/live", - json={"object": "health-response", "message": "Service is live."}, + f"{base_url}/models", + json={ + "data": [ + { + "id": "model1", + "object": "model", + "created": 1234567890, + "owned_by": "OWNER", + "root": "model1", + }, + ] + }, ) -@pytest.fixture -def mock_local_unhealthy(requests_mock: Mocker, base_url: str) -> None: - requests_mock.get(f"{base_url}/v1/health/live", status_code=404) +# @pytest.fixture(autouse=True) +# def mock_local_health(requests_mock: Mocker, base_url: str) -> None: +# result = urlparse(base_url) +# base_url = urlunparse((result.scheme, result.netloc, "v1", "", "", "")) +# requests_mock.get( +# f"{base_url}/health/live", +# json={"object": "health-response", "message": "Service is live."}, +# ) + + +# @pytest.fixture +# def mock_local_unhealthy(requests_mock: Mocker, base_url: str) -> None: +# result = urlparse(base_url) +# base_url = urlunparse((result.scheme, result.netloc, "v1", "", "", "")) +# requests_mock.get(f"{base_url}/health/live", status_code=404) @pytest.mark.parametrize( @@ -64,7 +90,9 @@ def mock_local_unhealthy(requests_mock: Mocker, base_url: str) -> None: "http://0.0.0.0:8888", ], ) -def test_param_base_url_not_hosted(public_class: type, base_url: str) -> None: +def test_param_base_url_not_hosted( + public_class: type, base_url: str, mock_v1_local_models: None +) -> None: with pytest.warns(UserWarning): client = public_class(base_url=base_url) assert not client._client.is_hosted @@ 
-74,13 +102,14 @@ def test_param_base_url_not_hosted(public_class: type, base_url: str) -> None: @pytest.mark.parametrize( "base_url", [ - "localhost", "http://localhost:8888/embeddings", "http://0.0.0.0:8888/ranking", "http://localhost:8888/v1/chat/completion", ], ) -def test_base_url_warning_not_hosted(public_class: type, base_url: str) -> None: +def test_base_url_warning_not_hosted( + public_class: type, base_url: str, mock_v1_local_models2: None +) -> None: with pytest.raises(ValueError): public_class(base_url=base_url) @@ -92,7 +121,7 @@ def test_base_url_warning_not_hosted(public_class: type, base_url: str) -> None: ], ) def test_base_url_unhealthy_not_hosted( - public_class: type, base_url: str, mock_local_unhealthy: None + public_class: type, base_url: str, mock_v1_local_models: None ) -> None: - with pytest.raises(ValueError): + with pytest.warns(UserWarning): public_class(base_url=base_url) diff --git a/libs/ai-endpoints/tests/unit_tests/test_model.py b/libs/ai-endpoints/tests/unit_tests/test_model.py index 17147845..28f95694 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_model.py +++ b/libs/ai-endpoints/tests/unit_tests/test_model.py @@ -76,8 +76,14 @@ def test_aliases(public_class: type, alias: str) -> None: with pytest.warns(UserWarning) as record: x = public_class(model=alias, nvidia_api_key="a-bogus-key") assert x.model == x._client.model + + # Check the warnings + assert len(record) >= 1 # Ensure at least one warning was issued assert isinstance(record[0].message, Warning) - assert "deprecated" in record[0].message.args[0] + assert ( + isinstance(record[1].message, Warning) + and "deprecated" in record[1].message.args[0] + ) def test_known(public_class: type) -> None: @@ -104,8 +110,13 @@ def test_known_unknown(public_class: type, known_unknown: str) -> None: x = public_class(model=known_unknown, nvidia_api_key="a-bogus-key") assert x.model == known_unknown assert isinstance(record[0].message, Warning) - assert "Found" in record[0].message.args[0] - assert "unknown" in record[0].message.args[0] + assert ( + isinstance(record[1].message, Warning) and "Found" in record[1].message.args[0] + ) + assert ( + isinstance(record[1].message, Warning) + and "unknown" in record[1].message.args[0] + ) def test_unknown_unknown(public_class: type) -> None: @@ -129,10 +140,10 @@ def test_default_known(public_class: type, known_unknown: str) -> None: assert x.model == known_unknown -def test_default_lora(public_class: type) -> None: - """ - Test that a model in the model table will be accepted. - """ - # find a model that matches the public_class under test - x = public_class(base_url="http://localhost:8000/v1", model="lora1") - assert x.model == "lora1" +# def test_default_lora(public_class: type) -> None: +# """ +# Test that a model in the model table will be accepted. 
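# A minimal sketch of the base_url rules the tests above exercise, assuming a
# NIM is listening at the hypothetical address localhost:8888; the model name
# is a placeholder. A bare ".../v1" path is accepted, a recognized inference
# suffix such as ".../v1/embeddings" is accepted (with a UserWarning) and
# normalized back to ".../v1", and any other path raises a ValueError.
from langchain_nvidia_ai_endpoints import ChatNVIDIA

llm = ChatNVIDIA(base_url="http://localhost:8888/v1", model="meta/llama3-8b-instruct")

try:
    ChatNVIDIA(
        base_url="http://localhost:8888/chat/completions",
        model="meta/llama3-8b-instruct",
    )
except ValueError as e:
    # Base URL path is not recognized. Expected format is 'http://host:port'.
    print(e)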
+# """ +# # find a model that matches the public_class under test +# x = public_class(base_url="http://localhost:8000/v1", model="lora1") +# assert x.model == "lora1" From e4ae0b129ef82ded971863c2b6af19f9297f810f Mon Sep 17 00:00:00 2001 From: raspawar Date: Thu, 18 Jul 2024 12:23:42 +0530 Subject: [PATCH 22/60] test cases cleanup --- .../tests/unit_tests/test_api_key.py | 8 ----- .../tests/unit_tests/test_base_url.py | 33 +++++-------------- .../tests/unit_tests/test_model.py | 22 ++++--------- 3 files changed, 16 insertions(+), 47 deletions(-) diff --git a/libs/ai-endpoints/tests/unit_tests/test_api_key.py b/libs/ai-endpoints/tests/unit_tests/test_api_key.py index 02faa32d..3420abf7 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_api_key.py +++ b/libs/ai-endpoints/tests/unit_tests/test_api_key.py @@ -36,14 +36,6 @@ def mock_v1_local_models(requests_mock: Mocker) -> None: ) -@pytest.fixture(autouse=True) -def mock_local_health(requests_mock: Mocker) -> None: - requests_mock.get( - "https://test_url/v1/health/live", - json={"object": "health-response", "message": "Service is live."}, - ) - - def test_create_without_api_key(public_class: type) -> None: with no_env_var("NVIDIA_API_KEY"): with pytest.warns(UserWarning): diff --git a/libs/ai-endpoints/tests/unit_tests/test_base_url.py b/libs/ai-endpoints/tests/unit_tests/test_base_url.py index c4c7df3f..c0aa8e83 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/unit_tests/test_base_url.py @@ -65,23 +65,6 @@ def mock_v1_local_models2(requests_mock: Mocker, base_url: str) -> None: ) -# @pytest.fixture(autouse=True) -# def mock_local_health(requests_mock: Mocker, base_url: str) -> None: -# result = urlparse(base_url) -# base_url = urlunparse((result.scheme, result.netloc, "v1", "", "", "")) -# requests_mock.get( -# f"{base_url}/health/live", -# json={"object": "health-response", "message": "Service is live."}, -# ) - - -# @pytest.fixture -# def mock_local_unhealthy(requests_mock: Mocker, base_url: str) -> None: -# result = urlparse(base_url) -# base_url = urlunparse((result.scheme, result.netloc, "v1", "", "", "")) -# requests_mock.get(f"{base_url}/health/live", status_code=404) - - @pytest.mark.parametrize( "base_url", [ @@ -98,16 +81,16 @@ def test_param_base_url_not_hosted( assert not client._client.is_hosted -# test case for base_url warnings +# test case for invalid base_url @pytest.mark.parametrize( "base_url", [ "http://localhost:8888/embeddings", - "http://0.0.0.0:8888/ranking", - "http://localhost:8888/v1/chat/completion", + "http://0.0.0.0:8888/rankings", + "http://localhost:8888/chat/completions", ], ) -def test_base_url_warning_not_hosted( +def test_base_url_invalid_not_hosted( public_class: type, base_url: str, mock_v1_local_models2: None ) -> None: with pytest.raises(ValueError): @@ -117,11 +100,13 @@ def test_base_url_warning_not_hosted( @pytest.mark.parametrize( "base_url", [ - "http://localhost:8888", + "http://localhost:8888/v1", + "http://localhost:8080/v1/embeddings", + "http://0.0.0.0:8888/v1/rankings", ], ) -def test_base_url_unhealthy_not_hosted( - public_class: type, base_url: str, mock_v1_local_models: None +def test_base_url_valid_not_hosted( + public_class: type, base_url: str, mock_v1_local_models2: None ) -> None: with pytest.warns(UserWarning): public_class(base_url=base_url) diff --git a/libs/ai-endpoints/tests/unit_tests/test_model.py b/libs/ai-endpoints/tests/unit_tests/test_model.py index 28f95694..ee2bd9bf 100644 --- 
a/libs/ai-endpoints/tests/unit_tests/test_model.py +++ b/libs/ai-endpoints/tests/unit_tests/test_model.py @@ -51,14 +51,6 @@ def mock_v1_local_models(requests_mock: Mocker, known_unknown: str) -> None: ) -@pytest.fixture(autouse=True) -def mock_local_health(requests_mock: Mocker) -> None: - requests_mock.get( - "http://localhost:8000/v1/health/live", - json={"object": "health-response", "message": "Service is live."}, - ) - - @pytest.mark.parametrize( "alias", [ @@ -140,10 +132,10 @@ def test_default_known(public_class: type, known_unknown: str) -> None: assert x.model == known_unknown -# def test_default_lora(public_class: type) -> None: -# """ -# Test that a model in the model table will be accepted. -# """ -# # find a model that matches the public_class under test -# x = public_class(base_url="http://localhost:8000/v1", model="lora1") -# assert x.model == "lora1" +def test_default_lora(public_class: type) -> None: + """ + Test that a model in the model table will be accepted. + """ + # find a model that matches the public_class under test + x = public_class(base_url="http://localhost:8000/v1", model="lora1") + assert x.model == "lora1" From 9e6ec8decd3127df6f3404d30c8374a655b44586 Mon Sep 17 00:00:00 2001 From: raspawar Date: Thu, 18 Jul 2024 12:28:46 +0530 Subject: [PATCH 23/60] test cases healthcheck mock removal --- libs/ai-endpoints/tests/integration_tests/test_base_url.py | 6 ------ .../tests/integration_tests/test_chat_models.py | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/libs/ai-endpoints/tests/integration_tests/test_base_url.py b/libs/ai-endpoints/tests/integration_tests/test_base_url.py index 66c235af..40bafbb0 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/integration_tests/test_base_url.py @@ -8,12 +8,6 @@ # Fixture setup (mocking /v1/health/live and /v1/chat/completions endpoints) @pytest.fixture() def mock_endpoints(requests_mock: Mocker, base_url: str) -> None: - # Mock the /v1/health/live endpoint - requests_mock.get( - f"{base_url}/v1/health/live", - json={"object": "health-response", "message": "Service is live."}, - ) - for endpoint in ["/v1/embeddings", "/v1/chat/completions", "/v1/ranking"]: requests_mock.post( f"{base_url}{endpoint}", diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py index 21f2c455..4940c5b5 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py +++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py @@ -33,7 +33,7 @@ def test_unknown_model() -> None: def test_base_url_unknown_model() -> None: - llm = ChatNVIDIA(model="unknown_model", base_url="http://localhost:12321/v1") + llm = ChatNVIDIA(model="unknown_model", base_url="http://localhost:88888/v1") assert llm.model == "unknown_model" From 60953305046b54910debc038489201395720c5e0 Mon Sep 17 00:00:00 2001 From: raspawar Date: Thu, 18 Jul 2024 12:37:15 +0530 Subject: [PATCH 24/60] allow /v1 in path --- .../langchain_nvidia_ai_endpoints/_common.py | 5 +++-- .../tests/integration_tests/test_base_url.py | 2 +- libs/ai-endpoints/tests/unit_tests/test_model.py | 15 +++------------ 3 files changed, 7 insertions(+), 15 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py index 7624c073..c2715c5f 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py +++ 
b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py @@ -459,8 +459,9 @@ def _preprocess_args(cls, values: Any) -> Any: if result.path: normalized_path = result.path.strip("/") - if normalized_path in [ - "v1", + if normalized_path == "v1": + pass + elif normalized_path in [ "v1/embeddings", "v1/completions", "v1/rankings", diff --git a/libs/ai-endpoints/tests/integration_tests/test_base_url.py b/libs/ai-endpoints/tests/integration_tests/test_base_url.py index 40bafbb0..81cb0119 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_base_url.py +++ b/libs/ai-endpoints/tests/integration_tests/test_base_url.py @@ -5,7 +5,7 @@ from requests_mock import Mocker -# Fixture setup (mocking /v1/health/live and /v1/chat/completions endpoints) +# Fixture setup /v1/chat/completions endpoints @pytest.fixture() def mock_endpoints(requests_mock: Mocker, base_url: str) -> None: for endpoint in ["/v1/embeddings", "/v1/chat/completions", "/v1/ranking"]: diff --git a/libs/ai-endpoints/tests/unit_tests/test_model.py b/libs/ai-endpoints/tests/unit_tests/test_model.py index ee2bd9bf..c8b28c0f 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_model.py +++ b/libs/ai-endpoints/tests/unit_tests/test_model.py @@ -70,12 +70,8 @@ def test_aliases(public_class: type, alias: str) -> None: assert x.model == x._client.model # Check the warnings - assert len(record) >= 1 # Ensure at least one warning was issued assert isinstance(record[0].message, Warning) - assert ( - isinstance(record[1].message, Warning) - and "deprecated" in record[1].message.args[0] - ) + assert "deprecated" in record[0].message.args[0] def test_known(public_class: type) -> None: @@ -102,13 +98,8 @@ def test_known_unknown(public_class: type, known_unknown: str) -> None: x = public_class(model=known_unknown, nvidia_api_key="a-bogus-key") assert x.model == known_unknown assert isinstance(record[0].message, Warning) - assert ( - isinstance(record[1].message, Warning) and "Found" in record[1].message.args[0] - ) - assert ( - isinstance(record[1].message, Warning) - and "unknown" in record[1].message.args[0] - ) + assert "Found" in record[0].message.args[0] + assert "unknown" in record[0].message.args[0] def test_unknown_unknown(public_class: type) -> None: From 14938e1491f646b838cbef91492ff667fecdbc57 Mon Sep 17 00:00:00 2001 From: raspawar Date: Thu, 18 Jul 2024 12:47:38 +0530 Subject: [PATCH 25/60] more cleanup of test cases --- .../tests/integration_tests/test_register_model.py | 2 +- libs/ai-endpoints/tests/unit_tests/test_model.py | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/libs/ai-endpoints/tests/integration_tests/test_register_model.py b/libs/ai-endpoints/tests/integration_tests/test_register_model.py index 77e5aa50..6a08c2fe 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_register_model.py +++ b/libs/ai-endpoints/tests/integration_tests/test_register_model.py @@ -44,7 +44,7 @@ def test_registered_model_functional( ) as record: # warns because we're overriding known models register_model(model) contact_service(client(model=id)) - assert len(record) == 2 + assert len(record) == 1 assert isinstance(record[0].message, UserWarning) assert "already registered" in str(record[0].message) assert "Overriding" in str(record[0].message) diff --git a/libs/ai-endpoints/tests/unit_tests/test_model.py b/libs/ai-endpoints/tests/unit_tests/test_model.py index c8b28c0f..82d03424 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_model.py +++ b/libs/ai-endpoints/tests/unit_tests/test_model.py @@ -68,10 +68,8 
@@ def test_aliases(public_class: type, alias: str) -> None:
     with pytest.warns(UserWarning) as record:
         x = public_class(model=alias, nvidia_api_key="a-bogus-key")
         assert x.model == x._client.model
-        assert isinstance(record[0].message, Warning)
-        assert "deprecated" in record[0].message.args[0]
+    assert isinstance(record[0].message, Warning)
+    assert "deprecated" in record[0].message.args[0]
 
 
 def test_known(public_class: type) -> None:

From 43bdb6b6a71b245055fbe53c5ef4fa93b2ba879f Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Thu, 18 Jul 2024 05:00:37 -0400
Subject: [PATCH 27/60] add support for reranking truncate parameter

---
 .../docs/retrievers/nvidia_rerank.ipynb       | 16 +++-
 .../reranking.py                              | 30 ++++++--
 .../tests/integration_tests/test_ranking.py   | 38 ++++++++++
 .../tests/unit_tests/test_ranking.py          | 73 +++++++++++++++++++
 4 files changed, 147 insertions(+), 10 deletions(-)
 create mode 100644 libs/ai-endpoints/tests/unit_tests/test_ranking.py

diff --git a/libs/ai-endpoints/docs/retrievers/nvidia_rerank.ipynb b/libs/ai-endpoints/docs/retrievers/nvidia_rerank.ipynb
index 3164e8e0..207d167a 100644
--- a/libs/ai-endpoints/docs/retrievers/nvidia_rerank.ipynb
+++ b/libs/ai-endpoints/docs/retrievers/nvidia_rerank.ipynb
@@ -574,7 +574,19 @@
    "source": [
     "#### Combine and rank documents\n",
     "\n",
-    "Let's combine the BM25 as well as semantic search results. The resulting `docs` will be ordered by their relevance to the query by the reranking NIM."
+    "Let's combine the BM25 as well as semantic search results. The resulting `docs` will be ordered by their relevance to the query by the reranking NIM.\n",
+    "\n",
+    "#### Note on truncation\n",
+    "\n",
+    "Reranking models typically have a fixed context window that determines the maximum number of input tokens that can be processed. This limit could be a hard limit, equal to the model's maximum input token length, or an effective limit, beyond which the accuracy of the ranking decreases.\n",
+    "\n",
+    "Since models operate on tokens and applications usually work with text, it can be challenging for an application to ensure that its input stays within the model's token limits. By default, an exception is thrown if the input is too large.\n",
+    "\n",
+    "To assist with this, NVIDIA's NIMs (API Catalog or local) provide a `truncate` parameter that truncates the input on the server side if it's too large.\n",
+    "\n",
+    "The `truncate` parameter has two options:\n",
+    " - \"NONE\": The default option. 
An exception is thrown if the input is too large.\n", + " - \"END\": The server truncates the input from the end (right), discarding tokens as necessary." ] }, { @@ -598,7 +610,7 @@ } ], "source": [ - "ranker = NVIDIARerank()\n", + "ranker = NVIDIARerank(truncate=\"END\")\n", "\n", "all_docs = bm25_docs + sem_docs\n", "\n", diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py index 03e7862e..85ce4f73 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/reranking.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Generator, List, Optional, Sequence +from typing import Any, Generator, List, Literal, Optional, Sequence from langchain_core.callbacks.manager import Callbacks from langchain_core.documents import Document @@ -35,6 +35,13 @@ class Config: ) top_n: int = Field(5, ge=0, description="The number of documents to return.") model: Optional[str] = Field(description="The model to use for reranking.") + truncate: Optional[Literal["NONE", "END"]] = Field( + description=( + "Truncate input text if it exceeds the model's maximum token length. " + "Default is model dependent and is likely to raise error if an " + "input is too long." + ), + ) max_batch_size: int = Field( _default_batch_size, ge=1, description="The maximum batch size." ) @@ -53,6 +60,9 @@ def __init__(self, **kwargs: Any): nvidia_api_key (str): The API key to use for connecting to the hosted NIM. api_key (str): Alternative to nvidia_api_key. base_url (str): The base URL of the NIM to connect to. + truncate (str): "NONE", "END", truncate input text if it exceeds + the model's context length. Default is model dependent and + is likely to raise an error if an input is too long. 
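# A minimal usage sketch for the truncate parameter described above, assuming a
# hosted reranking NIM and NVIDIA_API_KEY set in the environment. When truncate
# is set, it is passed through as the "truncate" field of the ranking request
# payload; without it, over-long passages are likely to produce a 400 error.
from langchain_core.documents import Document
from langchain_nvidia_ai_endpoints import NVIDIARerank

ranker = NVIDIARerank(truncate="END")
docs = [Document(page_content="NVIDIA " * 2048)]  # longer than the context window
ranked = ranker.compress_documents(documents=docs, query="What is acceleration?")
print(ranked[0].metadata["relevance_score"])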
API Key: - The recommended way to provide the API key is through the `NVIDIA_API_KEY` @@ -89,13 +99,14 @@ def get_available_models( # todo: batching when len(documents) > endpoint's max batch size def _rank(self, documents: List[str], query: str) -> List[Ranking]: - response = self._client.client.get_req( - payload={ - "model": self.model, - "query": {"text": query}, - "passages": [{"text": passage} for passage in documents], - }, - ) + payload = { + "model": self.model, + "query": {"text": query}, + "passages": [{"text": passage} for passage in documents], + } + if self.truncate: + payload["truncate"] = self.truncate + response = self._client.client.get_req(payload=payload) if response.status_code != 200: response.raise_for_status() # todo: handle errors @@ -134,6 +145,9 @@ def batch(ls: list, size: int) -> Generator[List[Document], None, None]: query=query, documents=[d.page_content for d in doc_batch] ) for ranking in rankings: + assert ( + 0 <= ranking.index < len(doc_batch) + ), "invalid response from server: index out of range" doc = doc_batch[ranking.index] doc.metadata["relevance_score"] = ranking.logit results.append(doc) diff --git a/libs/ai-endpoints/tests/integration_tests/test_ranking.py b/libs/ai-endpoints/tests/integration_tests/test_ranking.py index 5937cb30..867aab64 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_ranking.py +++ b/libs/ai-endpoints/tests/integration_tests/test_ranking.py @@ -179,3 +179,41 @@ def test_rerank_batching( # result_docs[i].page_content == reference_docs[i].page_content # for i in range(top_n) # ), "batched results do not match unbatched results" + + +@pytest.mark.parametrize("truncate", ["END"]) +def test_truncate_positive(rerank_model: str, mode: dict, truncate: str) -> None: + query = "What is acceleration?" + documents = [ + Document(page_content="NVIDIA " * length) + for length in [32, 1024, 64, 128, 2048, 256, 512] + ] + client = NVIDIARerank( + model=rerank_model, top_n=len(documents), truncate=truncate, **mode + ) + response = client.compress_documents(documents=documents, query=query) + assert len(response) == len(documents) + + +@pytest.mark.parametrize("truncate", [None, "NONE"]) +@pytest.mark.xfail( + reason=( + "truncation is inconsistent across models, " + "nv-rerank-qa-mistral-4b:1 truncates by default " + "while others do not" + ) +) +def test_truncate_negative(rerank_model: str, mode: dict, truncate: str) -> None: + query = "What is acceleration?" 
+ documents = [ + Document(page_content="NVIDIA " * length) + for length in [32, 1024, 64, 128, 2048, 256, 512] + ] + truncate_param = {} + if truncate: + truncate_param = {"truncate": truncate} + client = NVIDIARerank(model=rerank_model, **truncate_param, **mode) + with pytest.raises(Exception) as e: + client.compress_documents(documents=documents, query=query) + assert "400" in str(e.value) + assert "exceeds maximum allowed" in str(e.value) diff --git a/libs/ai-endpoints/tests/unit_tests/test_ranking.py b/libs/ai-endpoints/tests/unit_tests/test_ranking.py new file mode 100644 index 00000000..887ef3e9 --- /dev/null +++ b/libs/ai-endpoints/tests/unit_tests/test_ranking.py @@ -0,0 +1,73 @@ +from typing import Any, Literal, Optional + +import pytest +from langchain_core.documents import Document +from requests_mock import Mocker + +from langchain_nvidia_ai_endpoints import NVIDIARerank + + +@pytest.fixture(autouse=True) +def mock_v1_models(requests_mock: Mocker) -> None: + requests_mock.get( + "https://integrate.api.nvidia.com/v1/models", + json={ + "data": [ + { + "id": "mock-model", + "object": "model", + "created": 1234567890, + "owned_by": "OWNER", + } + ] + }, + ) + + +@pytest.fixture(autouse=True) +def mock_v1_ranking(requests_mock: Mocker) -> None: + requests_mock.post( + "https://integrate.api.nvidia.com/v1/ranking", + json={ + "rankings": [ + {"index": 0, "logit": 4.2}, + ] + }, + ) + + +@pytest.mark.parametrize( + "truncate", + [ + None, + "END", + "NONE", + ], +) +def test_truncate( + requests_mock: Mocker, + truncate: Optional[Literal["END", "NONE"]], +) -> None: + truncate_param = {} + if truncate: + truncate_param = {"truncate": truncate} + client = NVIDIARerank(model="mock-model", **truncate_param) + response = client.compress_documents( + documents=[Document(page_content="Nothing really.")], query="What is it?" 
+ ) + + assert len(response) == 1 + + assert requests_mock.last_request is not None + request_payload = requests_mock.last_request.json() + if truncate is None: + assert "truncate" not in request_payload + else: + assert "truncate" in request_payload + assert request_payload["truncate"] == truncate + + +@pytest.mark.parametrize("truncate", [True, False, 1, 0, 1.0, "START", "BOGUS"]) +def test_truncate_invalid(truncate: Any) -> None: + with pytest.raises(ValueError): + NVIDIARerank(truncate=truncate) From 2ae37aa7895dee44ca20e0d97d404882026acf1c Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 19 Jul 2024 05:52:34 -0400 Subject: [PATCH 28/60] add nv-mistralai/mistral-nemo-12b-instruct to set of supported chat models --- libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py | 5 +++++ .../ai-endpoints/tests/integration_tests/test_chat_models.py | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index fee823c1..7f29bd10 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -277,6 +277,11 @@ def validate_client(cls, client: str, values: dict) -> str: client="ChatNVIDIA", aliases=["ai-deepseek-coder-6_7b-instruct"], ), + "nv-mistralai/mistral-nemo-12b-instruct": Model( + id="nv-mistralai/mistral-nemo-12b-instruct", + model_type="chat", + client="ChatNVIDIA", + ), } QA_MODEL_TABLE = { diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py index 8cf145b8..bb1490de 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py +++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py @@ -121,7 +121,10 @@ def test_chat_ai_endpoints_system_message(chat_model: str, mode: dict) -> None: ], ) @pytest.mark.xfail( - reason="mistralai recent impl does not support AIMessage followed by SystemAI" + reason=( + "not all endpoints support system messages, " + "repeated message types or ending with an ai message" + ) ) def test_messages( chat_model: str, mode: dict, system: List, exchange: List[BaseMessage] From 36a5f636aa31ebaee3b77d48cd33349a3e563119 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 19 Jul 2024 05:53:05 -0400 Subject: [PATCH 29/60] bump version to 0.1.5 --- libs/ai-endpoints/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml index c1363672..f5953f5d 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-nvidia-ai-endpoints" -version = "0.1.4" +version = "0.1.5" description = "An integration package connecting NVIDIA AI Endpoints and LangChain" authors = [] readme = "README.md" From 3f9a66826124cb10415c57408ade4cc09506108e Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 19 Jul 2024 06:15:26 -0400 Subject: [PATCH 30/60] allow multiple embedding and rerank models for tests --- .../tests/integration_tests/conftest.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/libs/ai-endpoints/tests/integration_tests/conftest.py b/libs/ai-endpoints/tests/integration_tests/conftest.py index d05ae598..1b658767 100644 --- a/libs/ai-endpoints/tests/integration_tests/conftest.py +++ b/libs/ai-endpoints/tests/integration_tests/conftest.py 
@@ -30,12 +30,14 @@ def pytest_addoption(parser: pytest.Parser) -> None:
     parser.addoption(
         "--embedding-model-id",
         action="store",
-        help="Run tests for a specific embedding model",
+        nargs="+",
+        help="Run tests for a specific embedding model or list of models",
     )
     parser.addoption(
         "--rerank-model-id",
         action="store",
-        help="Run tests for a specific rerank model",
+        nargs="+",
+        help="Run tests for a specific rerank model or list of models",
     )
     parser.addoption(
         "--vlm-model-id",
@@ -74,8 +76,8 @@ def get_all_known_models() -> List[Model]:
 
     if "rerank_model" in metafunc.fixturenames:
         models = [NVIDIARerank._default_model_name]
-        if model := metafunc.config.getoption("rerank_model_id"):
-            models = [model]
+        if model_list := metafunc.config.getoption("rerank_model_id"):
+            models = model_list
         if metafunc.config.getoption("all_models"):
             models = [model.id for model in NVIDIARerank(**mode).available_models]
         metafunc.parametrize("rerank_model", models, ids=models)
@@ -106,8 +108,8 @@ def get_all_known_models() -> List[Model]:
 
     if "embedding_model" in metafunc.fixturenames:
         models = [NVIDIAEmbeddings._default_model]
-        if metafunc.config.getoption("embedding_model_id"):
-            models = [metafunc.config.getoption("embedding_model_id")]
+        if model_list := metafunc.config.getoption("embedding_model_id"):
+            models = model_list
         if metafunc.config.getoption("all_models"):
             models = [model.id for model in NVIDIAEmbeddings(**mode).available_models]
         metafunc.parametrize("embedding_model", models, ids=models)

From d58e3347a45afe78ea3a69734e25f3bbee8c00ed Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Fri, 19 Jul 2024 06:30:42 -0400
Subject: [PATCH 31/60] add support for nvidia/nv-rerankqa-mistral-4b-v3
 (ranking) and nvidia/nv-embedqa-mistral-7b-v2 + nvidia/nv-embedqa-e5-v5
 (embedding)

---
 .../langchain_nvidia_ai_endpoints/_statics.py | 16 ++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
index 7f29bd10..1151143e 100644
--- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
+++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
@@ -386,6 +386,16 @@ def validate_client(cls, client: str, values: dict) -> str:
         client="NVIDIAEmbeddings",
         aliases=["ai-nv-embed-v1"],
     ),
+    "nvidia/nv-embedqa-mistral-7b-v2": Model(
+        id="nvidia/nv-embedqa-mistral-7b-v2",
+        model_type="embedding",
+        client="NVIDIAEmbeddings",
+    ),
+    "nvidia/nv-embedqa-e5-v5": Model(
+        id="nvidia/nv-embedqa-e5-v5",
+        model_type="embedding",
+        client="NVIDIAEmbeddings",
+    ),
 }
 
 RANKING_MODEL_TABLE = {
@@ -396,6 +406,12 @@ def validate_client(cls, client: str, values: dict) -> str:
         client="NVIDIARerank",
         endpoint="https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking",
         aliases=["ai-rerank-qa-mistral-4b"],
     ),
+    "nvidia/nv-rerankqa-mistral-4b-v3": Model(
+        id="nvidia/nv-rerankqa-mistral-4b-v3",
+        model_type="ranking",
+        client="NVIDIARerank",
+        endpoint="https://ai.api.nvidia.com/v1/retrieval/nvidia/nv-rerankqa-mistral-4b-v3/reranking",
+    ),
 }
 
 # COMPLETION_MODEL_TABLE = {

From 223816e8f3281698f8e9c0e3fdbbf8bd66a1858c Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Fri, 19 Jul 2024 06:31:05 -0400
Subject: [PATCH 32/60] bump version to 0.1.6

---
 libs/ai-endpoints/pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/ai-endpoints/pyproject.toml 
b/libs/ai-endpoints/pyproject.toml index f5953f5d..ba34df24 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-nvidia-ai-endpoints" -version = "0.1.5" +version = "0.1.6" description = "An integration package connecting NVIDIA AI Endpoints and LangChain" authors = [] readme = "README.md" From 779fe33705d076e69fa8defe9b88399d214c5844 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 19 Jul 2024 09:37:35 -0400 Subject: [PATCH 33/60] merge langchain chat docs back into langchain-nvidia --- .../docs/chat/nvidia_ai_endpoints.ipynb | 33 ++++--------------- 1 file changed, 7 insertions(+), 26 deletions(-) diff --git a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb index e0eb5f55..666c80b9 100644 --- a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb +++ b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb @@ -137,28 +137,6 @@ "llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta/llama3-8b-instruct\")" ] }, - { - "cell_type": "code", - "execution_count": 4, - "id": "7d4a4e2e", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/ubuntu/raspawar/langchain-nvidia/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py:583: UserWarning: Default model is set as: meta/llama3-8b-instruct. \n", - "Set model using model parameter. \n", - "To get available models use available_models property.\n", - " UserWarning,\n" - ] - } - ], - "source": [ - "# OR connect to an embedding NIM running at localhost:8000, with default model(first available model)\n", - "llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\")" - ] - }, { "cell_type": "markdown", "id": "71d37987-d568-4a73-9d2a-8bd86323f8bf", @@ -594,7 +572,12 @@ "from langchain.chains import ConversationChain\n", "from langchain.memory import ConversationBufferMemory\n", "\n", - "chat = ChatNVIDIA(model=\"mistralai/mixtral-8x22b-instruct-v0.1\", temperature=0.1, max_tokens=100, top_p=1.0)\n", + "chat = ChatNVIDIA(\n", + " model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n", + " temperature=0.1,\n", + " max_tokens=100,\n", + " top_p=1.0,\n", + ")\n", "\n", "conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())" ] @@ -649,9 +632,7 @@ }, "outputs": [], "source": [ - "conversation.invoke(\"Tell me about yourself.\")[\n", - " \"response\"\n", - "]\n" + "conversation.invoke(\"Tell me about yourself.\")[\"response\"]" ] } ], From 17d0008c28ea909c2916a8d9c494720c64ed5a67 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 19 Jul 2024 09:39:02 -0400 Subject: [PATCH 34/60] remove chat docs about unsupported payload forcing --- .../docs/chat/nvidia_ai_endpoints.ipynb | 86 ------------------- 1 file changed, 86 deletions(-) diff --git a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb index 666c80b9..dc2236a9 100644 --- a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb +++ b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb @@ -444,92 +444,6 @@ "llm.invoke(f'What\\'s in this image?\\n')" ] }, - { - "cell_type": "markdown", - "id": "3e61d868", - "metadata": {}, - "source": [ - "#### **Advanced Use Case:** Forcing Payload \n", - "\n", - "You may notice that some newer models may have strong parameter expectations that the LangChain connector may not support by default. 
For example, we cannot invoke the [Kosmos](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/ai-foundation/models/kosmos-2) model at the time of this notebook's latest release due to the lack of a streaming argument on the server side: " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d143e0d6", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_nvidia_ai_endpoints import ChatNVIDIA\n", - "\n", - "kosmos = ChatNVIDIA(model=\"microsoft/kosmos-2\")\n", - "\n", - "from langchain_core.messages import HumanMessage\n", - "\n", - "# kosmos.invoke(\n", - "# [\n", - "# HumanMessage(\n", - "# content=[\n", - "# {\"type\": \"text\", \"text\": \"Describe this image:\"},\n", - "# {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n", - "# ]\n", - "# )\n", - "# ]\n", - "# )\n", - "\n", - "# Exception: [422] Unprocessable Entity\n", - "# body -> stream\n", - "# Extra inputs are not permitted (type=extra_forbidden)\n", - "# RequestID: 35538c9a-4b45-4616-8b75-7ef816fccf38" - ] - }, - { - "cell_type": "markdown", - "id": "1e230b70", - "metadata": {}, - "source": [ - "For a simple use case like this, we can actually try to force the payload argument of our underlying client by specifying the `payload_fn` function as follows: " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0925b2b1", - "metadata": {}, - "outputs": [], - "source": [ - "def drop_streaming_key(d):\n", - " \"\"\"Takes in payload dictionary, outputs new payload dictionary\"\"\"\n", - " if \"stream\" in d:\n", - " d.pop(\"stream\")\n", - " return d\n", - "\n", - "\n", - "## Override the payload passthrough. Default is to pass through the payload as is.\n", - "kosmos = ChatNVIDIA(model=\"microsoft/kosmos-2\")\n", - "kosmos.client.payload_fn = drop_streaming_key\n", - "\n", - "kosmos.invoke(\n", - " [\n", - " HumanMessage(\n", - " content=[\n", - " {\"type\": \"text\", \"text\": \"Describe this image:\"},\n", - " {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n", - " ]\n", - " )\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "fe6e1758", - "metadata": {}, - "source": [ - "For more advanced or custom use-cases (i.e. supporting the diffusion models), you may be interested in leveraging the `NVEModel` client as a requests backbone. The `NVIDIAEmbeddings` class is a good source of inspiration for this. 
" - ] - }, { "cell_type": "markdown", "id": "137662a6", From c2651210cd324ceaa46abb07b84f5acd28204ab0 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 19 Jul 2024 11:41:17 -0400 Subject: [PATCH 35/60] add empty /v1/models fixture to ensure unit tests run without network connectivity --- libs/ai-endpoints/tests/unit_tests/conftest.py | 6 ++++++ libs/ai-endpoints/tests/unit_tests/test_chat_models.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/libs/ai-endpoints/tests/unit_tests/conftest.py b/libs/ai-endpoints/tests/unit_tests/conftest.py index 9abdd6b0..8925a6ad 100644 --- a/libs/ai-endpoints/tests/unit_tests/conftest.py +++ b/libs/ai-endpoints/tests/unit_tests/conftest.py @@ -1,4 +1,5 @@ import pytest +import requests_mock from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank @@ -12,3 +13,8 @@ ) def public_class(request: pytest.FixtureRequest) -> type: return request.param + + +@pytest.fixture +def empty_v1_models(requests_mock: requests_mock.Mocker) -> None: + requests_mock.get("https://integrate.api.nvidia.com/v1/models", json={"data": []}) diff --git a/libs/ai-endpoints/tests/unit_tests/test_chat_models.py b/libs/ai-endpoints/tests/unit_tests/test_chat_models.py index 643f9dd5..1c6ffa72 100644 --- a/libs/ai-endpoints/tests/unit_tests/test_chat_models.py +++ b/libs/ai-endpoints/tests/unit_tests/test_chat_models.py @@ -18,6 +18,6 @@ def test_integration_initialization() -> None: ChatNVIDIA(model="meta/llama2-70b", nvidia_api_key="nvapi-...") -def test_unavailable() -> None: +def test_unavailable(empty_v1_models: None) -> None: with pytest.raises(ValueError): ChatNVIDIA(model="not-a-real-model") From aa201fa4fec1673eceabebb9330327fc72a06f4b Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Wed, 12 Jun 2024 11:44:42 -0400 Subject: [PATCH 36/60] add tests for tool calling --- .../langchain_nvidia_ai_endpoints/_statics.py | 2 + .../tests/integration_tests/conftest.py | 18 + .../integration_tests/test_bind_tools.py | 382 ++++++++++++++++++ 3 files changed, 402 insertions(+) create mode 100644 libs/ai-endpoints/tests/integration_tests/test_bind_tools.py diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index 1151143e..d5649623 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -13,6 +13,7 @@ class Model(BaseModel): client: client name, e.g. ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank endpoint: custom endpoint for the model aliases: list of aliases for the model + supports_tools: whether the model supports tool calling All aliases are deprecated and will trigger a warning when used. 
""" @@ -25,6 +26,7 @@ class Model(BaseModel): client: Optional[Literal["ChatNVIDIA", "NVIDIAEmbeddings", "NVIDIARerank"]] = None endpoint: Optional[str] = None aliases: Optional[list] = None + supports_tools: Optional[bool] = False base_model: Optional[str] = None def __hash__(self) -> int: diff --git a/libs/ai-endpoints/tests/integration_tests/conftest.py b/libs/ai-endpoints/tests/integration_tests/conftest.py index 1b658767..5240a89c 100644 --- a/libs/ai-endpoints/tests/integration_tests/conftest.py +++ b/libs/ai-endpoints/tests/integration_tests/conftest.py @@ -21,6 +21,12 @@ def pytest_addoption(parser: pytest.Parser) -> None: nargs="+", help="Run tests for a specific chat model or list of models", ) + parser.addoption( + "--tool-model-id", + action="store", + nargs="+", + help="Run tests for a specific chat models that support tool calling", + ) parser.addoption( "--qa-model-id", action="store", @@ -74,6 +80,18 @@ def get_all_known_models() -> List[Model]: ] metafunc.parametrize("chat_model", models, ids=models) + if "tool_model" in metafunc.fixturenames: + models = [] + if model_list := metafunc.config.getoption("tool_model_id"): + models = model_list + if metafunc.config.getoption("all_models"): + models = [ + model.id + for model in ChatNVIDIA(**mode).available_models + if model.model_type == "chat" and model.supports_tools + ] + metafunc.parametrize("tool_model", models, ids=models) + if "rerank_model" in metafunc.fixturenames: models = [NVIDIARerank._default_model_name] if model_list := metafunc.config.getoption("rerank_model_id"): diff --git a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py new file mode 100644 index 00000000..83dff6b0 --- /dev/null +++ b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py @@ -0,0 +1,382 @@ +import json +import warnings +from typing import Any, List, Literal, Optional, Union + +import pytest +from langchain_core.messages import AIMessage, ChatMessage +from langchain_core.pydantic_v1 import Field +from langchain_core.tools import tool + +from langchain_nvidia_ai_endpoints import ChatNVIDIA + +# +# ways to specify tools: +# 0. bind_tools +# ways to specify tool_choice: +# 1. invoke +# 2. bind_tools +# 3. stream +# tool_choice levels: +# 4. "none" +# 5. "auto" (accuracy only) +# 6. None (accuracy only) +# 7. "required" +# 8. {"function": {"name": tool name}} (partial function) +# 9. {"type": "function", "function": {"name": tool name}} +# 10. "any" (bind_tools only) +# 11. tool name (bind_tools only) +# 12. True (bind_tools only) +# 13. False (bind_tools only) +# tools levels: +# 14. no tools +# 15. one tool +# 16. multiple tools (accuracy only) +# test types: +# 17. deterministic (minimial accuracy tests; relies on basic tool calling skills) +# 18. accuracy (proper tool; proper arguments) +# negative tests: +# 19. require unknown named tool (invoke/stream only) +# 20. 
partial function (invoke/stream only) +# + +# todo: streaming +# todo: test tool with no arguments +# todo: parallel_tool_calls + + +@tool +def xxyyzz( + a: int = Field(..., description="First number"), + b: int = Field(..., description="Second number"), +) -> int: + """xxyyzz two numbers""" + return (a**b) % (b - a) + + +@tool +def zzyyxx( + a: int = Field(..., description="First number"), + b: int = Field(..., description="Second number"), +) -> int: + """zzyyxx two numbers""" + return (b**a) % (a - b) + + +def check_response_structure(response: AIMessage) -> None: + assert not response.content # should be `response.content is None` but + # AIMessage.content: Union[str, List[Union[str, Dict]]] cannot be None. + for tool_call in response.tool_calls: + assert tool_call["id"] is not None + assert response.response_metadata is not None + assert isinstance(response.response_metadata, dict) + assert "finish_reason" in response.response_metadata + assert response.response_metadata["finish_reason"] in [ + "tool_calls", + "stop", + ] # todo: remove "stop" + assert len(response.tool_calls) > 0 + + +# users can also get at the tool calls from the response.additional_kwargs +@pytest.mark.xfail(reason="Accuracy test") +def test_accuracy_default_invoke_additional_kwargs(tool_model: str, mode: dict) -> None: + llm = ChatNVIDIA(temperature=0, model=tool_model, **mode).bind_tools([xxyyzz]) + response = llm.invoke("What is 11 xxyyzz 3?") + assert not response.content # should be `response.content is None` but + # AIMessage.content: Union[str, List[Union[str, Dict]]] cannot be None. + assert response.additional_kwargs is not None + assert "tool_calls" in response.additional_kwargs + assert isinstance(response.additional_kwargs["tool_calls"], list) + assert response.additional_kwargs["tool_calls"] + for tool_call in response.additional_kwargs["tool_calls"]: + assert "id" in tool_call + assert tool_call["id"] is not None + assert "type" in tool_call + assert tool_call["type"] == "function" + assert "function" in tool_call + assert response.response_metadata is not None + assert isinstance(response.response_metadata, dict) + assert "content" in response.response_metadata + assert response.response_metadata["content"] is None + assert "finish_reason" in response.response_metadata + assert response.response_metadata["finish_reason"] in [ + "tool_calls", + "stop", + ] # todo: remove "stop" + assert len(response.additional_kwargs["tool_calls"]) > 0 + tool_call = response.additional_kwargs["tool_calls"][0] + assert tool_call["function"]["name"] == "xxyyzz" + assert json.loads(tool_call["function"]["arguments"]) == {"a": 11, "b": 3} + + +@pytest.mark.parametrize( + "tool_choice", + [ + "none", + "required", + {"function": {"name": "xxyyzz"}}, + {"type": "function", "function": {"name": "xxyyzz"}}, + ], + ids=["none", "required", "partial", "function"], +) +def test_invoke_tool_choice_with_no_tool( + tool_model: str, mode: dict, tool_choice: Any +) -> None: + llm = ChatNVIDIA(model=tool_model, **mode) + with pytest.raises(Exception) as e: + llm.invoke("What is 11 xxyyzz 3?", tool_choice=tool_choice) + assert "400" in str(e.value) or "###" in str( + e.value + ) # todo: stop transforming 400 -> ### + assert ( + "Value error, When using `tool_choice`, `tools` must be set." in str(e.value) + or ( + "Value error, Invalid value for `tool_choice`: `tool_choice` is only " + "allowed when `tools` are specified." 
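# A minimal sketch of the happy path these tests cover, assuming a hosted chat
# model that supports tool calling (the model id below is a placeholder) and
# reusing the xxyyzz tool defined above.
llm = ChatNVIDIA(model="some-tool-calling-model").bind_tools(
    [xxyyzz], tool_choice="required"
)
response = llm.invoke("What is 11 xxyyzz 3?")
for tool_call in response.tool_calls:
    print(tool_call["name"], tool_call["args"])  # expect: xxyyzz {'a': 11, 'b': 3}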
+ ) + in str(e.value) + or "invalid_request_error" in str(e.value) + ) + + +def test_invoke_tool_choice_none(tool_model: str, mode: dict) -> None: + llm = ChatNVIDIA(model=tool_model, **mode).bind_tools(tools=[xxyyzz]) + response = llm.invoke("What is 11 xxyyzz 3?", tool_choice="none") # type: ignore + assert isinstance(response, ChatMessage) + assert "tool_calls" not in response.additional_kwargs + + +@pytest.mark.parametrize( + "tool_choice", + [ + {"function": {"name": "xxyyzz"}}, + ], + ids=["partial"], +) +def test_invoke_tool_choice_negative( + tool_model: str, + mode: dict, + tool_choice: Optional[ + Union[dict, str, Literal["auto", "none", "any", "required"], bool] + ], +) -> None: + llm = ChatNVIDIA(model=tool_model, **mode).bind_tools([xxyyzz]) + with pytest.raises(Exception) as e: + llm.invoke("What is 11 xxyyzz 3?", tool_choice=tool_choice) # type: ignore + assert "400" in str(e.value) or "###" in str( + e.value + ) # todo: stop transforming 400 -> ### + assert "invalid_request_error" in str(e.value) or "value_error" in str(e.value) + + +@pytest.mark.parametrize( + "tool_choice", + [ + "required", + {"type": "function", "function": {"name": "xxyyzz"}}, + ], + ids=["required", "function"], +) +def test_invoke_tool_choice( + tool_model: str, + mode: dict, + tool_choice: Optional[ + Union[dict, str, Literal["auto", "none", "any", "required"], bool] + ], +) -> None: + llm = ChatNVIDIA(model=tool_model, **mode).bind_tools([xxyyzz]) + response = llm.invoke("What is 11 xxyyzz 3?", tool_choice=tool_choice) # type: ignore + assert isinstance(response, AIMessage) + check_response_structure(response) + + +@pytest.mark.parametrize( + "tool_choice", + [ + "auto", + None, + "required", + {"type": "function", "function": {"name": "xxyyzz"}}, + ], + ids=["auto", "absent", "required", "function"], +) +@pytest.mark.parametrize( + "tools", + [[xxyyzz], [xxyyzz, zzyyxx], [zzyyxx, xxyyzz]], + ids=["xxyyzz", "xxyyzz_and_zzyyxx", "zzyyxx_and_xxyyzz"], +) +@pytest.mark.xfail(reason="Accuracy test") +def test_accuracy_invoke_tool_choice( + tool_model: str, + mode: dict, + tools: List, + tool_choice: Any, +) -> None: + llm = ChatNVIDIA(temperature=0, model=tool_model, **mode).bind_tools(tools) + response = llm.invoke("What is 11 xxyyzz 3?", tool_choice=tool_choice) # type: ignore + assert isinstance(response, AIMessage) + check_response_structure(response) + tool_call = response.tool_calls[0] + assert tool_call["name"] == "xxyyzz" + assert tool_call["args"] == {"b": 3, "a": 11} + + +def test_invoke_tool_choice_with_unknown_tool(tool_model: str, mode: dict) -> None: + llm = ChatNVIDIA(model=tool_model, **mode).bind_tools(tools=[xxyyzz]) + with pytest.raises(Exception) as e: + llm.invoke( + "What is 11 xxyyzz 3?", + tool_choice={"type": "function", "function": {"name": "zzyyxx"}}, + ) # type: ignore + assert ( + "not found in the tools list" in str(e.value) + or "no function named" in str(e.value) + or "does not match any of the specified" in str(e.value) + ) + + +@pytest.mark.parametrize( + "tool_choice", + [ + {"function": {"name": "xxyyzz"}}, + {"type": "function", "function": {"name": "xxyyzz"}}, + "xxyyzz", + ], + ids=["partial", "function", "name"], +) +def test_bind_tool_tool_choice_with_no_tool_client( + tool_model: str, + mode: dict, + tool_choice: Optional[ + Union[dict, str, Literal["auto", "none", "any", "required"], bool] + ], +) -> None: + with pytest.raises(ValueError) as e: + ChatNVIDIA(model=tool_model, **mode).bind_tools( + tools=[], tool_choice=tool_choice + ) + assert "not found in the 
tools list" in str(e.value) + + +@pytest.mark.parametrize( + "tool_choice", + [ + "none", + "required", + "any", + True, + False, + ], + ids=["none", "required", "any", "True", "False"], +) +def test_bind_tool_tool_choice_with_no_tool_server( + tool_model: str, mode: dict, tool_choice: Any +) -> None: + llm = ChatNVIDIA(model=tool_model, **mode).bind_tools([], tool_choice=tool_choice) + with pytest.raises(Exception) as e: + llm.invoke("What is 11 xxyyzz 3?") + assert "400" in str(e.value) or "###" in str( + e.value + ) # todo: stop transforming 400 -> ### + assert ( + "Value error, When using `tool_choice`, `tools` must be set." in str(e.value) + or ( + "Value error, Invalid value for `tool_choice`: `tool_choice` is only " + "allowed when `tools` are specified." + ) + in str(e.value) + or "Expected an array with minimum length" in str(e.value) + or "should be non-empty" in str(e.value) + ) + + +@pytest.mark.parametrize( + "tool_choice", + ["none", False], +) +def test_bind_tool_tool_choice_none( + tool_model: str, mode: dict, tool_choice: Any +) -> None: + llm = ChatNVIDIA(model=tool_model, **mode).bind_tools( + tools=[xxyyzz], tool_choice=tool_choice + ) + response = llm.invoke("What is 11 xxyyzz 3?") + assert isinstance(response, ChatMessage) + assert "tool_calls" not in response.additional_kwargs + + +@pytest.mark.parametrize( + "tool_choice", + [ + "required", + {"function": {"name": "xxyyzz"}}, + {"type": "function", "function": {"name": "xxyyzz"}}, + "any", + "xxyyzz", + True, + ], + ids=["required", "partial", "function", "any", "name", "True"], +) +def test_bind_tool_tool_choice( + tool_model: str, + mode: dict, + tool_choice: Optional[ + Union[dict, str, Literal["auto", "none", "any", "required"], bool] + ], +) -> None: + llm = ChatNVIDIA(model=tool_model, **mode).bind_tools( + [xxyyzz], tool_choice=tool_choice + ) + response = llm.invoke("What is 11 xxyyzz 3?") + assert isinstance(response, AIMessage) + check_response_structure(response) + + +@pytest.mark.parametrize( + "tool_choice", + [ + "auto", + None, + "required", + {"function": {"name": "xxyyzz"}}, + {"type": "function", "function": {"name": "xxyyzz"}}, + "any", + "xxyyzz", + True, + ], + ids=["auto", "absent", "required", "partial", "function", "any", "name", "True"], +) +@pytest.mark.parametrize( + "tools", + [[xxyyzz], [xxyyzz, zzyyxx], [zzyyxx, xxyyzz]], + ids=["xxyyzz", "xxyyzz_and_zzyyxx", "zzyyxx_and_xxyyzz"], +) +@pytest.mark.xfail(reason="Accuracy test") +def test_accuracy_bind_tool_tool_choice( + tool_model: str, + mode: dict, + tools: List, + tool_choice: Any, +) -> None: + llm = ChatNVIDIA(temperature=0, model=tool_model, **mode).bind_tools( + tools=tools, tool_choice=tool_choice + ) + response = llm.invoke("What is 11 xxyyzz 3?") + assert isinstance(response, AIMessage) + check_response_structure(response) + tool_call = response.tool_calls[0] + assert tool_call["name"] == "xxyyzz" + assert tool_call["args"] == {"b": 3, "a": 11} + + +def test_known_does_not_warn(tool_model: str, mode: dict) -> None: + with warnings.catch_warnings(): + warnings.simplefilter("error") + ChatNVIDIA(model=tool_model, **mode).bind_tools([xxyyzz]) + + +def test_unknown_warns(mode: dict) -> None: + with pytest.warns(UserWarning) as record: + ChatNVIDIA(model="mock-model", **mode).bind_tools([xxyyzz]) + assert len(record) == 1 + assert "not known to support tools" in str(record[0].message) From 54dec90f55d8b41ba8c633488065d54b227c44f9 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Mon, 17 Jun 2024 18:59:52 -0400 Subject: [PATCH 
37/60] add langchain-community dep for openai convert_message_to_dict --- libs/ai-endpoints/poetry.lock | 378 +++++++++++++++++- libs/ai-endpoints/pyproject.toml | 1 + .../integration_tests/test_vlm_models.py | 7 + 3 files changed, 375 insertions(+), 11 deletions(-) diff --git a/libs/ai-endpoints/poetry.lock b/libs/ai-endpoints/poetry.lock index a4464a70..77f7b29c 100644 --- a/libs/ai-endpoints/poetry.lock +++ b/libs/ai-endpoints/poetry.lock @@ -292,6 +292,21 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "dataclasses-json" +version = "0.6.7" +description = "Easily serialize dataclasses to and from JSON." +optional = false +python-versions = "<4.0,>=3.7" +files = [ + {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, + {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, +] + +[package.dependencies] +marshmallow = ">=3.18.0,<4.0.0" +typing-inspect = ">=0.4.0,<1" + [[package]] name = "exceptiongroup" version = "1.2.1" @@ -421,6 +436,77 @@ files = [ {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, ] +[[package]] +name = "greenlet" +version = "3.0.3" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, + {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, + {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, + {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, + {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, + {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, + {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, + {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, + {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, + {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, + {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, + {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, + {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, + {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, + {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, + {file = "greenlet-3.0.3.tar.gz", hash = 
"sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil"] + [[package]] name = "idna" version = "3.7" @@ -468,9 +554,62 @@ files = [ {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] +[[package]] +name = "langchain" +version = "0.2.5" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain-0.2.5-py3-none-any.whl", hash = "sha256:9aded9a65348254e1c93dcdaacffe4d1b6a5e7f74ef80c160c88ff78ad299228"}, + {file = "langchain-0.2.5.tar.gz", hash = "sha256:ffdbf4fcea46a10d461bcbda2402220fcfd72a0c70e9f4161ae0510067b9b3bd"}, +] + +[package.dependencies] +aiohttp = ">=3.8.3,<4.0.0" +async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} +langchain-core = ">=0.2.7,<0.3.0" +langchain-text-splitters = ">=0.2.0,<0.3.0" +langsmith = ">=0.1.17,<0.2.0" +numpy = [ + {version = ">=1,<2", markers = "python_version < \"3.12\""}, + {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, +] +pydantic = ">=1,<3" +PyYAML = ">=5.3" +requests = ">=2,<3" +SQLAlchemy = ">=1.4,<3" +tenacity = ">=8.1.0,<9.0.0" + +[[package]] +name = "langchain-community" +version = "0.2.5" +description = "Community contributed LangChain integrations." +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain_community-0.2.5-py3-none-any.whl", hash = "sha256:bf37a334952e42c7676d083cf2d2c4cbfbb7de1949c4149fe19913e2b06c485f"}, + {file = "langchain_community-0.2.5.tar.gz", hash = "sha256:476787b8c8c213b67e7b0eceb53346e787f00fbae12d8e680985bd4f93b0bf64"}, +] + +[package.dependencies] +aiohttp = ">=3.8.3,<4.0.0" +dataclasses-json = ">=0.5.7,<0.7" +langchain = ">=0.2.5,<0.3.0" +langchain-core = ">=0.2.7,<0.3.0" +langsmith = ">=0.1.0,<0.2.0" +numpy = [ + {version = ">=1,<2", markers = "python_version < \"3.12\""}, + {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, +] +PyYAML = ">=5.3" +requests = ">=2,<3" +SQLAlchemy = ">=1.4,<3" +tenacity = ">=8.1.0,<9.0.0" + [[package]] name = "langchain-core" -version = "0.2.0rc1" +version = "0.2.8" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -479,31 +618,45 @@ develop = false [package.dependencies] jsonpatch = "^1.33" -langsmith = "^0.1.0" -packaging = "^23.2" +langsmith = "^0.1.75" +packaging = ">=23.2,<25" pydantic = ">=1,<3" PyYAML = ">=5.3" tenacity = "^8.1.0" -[package.extras] -extended-testing = ["jinja2 (>=3,<4)"] - [package.source] type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "cd1879f5e75fc9e6a8c04ac839909e0d6f2fb541" +resolved_reference = "c2b2e3266ce97ea647d4b86eedadbb7cd77d0381" subdirectory = "libs/core" +[[package]] +name = "langchain-text-splitters" +version = "0.2.1" +description = "LangChain text splitting utilities" +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain_text_splitters-0.2.1-py3-none-any.whl", hash = "sha256:c2774a85f17189eaca50339629d2316d13130d4a8d9f1a1a96f3a03670c4a138"}, + {file = "langchain_text_splitters-0.2.1.tar.gz", hash = "sha256:06853d17d7241ecf5c97c7b6ef01f600f9b0fb953dd997838142a527a4f32ea4"}, +] + +[package.dependencies] +langchain-core = ">=0.2.0,<0.3.0" + +[package.extras] +extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml 
(>=4.9.3,<6.0)"] + [[package]] name = "langsmith" -version = "0.1.50" +version = "0.1.78" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.50-py3-none-any.whl", hash = "sha256:a81e9809fcaa277bfb314d729e58116554f186d1478fcfdf553b1c2ccce54b85"}, - {file = "langsmith-0.1.50.tar.gz", hash = "sha256:9fd22df8c689c044058536ea5af66f5302067e7551b60d7a335fede8d479572b"}, + {file = "langsmith-0.1.78-py3-none-any.whl", hash = "sha256:87bc5d9072bfcb6392d7552cbcd6089dcc1faed36d688b1587d80bd48a1acba2"}, + {file = "langsmith-0.1.78.tar.gz", hash = "sha256:d9112d2e9298ec6b02d3b1afec6ed557df9db3746c79d34ef3b448fc18e116cd"}, ] [package.dependencies] @@ -511,6 +664,25 @@ orjson = ">=3.9.14,<4.0.0" pydantic = ">=1,<3" requests = ">=2,<3" +[[package]] +name = "marshmallow" +version = "3.21.3" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, + {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + [[package]] name = "multidict" version = "6.0.5" @@ -671,6 +843,88 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] +[[package]] +name = "numpy" +version = "1.24.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file 
= "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, +] + +[[package]] +name = "numpy" +version = "1.26.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = 
"numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = 
"numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + [[package]] name = "orjson" version = "3.10.1" @@ -1174,6 +1428,93 @@ files = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] +[[package]] +name = "sqlalchemy" +version = "2.0.30" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3b48154678e76445c7ded1896715ce05319f74b1e73cf82d4f8b59b46e9c0ddc"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2753743c2afd061bb95a61a51bbb6a1a11ac1c44292fad898f10c9839a7f75b2"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7bfc726d167f425d4c16269a9a10fe8630ff6d14b683d588044dcef2d0f6be7"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f61ada6979223013d9ab83a3ed003ded6959eae37d0d685db2c147e9143797"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a365eda439b7a00732638f11072907c1bc8e351c7665e7e5da91b169af794af"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bba002a9447b291548e8d66fd8c96a6a7ed4f2def0bb155f4f0a1309fd2735d5"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-win32.whl", hash = "sha256:0138c5c16be3600923fa2169532205d18891b28afa817cb49b50e08f62198bb8"}, + {file = "SQLAlchemy-2.0.30-cp310-cp310-win_amd64.whl", hash = "sha256:99650e9f4cf3ad0d409fed3eec4f071fadd032e9a5edc7270cd646a26446feeb"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:955991a09f0992c68a499791a753523f50f71a6885531568404fa0f231832aa0"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f69e4c756ee2686767eb80f94c0125c8b0a0b87ede03eacc5c8ae3b54b99dc46"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69c9db1ce00e59e8dd09d7bae852a9add716efdc070a3e2068377e6ff0d6fdaa"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1429a4b0f709f19ff3b0cf13675b2b9bfa8a7e79990003207a011c0db880a13"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:efedba7e13aa9a6c8407c48facfdfa108a5a4128e35f4c68f20c3407e4376aa9"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16863e2b132b761891d6c49f0a0f70030e0bcac4fd208117f6b7e053e68668d0"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-win32.whl", 
hash = "sha256:2ecabd9ccaa6e914e3dbb2aa46b76dede7eadc8cbf1b8083c94d936bcd5ffb49"}, + {file = "SQLAlchemy-2.0.30-cp311-cp311-win_amd64.whl", hash = "sha256:0b3f4c438e37d22b83e640f825ef0f37b95db9aa2d68203f2c9549375d0b2260"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5a79d65395ac5e6b0c2890935bad892eabb911c4aa8e8015067ddb37eea3d56c"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9a5baf9267b752390252889f0c802ea13b52dfee5e369527da229189b8bd592e"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cb5a646930c5123f8461f6468901573f334c2c63c795b9af350063a736d0134"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:296230899df0b77dec4eb799bcea6fbe39a43707ce7bb166519c97b583cfcab3"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c62d401223f468eb4da32627bffc0c78ed516b03bb8a34a58be54d618b74d472"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3b69e934f0f2b677ec111b4d83f92dc1a3210a779f69bf905273192cf4ed433e"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-win32.whl", hash = "sha256:77d2edb1f54aff37e3318f611637171e8ec71472f1fdc7348b41dcb226f93d90"}, + {file = "SQLAlchemy-2.0.30-cp312-cp312-win_amd64.whl", hash = "sha256:b6c7ec2b1f4969fc19b65b7059ed00497e25f54069407a8701091beb69e591a5"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5a8e3b0a7e09e94be7510d1661339d6b52daf202ed2f5b1f9f48ea34ee6f2d57"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b60203c63e8f984df92035610c5fb76d941254cf5d19751faab7d33b21e5ddc0"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1dc3eabd8c0232ee8387fbe03e0a62220a6f089e278b1f0aaf5e2d6210741ad"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:40ad017c672c00b9b663fcfcd5f0864a0a97828e2ee7ab0c140dc84058d194cf"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e42203d8d20dc704604862977b1470a122e4892791fe3ed165f041e4bf447a1b"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-win32.whl", hash = "sha256:2a4f4da89c74435f2bc61878cd08f3646b699e7d2eba97144030d1be44e27584"}, + {file = "SQLAlchemy-2.0.30-cp37-cp37m-win_amd64.whl", hash = "sha256:b6bf767d14b77f6a18b6982cbbf29d71bede087edae495d11ab358280f304d8e"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc0c53579650a891f9b83fa3cecd4e00218e071d0ba00c4890f5be0c34887ed3"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:311710f9a2ee235f1403537b10c7687214bb1f2b9ebb52702c5aa4a77f0b3af7"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:408f8b0e2c04677e9c93f40eef3ab22f550fecb3011b187f66a096395ff3d9fd"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37a4b4fb0dd4d2669070fb05b8b8824afd0af57587393015baee1cf9890242d9"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a943d297126c9230719c27fcbbeab57ecd5d15b0bd6bfd26e91bfcfe64220621"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0a089e218654e740a41388893e090d2e2c22c29028c9d1353feb38638820bbeb"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-win32.whl", hash = 
"sha256:fa561138a64f949f3e889eb9ab8c58e1504ab351d6cf55259dc4c248eaa19da6"}, + {file = "SQLAlchemy-2.0.30-cp38-cp38-win_amd64.whl", hash = "sha256:7d74336c65705b986d12a7e337ba27ab2b9d819993851b140efdf029248e818e"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8c62fe2480dd61c532ccafdbce9b29dacc126fe8be0d9a927ca3e699b9491a"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2383146973a15435e4717f94c7509982770e3e54974c71f76500a0136f22810b"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8409de825f2c3b62ab15788635ccaec0c881c3f12a8af2b12ae4910a0a9aeef6"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0094c5dc698a5f78d3d1539853e8ecec02516b62b8223c970c86d44e7a80f6c7"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:edc16a50f5e1b7a06a2dcc1f2205b0b961074c123ed17ebda726f376a5ab0953"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f7703c2010355dd28f53deb644a05fc30f796bd8598b43f0ba678878780b6e4c"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-win32.whl", hash = "sha256:1f9a727312ff6ad5248a4367358e2cf7e625e98b1028b1d7ab7b806b7d757513"}, + {file = "SQLAlchemy-2.0.30-cp39-cp39-win_amd64.whl", hash = "sha256:a0ef36b28534f2a5771191be6edb44cc2673c7b2edf6deac6562400288664221"}, + {file = "SQLAlchemy-2.0.30-py3-none-any.whl", hash = "sha256:7108d569d3990c71e26a42f60474b4c02c8586c4681af5fd67e51a044fdea86a"}, + {file = "SQLAlchemy-2.0.30.tar.gz", hash = "sha256:2b1708916730f4830bc69d6f49d37f7698b5bd7530aca7f04f785f8849e95255"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + [[package]] name = "syrupy" version = "4.6.1" @@ -1249,6 +1590,21 @@ files = [ {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, ] +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
+optional = false
+python-versions = "*"
+files = [
+    {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
+    {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=0.3.0"
+typing-extensions = ">=3.7.4"
+
 [[package]]
 name = "urllib3"
 version = "2.2.1"
@@ -1413,4 +1769,4 @@ multidict = ">=4.0"

 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "540028a924d26a41cf40e1109b2540a642dc668618f338f6bb35d1b40bcaeed4"
+content-hash = "e9a538766aca94f7cf2fa991936319857ac32d78e79b0b815f691b81476a27a6"
diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml
index ba34df24..89c36ab1 100644
--- a/libs/ai-endpoints/pyproject.toml
+++ b/libs/ai-endpoints/pyproject.toml
@@ -15,6 +15,7 @@ python = ">=3.8.1,<4.0"
 langchain-core = ">=0.1.27,<0.3"
 aiohttp = "^3.9.1"
 pillow = ">=10.0.0,<11.0.0"
+langchain-community = "^0.2.5"

 [tool.poetry.group.test]
 optional = true
diff --git a/libs/ai-endpoints/tests/integration_tests/test_vlm_models.py b/libs/ai-endpoints/tests/integration_tests/test_vlm_models.py
index c181e175..260073c6 100644
--- a/libs/ai-endpoints/tests/integration_tests/test_vlm_models.py
+++ b/libs/ai-endpoints/tests/integration_tests/test_vlm_models.py
@@ -19,7 +19,11 @@
 # - openai api supports server-side image download, api catalog does not
 # - ChatNVIDIA does client side download to simulate the same behavior
 # - ChatNVIDIA will automatically read local files and convert them to base64
+# - openai api uses {"image_url": {"url": "..."}}
+#   where api catalog uses {"image_url": "..."}
 #
+
+
 @pytest.mark.parametrize(
     "content",
     [
@@ -54,3 +58,6 @@ def test_vlm_model(
     response = chat.invoke([HumanMessage(content=content)])
     assert isinstance(response, BaseMessage)
     assert isinstance(response.content, str)
+
+    for token in chat.stream([HumanMessage(content=content)]):
+        assert isinstance(token.content, str)
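
The next patch replaces ChatNVIDIA's hand-rolled message preprocessing with the
OpenAI adapter that the langchain-community dependency above pulls in. For
illustration only (this sketch is not part of the patch series), the adapter
maps LangChain messages to OpenAI-style dicts:

    from langchain_community.adapters.openai import convert_message_to_dict
    from langchain_core.messages import AIMessage, HumanMessage

    convert_message_to_dict(HumanMessage(content="What is 11 xxyyzz 3?"))
    # -> {"role": "user", "content": "What is 11 xxyyzz 3?"}
    convert_message_to_dict(AIMessage(content="14"))
    # -> {"role": "assistant", "content": "14"}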
From a549a400d286e272b8eab15eed68a53034e3b3d5 Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Sat, 6 Jul 2024 20:33:10 -0400
Subject: [PATCH 38/60] add tool calling implementation (invoke; no streaming)

---
 .../langchain_nvidia_ai_endpoints/_common.py  |   8 +-
 .../chat_models.py                            | 220 ++++++++++++------
 2 files changed, 162 insertions(+), 66 deletions(-)

diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
index dd65ad6f..2a9e673b 100644
--- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
+++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_common.py
@@ -368,13 +368,17 @@ def _aggregate_msgs(self, msg_list: Sequence[dict]) -> Tuple[dict, bool]:
         content_buffer: Dict[str, Any] = dict()
         content_holder: Dict[Any, Any] = dict()
         usage_holder: Dict[Any, Any] = dict()  ####
+        finish_reason_holder: Optional[str] = None
         is_stopped = False
         for msg in msg_list:
             usage_holder = msg.get("usage", {})  ####
             if "choices" in msg:
                 ## Tease out ['choices'][0]...['delta'/'message']
                 msg = msg.get("choices", [{}])[0]
-                is_stopped = msg.get("finish_reason", "") == "stop"
+                # todo: this needs to be fixed, the fact we only
+                #       use the first choice breaks the interface
+                finish_reason_holder = msg.get("finish_reason", None)
+                is_stopped = finish_reason_holder == "stop"
                 msg = msg.get("delta", msg.get("message", msg.get("text", "")))
                 if not isinstance(msg, dict):
                     msg = {"content": msg}
@@ -392,6 +396,8 @@ def _aggregate_msgs(self, msg_list: Sequence[dict]) -> Tuple[dict, bool]:
         content_holder = {**content_holder, **content_buffer}
         if usage_holder:
             content_holder.update(token_usage=usage_holder)  ####
+        if finish_reason_holder:
+            content_holder.update(finish_reason=finish_reason_holder)
         return content_holder, is_stopped

     ####################################################################################

diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py
index 09400aa3..9c13c924 100644
--- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py
+++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py
@@ -8,6 +8,7 @@
 import os
 import sys
 import urllib.parse
+import warnings
 from typing import (
     Any,
     Callable,
@@ -15,7 +16,6 @@
     Iterator,
     List,
     Literal,
-    Mapping,
     Optional,
     Sequence,
     Type,
@@ -23,12 +23,14 @@
 )

 import requests
+from langchain_community.adapters.openai import convert_message_to_dict
 from langchain_core.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import BaseChatModel, LanguageModelInput
 from langchain_core.messages import (
+    AIMessage,
     BaseMessage,
     ChatMessage,
     ChatMessageChunk,
@@ -41,6 +43,7 @@
 from langchain_core.pydantic_v1 import BaseModel, Field, PrivateAttr
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
+from langchain_core.utils.function_calling import convert_to_openai_tool

 from langchain_nvidia_ai_endpoints._common import _NVIDIAClient
 from langchain_nvidia_ai_endpoints._statics import Model
@@ -116,6 +119,53 @@ def _url_to_b64_string(image_source: str) -> str:
         raise ValueError(f"Unable to process the provided image source: {e}")


+def _nv_vlm_adjust_input(message_dict: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    The NVIDIA VLM API input message.content:
+        {
+            "role": "user",
+            "content": [
+                ...,
+                {
+                    "type": "image_url",
+                    "image_url": "{data}"
+                },
+                ...
+            ]
+        }
+    where OpenAI VLM API input message.content:
+        {
+            "role": "user",
+            "content": [
+                ...,
+                {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": "{url | data}"
+                    }
+                },
+                ...
+            ]
+        }
+
+    This function converts the OpenAI VLM API input message to
+    NVIDIA VLM API input message, in place.
+
+    In the process, it accepts a url or file and converts them to
+    data urls.
+    """
+    if content := message_dict.get("content"):
+        if isinstance(content, list):
+            for part in content:
+                if isinstance(part, dict) and "image_url" in part:
+                    if (
+                        isinstance(part["image_url"], dict)
+                        and "url" in part["image_url"]
+                    ):
+                        part["image_url"] = _url_to_b64_string(part["image_url"]["url"])
+    return message_dict
+
+
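A worked example of the conversion above, for illustration only (not part of
the patch itself); the url and resulting data url are placeholders:

    message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "describe this image"},
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        ],
    }
    _nv_vlm_adjust_input(message)
    # message["content"][1] is now, assuming the download succeeds,
    # {"type": "image_url", "image_url": "data:image/png;base64,..."}
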
 class ChatNVIDIA(BaseChatModel):
     """NVIDIA chat model.
@@ -209,12 +259,22 @@ def _generate(
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        inputs = self._custom_preprocess(messages)
+        inputs = [
+            _nv_vlm_adjust_input(message)
+            for message in [convert_message_to_dict(message) for message in messages]
+        ]
         payload = self._get_payload(inputs=inputs, stop=stop, stream=False, **kwargs)
         response = self._client.client.get_req(payload=payload)
         responses, _ = self._client.client.postprocess(response)
         self._set_callback_out(responses, run_manager)
-        message = ChatMessage(**self._custom_postprocess(responses))
+        parsed_response = self._custom_postprocess(responses)
+        # arguably we should always return an AIMessage, but to maintain
+        # API compatibility, we only return it for tool_calls. we can
+        # change this for an API breaking 1.0.
+        if "tool_calls" in parsed_response["additional_kwargs"]:
+            message: BaseMessage = AIMessage(**parsed_response)
+        else:
+            message = ChatMessage(**parsed_response)
         generation = ChatGeneration(message=message)
         return ChatResult(generations=[generation], llm_output=responses)

@@ -226,10 +286,14 @@ def _stream(
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         """Allows streaming to model!"""
-        inputs = self._custom_preprocess(messages)
+        inputs = [
+            _nv_vlm_adjust_input(message)
+            for message in [convert_message_to_dict(message) for message in messages]
+        ]
         payload = self._get_payload(inputs=inputs, stop=stop, stream=True, **kwargs)
         for response in self._client.client.get_req_stream(payload=payload):
             self._set_callback_out(response, run_manager)
+            # todo: AIMessageChunk for tool_calls
             chunk = ChatGenerationChunk(
                 message=ChatMessageChunk(**self._custom_postprocess(response))
             )
@@ -248,54 +312,6 @@ def _set_callback_out(
             if hasattr(cb, "llm_output"):
                 cb.llm_output = result

-    def _custom_preprocess(  # todo: remove
-        self, msg_list: Sequence[BaseMessage]
-    ) -> List[Dict[str, str]]:
-        def _preprocess_msg(msg: BaseMessage) -> Dict[str, str]:
-            if isinstance(msg, BaseMessage):
-                role_convert = {"ai": "assistant", "human": "user"}
-                if isinstance(msg, ChatMessage):
-                    role = msg.role
-                else:
-                    role = msg.type
-                role = role_convert.get(role, role)
-                content = self._process_content(msg.content)
-                return {"role": role, "content": content}
-            raise ValueError(f"Invalid message: {repr(msg)} of type {type(msg)}")
-
-        return [_preprocess_msg(m) for m in msg_list]
-
-    def _process_content(self, content: Union[str, List[Union[dict, str]]]) -> str:
-        if isinstance(content, str):
-            return content
-        string_array: list = []
-
-        for part in content:
-            if isinstance(part, str):
-                string_array.append(part)
-            elif isinstance(part, Mapping):
-                # OpenAI Format
-                if "type" in part:
-                    if part["type"] == "text":
-                        string_array.append(str(part["text"]))
-                    elif part["type"] == "image_url":
-                        img_url = part["image_url"]
-                        if isinstance(img_url, dict):
-                            if "url" not in img_url:
-                                raise ValueError(
-                                    f"Unrecognized message image format: {img_url}"
-                                )
-                            img_url = img_url["url"]
-                        b64_string = _url_to_b64_string(img_url)
-                        string_array.append(f'<img src="{b64_string}" />')
-                    else:
-                        raise ValueError(
-                            f"Unrecognized message part type: {part['type']}"
-                        )
-                else:
-                    raise ValueError(f"Unrecognized message part format: {part}")
-        return "".join(string_array)
-
     def _custom_postprocess(self, msg: dict) -> dict:  # todo: remove
         kw_left = msg.copy()
         out_dict = {
@@ -306,9 +322,8 @@ def _custom_postprocess(self, msg: dict) -> dict:  # todo: remove
             "additional_kwargs": {},
             "response_metadata": {},
         }
-        for k in list(kw_left.keys()):
-            if "tool" in k:
-                out_dict["additional_kwargs"][k] = kw_left.pop(k)
+        if tool_calls := kw_left.pop("tool_calls", None):
+            out_dict["additional_kwargs"]["tool_calls"] = tool_calls
         out_dict["response_metadata"] = kw_left
         return out_dict

@@ -365,13 +380,92 @@ def bind_tools(
         self,
         tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
         *,
-        tool_choice: Optional[Union[dict, str, Literal["auto", "none"], bool]] = None,
+        tool_choice: Optional[
+            Union[dict, str, Literal["auto", "none", "any", "required"], bool]
+        ] = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
-        raise NotImplementedError(
-            "Not implemented, awaiting server-side function-recieving API"
-            " Consider following open-source LLM agent spec techniques:"
-            " https://huggingface.co/blog/open-source-llms-as-agents"
+        """
+        Bind tools to the model.
+
+        Args:
+            tools (list): A list of tools to bind to the model.
+            tool_choice (Optional[Union[dict,
+                                        str,
+                                        Literal["auto", "none", "any", "required"],
+                                        bool]]):
+               Control tool choice.
+                 "any" and "required" - force a tool call.
+                 "auto" - let the model decide.
+                 "none" - force no tool call.
+                 string or dict - force a specific tool call.
+                 bool - if True, force a tool call; if False, force no tool call.
+               Defaults to passing no value.
+            **kwargs: Additional keyword arguments.
+
+        see https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling/#request-forcing-a-tool-call
+        """
+        # check if the model supports tools, warn if it does not
+        known_good = False
+        # todo: we need to store model: Model in this class
+        #       instead of model: str (= Model.id)
+        #       this should be: if not self.model.supports_tools: warnings.warn...
+        candidates = [
+            model for model in self.available_models if model.id == self.model
+        ]
+        if not candidates:  # user must have specified the model themselves
+            known_good = False
+        else:
+            assert len(candidates) == 1, "Multiple models with the same id"
+            known_good = candidates[0].supports_tools is True
+        if not known_good:
+            warnings.warn(
+                f"Model '{self.model}' is not known to support tools. "
+                "Your tool binding may fail at inference time."
+            )
+
+        tool_name = None
+        if isinstance(tool_choice, bool):
+            tool_choice = "required" if tool_choice else "none"
+        elif isinstance(tool_choice, str):
+            # LangChain documents "any" as an option, server API uses "required"
+            if tool_choice == "any":
+                tool_choice = "required"
+            # if a string that's not "auto", "none", or "required"
+            # then it must be a tool name
+            if tool_choice not in ["auto", "none", "required"]:
+                tool_name = tool_choice
+                tool_choice = {
+                    "type": "function",
+                    "function": {"name": tool_choice},
+                }
+        elif isinstance(tool_choice, dict):
+            # if a dict, it must be a tool choice dict, e.g.
+ # {"type": "function", "function": {"name": "my_tool"}} + if "type" not in tool_choice: + tool_choice["type"] = "function" + if "function" not in tool_choice: + raise ValueError("Tool choice dict must have a 'function' key") + if "name" not in tool_choice["function"]: + raise ValueError("Tool choice function dict must have a 'name' key") + tool_name = tool_choice["function"]["name"] + + # check that the specified tool is in the tools list + if tool_name: + if not any( + isinstance(tool, BaseTool) and tool.name == tool_name for tool in tools + ) and not any( + isinstance(tool, dict) and tool.get("name") == tool_name + for tool in tools + ): + raise ValueError( + f"Tool choice '{tool_name}' not found in the tools list" + ) + + return super().bind( + tools=[convert_to_openai_tool(tool) for tool in tools], + tool_choice=tool_choice, + **kwargs, ) def bind_functions( @@ -380,11 +474,7 @@ def bind_functions( function_call: Optional[str] = None, **kwargs: Any, ) -> Runnable[LanguageModelInput, BaseMessage]: - raise NotImplementedError( - "Not implemented, awaiting server-side function-recieving API" - " Consider following open-source LLM agent spec techniques:" - " https://huggingface.co/blog/open-source-llms-as-agents" - ) + raise NotImplementedError("Not implemented, use `bind_tools` instead.") def with_structured_output( self, From 2b9779e813aa3362e643a4cceea4feebdac095bf Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Wed, 10 Jul 2024 08:38:38 -0400 Subject: [PATCH 39/60] add tests for tool calling (streaming) --- .../integration_tests/test_bind_tools.py | 310 +++++++++++++++--- 1 file changed, 271 insertions(+), 39 deletions(-) diff --git a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py index 83dff6b0..15c47832 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py +++ b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py @@ -1,9 +1,15 @@ import json import warnings -from typing import Any, List, Literal, Optional, Union +from typing import Any, Callable, List, Literal, Optional, Union import pytest -from langchain_core.messages import AIMessage, ChatMessage +from langchain_core.messages import ( + AIMessage, + AIMessageChunk, + BaseMessage, + BaseMessageChunk, + ChatMessage, +) from langchain_core.pydantic_v1 import Field from langchain_core.tools import tool @@ -34,14 +40,18 @@ # test types: # 17. deterministic (minimial accuracy tests; relies on basic tool calling skills) # 18. accuracy (proper tool; proper arguments) -# negative tests: +# edge/negative tests: # 19. require unknown named tool (invoke/stream only) # 20. partial function (invoke/stream only) +# 21. not enough tokens to generate tool call +# 22. tool with no arguments +# 23. duplicate tool names +# 24. 

From 2b9779e813aa3362e643a4cceea4feebdac095bf Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Wed, 10 Jul 2024 08:38:38 -0400
Subject: [PATCH 39/60] add tests for tool calling (streaming)

---
 .../integration_tests/test_bind_tools.py      | 310 +++++++++++++++---
 1 file changed, 271 insertions(+), 39 deletions(-)

diff --git a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py
index 83dff6b0..15c47832 100644
--- a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py
+++ b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py
@@ -1,9 +1,15 @@
 import json
 import warnings
-from typing import Any, List, Literal, Optional, Union
+from typing import Any, Callable, List, Literal, Optional, Union

 import pytest
-from langchain_core.messages import AIMessage, ChatMessage
+from langchain_core.messages import (
+    AIMessage,
+    AIMessageChunk,
+    BaseMessage,
+    BaseMessageChunk,
+    ChatMessage,
+)
 from langchain_core.pydantic_v1 import Field
 from langchain_core.tools import tool

@@ -34,14 +40,18 @@
 # test types:
 #  17. deterministic (minimal accuracy tests; relies on basic tool calling skills)
 #  18. accuracy (proper tool; proper arguments)
-# negative tests:
+# edge/negative tests:
 #  19. require unknown named tool (invoke/stream only)
 #  20. partial function (invoke/stream only)
+#  21. not enough tokens to generate tool call
+#  22. tool with no arguments
+#  23. duplicate tool names
+#  24. unknown tool (invoke/stream only)
 #
-# todo: streaming
-# todo: test tool with no arguments
 # todo: async methods
 # todo: parallel_tool_calls
+# todo: too many tools


 @tool
 def xxyyzz(
     a: int = Field(..., description="First number"),
     b: int = Field(..., description="Second number"),
 ) -> int:
     """xxyyzz two numbers"""
     return (a**b) % (a - b)


 @tool
 def zzyyxx(
     a: int = Field(..., description="First number"),
     b: int = Field(..., description="Second number"),
 ) -> int:
     """zzyyxx two numbers"""
     return (b**a) % (a - b)


+@tool
+def tool_no_args() -> str:
+    """8-ball"""
+    return "lookin' good"
+
+
+def eval_stream(llm: ChatNVIDIA, msg: str, tool_choice: Any = None) -> BaseMessageChunk:
+    if tool_choice:
+        generator = llm.stream(msg, tool_choice=tool_choice)  # type: ignore
+    else:
+        generator = llm.stream(msg)
+    response = next(generator)
+    for chunk in generator:
+        assert isinstance(chunk, AIMessageChunk)
+        response += chunk
+    return response
+
+
+def eval_invoke(llm: ChatNVIDIA, msg: str, tool_choice: Any = None) -> BaseMessage:
+    if tool_choice:
+        return llm.invoke(msg, tool_choice=tool_choice)  # type: ignore
+    else:
+        return llm.invoke(msg)
+
+
 def check_response_structure(response: AIMessage) -> None:
     assert not response.content  # should be `response.content is None` but
     # AIMessage.content: Union[str, List[Union[str, Dict]]] cannot be None.
@@ -77,36 +112,49 @@ def check_response_structure(response: AIMessage) -> None:
     assert len(response.tool_calls) > 0


-# users can also get at the tool calls from the response.additional_kwargs
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
 @pytest.mark.xfail(reason="Accuracy test")
-def test_accuracy_default_invoke_additional_kwargs(tool_model: str, mode: dict) -> None:
+def test_accuracy_extra(tool_model: str, mode: dict, func: Callable) -> None:
     llm = ChatNVIDIA(temperature=0, model=tool_model, **mode).bind_tools([xxyyzz])
-    response = llm.invoke("What is 11 xxyyzz 3?")
+    response = func(llm, "What is 11 xxyyzz 3?")
     assert not response.content  # should be `response.content is None` but
     # AIMessage.content: Union[str, List[Union[str, Dict]]] cannot be None.
-    assert response.additional_kwargs is not None
-    assert "tool_calls" in response.additional_kwargs
-    assert isinstance(response.additional_kwargs["tool_calls"], list)
-    assert response.additional_kwargs["tool_calls"]
-    for tool_call in response.additional_kwargs["tool_calls"]:
-        assert "id" in tool_call
-        assert tool_call["id"] is not None
-        assert "type" in tool_call
-        assert tool_call["type"] == "function"
-        assert "function" in tool_call
+    # todo: this is not good, should not care about the param
+    if func == eval_invoke:
+        assert isinstance(response, AIMessage)
+        assert "tool_calls" in response.additional_kwargs
+        assert isinstance(response.additional_kwargs["tool_calls"], list)
+        assert response.additional_kwargs["tool_calls"]
+        assert response.tool_calls
+        for tool_call in response.additional_kwargs["tool_calls"]:
+            assert "id" in tool_call
+            assert tool_call["id"] is not None
+            assert "type" in tool_call
+            assert tool_call["type"] == "function"
+            assert "function" in tool_call
+        assert len(response.additional_kwargs["tool_calls"]) > 0
+        tool_call = response.additional_kwargs["tool_calls"][0]
+        assert tool_call["function"]["name"] == "xxyyzz"
+        assert json.loads(tool_call["function"]["arguments"]) == {"a": 11, "b": 3}
+    else:
+        assert isinstance(response, AIMessageChunk)
+        assert response.tool_call_chunks
     assert response.response_metadata is not None
     assert isinstance(response.response_metadata, dict)
-    assert "content" in response.response_metadata
-    assert response.response_metadata["content"] is None
+    if "content" in response.response_metadata:
+        assert response.response_metadata["content"] is None
+    assert "model_name" in response.response_metadata
+    assert response.response_metadata["model_name"] == tool_model
     assert "finish_reason" in response.response_metadata
     assert response.response_metadata["finish_reason"] in [
         "tool_calls",
         "stop",
     ]  # todo: remove "stop"
-    assert len(response.additional_kwargs["tool_calls"]) > 0
-    tool_call = response.additional_kwargs["tool_calls"][0]
-    assert tool_call["function"]["name"] == "xxyyzz"
-    assert json.loads(tool_call["function"]["arguments"]) == {"a": 11, "b": 3}


 @pytest.mark.parametrize(
@@ -119,12 +167,17 @@
     ],
     ids=["none", "required", "partial", "function"],
 )
-def test_invoke_tool_choice_with_no_tool(
-    tool_model: str, mode: dict, tool_choice: Any
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice_with_no_tool(
+    tool_model: str, mode: dict, tool_choice: Any, func: Callable
 ) -> None:
     llm = ChatNVIDIA(model=tool_model, **mode)
     with pytest.raises(Exception) as e:
-        llm.invoke("What is 11 xxyyzz 3?", tool_choice=tool_choice)
+        func(llm, "What is 11 xxyyzz 3?", tool_choice=tool_choice)
     assert "400" in str(e.value) or "###" in str(
         e.value
     )  # todo: stop transforming 400 -> ###
@@ -139,10 +192,14 @@
     )


-def test_invoke_tool_choice_none(tool_model: str, mode: dict) -> None:
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice_none(tool_model: str, mode: dict, func: Callable) -> None:
     llm = ChatNVIDIA(model=tool_model, **mode).bind_tools(tools=[xxyyzz])
-    response = llm.invoke("What is 11 xxyyzz 3?", tool_choice="none")  # type: ignore
-    assert isinstance(response, ChatMessage)
+    response = func(llm, "What is 11 xxyyzz 3?", tool_choice="none")
     assert "tool_calls" not in response.additional_kwargs

@@ -153,20 +210,173 @@ def test_invoke_tool_choice_none(tool_model: str, mode: dict) -> None:
     ],
     ids=["partial"],
 )
-def test_invoke_tool_choice_negative(
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice_negative(
     tool_model: str,
     mode: dict,
     tool_choice: Optional[
         Union[dict, str, Literal["auto", "none", "any", "required"], bool]
     ],
+    func: Callable,
 ) -> None:
     llm = ChatNVIDIA(model=tool_model, **mode).bind_tools([xxyyzz])
     with pytest.raises(Exception) as e:
-        llm.invoke("What is 11 xxyyzz 3?", tool_choice=tool_choice)  # type: ignore
+        func(llm, "What is 11 xxyyzz 3?", tool_choice=tool_choice)
+    assert "400" in str(e.value) or "###" in str(
+        e.value
+    )  # todo: stop transforming 400 -> ###
+    assert (
+        "invalid_request_error" in str(e.value)
+        or "value_error" in str(e.value)
+        or "Incorrectly formatted `tool_choice`" in str(e.value)
+    )
+
+
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice_negative_max_tokens_required(
+    tool_model: str,
+    mode: dict,
+    func: Callable,
+) -> None:
+    llm = ChatNVIDIA(max_tokens=5, model=tool_model, **mode).bind_tools([xxyyzz])
+    with pytest.raises(Exception) as e:
+        func(llm, "What is 11 xxyyzz 3?", tool_choice="required")
     assert "400" in str(e.value) or "###" in str(
         e.value
     )  # todo: stop transforming 400 -> ###
-    assert "invalid_request_error" in str(e.value) or "value_error" in str(e.value)
+    assert "invalid_request_error" in str(e.value)
+    assert (
+        "Could not finish the message because max_tokens was reached. "
+        "Please try again with higher max_tokens."
+    ) in str(e.value)
+
+
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice_negative_max_tokens_function(
+    tool_model: str,
+    mode: dict,
+    func: Callable,
+) -> None:
+    llm = ChatNVIDIA(max_tokens=5, model=tool_model, **mode).bind_tools([xxyyzz])
+    response = func(
+        llm,
+        "What is 11 xxyyzz 3?",
+        tool_choice={"type": "function", "function": {"name": "xxyyzz"}},
+    )
+    # todo: this is not good, should not care about the param
+    if func == eval_invoke:
+        assert isinstance(response, AIMessage)
+        assert "tool_calls" in response.additional_kwargs
+        assert response.invalid_tool_calls
+    else:
+        assert isinstance(response, AIMessageChunk)
+        assert response.tool_call_chunks
+    assert "finish_reason" in response.response_metadata
+    assert response.response_metadata["finish_reason"] == "length"
+
+
+@pytest.mark.parametrize(
+    "tool_choice",
+    [
+        "required",
+        {"type": "function", "function": {"name": "tool_no_args"}},
+    ],
+    ids=["required", "function"],
+)
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice_negative_no_args(
+    tool_model: str,
+    mode: dict,
+    tool_choice: Optional[
+        Union[dict, str, Literal["auto", "none", "any", "required"], bool]
+    ],
+    func: Callable,
+) -> None:
+    llm = ChatNVIDIA(model=tool_model, **mode).bind_tools([tool_no_args])
+    response = func(llm, "What does the 8-ball say?", tool_choice=tool_choice)
+    # todo: this is not good, should not care about the param
+    if func == eval_invoke:
+        assert isinstance(response, AIMessage)
+        assert response.tool_calls
+    else:
+        assert isinstance(response, AIMessageChunk)
+        assert response.tool_call_chunks
+    # assert "tool_calls" in response.additional_kwargs
+
+
+@pytest.mark.parametrize(
+    "tool_choice",
+    [
+        "required",
+        {"type": "function", "function": {"name": "tool_no_args"}},
+    ],
+    ids=["required", "function"],
+)
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+@pytest.mark.xfail(reason="Accuracy test")
+def test_accuracy_tool_choice_negative_no_args(
+    tool_model: str,
+    mode: dict,
+    tool_choice: Optional[
+        Union[dict, str, Literal["auto", "none", "any", "required"], bool]
+    ],
+    func: Callable,
+) -> None:
+    llm = ChatNVIDIA(model=tool_model, **mode).bind_tools([tool_no_args])
+    response = func(llm, "What does the 8-ball say?", tool_choice=tool_choice)
+    assert isinstance(response, AIMessage)
+    # assert "tool_calls" in response.additional_kwargs
+    assert response.tool_calls
+    assert response.tool_calls[0]["name"] == "tool_no_args"
+    assert response.tool_calls[0]["args"] == {}
+
+
+@pytest.mark.parametrize(
+    "tool_choice",
+    [
+        "required",
+        {"type": "function", "function": {"name": "xxyyzz"}},
+    ],
+    ids=["required", "function"],
+)
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice_negative_duplicate_tool(
+    tool_model: str,
+    mode: dict,
+    tool_choice: Optional[
+        Union[dict, str, Literal["auto", "none", "any", "required"], bool]
+    ],
+    func: Callable,
+) -> None:
+    llm = ChatNVIDIA(model=tool_model, **mode).bind_tools([xxyyzz, xxyyzz])
+    response = func(llm, "What is 11 xxyyzz 3?", tool_choice=tool_choice)
+    assert isinstance(response, AIMessage)
+    assert response.tool_calls
+    # assert "tool_calls" in response.additional_kwargs


 @pytest.mark.parametrize(
@@ -177,15 +387,21 @@
     ],
     ids=["required", "function"],
 )
-def test_invoke_tool_choice(
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice(
     tool_model: str,
     mode: dict,
     tool_choice: Optional[
         Union[dict, str, Literal["auto", "none", "any", "required"], bool]
    ],
+    func: Callable,
 ) -> None:
     llm = ChatNVIDIA(model=tool_model, **mode).bind_tools([xxyyzz])
-    response = llm.invoke("What is 11 xxyyzz 3?", tool_choice=tool_choice)  # type: ignore
+    response = func(llm, "What is 11 xxyyzz 3?", tool_choice=tool_choice)
     assert isinstance(response, AIMessage)
     check_response_structure(response)

@@ -205,15 +421,21 @@
     [[xxyyzz], [xxyyzz, zzyyxx], [zzyyxx, xxyyzz]],
     ids=["xxyyzz", "xxyyzz_and_zzyyxx", "zzyyxx_and_xxyyzz"],
 )
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
 @pytest.mark.xfail(reason="Accuracy test")
-def test_accuracy_invoke_tool_choice(
+def test_accuracy_tool_choice(
     tool_model: str,
     mode: dict,
     tools: List,
     tool_choice: Any,
+    func: Callable,
 ) -> None:
     llm = ChatNVIDIA(temperature=0, model=tool_model, **mode).bind_tools(tools)
-    response = llm.invoke("What is 11 xxyyzz 3?", tool_choice=tool_choice)  # type: ignore
+    response = func(llm, "What is 11 xxyyzz 3?", tool_choice=tool_choice)
     assert isinstance(response, AIMessage)
     check_response_structure(response)
     tool_call = response.tool_calls[0]
@@ -221,13 +443,23 @@
     assert tool_call["args"] == {"b": 3, "a": 11}


-def test_invoke_tool_choice_with_unknown_tool(tool_model: str, mode: dict) -> None:
+@pytest.mark.parametrize(
+    "func",
+    [eval_invoke, eval_stream],
+    ids=["invoke", "stream"],
+)
+def test_tool_choice_negative_unknown_tool(
+    tool_model: str,
+    mode: dict,
+    func: Callable,
+) -> None:
     llm = ChatNVIDIA(model=tool_model, **mode).bind_tools(tools=[xxyyzz])
     with pytest.raises(Exception) as e:
-        llm.invoke(
+        func(
+            llm,
             "What is 11 xxyyzz 3?",
             tool_choice={"type": "function", "function": {"name": "zzyyxx"}},
-        )  # type: ignore
+        )
     assert (
         "not found in the tools list" in str(e.value)
         or "no function named" in str(e.value)
     )
with pytest.raises(Exception) as e: - llm.invoke( + func( + llm, "What is 11 xxyyzz 3?", tool_choice={"type": "function", "function": {"name": "zzyyxx"}}, - ) # type: ignore + ) assert ( "not found in the tools list" in str(e.value) or "no function named" in str(e.value) From d05acdc939871be54e96f95f4d43e944d910de71 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Wed, 10 Jul 2024 08:39:18 -0400 Subject: [PATCH 40/60] add tool calling implementation (streaming) --- .../langchain_nvidia_ai_endpoints/_statics.py | 16 +++++ .../chat_models.py | 61 +++++++++++++++---- 2 files changed, 65 insertions(+), 12 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index d5649623..e3deb95b 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -1,3 +1,4 @@ +import os import warnings from typing import Literal, Optional @@ -425,6 +426,18 @@ def validate_client(cls, client: str, values: dict) -> str: # ), # } + +OPENAI_MODEL_TABLE = { + "gpt-3.5-turbo": Model( + id="gpt-3.5-turbo", + model_type="chat", + client="ChatNVIDIA", + endpoint="https://api.openai.com/v1/chat/completions", + supports_tools=True, + ), +} + + MODEL_TABLE = { **CHAT_MODEL_TABLE, **QA_MODEL_TABLE, @@ -433,6 +446,9 @@ def validate_client(cls, client: str, values: dict) -> str: **RANKING_MODEL_TABLE, } +if "_INCLUDE_OPENAI" in os.environ: + MODEL_TABLE.update(OPENAI_MODEL_TABLE) + def register_model(model: Model) -> None: """ diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 9c13c924..af2138f3 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -31,9 +31,9 @@ from langchain_core.language_models import BaseChatModel, LanguageModelInput from langchain_core.messages import ( AIMessage, + AIMessageChunk, BaseMessage, ChatMessage, - ChatMessageChunk, ) from langchain_core.outputs import ( ChatGeneration, @@ -267,10 +267,10 @@ def _generate( response = self._client.client.get_req(payload=payload) responses, _ = self._client.client.postprocess(response) self._set_callback_out(responses, run_manager) - parsed_response = self._custom_postprocess(responses) - # arguably we should always return an AIMessage, but to maintain - # API compatibility, we only return it for tool_calls. we can - # change this for an API breaking 1.0. + parsed_response = self._custom_postprocess(responses, streaming=False) + # todo: we should always return an AIMessage, but to maintain + # API compatibility, we only return it for tool_calls. we can + # change this for an API breaking 1.0. if "tool_calls" in parsed_response["additional_kwargs"]: message: BaseMessage = AIMessage(**parsed_response) else: @@ -293,10 +293,16 @@ def _stream( payload = self._get_payload(inputs=inputs, stop=stop, stream=True, **kwargs) for response in self._client.client.get_req_stream(payload=payload): self._set_callback_out(response, run_manager) - # todo: AIMessageChunk for tool_calls - chunk = ChatGenerationChunk( - message=ChatMessageChunk(**self._custom_postprocess(response)) - ) + parsed_response = self._custom_postprocess(response, streaming=True) + # todo: we should always return an AIMessage, but to maintain + # API compatibility, we only return it for tool_calls. we can + # change this for an API breaking 1.0. 
+ # if "tool_calls" in parsed_response["additional_kwargs"]: + # message: BaseMessageChunk = AIMessageChunk(**parsed_response) + # else: + # message = ChatMessageChunk(**parsed_response) + message = AIMessageChunk(**parsed_response) + chunk = ChatGenerationChunk(message=message) if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk) yield chunk @@ -312,7 +318,9 @@ def _set_callback_out( if hasattr(cb, "llm_output"): cb.llm_output = result - def _custom_postprocess(self, msg: dict) -> dict: # todo: remove + def _custom_postprocess( + self, msg: dict, streaming: bool = False + ) -> dict: # todo: remove kw_left = msg.copy() out_dict = { "role": kw_left.pop("role", "assistant") or "assistant", @@ -322,9 +330,38 @@ def _custom_postprocess(self, msg: dict) -> dict: # todo: remove "additional_kwargs": {}, "response_metadata": {}, } + # "tool_calls" is set for invoke and stream responses if tool_calls := kw_left.pop("tool_calls", None): - out_dict["additional_kwargs"]["tool_calls"] = tool_calls - out_dict["response_metadata"] = kw_left + assert isinstance( + tool_calls, list + ), "invalid response from server: tool_calls must be a list" + # todo: break this into post-processing for invoke and stream + if not streaming: + out_dict["additional_kwargs"]["tool_calls"] = tool_calls + elif streaming: + out_dict["tool_call_chunks"] = [] + for tool_call in tool_calls: + assert "index" in tool_call, ( + "invalid response from server: " + "tool_call must have an 'index' key" + ) + assert "function" in tool_call, ( + "invalid response from server: " + "tool_call must have a 'function' key" + ) + out_dict["tool_call_chunks"].append( + { + "index": tool_call.get("index", None), + "id": tool_call.get("id", None), + "name": tool_call["function"].get("name", None), + "args": tool_call["function"].get("arguments", None), + } + ) + # we only create the response_metadata from the last message in a stream. + # if we do it for all messages, we'll end up with things like + # "model_name" = "mode-xyz" * # messages. + if "finish_reason" in kw_left: + out_dict["response_metadata"] = kw_left return out_dict ###################################################################################### From 493020fe7cc432f7547af102d664f75b97b45be9 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Mon, 15 Jul 2024 14:29:03 -0400 Subject: [PATCH 41/60] add tests for parallel_tool_calls (invoke, stream) --- .../integration_tests/test_bind_tools.py | 139 +++++++++++++-- .../unit_tests/test_parallel_tool_calls.py | 160 ++++++++++++++++++ 2 files changed, 289 insertions(+), 10 deletions(-) create mode 100644 libs/ai-endpoints/tests/unit_tests/test_parallel_tool_calls.py diff --git a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py index 15c47832..1d64e576 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py +++ b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py @@ -47,10 +47,13 @@ # 22. tool with no arguments # 23. duplicate tool names # 24. unknown tool (invoke/stream only) -# +# ways to specify parallel_tool_calls: (accuracy only) +# 25. invoke +# 26. 
stream +# todo: parallel_tool_calls w/ bind_tools +# todo: parallel_tool_calls w/ tool_choice = function # todo: async methods -# todo: parallel_tool_calls # todo: too many tools @@ -78,11 +81,31 @@ def tool_no_args() -> str: return "lookin' good" -def eval_stream(llm: ChatNVIDIA, msg: str, tool_choice: Any = None) -> BaseMessageChunk: +@tool +def get_current_weather( + location: str = Field(..., description="The location to get the weather for"), + scale: Optional[str] = Field( + default="Fahrenheit", + description="The temperature scale (e.g., Celsius or Fahrenheit)", + ), +) -> str: + """Get the current weather for a location""" + return f"The current weather in {location} is sunny." + + +def eval_stream( + llm: ChatNVIDIA, + msg: str, + tool_choice: Any = None, + parallel_tool_calls: bool = False, +) -> BaseMessageChunk: + params = {} if tool_choice: - generator = llm.stream(msg, tool_choice=tool_choice) # type: ignore - else: - generator = llm.stream(msg) + params["tool_choice"] = tool_choice + if parallel_tool_calls: + params["parallel_tool_calls"] = True + + generator = llm.stream(msg, **params) # type: ignore response = next(generator) for chunk in generator: assert isinstance(chunk, AIMessageChunk) @@ -90,11 +113,19 @@ def eval_stream(llm: ChatNVIDIA, msg: str, tool_choice: Any = None) -> BaseMessa return response -def eval_invoke(llm: ChatNVIDIA, msg: str, tool_choice: Any = None) -> BaseMessage: +def eval_invoke( + llm: ChatNVIDIA, + msg: str, + tool_choice: Any = None, + parallel_tool_calls: bool = False, +) -> BaseMessage: + params = {} if tool_choice: - return llm.invoke(msg, tool_choice=tool_choice) # type: ignore - else: - return llm.invoke(msg) + params["tool_choice"] = tool_choice + if parallel_tool_calls: + params["parallel_tool_calls"] = True + + return llm.invoke(msg, **params) # type: ignore def check_response_structure(response: AIMessage) -> None: @@ -612,3 +643,91 @@ def test_unknown_warns(mode: dict) -> None: ChatNVIDIA(model="mock-model", **mode).bind_tools([xxyyzz]) assert len(record) == 1 assert "not known to support tools" in str(record[0].message) + + +@pytest.mark.parametrize( + "tool_choice", + [ + "auto", + None, + "required", + ], + ids=["auto", "absent", "required"], +) +@pytest.mark.parametrize( + "tools", + [[xxyyzz, zzyyxx], [zzyyxx, xxyyzz]], + ids=["xxyyzz_and_zzyyxx", "zzyyxx_and_xxyyzz"], +) +@pytest.mark.parametrize( + "func", + [eval_invoke, eval_stream], + ids=["invoke", "stream"], +) +@pytest.mark.xfail(reason="Accuracy test") +def test_accuracy_parallel_tool_calls_hard( + tool_model: str, + mode: dict, + tools: List, + tool_choice: Any, + func: Callable, +) -> None: + llm = ChatNVIDIA(seed=42, temperature=1, model=tool_model, **mode).bind_tools(tools) + response = func( + llm, + "What is 11 xxyyzz 3 zzyyxx 5?", + tool_choice=tool_choice, + parallel_tool_calls=True, + ) + assert isinstance(response, AIMessage) + check_response_structure(response) + assert len(response.tool_calls) == 2 + valid_tool_names = ["xxyyzz", "zzyyxx"] + tool_call0 = response.tool_calls[0] + assert tool_call0["name"] in valid_tool_names + valid_tool_names.remove(tool_call0["name"]) + tool_call1 = response.tool_calls[1] + assert tool_call1["name"] in valid_tool_names + + +@pytest.mark.parametrize( + "tool_choice", + [ + "auto", + None, + "required", + ], + ids=["auto", "absent", "required"], +) +@pytest.mark.parametrize( + "func", + [eval_invoke, eval_stream], + ids=["invoke", "stream"], +) +@pytest.mark.xfail(reason="Accuracy test") +def 
test_accuracy_parallel_tool_calls_easy( + tool_model: str, + mode: dict, + tool_choice: Any, + func: Callable, +) -> None: + llm = ChatNVIDIA(seed=42, temperature=1, model=tool_model, **mode).bind_tools( + tools=[get_current_weather], + ) + response = func( + llm, + "What is the weather in Boston, and what is the weather in Dublin?", + tool_choice=tool_choice, + parallel_tool_calls=True, + ) + assert isinstance(response, AIMessage) + check_response_structure(response) + assert len(response.tool_calls) == 2 + valid_args = ["Boston", "Dublin"] + tool_call0 = response.tool_calls[0] + assert tool_call0["name"] == "get_current_weather" + assert tool_call0["args"]["location"] in valid_args + valid_args.remove(tool_call0["args"]["location"]) + tool_call1 = response.tool_calls[1] + assert tool_call1["name"] == "get_current_weather" + assert tool_call1["args"]["location"] in valid_args diff --git a/libs/ai-endpoints/tests/unit_tests/test_parallel_tool_calls.py b/libs/ai-endpoints/tests/unit_tests/test_parallel_tool_calls.py new file mode 100644 index 00000000..7f1abd2d --- /dev/null +++ b/libs/ai-endpoints/tests/unit_tests/test_parallel_tool_calls.py @@ -0,0 +1,160 @@ +import warnings + +import pytest +import requests_mock +from langchain_core.messages import AIMessage + +from langchain_nvidia_ai_endpoints import ChatNVIDIA + + +@pytest.fixture(autouse=True) +def mock_v1_models(requests_mock: requests_mock.Mocker) -> None: + requests_mock.get( + "https://integrate.api.nvidia.com/v1/models", + json={ + "data": [ + { + "id": "magic-model", + "object": "model", + "created": 1234567890, + "owned_by": "OWNER", + "root": "magic-model", + }, + ] + }, + ) + + +def test_invoke_parallel_tool_calls(requests_mock: requests_mock.Mocker) -> None: + requests_mock.post( + "https://integrate.api.nvidia.com/v1/chat/completions", + json={ + "id": "cmpl-100f0463deb8421480ab18ed32cb2581", + "object": "chat.completion", + "created": 1721154188, + "model": "magic-model", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "chatcmpl-tool-7980a682cc24446a8da9148c2c3e37ce", + "type": "function", + "function": { + "name": "xxyyzz", + "arguments": '{"a": 11, "b": 3}', + }, + }, + { + "id": "chatcmpl-tool-299964d0c5fe4fc1b917c8eaabd1cda2", + "type": "function", + "function": { + "name": "zzyyxx", + "arguments": '{"a": 11, "b": 5}', + }, + }, + ], + }, + "logprobs": None, + "finish_reason": "tool_calls", + "stop_reason": None, + } + ], + "usage": { + "prompt_tokens": 194, + "total_tokens": 259, + "completion_tokens": 65, + }, + }, + ) + + warnings.filterwarnings("ignore", r".*Found magic-model in available_models.*") + llm = ChatNVIDIA(model="magic-model") + response = llm.invoke( + "What is 11 xxyyzz 3 zzyyxx 5?", + ) + assert isinstance(response, AIMessage) + assert len(response.tool_calls) == 2 + tool_call0 = response.tool_calls[0] + assert tool_call0["name"] == "xxyyzz" + assert tool_call0["args"] == {"b": 3, "a": 11} + tool_call1 = response.tool_calls[1] + assert tool_call1["name"] == "zzyyxx" + assert tool_call1["args"] == {"b": 5, "a": 11} + + +def test_stream_parallel_tool_calls_A(requests_mock: requests_mock.Mocker) -> None: + response_contents = "\n\n".join( + [ + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: 
{"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"id":"call_ID0","type":"function","function":{"name":"xxyyzz","arguments":""}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\\"a\\""}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":": 11,"}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" \\"b\\": "}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"3}"}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"id":"call_ID1","type":"function","function":{"name":"zzyyxx","arguments":""}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{\\"a\\""}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":": 5, "}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"\\"b\\": 3"}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"}"}}]},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"chatcmpl-ID0","object":"chat.completion.chunk","created":1721155403,"model":"magic-model","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}', # noqa: E501 + "data: [DONE]", + ] + ) + + requests_mock.post( + "https://integrate.api.nvidia.com/v1/chat/completions", + text=response_contents, + ) + + warnings.filterwarnings("ignore", r".*Found magic-model in available_models.*") + llm = ChatNVIDIA(model="magic-model") + generator = llm.stream( + "What is 11 xxyyzz 3 zzyyxx 5?", + ) + response = next(generator) + for chunk in generator: + response += chunk + assert isinstance(response, AIMessage) + 
assert len(response.tool_calls) == 2 + tool_call0 = response.tool_calls[0] + assert tool_call0["name"] == "xxyyzz" + assert tool_call0["args"] == {"b": 3, "a": 11} + tool_call1 = response.tool_calls[1] + assert tool_call1["name"] == "zzyyxx" + assert tool_call1["args"] == {"b": 3, "a": 5} + + +def test_stream_parallel_tool_calls_B(requests_mock: requests_mock.Mocker) -> None: + response_contents = "\n\n".join( + [ + 'data: {"id":"cmpl-call_ID0","object":"chat.completion.chunk","created":1721155320,"model":"magic-model","choices":[{"index":0,"delta":{"role":"assistant","content":null},"logprobs":null,"finish_reason":null}]}', # noqa: E501 + 'data: {"id":"cmpl-call_ID0","object":"chat.completion.chunk","created":1721155320,"model":"magic-model","choices":[{"index":0,"delta":{"role":null,"content":null,"tool_calls":[{"index":0,"id":"chatcmpl-tool-IDA","type":"function","function":{"name":"xxyyzz","arguments":"{\\"a\\": 11, \\"b\\": 3}"}},{"index":1,"id":"chatcmpl-tool-IDB","type":"function","function":{"name":"zzyyxx","arguments":"{\\"a\\": 11, \\"b\\": 5}"}}]},"logprobs":null,"finish_reason":"tool_calls","stop_reason":null}]}', # noqa: E501 + "data: [DONE]", + ] + ) + + requests_mock.post( + "https://integrate.api.nvidia.com/v1/chat/completions", + text=response_contents, + ) + + warnings.filterwarnings("ignore", r".*Found magic-model in available_models.*") + llm = ChatNVIDIA(model="magic-model") + generator = llm.stream( + "What is 11 xxyyzz 3 zzyyxx 5?", + ) + response = next(generator) + for chunk in generator: + response += chunk + assert isinstance(response, AIMessage) + assert len(response.tool_calls) == 2 + tool_call0 = response.tool_calls[0] + assert tool_call0["name"] == "xxyyzz" + assert tool_call0["args"] == {"b": 3, "a": 11} + tool_call1 = response.tool_calls[1] + assert tool_call1["name"] == "zzyyxx" + assert tool_call1["args"] == {"b": 5, "a": 11} From bfbcd47f2a9c9cd5162befe44aaab4c0faefa87a Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Wed, 17 Jul 2024 10:26:54 -0400 Subject: [PATCH 42/60] to support langchain.agents.AgentExecutor, change invoke response to be an AIMessage in all cases (bump version to 0.2.0) applications that were expecting a ChatMessage may break --- .../chat_models.py | 17 +---------------- libs/ai-endpoints/pyproject.toml | 2 +- .../tests/integration_tests/test_bind_tools.py | 3 +-- 3 files changed, 3 insertions(+), 19 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index af2138f3..d6f40593 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -33,7 +33,6 @@ AIMessage, AIMessageChunk, BaseMessage, - ChatMessage, ) from langchain_core.outputs import ( ChatGeneration, @@ -268,14 +267,7 @@ def _generate( responses, _ = self._client.client.postprocess(response) self._set_callback_out(responses, run_manager) parsed_response = self._custom_postprocess(responses, streaming=False) - # todo: we should always return an AIMessage, but to maintain - # API compatibility, we only return it for tool_calls. we can - # change this for an API breaking 1.0. 
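+        # note: the result is now always an AIMessage; pre-0.2 callers that
+        # branched on the message class, e.g.
+        #   if isinstance(response, ChatMessage): ...
+        # should branch on AIMessage instead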
- if "tool_calls" in parsed_response["additional_kwargs"]: - message: BaseMessage = AIMessage(**parsed_response) - else: - message = ChatMessage(**parsed_response) - generation = ChatGeneration(message=message) + generation = ChatGeneration(message=AIMessage(**parsed_response)) return ChatResult(generations=[generation], llm_output=responses) def _stream( @@ -294,13 +286,6 @@ def _stream( for response in self._client.client.get_req_stream(payload=payload): self._set_callback_out(response, run_manager) parsed_response = self._custom_postprocess(response, streaming=True) - # todo: we should always return an AIMessage, but to maintain - # API compatibility, we only return it for tool_calls. we can - # change this for an API breaking 1.0. - # if "tool_calls" in parsed_response["additional_kwargs"]: - # message: BaseMessageChunk = AIMessageChunk(**parsed_response) - # else: - # message = ChatMessageChunk(**parsed_response) message = AIMessageChunk(**parsed_response) chunk = ChatGenerationChunk(message=message) if run_manager: diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml index 89c36ab1..5e6f2a29 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-nvidia-ai-endpoints" -version = "0.1.6" +version = "0.2.0" description = "An integration package connecting NVIDIA AI Endpoints and LangChain" authors = [] readme = "README.md" diff --git a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py index 1d64e576..f4e9a415 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py +++ b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py @@ -8,7 +8,6 @@ AIMessageChunk, BaseMessage, BaseMessageChunk, - ChatMessage, ) from langchain_core.pydantic_v1 import Field from langchain_core.tools import tool @@ -564,7 +563,7 @@ def test_bind_tool_tool_choice_none( tools=[xxyyzz], tool_choice=tool_choice ) response = llm.invoke("What is 11 xxyyzz 3?") - assert isinstance(response, ChatMessage) + assert isinstance(response, AIMessage) assert "tool_calls" not in response.additional_kwargs From 63d48bb1ffb95f8a5bb1cdbe0201e2111421f43a Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 19 Jul 2024 08:43:03 -0400 Subject: [PATCH 43/60] add compatibility role property to mitigate ChatMessage -> AIMessage change note: this does not work for AIMessageChunk compatibility --- .../chat_models.py | 8 +++++++ .../integration_tests/test_chat_models.py | 21 ++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index d6f40593..e3661659 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -267,6 +267,9 @@ def _generate( responses, _ = self._client.client.postprocess(response) self._set_callback_out(responses, run_manager) parsed_response = self._custom_postprocess(responses, streaming=False) + # for pre 0.2 compatibility w/ ChatMessage + # ChatMessage had a role property that was not present in AIMessage + parsed_response.update({"role": "assistant"}) generation = ChatGeneration(message=AIMessage(**parsed_response)) return ChatResult(generations=[generation], llm_output=responses) @@ -286,6 +289,11 @@ def _stream( for response in 
self._client.client.get_req_stream(payload=payload): self._set_callback_out(response, run_manager) parsed_response = self._custom_postprocess(response, streaming=True) + # for pre 0.2 compatibility w/ ChatMessageChunk + # ChatMessageChunk had a role property that was not + # present in AIMessageChunk + # unfortunately, AIMessageChunk does not have an extensible property + # parsed_response.update({"role": "assistant"}) message = AIMessageChunk(**parsed_response) chunk = ChatGenerationChunk(message=message) if run_manager: diff --git a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py index bb1490de..6ed7c4ae 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_chat_models.py +++ b/libs/ai-endpoints/tests/integration_tests/test_chat_models.py @@ -5,7 +5,12 @@ import pytest from langchain_core.load.dump import dumps from langchain_core.load.load import loads -from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage +from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + SystemMessage, +) from langchain_nvidia_ai_endpoints.chat_models import ChatNVIDIA @@ -25,6 +30,10 @@ def test_chat_ai_endpoints(chat_model: str, mode: dict) -> None: response = chat.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) + # compatibility test for ChatMessage (pre 0.2) + # assert isinstance(response, ChatMessage) + assert hasattr(response, "role") + assert response.role == "assistant" def test_unknown_model() -> None: @@ -145,11 +154,17 @@ def test_ai_endpoints_streaming(chat_model: str, mode: dict) -> None: """Test streaming tokens from ai endpoints.""" llm = ChatNVIDIA(model=chat_model, max_tokens=36, **mode) + generator = llm.stream("I'm Pickle Rick") + response = next(generator) cnt = 0 - for token in llm.stream("I'm Pickle Rick"): - assert isinstance(token.content, str) + for chunk in generator: + assert isinstance(chunk.content, str) + response += chunk cnt += 1 assert cnt > 1 + # compatibility test for ChatMessageChunk (pre 0.2) + # assert hasattr(response, "role") + # assert response.role == "assistant" # does not work, role not passed through async def test_ai_endpoints_astream(chat_model: str, mode: dict) -> None: From d9592df15c9d85e47714ab6ab893e1d01c46f094 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Fri, 19 Jul 2024 10:27:02 -0400 Subject: [PATCH 44/60] add tool calling section to doc notebook --- .../docs/chat/nvidia_ai_endpoints.ipynb | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb index dc2236a9..e1025759 100644 --- a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb +++ b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb @@ -548,6 +548,75 @@ "source": [ "conversation.invoke(\"Tell me about yourself.\")[\"response\"]" ] + }, + { + "cell_type": "markdown", + "id": "f3cbbba0", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "Starting in v0.2, `ChatNVIDIA` supports [bind_tools](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html#langchain_core.language_models.chat_models.BaseChatModel.bind_tools).\n", + "\n", + "`ChatNVIDIA` provides integration with a variety of models on [build.nvidia.com](https://build.nvidia.com) as well as local NIMs.
Not all these models are trained for tool calling. Be sure to select a model that supports tool calling for your experimentation and applications." + ] + }, + { + "cell_type": "markdown", + "id": "6f7b535e", + "metadata": {}, + "source": [ + "You can get a list of models that are known to support tool calling with," + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e36c8911", + "metadata": {}, + "outputs": [], + "source": [ + "tool_models = [model for model in ChatNVIDIA.get_available_models() if model.supports_tools]\n", + "tool_models" + ] + }, + { + "cell_type": "markdown", + "id": "b01d75a7", + "metadata": {}, + "source": [ + "With a tool-capable model," + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bd54f174", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.pydantic_v1 import Field\n", + "from langchain_core.tools import tool\n", + "\n", + "@tool\n", + "def get_current_weather(\n", + " location: str = Field(..., description=\"The location to get the weather for.\")\n", + "):\n", + " \"\"\"Get the current weather for a location.\"\"\"\n", + " ...\n", + "\n", + "llm = ChatNVIDIA(model=tool_models[0].id).bind_tools(tools=[get_current_weather])\n", + "response = llm.invoke(\"What is the weather in Boston?\")\n", + "response.tool_calls" + ] + }, + { + "cell_type": "markdown", + "id": "e08df68c", + "metadata": {}, + "source": [ + "See [How to use chat models to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/) for additional examples." + ] + } ], "metadata": { From f743d781710b4f7cacdf141dfc44a1a3a70f65fc Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Tue, 23 Jul 2024 09:42:34 -0400 Subject: [PATCH 45/60] workaround for missing index field on streamed tool calls (revert when nim bug fixed) --- .../langchain_nvidia_ai_endpoints/chat_models.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index e3661659..86a6a8ca 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -334,10 +334,16 @@ def _custom_postprocess( elif streaming: out_dict["tool_call_chunks"] = [] for tool_call in tool_calls: - assert "index" in tool_call, ( - "invalid response from server: " - "tool_call must have an 'index' key" - ) + # todo: the nim api does not return the function index + # for tool calls in stream responses. this is + # an issue that needs to be resolved server-side. + # the only reason we can skip this for now + # is because the nim endpoint returns only full + # tool calls, no deltas.
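+                    # for reference, a spec-compliant stream would send
+                    # partial deltas that must be stitched together by
+                    # index, e.g. a hypothetical chunk -
+                    #   {"index": 0, "id": "call_0", "type": "function",
+                    #    "function": {"name": "f", "arguments": "{\"a\""}}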
+ # assert "index" in tool_call, ( + # "invalid response from server: " + # "tool_call must have an 'index' key" + # ) assert "function" in tool_call, ( "invalid response from server: " "tool_call must have a 'function' key" From 1059157a898828dec0fa2e4b798b97b8c2e236ab Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Tue, 23 Jul 2024 11:34:36 -0400 Subject: [PATCH 46/60] add support for meta/llama-3.1-8b-instruct, meta/llama-3.1-70b-instruct & meta/llama-3.1-405b-instruct --- .../langchain_nvidia_ai_endpoints/_statics.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index 1151143e..41886f4a 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -282,6 +282,21 @@ def validate_client(cls, client: str, values: dict) -> str: model_type="chat", client="ChatNVIDIA", ), + "meta/llama-3.1-8b-instruct": Model( + id="meta/llama-3.1-8b-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "meta/llama-3.1-70b-instruct": Model( + id="meta/llama-3.1-70b-instruct", + model_type="chat", + client="ChatNVIDIA", + ), + "meta/llama-3.1-405b-instruct": Model( + id="meta/llama-3.1-405b-instruct", + model_type="chat", + client="ChatNVIDIA", + ), } QA_MODEL_TABLE = { From bb67c4f920f34ab28f171e30183c313dc15d51aa Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Tue, 23 Jul 2024 11:35:29 -0400 Subject: [PATCH 47/60] bump version to 0.1.7 --- libs/ai-endpoints/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml index ba34df24..40da6d3b 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-nvidia-ai-endpoints" -version = "0.1.6" +version = "0.1.7" description = "An integration package connecting NVIDIA AI Endpoints and LangChain" authors = [] readme = "README.md" From a26088ee2c9e43478c1a728f50424a42f2739093 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Tue, 23 Jul 2024 12:15:35 -0400 Subject: [PATCH 48/60] add tool support for meta/llama-3.1-8b-instruct, meta/llama-3.1-70b-instruct & meta/llama-3.1-405b-instruct --- libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py index cbfcf7f4..7c51466b 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -289,16 +289,19 @@ def validate_client(cls, client: str, values: dict) -> str: id="meta/llama-3.1-8b-instruct", model_type="chat", client="ChatNVIDIA", + supports_tools=True, ), "meta/llama-3.1-70b-instruct": Model( id="meta/llama-3.1-70b-instruct", model_type="chat", client="ChatNVIDIA", + supports_tools=True, ), "meta/llama-3.1-405b-instruct": Model( id="meta/llama-3.1-405b-instruct", model_type="chat", client="ChatNVIDIA", + supports_tools=True, ), } From 150c6c39fefad3090ebdee814de65bdc471e2d49 Mon Sep 17 00:00:00 2001 From: Hayden Wolff Date: Tue, 23 Jul 2024 13:42:21 -0700 Subject: [PATCH 49/60] added notebook to cookbooks --- cookbook/nvidia_nim_agents_llama3.1.ipynb | 352 ++++++++++++++++++++++ 1 file changed, 352 insertions(+) create mode 100644 
cookbook/nvidia_nim_agents_llama3.1.ipynb diff --git a/cookbook/nvidia_nim_agents_llama3.1.ipynb b/cookbook/nvidia_nim_agents_llama3.1.ipynb new file mode 100644 index 00000000..e3e13dce --- /dev/null +++ b/cookbook/nvidia_nim_agents_llama3.1.ipynb @@ -0,0 +1,352 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "77d79657", + "metadata": {}, + "source": [ + "\n", + "# NVIDIA NIMs with Tool Calling for Agents" + ] + }, + { + "cell_type": "markdown", + "id": "f8fdde48", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "This notebook will use an [NVIDIA Llama 3.1 NIM](https://developer.nvidia.com/blog/supercharging-llama-3-1-across-nvidia-platforms/) with tool-calling agent capabilities in generative AI solutions. As mentioned in this [Introductory Blog on LLM Agents](https://developer.nvidia.com/blog/introduction-to-llm-agents/), agents can be described as AI systems that use LLMs to reason through a problem, create a plan to solve the problem, execute the plan with the help of a set of tools, and use memory to store meaningful context of the system state. \n", + "\n", + "The notebook is designed to provide an intro to just one of the capabilities of agent systems: **tool calling**. \n", + "\n", + "**Tools** are interfaces that accept input, execute an action, and then return a result of that action in a structured output according to a pre-defined schema. They often encompass external API calls that the agent can use to perform tasks that go beyond the capabilities of the LLM, but do not have to be external API calls. For example, to get the current weather in San Diego, a weather tool might be used. Or to get the current score of the 49ers game, a generic web search tool or ESPN tool might be used. \n", + "\n", + "## What is NVIDIA NIM and How Does It Support Tool Calling for Agents?\n", + "### What is NIM?\n", + "NIM supports models across domains like chat, embedding, and re-ranking \n", + "from the community as well as NVIDIA. These models are optimized by NVIDIA to deliver the best performance on NVIDIA \n", + "accelerated infrastructure and deployed as a NIM, an easy-to-use, prebuilt container that deploys anywhere using a single \n", + "command on NVIDIA accelerated infrastructure. If you're new to NIMs with LangChain, check out the [documentation](https://python.langchain.com/v0.2/docs/integrations/providers/nvidia/).\n", + "\n", + "Now, NIMs support tool calling, also known as \"function calling\", for models that have the aforementioned capability. \n", + "\n", + "This notebook will demonstrate a model that supports function calling, [Llama 3.1 8b-instruct](https://build.nvidia.com/meta/llama-3_1-8b-instruct). \n", + "\n", + "### What does it mean for NIM to support tool usage?\n", + "In order to support tool usage in an agent workflow, first an LLM must be trained to detect when a function should be called and output a structured response like JSON that contains the function to be called and its arguments. \n", + "\n", + "Next, the model is packaged as a NIM, meaning it's optimized to deliver the best performance on NVIDIA accelerated infrastructure and easy to deploy as well as use. This microservice packaging also uses OpenAI compatible APIs, so developers can build world-class generative AI agents with ease.\n", + "\n", + "Let's see how to use tools in a couple of examples."
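+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f8fdde49",
+   "metadata": {},
+   "source": [
+    "To make the structured output described above concrete, here is a sketch of the kind of payload a tool-calling model returns for the San Diego weather example (the tool name, id, and field values are hypothetical):\n",
+    "\n",
+    "```json\n",
+    "{\"tool_calls\": [{\"id\": \"call_abc123\", \"type\": \"function\", \"function\": {\"name\": \"get_current_weather\", \"arguments\": \"{\\\"location\\\": \\\"San Diego\\\"}\"}}]}\n",
+    "```\n",
+    "\n",
+    "The agent framework parses this payload, executes the named tool with the decoded arguments, and feeds the result back to the model."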
+ ] + }, + { + "cell_type": "markdown", + "id": "120455e4", + "metadata": {}, + "source": [ + "## 🔨 Tool Usage -- Web Search\n", + "\n", + "Since an LLM does not have access to the most up-to-date information on the Internet, [Tavily Search](https://docs.tavily.com/docs/tavily-api/introduction) acts as a tool to provide a generative AI application with real-time online information. Tavily is a search engine that is optimized for AI developers and AI agents. A single API call abstracts searching, scraping, filtering, and extracting relevant information from online sources. \n", + "\n", + "We'll enhance our NIM, [Llama 3.1-8b-instruct](https://build.nvidia.com/meta/llama-3_1-8b-instruct), with Tavily search. " + ] + }, + { + "cell_type": "markdown", + "id": "1b8f8b6f", + "metadata": {}, + "source": [ + "Install prerequisites. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe4ec61f", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -U langchain langgraph langchain-nvidia-ai-endpoints langchain-community langchain-openai tavily-python geocoder" + ] + }, + { + "cell_type": "markdown", + "id": "6c65b376", + "metadata": {}, + "source": [ + "If you're using NVIDIA hosted NIMs, you'll need to use an API key, which you can set up below. Follow [NVIDIA NIMs LangChain documentation](https://python.langchain.com/v0.2/docs/integrations/chat/nvidia_ai_endpoints/) for more information on accessing and using NIMs. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aaeb35a9", + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"NVIDIA_API_KEY\"] = getpass.getpass(\"Enter your NVIDIA API key: \")" + ] + }, + { + "cell_type": "markdown", + "id": "e190dc5e", + "metadata": {}, + "source": [ + "Declare a model that supports tool calling. In this example, we use [Llama 3.1-8b-instruct](https://build.nvidia.com/meta/llama-3_1-8b-instruct). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "579881ca", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_nvidia_ai_endpoints import ChatNVIDIA\n", + "\n", + "llm = ChatNVIDIA(model=\"meta/llama-3.1-8b-instruct\")" + ] + }, + { + "cell_type": "markdown", + "id": "9ce17567", + "metadata": {}, + "source": [ + "Initialize the [Tavily Tool](https://python.langchain.com/v0.2/docs/integrations/tools/tavily_search/).\n", + "\n", + "Note that this requires an API key - they have a free tier, but if you don't have one or don't want to create one, you can always ignore this step or use a different tool. \n", + "\n", + "Once you create your API key, you will need to set it in the environment."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c8832545-d3c1-404f-afdb-6a00891f84c9", + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"TAVILY_API_KEY\"] = getpass.getpass(\"Enter your Tavily API key: \")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1d1511d", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.tools.tavily_search import TavilySearchResults\n", + "\n", + "# Declare a single tool, Tavily search\n", + "tools = [TavilySearchResults(max_results=1)]" + ] + }, + { + "cell_type": "markdown", + "id": "cd230847", + "metadata": {}, + "source": [ + "Create a [ReAct agent](https://python.langchain.com/v0.2/docs/concepts/#react-agents), prebuilt in [LangGraph](https://langchain-ai.github.io/langgraph/#overview). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da73ae35", + "metadata": {}, + "outputs": [], + "source": [ + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.callbacks.tracers import ConsoleCallbackHandler\n", + "\n", + "app = create_react_agent(llm, tools)" + ] + }, + { + "cell_type": "markdown", + "id": "be70d7ee", + "metadata": {}, + "source": [ + "Run the agent; a callback is passed to provide more verbose output." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02a109cc", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "query = \"What is LangChain?\"\n", + "messages = app.invoke({\"messages\": [(\"human\", query)]}, config={'callbacks': [ConsoleCallbackHandler()]})\n", + "{\n", + " \"input\": query,\n", + " \"output\": messages[\"messages\"][-1].content,\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "b5e9bbb9", + "metadata": {}, + "source": [ + "## 🔨 Tool Usage -- Adding on a Custom Tool\n", + "\n", + "Let's see how to [define a custom tool](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for your NIM agent and how it handles multiple tools. \n", + "\n", + "We'll extend the Tavily-equipped NIM with custom tools that determine a user's current location (based on IP address) and return a latitude and longitude. We will use these tools to have Tavily look up the weather in the user's current location." + ] + }, + { + "cell_type": "markdown", + "id": "46052285-7331-44c2-a7dc-34ebbe4d6b8c", + "metadata": {}, + "source": [ + "First, let's create a custom tool to determine a user's location based on IP address. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e9d8ed5f-b6e9-495f-85ff-e431d39475c4", + "metadata": {}, + "outputs": [], + "source": [ + "import geocoder\n", + "from langchain.tools import tool\n", + "\n", + "@tool\n", + "def get_current_location() -> list:\n", + " \"\"\"Return the current location of the user based on IP address\"\"\"\n", + " loc = geocoder.ip('me')\n", + " return loc.latlng " + ] + }, + { + "cell_type": "markdown", + "id": "089e3223-50f3-4e8e-9043-24c792ca7daf", + "metadata": {}, + "source": [ + "Let's update the tools to use the Tavily tool declared earlier and also add the `get_current_location` tool."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b71d7d05-d3ec-4005-911c-3e44df8102b4", + "metadata": {}, + "outputs": [], + "source": [ + "# Declare two tools: Tavily and custom get_current_location tool.\n", + "tools = [TavilySearchResults(max_results=1), get_current_location]" + ] + }, + { + "cell_type": "markdown", + "id": "cd04f130-3f9b-4a0d-a018-d954dc41ad4b", + "metadata": {}, + "source": [ + "We already declared our LLM, so we don't need to redeclare it. However, we do want to update the agent to use the updated tools." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "64a0eead-ee86-4b0b-8ae3-fb194ea69186", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.globals import set_verbose\n", + "from langchain.callbacks.tracers import ConsoleCallbackHandler\n", + "\n", + "set_verbose(True) # verbose output to follow function calling\n", + "\n", + "query = \"What is the current weather where I am?\"\n", + "app = create_react_agent(llm, tools)\n", + "\n", + "\n", + "messages = app.invoke({\"messages\": [(\"human\", query)]}, config={'callbacks': [ConsoleCallbackHandler()]})\n", + "{\n", + " \"input\": query,\n", + " \"output\": messages[\"messages\"][-1].content,\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "147c4727", + "metadata": {}, + "source": [ + "In order to execute this query, first a tool to get the current location needs to be called. Then a tool to get the current weather at that location needs to be called. \n", + "Finally, the result is returned to the user." + ] + }, + { + "cell_type": "markdown", + "id": "8ace95bd-f2f7-469e-9d9e-ea7b4c57e8f4", + "metadata": {}, + "source": [ + "Below, you can see a diagram of the application's graph. The agent continues to use tools until the query is resolved." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "128b55cf-5ee3-42d2-897b-173a6d696921", + "metadata": {}, + "outputs": [], + "source": [ + "from IPython.display import Image, display\n", + "\n", + "display(Image(app.get_graph(xray=True).draw_mermaid_png()))" + ] + }, + { + "cell_type": "markdown", + "id": "42ce0ec8-d5bb-4ba8-b2d6-6fe3a0c0aeec", + "metadata": {}, + "source": [ + "## Conclusion\n", + "You've now seen how to use NIMs to do tool calling, an important capability of agents. As mentioned earlier, tools are just one part of agent capabilities, so check out the other notebooks to see how tools can be used with other techniques to create agent workflows.\n", + "\n", + "If you're ready to explore more complicated agent workflows, check out [this blog](https://developer.nvidia.com/blog/build-an-agentic-rag-pipeline-with-llama-3-1-and-nvidia-nemo-retriever-nims/) on how to improve your RAG pipeline with agents using Llama 3.1 and NVIDIA NeMo Retriever NIMs."
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From e7a9e3a6e3a534a366cb98732449be0ace043a4c Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Tue, 23 Jul 2024 17:03:42 -0400 Subject: [PATCH 50/60] add default tool model and mark tests as xfail for server issues --- libs/ai-endpoints/tests/integration_tests/conftest.py | 2 +- .../tests/integration_tests/test_bind_tools.py | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/libs/ai-endpoints/tests/integration_tests/conftest.py b/libs/ai-endpoints/tests/integration_tests/conftest.py index 5240a89c..2c45386f 100644 --- a/libs/ai-endpoints/tests/integration_tests/conftest.py +++ b/libs/ai-endpoints/tests/integration_tests/conftest.py @@ -81,7 +81,7 @@ def get_all_known_models() -> List[Model]: metafunc.parametrize("chat_model", models, ids=models) if "tool_model" in metafunc.fixturenames: - models = [] + models = ["meta/llama-3.1-8b-instruct"] if model_list := metafunc.config.getoption("tool_model_id"): models = model_list if metafunc.config.getoption("all_models"): diff --git a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py index f4e9a415..c9a84b69 100644 --- a/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py +++ b/libs/ai-endpoints/tests/integration_tests/test_bind_tools.py @@ -271,6 +271,7 @@ def test_tool_choice_negative( [eval_invoke, eval_stream], ids=["invoke", "stream"], ) +@pytest.mark.xfail(reason="Server side is broken") def test_tool_choice_negative_max_tokens_required( tool_model: str, mode: dict, @@ -294,6 +295,7 @@ def test_tool_choice_negative_max_tokens_required( [eval_invoke, eval_stream], ids=["invoke", "stream"], ) +@pytest.mark.xfail(reason="Server side is broken") def test_tool_choice_negative_max_tokens_function( tool_model: str, mode: dict, @@ -638,8 +640,12 @@ def test_known_does_not_warn(tool_model: str, mode: dict) -> None: def test_unknown_warns(mode: dict) -> None: + candidates = [ + model for model in ChatNVIDIA.get_available_models() if not model.supports_tools + ] + assert candidates, "All models support tools" with pytest.warns(UserWarning) as record: - ChatNVIDIA(model="mock-model", **mode).bind_tools([xxyyzz]) + ChatNVIDIA(model=candidates[0].id, **mode).bind_tools([xxyyzz]) assert len(record) == 1 assert "not known to support tools" in str(record[0].message) From f8c99bb2b9a6916d8d057e85826b143738226fdb Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Tue, 23 Jul 2024 17:42:07 -0400 Subject: [PATCH 51/60] update minimum langchain-core version to 0.1.47 for tool_calls support --- libs/ai-endpoints/poetry.lock | 765 ++++++++++++++++--------------- libs/ai-endpoints/pyproject.toml | 2 +- 2 files changed, 399 insertions(+), 368 deletions(-) diff --git a/libs/ai-endpoints/poetry.lock b/libs/ai-endpoints/poetry.lock index 77f7b29c..26a503a6 100644 --- a/libs/ai-endpoints/poetry.lock +++ b/libs/ai-endpoints/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "aiohttp" @@ -112,13 +112,13 @@ frozenlist = ">=1.1.0" [[package]] name = "annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [package.dependencies] @@ -156,13 +156,13 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p [[package]] name = "certifi" -version = "2024.2.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, - {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, ] [[package]] @@ -266,13 +266,13 @@ files = [ [[package]] name = "codespell" -version = "2.2.6" +version = "2.3.0" description = "Codespell" optional = false python-versions = ">=3.8" files = [ - {file = "codespell-2.2.6-py3-none-any.whl", hash = "sha256:9ee9a3e5df0990604013ac2a9f22fa8e57669c827124a2e961fe8a1da4cacc07"}, - {file = "codespell-2.2.6.tar.gz", hash = "sha256:a8c65d8eb3faa03deabab6b3bbe798bea72e1799c7e9e955d57eca4096abcff9"}, + {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"}, + {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"}, ] [package.extras] @@ -309,13 +309,13 @@ typing-inspect = ">=0.4.0,<1" [[package]] name = "exceptiongroup" -version = "1.2.1" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, - {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] @@ -323,13 +323,13 @@ test = ["pytest (>=6)"] [[package]] name = "faker" -version = "24.11.0" +version = "24.14.1" description = "Faker is a Python package that generates fake data for you." 
optional = false python-versions = ">=3.8" files = [ - {file = "Faker-24.11.0-py3-none-any.whl", hash = "sha256:adb98e771073a06bdc5d2d6710d8af07ac5da64c8dc2ae3b17bb32319e66fd82"}, - {file = "Faker-24.11.0.tar.gz", hash = "sha256:34b947581c2bced340c39b35f89dbfac4f356932cfff8fe893bde854903f0e6e"}, + {file = "Faker-24.14.1-py3-none-any.whl", hash = "sha256:a5edba3aa17a1d689c8907e5b0cd1653079c2466a4807f083aa7b5f80a00225d"}, + {file = "Faker-24.14.1.tar.gz", hash = "sha256:380a3697e696ae4fcf50a93a3d9e0286fab7dfbf05a9caa4421fa4727c6b1e89"}, ] [package.dependencies] @@ -338,13 +338,13 @@ typing-extensions = {version = ">=3.10.0.1", markers = "python_version <= \"3.8\ [[package]] name = "freezegun" -version = "1.5.0" +version = "1.5.1" description = "Let your Python tests travel through time" optional = false python-versions = ">=3.7" files = [ - {file = "freezegun-1.5.0-py3-none-any.whl", hash = "sha256:ec3f4ba030e34eb6cf7e1e257308aee2c60c3d038ff35996d7475760c9ff3719"}, - {file = "freezegun-1.5.0.tar.gz", hash = "sha256:200a64359b363aa3653d8aac289584078386c7c3da77339d257e46a01fb5c77c"}, + {file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"}, + {file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"}, ] [package.dependencies] @@ -545,30 +545,30 @@ jsonpointer = ">=1.9" [[package]] name = "jsonpointer" -version = "2.4" +version = "3.0.0" description = "Identify specific nodes in a JSON document (RFC 6901)" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +python-versions = ">=3.7" files = [ - {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, - {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, + {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, + {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, ] [[package]] name = "langchain" -version = "0.2.5" +version = "0.2.11" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain-0.2.5-py3-none-any.whl", hash = "sha256:9aded9a65348254e1c93dcdaacffe4d1b6a5e7f74ef80c160c88ff78ad299228"}, - {file = "langchain-0.2.5.tar.gz", hash = "sha256:ffdbf4fcea46a10d461bcbda2402220fcfd72a0c70e9f4161ae0510067b9b3bd"}, + {file = "langchain-0.2.11-py3-none-any.whl", hash = "sha256:5a7a8b4918f3d3bebce9b4f23b92d050699e6f7fb97591e8941177cf07a260a2"}, + {file = "langchain-0.2.11.tar.gz", hash = "sha256:d7a9e4165f02dca0bd78addbc2319d5b9286b5d37c51d784124102b57e9fd297"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} -langchain-core = ">=0.2.7,<0.3.0" +langchain-core = ">=0.2.23,<0.3.0" langchain-text-splitters = ">=0.2.0,<0.3.0" langsmith = ">=0.1.17,<0.2.0" numpy = [ @@ -579,24 +579,24 @@ pydantic = ">=1,<3" PyYAML = ">=5.3" requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" -tenacity = ">=8.1.0,<9.0.0" +tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" [[package]] name = "langchain-community" -version = "0.2.5" +version = "0.2.10" description = "Community contributed LangChain integrations." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_community-0.2.5-py3-none-any.whl", hash = "sha256:bf37a334952e42c7676d083cf2d2c4cbfbb7de1949c4149fe19913e2b06c485f"}, - {file = "langchain_community-0.2.5.tar.gz", hash = "sha256:476787b8c8c213b67e7b0eceb53346e787f00fbae12d8e680985bd4f93b0bf64"}, + {file = "langchain_community-0.2.10-py3-none-any.whl", hash = "sha256:9f4d1b5ab7f0b0a704f538e26e50fce45a461da6d2bf6b7b636d24f22fbc088a"}, + {file = "langchain_community-0.2.10.tar.gz", hash = "sha256:3a0404bad4bd07d6f86affdb62fb3d080a456c66191754d586a409d9d6024d62"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" dataclasses-json = ">=0.5.7,<0.7" -langchain = ">=0.2.5,<0.3.0" -langchain-core = ">=0.2.7,<0.3.0" +langchain = ">=0.2.9,<0.3.0" +langchain-core = ">=0.2.23,<0.3.0" langsmith = ">=0.1.0,<0.2.0" numpy = [ {version = ">=1,<2", markers = "python_version < \"3.12\""}, @@ -605,11 +605,11 @@ numpy = [ PyYAML = ">=5.3" requests = ">=2,<3" SQLAlchemy = ">=1.4,<3" -tenacity = ">=8.1.0,<9.0.0" +tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" [[package]] name = "langchain-core" -version = "0.2.8" +version = "0.2.23" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -620,48 +620,51 @@ develop = false jsonpatch = "^1.33" langsmith = "^0.1.75" packaging = ">=23.2,<25" -pydantic = ">=1,<3" +pydantic = [ + {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, +] PyYAML = ">=5.3" -tenacity = "^8.1.0" +tenacity = "^8.1.0,!=8.4.0" [package.source] type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "c2b2e3266ce97ea647d4b86eedadbb7cd77d0381" +resolved_reference = "03881c674358df6bc243f67a77050aa95c660bcd" subdirectory = "libs/core" [[package]] name = "langchain-text-splitters" -version = "0.2.1" +version = "0.2.2" description = "LangChain text splitting utilities" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_text_splitters-0.2.1-py3-none-any.whl", hash = "sha256:c2774a85f17189eaca50339629d2316d13130d4a8d9f1a1a96f3a03670c4a138"}, - {file = "langchain_text_splitters-0.2.1.tar.gz", hash = "sha256:06853d17d7241ecf5c97c7b6ef01f600f9b0fb953dd997838142a527a4f32ea4"}, + {file = "langchain_text_splitters-0.2.2-py3-none-any.whl", hash = "sha256:1c80d4b11b55e2995f02d2a326c0323ee1eeff24507329bb22924e420c782dff"}, + {file = "langchain_text_splitters-0.2.2.tar.gz", hash = "sha256:a1e45de10919fa6fb080ef0525deab56557e9552083600455cb9fa4238076140"}, ] [package.dependencies] -langchain-core = ">=0.2.0,<0.3.0" - -[package.extras] -extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"] +langchain-core = ">=0.2.10,<0.3.0" [[package]] name = "langsmith" -version = "0.1.78" +version = "0.1.93" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.78-py3-none-any.whl", hash = "sha256:87bc5d9072bfcb6392d7552cbcd6089dcc1faed36d688b1587d80bd48a1acba2"}, - {file = "langsmith-0.1.78.tar.gz", hash = "sha256:d9112d2e9298ec6b02d3b1afec6ed557df9db3746c79d34ef3b448fc18e116cd"}, + {file = "langsmith-0.1.93-py3-none-any.whl", hash = "sha256:811210b9d5f108f36431bd7b997eb9476a9ecf5a2abd7ddbb606c1cdcf0f43ce"}, + {file = "langsmith-0.1.93.tar.gz", hash = "sha256:285b6ad3a54f50fa8eb97b5f600acc57d0e37e139dd8cf2111a117d0435ba9b4"}, ] [package.dependencies] orjson = ">=3.9.14,<4.0.0" -pydantic = ">=1,<3" +pydantic = [ + {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, +] requests = ">=2,<3" [[package]] @@ -927,155 +930,166 @@ files = [ [[package]] name = "orjson" -version = "3.10.1" +version = "3.10.6" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.1-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8ec2fc456d53ea4a47768f622bb709be68acd455b0c6be57e91462259741c4f3"}, - {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e900863691d327758be14e2a491931605bd0aded3a21beb6ce133889830b659"}, - {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab6ecbd6fe57785ebc86ee49e183f37d45f91b46fc601380c67c5c5e9c0014a2"}, - {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af7c68b01b876335cccfb4eee0beef2b5b6eae1945d46a09a7c24c9faac7a77"}, - {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:915abfb2e528677b488a06eba173e9d7706a20fdfe9cdb15890b74ef9791b85e"}, - {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe3fd4a36eff9c63d25503b439531d21828da9def0059c4f472e3845a081aa0b"}, - {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d229564e72cfc062e6481a91977a5165c5a0fdce11ddc19ced8471847a67c517"}, - {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9e00495b18304173ac843b5c5fbea7b6f7968564d0d49bef06bfaeca4b656f4e"}, - {file = "orjson-3.10.1-cp310-none-win32.whl", hash = "sha256:fd78ec55179545c108174ba19c1795ced548d6cac4d80d014163033c047ca4ea"}, - {file = "orjson-3.10.1-cp310-none-win_amd64.whl", hash = "sha256:50ca42b40d5a442a9e22eece8cf42ba3d7cd4cd0f2f20184b4d7682894f05eec"}, - {file = "orjson-3.10.1-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b345a3d6953628df2f42502297f6c1e1b475cfbf6268013c94c5ac80e8abc04c"}, - {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caa7395ef51af4190d2c70a364e2f42138e0e5fcb4bc08bc9b76997659b27dab"}, - {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b01d701decd75ae092e5f36f7b88a1e7a1d3bb7c9b9d7694de850fb155578d5a"}, - {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b5028981ba393f443d8fed9049211b979cadc9d0afecf162832f5a5b152c6297"}, - {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31ff6a222ea362b87bf21ff619598a4dc1106aaafaea32b1c4876d692891ec27"}, - {file = 
"orjson-3.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e852a83d7803d3406135fb7a57cf0c1e4a3e73bac80ec621bd32f01c653849c5"}, - {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2567bc928ed3c3fcd90998009e8835de7c7dc59aabcf764b8374d36044864f3b"}, - {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4ce98cac60b7bb56457bdd2ed7f0d5d7f242d291fdc0ca566c83fa721b52e92d"}, - {file = "orjson-3.10.1-cp311-none-win32.whl", hash = "sha256:813905e111318acb356bb8029014c77b4c647f8b03f314e7b475bd9ce6d1a8ce"}, - {file = "orjson-3.10.1-cp311-none-win_amd64.whl", hash = "sha256:03a3ca0b3ed52bed1a869163a4284e8a7b0be6a0359d521e467cdef7e8e8a3ee"}, - {file = "orjson-3.10.1-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f02c06cee680b1b3a8727ec26c36f4b3c0c9e2b26339d64471034d16f74f4ef5"}, - {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1aa2f127ac546e123283e437cc90b5ecce754a22306c7700b11035dad4ccf85"}, - {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2cf29b4b74f585225196944dffdebd549ad2af6da9e80db7115984103fb18a96"}, - {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1b130c20b116f413caf6059c651ad32215c28500dce9cd029a334a2d84aa66f"}, - {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d31f9a709e6114492136e87c7c6da5e21dfedebefa03af85f3ad72656c493ae9"}, - {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d1d169461726f271ab31633cf0e7e7353417e16fb69256a4f8ecb3246a78d6e"}, - {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:57c294d73825c6b7f30d11c9e5900cfec9a814893af7f14efbe06b8d0f25fba9"}, - {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d7f11dbacfa9265ec76b4019efffabaabba7a7ebf14078f6b4df9b51c3c9a8ea"}, - {file = "orjson-3.10.1-cp312-none-win32.whl", hash = "sha256:d89e5ed68593226c31c76ab4de3e0d35c760bfd3fbf0a74c4b2be1383a1bf123"}, - {file = "orjson-3.10.1-cp312-none-win_amd64.whl", hash = "sha256:aa76c4fe147fd162107ce1692c39f7189180cfd3a27cfbc2ab5643422812da8e"}, - {file = "orjson-3.10.1-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a2c6a85c92d0e494c1ae117befc93cf8e7bca2075f7fe52e32698da650b2c6d1"}, - {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9813f43da955197d36a7365eb99bed42b83680801729ab2487fef305b9ced866"}, - {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec917b768e2b34b7084cb6c68941f6de5812cc26c6f1a9fecb728e36a3deb9e8"}, - {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5252146b3172d75c8a6d27ebca59c9ee066ffc5a277050ccec24821e68742fdf"}, - {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:536429bb02791a199d976118b95014ad66f74c58b7644d21061c54ad284e00f4"}, - {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dfed3c3e9b9199fb9c3355b9c7e4649b65f639e50ddf50efdf86b45c6de04b5"}, - {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2b230ec35f188f003f5b543644ae486b2998f6afa74ee3a98fc8ed2e45960afc"}, - {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:01234249ba19c6ab1eb0b8be89f13ea21218b2d72d496ef085cfd37e1bae9dd8"}, - {file = "orjson-3.10.1-cp38-none-win32.whl", hash = "sha256:8a884fbf81a3cc22d264ba780920d4885442144e6acaa1411921260416ac9a54"}, - {file = "orjson-3.10.1-cp38-none-win_amd64.whl", hash = "sha256:dab5f802d52b182163f307d2b1f727d30b1762e1923c64c9c56dd853f9671a49"}, - {file = "orjson-3.10.1-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a51fd55d4486bc5293b7a400f9acd55a2dc3b5fc8420d5ffe9b1d6bb1a056a5e"}, - {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53521542a6db1411b3bfa1b24ddce18605a3abdc95a28a67b33f9145f26aa8f2"}, - {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:27d610df96ac18ace4931411d489637d20ab3b8f63562b0531bba16011998db0"}, - {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79244b1456e5846d44e9846534bd9e3206712936d026ea8e6a55a7374d2c0694"}, - {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d751efaa8a49ae15cbebdda747a62a9ae521126e396fda8143858419f3b03610"}, - {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27ff69c620a4fff33267df70cfd21e0097c2a14216e72943bd5414943e376d77"}, - {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ebc58693464146506fde0c4eb1216ff6d4e40213e61f7d40e2f0dde9b2f21650"}, - {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5be608c3972ed902e0143a5b8776d81ac1059436915d42defe5c6ae97b3137a4"}, - {file = "orjson-3.10.1-cp39-none-win32.whl", hash = "sha256:4ae10753e7511d359405aadcbf96556c86e9dbf3a948d26c2c9f9a150c52b091"}, - {file = "orjson-3.10.1-cp39-none-win_amd64.whl", hash = "sha256:fb5bc4caa2c192077fdb02dce4e5ef8639e7f20bec4e3a834346693907362932"}, - {file = "orjson-3.10.1.tar.gz", hash = "sha256:a883b28d73370df23ed995c466b4f6c708c1f7a9bdc400fe89165c96c7603204"}, + {file = "orjson-3.10.6-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:fb0ee33124db6eaa517d00890fc1a55c3bfe1cf78ba4a8899d71a06f2d6ff5c7"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c1c4b53b24a4c06547ce43e5fee6ec4e0d8fe2d597f4647fc033fd205707365"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eadc8fd310edb4bdbd333374f2c8fec6794bbbae99b592f448d8214a5e4050c0"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61272a5aec2b2661f4fa2b37c907ce9701e821b2c1285d5c3ab0207ebd358d38"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57985ee7e91d6214c837936dc1608f40f330a6b88bb13f5a57ce5257807da143"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:633a3b31d9d7c9f02d49c4ab4d0a86065c4a6f6adc297d63d272e043472acab5"}, + {file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1c680b269d33ec444afe2bdc647c9eb73166fa47a16d9a75ee56a374f4a45f43"}, + {file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f759503a97a6ace19e55461395ab0d618b5a117e8d0fbb20e70cfd68a47327f2"}, + {file = "orjson-3.10.6-cp310-none-win32.whl", hash = "sha256:95a0cce17f969fb5391762e5719575217bd10ac5a189d1979442ee54456393f3"}, + {file = "orjson-3.10.6-cp310-none-win_amd64.whl", hash = 
"sha256:df25d9271270ba2133cc88ee83c318372bdc0f2cd6f32e7a450809a111efc45c"}, + {file = "orjson-3.10.6-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b1ec490e10d2a77c345def52599311849fc063ae0e67cf4f84528073152bb2ba"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55d43d3feb8f19d07e9f01e5b9be4f28801cf7c60d0fa0d279951b18fae1932b"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3045267e98fe749408eee1593a142e02357c5c99be0802185ef2170086a863"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c27bc6a28ae95923350ab382c57113abd38f3928af3c80be6f2ba7eb8d8db0b0"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d27456491ca79532d11e507cadca37fb8c9324a3976294f68fb1eff2dc6ced5a"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05ac3d3916023745aa3b3b388e91b9166be1ca02b7c7e41045da6d12985685f0"}, + {file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1335d4ef59ab85cab66fe73fd7a4e881c298ee7f63ede918b7faa1b27cbe5212"}, + {file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4bbc6d0af24c1575edc79994c20e1b29e6fb3c6a570371306db0993ecf144dc5"}, + {file = "orjson-3.10.6-cp311-none-win32.whl", hash = "sha256:450e39ab1f7694465060a0550b3f6d328d20297bf2e06aa947b97c21e5241fbd"}, + {file = "orjson-3.10.6-cp311-none-win_amd64.whl", hash = "sha256:227df19441372610b20e05bdb906e1742ec2ad7a66ac8350dcfd29a63014a83b"}, + {file = "orjson-3.10.6-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ea2977b21f8d5d9b758bb3f344a75e55ca78e3ff85595d248eee813ae23ecdfb"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6f3d167d13a16ed263b52dbfedff52c962bfd3d270b46b7518365bcc2121eed"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f710f346e4c44a4e8bdf23daa974faede58f83334289df80bc9cd12fe82573c7"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7275664f84e027dcb1ad5200b8b18373e9c669b2a9ec33d410c40f5ccf4b257e"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0943e4c701196b23c240b3d10ed8ecd674f03089198cf503105b474a4f77f21f"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:446dee5a491b5bc7d8f825d80d9637e7af43f86a331207b9c9610e2f93fee22a"}, + {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:64c81456d2a050d380786413786b057983892db105516639cb5d3ee3c7fd5148"}, + {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:960db0e31c4e52fa0fc3ecbaea5b2d3b58f379e32a95ae6b0ebeaa25b93dfd34"}, + {file = "orjson-3.10.6-cp312-none-win32.whl", hash = "sha256:a6ea7afb5b30b2317e0bee03c8d34c8181bc5a36f2afd4d0952f378972c4efd5"}, + {file = "orjson-3.10.6-cp312-none-win_amd64.whl", hash = "sha256:874ce88264b7e655dde4aeaacdc8fd772a7962faadfb41abe63e2a4861abc3dc"}, + {file = "orjson-3.10.6-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:66680eae4c4e7fc193d91cfc1353ad6d01b4801ae9b5314f17e11ba55e934183"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:caff75b425db5ef8e8f23af93c80f072f97b4fb3afd4af44482905c9f588da28"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3722fddb821b6036fd2a3c814f6bd9b57a89dc6337b9924ecd614ebce3271394"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2c116072a8533f2fec435fde4d134610f806bdac20188c7bd2081f3e9e0133f"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6eeb13218c8cf34c61912e9df2de2853f1d009de0e46ea09ccdf3d757896af0a"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965a916373382674e323c957d560b953d81d7a8603fbeee26f7b8248638bd48b"}, + {file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03c95484d53ed8e479cade8628c9cea00fd9d67f5554764a1110e0d5aa2de96e"}, + {file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e060748a04cccf1e0a6f2358dffea9c080b849a4a68c28b1b907f272b5127e9b"}, + {file = "orjson-3.10.6-cp38-none-win32.whl", hash = "sha256:738dbe3ef909c4b019d69afc19caf6b5ed0e2f1c786b5d6215fbb7539246e4c6"}, + {file = "orjson-3.10.6-cp38-none-win_amd64.whl", hash = "sha256:d40f839dddf6a7d77114fe6b8a70218556408c71d4d6e29413bb5f150a692ff7"}, + {file = "orjson-3.10.6-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:697a35a083c4f834807a6232b3e62c8b280f7a44ad0b759fd4dce748951e70db"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd502f96bf5ea9a61cbc0b2b5900d0dd68aa0da197179042bdd2be67e51a1e4b"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f215789fb1667cdc874c1b8af6a84dc939fd802bf293a8334fce185c79cd359b"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2debd8ddce948a8c0938c8c93ade191d2f4ba4649a54302a7da905a81f00b56"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5410111d7b6681d4b0d65e0f58a13be588d01b473822483f77f513c7f93bd3b2"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb1f28a137337fdc18384079fa5726810681055b32b92253fa15ae5656e1dddb"}, + {file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bf2fbbce5fe7cd1aa177ea3eab2b8e6a6bc6e8592e4279ed3db2d62e57c0e1b2"}, + {file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:79b9b9e33bd4c517445a62b90ca0cc279b0f1f3970655c3df9e608bc3f91741a"}, + {file = "orjson-3.10.6-cp39-none-win32.whl", hash = "sha256:30b0a09a2014e621b1adf66a4f705f0809358350a757508ee80209b2d8dae219"}, + {file = "orjson-3.10.6-cp39-none-win_amd64.whl", hash = "sha256:49e3bc615652617d463069f91b867a4458114c5b104e13b7ae6872e5f79d0844"}, + {file = "orjson-3.10.6.tar.gz", hash = "sha256:e54b63d0a7c6c54a5f5f726bc93a2078111ef060fec4ecbf34c5db800ca3b3a7"}, ] [[package]] name = "packaging" -version = "23.2" +version = "24.1" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-24.1-py3-none-any.whl", hash = 
"sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] name = "pillow" -version = "10.3.0" +version = "10.4.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, - {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, - {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, - {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, - {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, - {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, - {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, - {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = 
"sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, - {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, - {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, - {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, - {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, - {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, - {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, - {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, - {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, - {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, + 
{file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = 
"pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = 
"pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] @@ -1099,109 +1113,122 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.7.1" +version = "2.8.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, - {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.2" -typing-extensions = ">=4.6.1" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] [package.extras] email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.2" +version = "2.20.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, - {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, - {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, - {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, - {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, - {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = 
"sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, - {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, - {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, - {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, - {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, - {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, - {file = 
"pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, - {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, - {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, - {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, - {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, - {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, - {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, - {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, - {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, - {file = 
"pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, - {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = 
"pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, ] [package.dependencies] @@ -1231,13 +1258,13 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no [[package]] name = "pytest-asyncio" -version = "0.21.1" +version = "0.21.2" description = "Pytest support for asyncio" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"}, - {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"}, + {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, + {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, ] [package.dependencies] @@ -1355,13 +1382,13 @@ files = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.3" description = "Python HTTP for Humans." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -1430,64 +1457,64 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.30" +version = "2.0.31" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3b48154678e76445c7ded1896715ce05319f74b1e73cf82d4f8b59b46e9c0ddc"}, - {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2753743c2afd061bb95a61a51bbb6a1a11ac1c44292fad898f10c9839a7f75b2"}, - {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7bfc726d167f425d4c16269a9a10fe8630ff6d14b683d588044dcef2d0f6be7"}, - {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f61ada6979223013d9ab83a3ed003ded6959eae37d0d685db2c147e9143797"}, - {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a365eda439b7a00732638f11072907c1bc8e351c7665e7e5da91b169af794af"}, - {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bba002a9447b291548e8d66fd8c96a6a7ed4f2def0bb155f4f0a1309fd2735d5"}, - {file = "SQLAlchemy-2.0.30-cp310-cp310-win32.whl", hash = "sha256:0138c5c16be3600923fa2169532205d18891b28afa817cb49b50e08f62198bb8"}, - {file = "SQLAlchemy-2.0.30-cp310-cp310-win_amd64.whl", hash = "sha256:99650e9f4cf3ad0d409fed3eec4f071fadd032e9a5edc7270cd646a26446feeb"}, - {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:955991a09f0992c68a499791a753523f50f71a6885531568404fa0f231832aa0"}, - {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f69e4c756ee2686767eb80f94c0125c8b0a0b87ede03eacc5c8ae3b54b99dc46"}, - {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69c9db1ce00e59e8dd09d7bae852a9add716efdc070a3e2068377e6ff0d6fdaa"}, - {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1429a4b0f709f19ff3b0cf13675b2b9bfa8a7e79990003207a011c0db880a13"}, - {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:efedba7e13aa9a6c8407c48facfdfa108a5a4128e35f4c68f20c3407e4376aa9"}, - {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16863e2b132b761891d6c49f0a0f70030e0bcac4fd208117f6b7e053e68668d0"}, - {file = "SQLAlchemy-2.0.30-cp311-cp311-win32.whl", hash = "sha256:2ecabd9ccaa6e914e3dbb2aa46b76dede7eadc8cbf1b8083c94d936bcd5ffb49"}, - {file = "SQLAlchemy-2.0.30-cp311-cp311-win_amd64.whl", hash = "sha256:0b3f4c438e37d22b83e640f825ef0f37b95db9aa2d68203f2c9549375d0b2260"}, - {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5a79d65395ac5e6b0c2890935bad892eabb911c4aa8e8015067ddb37eea3d56c"}, - {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9a5baf9267b752390252889f0c802ea13b52dfee5e369527da229189b8bd592e"}, - 
{file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cb5a646930c5123f8461f6468901573f334c2c63c795b9af350063a736d0134"}, - {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:296230899df0b77dec4eb799bcea6fbe39a43707ce7bb166519c97b583cfcab3"}, - {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c62d401223f468eb4da32627bffc0c78ed516b03bb8a34a58be54d618b74d472"}, - {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3b69e934f0f2b677ec111b4d83f92dc1a3210a779f69bf905273192cf4ed433e"}, - {file = "SQLAlchemy-2.0.30-cp312-cp312-win32.whl", hash = "sha256:77d2edb1f54aff37e3318f611637171e8ec71472f1fdc7348b41dcb226f93d90"}, - {file = "SQLAlchemy-2.0.30-cp312-cp312-win_amd64.whl", hash = "sha256:b6c7ec2b1f4969fc19b65b7059ed00497e25f54069407a8701091beb69e591a5"}, - {file = "SQLAlchemy-2.0.30-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5a8e3b0a7e09e94be7510d1661339d6b52daf202ed2f5b1f9f48ea34ee6f2d57"}, - {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b60203c63e8f984df92035610c5fb76d941254cf5d19751faab7d33b21e5ddc0"}, - {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1dc3eabd8c0232ee8387fbe03e0a62220a6f089e278b1f0aaf5e2d6210741ad"}, - {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:40ad017c672c00b9b663fcfcd5f0864a0a97828e2ee7ab0c140dc84058d194cf"}, - {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e42203d8d20dc704604862977b1470a122e4892791fe3ed165f041e4bf447a1b"}, - {file = "SQLAlchemy-2.0.30-cp37-cp37m-win32.whl", hash = "sha256:2a4f4da89c74435f2bc61878cd08f3646b699e7d2eba97144030d1be44e27584"}, - {file = "SQLAlchemy-2.0.30-cp37-cp37m-win_amd64.whl", hash = "sha256:b6bf767d14b77f6a18b6982cbbf29d71bede087edae495d11ab358280f304d8e"}, - {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc0c53579650a891f9b83fa3cecd4e00218e071d0ba00c4890f5be0c34887ed3"}, - {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:311710f9a2ee235f1403537b10c7687214bb1f2b9ebb52702c5aa4a77f0b3af7"}, - {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:408f8b0e2c04677e9c93f40eef3ab22f550fecb3011b187f66a096395ff3d9fd"}, - {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37a4b4fb0dd4d2669070fb05b8b8824afd0af57587393015baee1cf9890242d9"}, - {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a943d297126c9230719c27fcbbeab57ecd5d15b0bd6bfd26e91bfcfe64220621"}, - {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0a089e218654e740a41388893e090d2e2c22c29028c9d1353feb38638820bbeb"}, - {file = "SQLAlchemy-2.0.30-cp38-cp38-win32.whl", hash = "sha256:fa561138a64f949f3e889eb9ab8c58e1504ab351d6cf55259dc4c248eaa19da6"}, - {file = "SQLAlchemy-2.0.30-cp38-cp38-win_amd64.whl", hash = "sha256:7d74336c65705b986d12a7e337ba27ab2b9d819993851b140efdf029248e818e"}, - {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8c62fe2480dd61c532ccafdbce9b29dacc126fe8be0d9a927ca3e699b9491a"}, - {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2383146973a15435e4717f94c7509982770e3e54974c71f76500a0136f22810b"}, - {file = 
"SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8409de825f2c3b62ab15788635ccaec0c881c3f12a8af2b12ae4910a0a9aeef6"}, - {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0094c5dc698a5f78d3d1539853e8ecec02516b62b8223c970c86d44e7a80f6c7"}, - {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:edc16a50f5e1b7a06a2dcc1f2205b0b961074c123ed17ebda726f376a5ab0953"}, - {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f7703c2010355dd28f53deb644a05fc30f796bd8598b43f0ba678878780b6e4c"}, - {file = "SQLAlchemy-2.0.30-cp39-cp39-win32.whl", hash = "sha256:1f9a727312ff6ad5248a4367358e2cf7e625e98b1028b1d7ab7b806b7d757513"}, - {file = "SQLAlchemy-2.0.30-cp39-cp39-win_amd64.whl", hash = "sha256:a0ef36b28534f2a5771191be6edb44cc2673c7b2edf6deac6562400288664221"}, - {file = "SQLAlchemy-2.0.30-py3-none-any.whl", hash = "sha256:7108d569d3990c71e26a42f60474b4c02c8586c4681af5fd67e51a044fdea86a"}, - {file = "SQLAlchemy-2.0.30.tar.gz", hash = "sha256:2b1708916730f4830bc69d6f49d37f7698b5bd7530aca7f04f785f8849e95255"}, + {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f2a213c1b699d3f5768a7272de720387ae0122f1becf0901ed6eaa1abd1baf6c"}, + {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9fea3d0884e82d1e33226935dac990b967bef21315cbcc894605db3441347443"}, + {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ad7f221d8a69d32d197e5968d798217a4feebe30144986af71ada8c548e9fa"}, + {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2bee229715b6366f86a95d497c347c22ddffa2c7c96143b59a2aa5cc9eebbc"}, + {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cd5b94d4819c0c89280b7c6109c7b788a576084bf0a480ae17c227b0bc41e109"}, + {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:750900a471d39a7eeba57580b11983030517a1f512c2cb287d5ad0fcf3aebd58"}, + {file = "SQLAlchemy-2.0.31-cp310-cp310-win32.whl", hash = "sha256:7bd112be780928c7f493c1a192cd8c5fc2a2a7b52b790bc5a84203fb4381c6be"}, + {file = "SQLAlchemy-2.0.31-cp310-cp310-win_amd64.whl", hash = "sha256:5a48ac4d359f058474fadc2115f78a5cdac9988d4f99eae44917f36aa1476327"}, + {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f68470edd70c3ac3b6cd5c2a22a8daf18415203ca1b036aaeb9b0fb6f54e8298"}, + {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e2c38c2a4c5c634fe6c3c58a789712719fa1bf9b9d6ff5ebfce9a9e5b89c1ca"}, + {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd15026f77420eb2b324dcb93551ad9c5f22fab2c150c286ef1dc1160f110203"}, + {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2196208432deebdfe3b22185d46b08f00ac9d7b01284e168c212919891289396"}, + {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:352b2770097f41bff6029b280c0e03b217c2dcaddc40726f8f53ed58d8a85da4"}, + {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56d51ae825d20d604583f82c9527d285e9e6d14f9a5516463d9705dab20c3740"}, + {file = "SQLAlchemy-2.0.31-cp311-cp311-win32.whl", hash = "sha256:6e2622844551945db81c26a02f27d94145b561f9d4b0c39ce7bfd2fda5776dac"}, + {file = "SQLAlchemy-2.0.31-cp311-cp311-win_amd64.whl", hash = 
"sha256:ccaf1b0c90435b6e430f5dd30a5aede4764942a695552eb3a4ab74ed63c5b8d3"}, + {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3b74570d99126992d4b0f91fb87c586a574a5872651185de8297c6f90055ae42"}, + {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f77c4f042ad493cb8595e2f503c7a4fe44cd7bd59c7582fd6d78d7e7b8ec52c"}, + {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd1591329333daf94467e699e11015d9c944f44c94d2091f4ac493ced0119449"}, + {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74afabeeff415e35525bf7a4ecdab015f00e06456166a2eba7590e49f8db940e"}, + {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b9c01990d9015df2c6f818aa8f4297d42ee71c9502026bb074e713d496e26b67"}, + {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:66f63278db425838b3c2b1c596654b31939427016ba030e951b292e32b99553e"}, + {file = "SQLAlchemy-2.0.31-cp312-cp312-win32.whl", hash = "sha256:0b0f658414ee4e4b8cbcd4a9bb0fd743c5eeb81fc858ca517217a8013d282c96"}, + {file = "SQLAlchemy-2.0.31-cp312-cp312-win_amd64.whl", hash = "sha256:fa4b1af3e619b5b0b435e333f3967612db06351217c58bfb50cee5f003db2a5a"}, + {file = "SQLAlchemy-2.0.31-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f43e93057cf52a227eda401251c72b6fbe4756f35fa6bfebb5d73b86881e59b0"}, + {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d337bf94052856d1b330d5fcad44582a30c532a2463776e1651bd3294ee7e58b"}, + {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c06fb43a51ccdff3b4006aafee9fcf15f63f23c580675f7734245ceb6b6a9e05"}, + {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:b6e22630e89f0e8c12332b2b4c282cb01cf4da0d26795b7eae16702a608e7ca1"}, + {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:79a40771363c5e9f3a77f0e28b3302801db08040928146e6808b5b7a40749c88"}, + {file = "SQLAlchemy-2.0.31-cp37-cp37m-win32.whl", hash = "sha256:501ff052229cb79dd4c49c402f6cb03b5a40ae4771efc8bb2bfac9f6c3d3508f"}, + {file = "SQLAlchemy-2.0.31-cp37-cp37m-win_amd64.whl", hash = "sha256:597fec37c382a5442ffd471f66ce12d07d91b281fd474289356b1a0041bdf31d"}, + {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dc6d69f8829712a4fd799d2ac8d79bdeff651c2301b081fd5d3fe697bd5b4ab9"}, + {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23b9fbb2f5dd9e630db70fbe47d963c7779e9c81830869bd7d137c2dc1ad05fb"}, + {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21c97efcbb9f255d5c12a96ae14da873233597dfd00a3a0c4ce5b3e5e79704"}, + {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26a6a9837589c42b16693cf7bf836f5d42218f44d198f9343dd71d3164ceeeac"}, + {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc251477eae03c20fae8db9c1c23ea2ebc47331bcd73927cdcaecd02af98d3c3"}, + {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2fd17e3bb8058359fa61248c52c7b09a97cf3c820e54207a50af529876451808"}, + {file = "SQLAlchemy-2.0.31-cp38-cp38-win32.whl", hash = "sha256:c76c81c52e1e08f12f4b6a07af2b96b9b15ea67ccdd40ae17019f1c373faa227"}, + {file = "SQLAlchemy-2.0.31-cp38-cp38-win_amd64.whl", hash = 
"sha256:4b600e9a212ed59355813becbcf282cfda5c93678e15c25a0ef896b354423238"}, + {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b6cf796d9fcc9b37011d3f9936189b3c8074a02a4ed0c0fbbc126772c31a6d4"}, + {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78fe11dbe37d92667c2c6e74379f75746dc947ee505555a0197cfba9a6d4f1a4"}, + {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fc47dc6185a83c8100b37acda27658fe4dbd33b7d5e7324111f6521008ab4fe"}, + {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a41514c1a779e2aa9a19f67aaadeb5cbddf0b2b508843fcd7bafdf4c6864005"}, + {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:afb6dde6c11ea4525318e279cd93c8734b795ac8bb5dda0eedd9ebaca7fa23f1"}, + {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3f9faef422cfbb8fd53716cd14ba95e2ef655400235c3dfad1b5f467ba179c8c"}, + {file = "SQLAlchemy-2.0.31-cp39-cp39-win32.whl", hash = "sha256:fc6b14e8602f59c6ba893980bea96571dd0ed83d8ebb9c4479d9ed5425d562e9"}, + {file = "SQLAlchemy-2.0.31-cp39-cp39-win_amd64.whl", hash = "sha256:3cb8a66b167b033ec72c3812ffc8441d4e9f5f78f5e31e54dcd4c90a4ca5bebc"}, + {file = "SQLAlchemy-2.0.31-py3-none-any.whl", hash = "sha256:69f3e3c08867a8e4856e92d7afb618b95cdee18e0bc1647b77599722c9a28911"}, + {file = "SQLAlchemy-2.0.31.tar.gz", hash = "sha256:b607489dd4a54de56984a0c7656247504bd5523d9d0ba799aef59d4add009484"}, ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} typing-extensions = ">=4.6.0" [package.extras] @@ -1531,17 +1558,18 @@ pytest = ">=7.0.0,<9.0.0" [[package]] name = "tenacity" -version = "8.2.3" +version = "8.5.0" description = "Retry code until it succeeds" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, - {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, + {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, + {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, ] [package.extras] -doc = ["reno", "sphinx", "tornado (>=4.5)"] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tomli" @@ -1556,24 +1584,24 @@ files = [ [[package]] name = "types-pillow" -version = "10.2.0.20240423" +version = "10.2.0.20240520" description = "Typing stubs for Pillow" optional = false python-versions = ">=3.8" files = [ - {file = "types-Pillow-10.2.0.20240423.tar.gz", hash = "sha256:696e68b9b6a58548fc307a8669830469237c5b11809ddf978ac77fafa79251cd"}, - {file = "types_Pillow-10.2.0.20240423-py3-none-any.whl", hash = 
"sha256:bd12923093b96c91d523efcdb66967a307f1a843bcfaf2d5a529146c10a9ced3"}, + {file = "types-Pillow-10.2.0.20240520.tar.gz", hash = "sha256:130b979195465fa1e1676d8e81c9c7c30319e8e95b12fae945e8f0d525213107"}, + {file = "types_Pillow-10.2.0.20240520-py3-none-any.whl", hash = "sha256:33c36494b380e2a269bb742181bea5d9b00820367822dbd3760f07210a1da23d"}, ] [[package]] name = "types-requests" -version = "2.31.0.20240406" +version = "2.32.0.20240712" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.31.0.20240406.tar.gz", hash = "sha256:4428df33c5503945c74b3f42e82b181e86ec7b724620419a2966e2de604ce1a1"}, - {file = "types_requests-2.31.0.20240406-py3-none-any.whl", hash = "sha256:6216cdac377c6b9a040ac1c0404f7284bd13199c0e1bb235f4324627e8898cf5"}, + {file = "types-requests-2.32.0.20240712.tar.gz", hash = "sha256:90c079ff05e549f6bf50e02e910210b98b8ff1ebdd18e19c873cd237737c1358"}, + {file = "types_requests-2.32.0.20240712-py3-none-any.whl", hash = "sha256:f754283e152c752e46e70942fa2a146b5bc70393522257bb85bd1ef7e019dcc3"}, ] [package.dependencies] @@ -1581,13 +1609,13 @@ urllib3 = ">=2" [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] @@ -1607,13 +1635,13 @@ typing-extensions = ">=3.7.4" [[package]] name = "urllib3" -version = "2.2.1" +version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] @@ -1624,40 +1652,43 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "watchdog" -version = "4.0.0" +version = "4.0.1" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, - {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, - {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, - {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, - {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, - {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, - {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, - {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, + {file = 
"watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, + {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, + {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, + {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, + {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, ] [package.extras] @@ -1769,4 +1800,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "e9a538766aca94f7cf2fa991936319857ac32d78e79b0b815f691b81476a27a6" +content-hash = "2ed2a883a76dfb2972b6c169a46a56486834ade36f578474ddb44f018c7b628d" diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml index 5e6f2a29..0632779b 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -12,7 +12,7 @@ license = "MIT" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -langchain-core = ">=0.1.27,<0.3" +langchain-core = ">=0.1.47,<0.3" aiohttp = "^3.9.1" pillow = ">=10.0.0,<11.0.0" langchain-community = "^0.2.5" From 1c4f1c1933a59dd3c159961efbf00359aceb7577 Mon Sep 17 00:00:00 2001 From: Erick Friis Date: Tue, 23 Jul 2024 17:12:12 -0700 Subject: [PATCH 52/60] ai-endpoints: remove langchain-community dependency --- .../langchain_nvidia_ai_endpoints/_utils.py | 63 
+++++++++++++++++++ .../chat_models.py | 2 +- libs/ai-endpoints/pyproject.toml | 1 - 3 files changed, 64 insertions(+), 2 deletions(-) create mode 100644 libs/ai-endpoints/langchain_nvidia_ai_endpoints/_utils.py diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_utils.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_utils.py new file mode 100644 index 00000000..7280a3db --- /dev/null +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_utils.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +from typing import ( + Any, + Dict, +) + +from langchain_core.messages import ( + AIMessage, + BaseMessage, + ChatMessage, + FunctionMessage, + HumanMessage, + SystemMessage, + ToolMessage, +) + + +def convert_message_to_dict(message: BaseMessage) -> dict: + """Convert a LangChain message to a dictionary. + + Args: + message: The LangChain message. + + Returns: + The dictionary. + """ + message_dict: Dict[str, Any] + if isinstance(message, ChatMessage): + message_dict = {"role": message.role, "content": message.content} + elif isinstance(message, HumanMessage): + message_dict = {"role": "user", "content": message.content} + elif isinstance(message, AIMessage): + message_dict = {"role": "assistant", "content": message.content} + if "function_call" in message.additional_kwargs: + message_dict["function_call"] = message.additional_kwargs["function_call"] + # If function call only, content is None not empty string + if message_dict["content"] == "": + message_dict["content"] = None + if "tool_calls" in message.additional_kwargs: + message_dict["tool_calls"] = message.additional_kwargs["tool_calls"] + # If tool calls only, content is None not empty string + if message_dict["content"] == "": + message_dict["content"] = None + elif isinstance(message, SystemMessage): + message_dict = {"role": "system", "content": message.content} + elif isinstance(message, FunctionMessage): + message_dict = { + "role": "function", + "content": message.content, + "name": message.name, + } + elif isinstance(message, ToolMessage): + message_dict = { + "role": "tool", + "content": message.content, + "tool_call_id": message.tool_call_id, + } + else: + raise TypeError(f"Got unknown type {message}") + if "name" in message.additional_kwargs: + message_dict["name"] = message.additional_kwargs["name"] + return message_dict diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 86a6a8ca..f6572afe 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -23,7 +23,6 @@ ) import requests -from langchain_community.adapters.openai import convert_message_to_dict from langchain_core.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -46,6 +45,7 @@ from langchain_nvidia_ai_endpoints._common import _NVIDIAClient from langchain_nvidia_ai_endpoints._statics import Model +from langchain_nvidia_ai_endpoints._utils import convert_message_to_dict _CallbackManager = Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] _DictOrPydanticClass = Union[Dict[str, Any], Type[BaseModel]] diff --git a/libs/ai-endpoints/pyproject.toml b/libs/ai-endpoints/pyproject.toml index 0632779b..7b7b6b28 100644 --- a/libs/ai-endpoints/pyproject.toml +++ b/libs/ai-endpoints/pyproject.toml @@ -15,7 +15,6 @@ python = ">=3.8.1,<4.0" langchain-core = ">=0.1.47,<0.3" aiohttp = "^3.9.1" pillow = ">=10.0.0,<11.0.0" 
-langchain-community = "^0.2.5" [tool.poetry.group.test] optional = true From 24598b3e8c27fc53459cda63c586a34197fa8e6e Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Wed, 24 Jul 2024 07:21:01 -0400 Subject: [PATCH 53/60] allow AIMessage.content=None for tool calls --- .../chat_models.py | 4 ++- .../tests/unit_tests/test_messages.py | 36 +++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 libs/ai-endpoints/tests/unit_tests/test_messages.py diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 86a6a8ca..18c206dd 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -378,7 +378,9 @@ def _get_payload( messages.append(dict(role="user", content=msg)) elif isinstance(msg, dict): if msg.get("content", None) is None: - raise ValueError(f"Message {msg} has no content") + # content=None is valid for assistant messages (tool calling) + if not msg.get("role") == "assistant": + raise ValueError(f"Message {msg} has no content.") messages.append(msg) else: raise ValueError(f"Unknown message received: {msg} of type {type(msg)}") diff --git a/libs/ai-endpoints/tests/unit_tests/test_messages.py b/libs/ai-endpoints/tests/unit_tests/test_messages.py new file mode 100644 index 00000000..3bbbaa92 --- /dev/null +++ b/libs/ai-endpoints/tests/unit_tests/test_messages.py @@ -0,0 +1,36 @@ +import requests_mock +from langchain_core.messages import AIMessage + +from langchain_nvidia_ai_endpoints import ChatNVIDIA + + +def test_invoke_aimessage_content_none(requests_mock: requests_mock.Mocker) -> None: + requests_mock.post( + "https://integrate.api.nvidia.com/v1/chat/completions", + json={ + "id": "mock-id", + "created": 1234567890, + "object": "chat.completion", + "model": "mock-model", + "choices": [ + { + "index": 0, + "message": {"role": "assistant", "content": "WORKED"}, + } + ], + }, + ) + + empty_aimessage = AIMessage(content="EMPTY") + empty_aimessage.content = None # type: ignore + + llm = ChatNVIDIA() + response = llm.invoke([empty_aimessage]) + request = requests_mock.request_history[0] + assert request.method == "POST" + assert request.url == "https://integrate.api.nvidia.com/v1/chat/completions" + message = request.json()["messages"][0] + assert "content" in message and message["content"] != "EMPTY" + assert "content" in message and message["content"] is None + assert isinstance(response, AIMessage) + assert response.content == "WORKED" From c3745e01c399ec13a211bde4d026b54bab9beee0 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Wed, 24 Jul 2024 08:23:03 -0400 Subject: [PATCH 54/60] ensure tool_choice=name works for func & cls tools --- .../chat_models.py | 10 +-- .../tests/unit_tests/test_bind_tools.py | 64 +++++++++++++++++++ 2 files changed, 67 insertions(+), 7 deletions(-) create mode 100644 libs/ai-endpoints/tests/unit_tests/test_bind_tools.py diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 18c206dd..b1738536 100644 --- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -489,19 +489,15 @@ def bind_tools( tool_name = tool_choice["function"]["name"] # check that the specified tool is in the tools list + tool_dicts = [convert_to_openai_tool(tool) for tool in tools] if tool_name: - if not 
any( - isinstance(tool, BaseTool) and tool.name == tool_name for tool in tools - ) and not any( - isinstance(tool, dict) and tool.get("name") == tool_name - for tool in tools - ): + if not any(tool["function"]["name"] == tool_name for tool in tool_dicts): raise ValueError( f"Tool choice '{tool_name}' not found in the tools list" ) return super().bind( - tools=[convert_to_openai_tool(tool) for tool in tools], + tools=tool_dicts, tool_choice=tool_choice, **kwargs, ) diff --git a/libs/ai-endpoints/tests/unit_tests/test_bind_tools.py b/libs/ai-endpoints/tests/unit_tests/test_bind_tools.py new file mode 100644 index 00000000..4876f5f1 --- /dev/null +++ b/libs/ai-endpoints/tests/unit_tests/test_bind_tools.py @@ -0,0 +1,64 @@ +import warnings +from typing import Any + +import pytest +from langchain_core.pydantic_v1 import BaseModel, Field +from langchain_core.tools import tool + +from langchain_nvidia_ai_endpoints import ChatNVIDIA + + +def xxyyzz_func(a: int, b: int) -> int: + """xxyyzz two numbers""" + return 42 + + +class xxyyzz_cls(BaseModel): + """xxyyzz two numbers""" + + a: int = Field(..., description="First number") + b: int = Field(..., description="Second number") + + +@tool +def xxyyzz_tool( + a: int = Field(..., description="First number"), + b: int = Field(..., description="Second number"), +) -> int: + """xxyyzz two numbers""" + return 42 + + +@pytest.mark.parametrize( + "tools, choice", + [ + ([xxyyzz_func], "xxyyzz_func"), + ([xxyyzz_cls], "xxyyzz_cls"), + ([xxyyzz_tool], "xxyyzz_tool"), + ], + ids=["func", "cls", "tool"], +) +def test_bind_tool_and_select(tools: Any, choice: str) -> None: + warnings.filterwarnings( + "ignore", category=UserWarning, message=".*not known to support tools.*" + ) + ChatNVIDIA(api_key="BOGUS").bind_tools(tools=tools, tool_choice=choice) + + +@pytest.mark.parametrize( + "tools, choice", + [ + ([], "wrong"), + ([xxyyzz_func], "wrong_xxyyzz_func"), + ([xxyyzz_cls], "wrong_xxyyzz_cls"), + ([xxyyzz_tool], "wrong_xxyyzz_tool"), + ], + ids=["empty", "func", "cls", "tool"], +) +def test_bind_tool_and_select_negative(tools: Any, choice: str) -> None: + warnings.filterwarnings( + "ignore", category=UserWarning, message=".*not known to support tools.*" + ) + with pytest.raises(ValueError) as e: + ChatNVIDIA(api_key="BOGUS").bind_tools(tools=tools, tool_choice=choice) + assert "not found in the tools list" in str(e.value) From e91834bd12938127f05d4a76466a7e8f6ea1e8b7 Mon Sep 17 00:00:00 2001 From: Matthew Farrellee Date: Wed, 24 Jul 2024 13:29:23 -0400 Subject: [PATCH 55/60] updated poetry.lock --- libs/ai-endpoints/poetry.lock | 360 +--------------------------------- 1 file changed, 2 insertions(+), 358 deletions(-) diff --git a/libs/ai-endpoints/poetry.lock b/libs/ai-endpoints/poetry.lock index 26a503a6..1a4176d7 100644 --- a/libs/ai-endpoints/poetry.lock +++ b/libs/ai-endpoints/poetry.lock @@ -292,21 +292,6 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -[[package]] -name = "dataclasses-json" -version = "0.6.7" -description = "Easily serialize dataclasses to and from JSON." 
-optional = false -python-versions = "<4.0,>=3.7" -files = [ - {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, - {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, -] - -[package.dependencies] -marshmallow = ">=3.18.0,<4.0.0" -typing-inspect = ">=0.4.0,<1" - [[package]] name = "exceptiongroup" version = "1.2.2" @@ -436,77 +421,6 @@ files = [ {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, ] -[[package]] -name = "greenlet" -version = "3.0.3" -description = "Lightweight in-process concurrent programming" -optional = false -python-versions = ">=3.7" -files = [ - {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, - {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, - {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, - {file = 
"greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, - {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, - {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, - {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, - {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, - {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, - {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, - {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, - {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, - {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, - {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, - {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, - {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, -] - -[package.extras] -docs = ["Sphinx", "furo"] -test = ["objgraph", "psutil"] - [[package]] name = "idna" version = "3.7" @@ -554,59 +468,6 @@ files = [ {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, ] -[[package]] -name = "langchain" -version = "0.2.11" -description = "Building applications with LLMs through composability" -optional = false -python-versions = "<4.0,>=3.8.1" -files = [ - {file = "langchain-0.2.11-py3-none-any.whl", hash = "sha256:5a7a8b4918f3d3bebce9b4f23b92d050699e6f7fb97591e8941177cf07a260a2"}, - {file = "langchain-0.2.11.tar.gz", hash = "sha256:d7a9e4165f02dca0bd78addbc2319d5b9286b5d37c51d784124102b57e9fd297"}, -] - -[package.dependencies] 
-aiohttp = ">=3.8.3,<4.0.0" -async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} -langchain-core = ">=0.2.23,<0.3.0" -langchain-text-splitters = ">=0.2.0,<0.3.0" -langsmith = ">=0.1.17,<0.2.0" -numpy = [ - {version = ">=1,<2", markers = "python_version < \"3.12\""}, - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, -] -pydantic = ">=1,<3" -PyYAML = ">=5.3" -requests = ">=2,<3" -SQLAlchemy = ">=1.4,<3" -tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" - -[[package]] -name = "langchain-community" -version = "0.2.10" -description = "Community contributed LangChain integrations." -optional = false -python-versions = "<4.0,>=3.8.1" -files = [ - {file = "langchain_community-0.2.10-py3-none-any.whl", hash = "sha256:9f4d1b5ab7f0b0a704f538e26e50fce45a461da6d2bf6b7b636d24f22fbc088a"}, - {file = "langchain_community-0.2.10.tar.gz", hash = "sha256:3a0404bad4bd07d6f86affdb62fb3d080a456c66191754d586a409d9d6024d62"}, -] - -[package.dependencies] -aiohttp = ">=3.8.3,<4.0.0" -dataclasses-json = ">=0.5.7,<0.7" -langchain = ">=0.2.9,<0.3.0" -langchain-core = ">=0.2.23,<0.3.0" -langsmith = ">=0.1.0,<0.2.0" -numpy = [ - {version = ">=1,<2", markers = "python_version < \"3.12\""}, - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, -] -PyYAML = ">=5.3" -requests = ">=2,<3" -SQLAlchemy = ">=1.4,<3" -tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" - [[package]] name = "langchain-core" version = "0.2.23" @@ -631,23 +492,9 @@ tenacity = "^8.1.0,!=8.4.0" type = "git" url = "https://github.com/langchain-ai/langchain.git" reference = "HEAD" -resolved_reference = "03881c674358df6bc243f67a77050aa95c660bcd" +resolved_reference = "b55f6105c6ddb885256a9af062f75048539ca952" subdirectory = "libs/core" -[[package]] -name = "langchain-text-splitters" -version = "0.2.2" -description = "LangChain text splitting utilities" -optional = false -python-versions = "<4.0,>=3.8.1" -files = [ - {file = "langchain_text_splitters-0.2.2-py3-none-any.whl", hash = "sha256:1c80d4b11b55e2995f02d2a326c0323ee1eeff24507329bb22924e420c782dff"}, - {file = "langchain_text_splitters-0.2.2.tar.gz", hash = "sha256:a1e45de10919fa6fb080ef0525deab56557e9552083600455cb9fa4238076140"}, -] - -[package.dependencies] -langchain-core = ">=0.2.10,<0.3.0" - [[package]] name = "langsmith" version = "0.1.93" @@ -667,25 +514,6 @@ pydantic = [ ] requests = ">=2,<3" -[[package]] -name = "marshmallow" -version = "3.21.3" -description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, - {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, -] - -[package.dependencies] -packaging = ">=17.0" - -[package.extras] -dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] -tests = ["pytest", "pytz", "simplejson"] - [[package]] name = "multidict" version = "6.0.5" @@ -846,88 +674,6 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] -[[package]] -name = "numpy" -version = "1.24.4" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, - {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, - {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, - {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, - {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, - {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, - {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, - {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, - {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, - {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, - {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, - {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, - {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, - {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, - {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, - {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, - {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, - {file = 
"numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, - {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, - {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, - {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, - {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, - {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, - {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, - {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, -] - -[[package]] -name = "numpy" -version = "1.26.4" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.9" -files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = 
"sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, -] - [[package]] name = "orjson" version = "3.10.6" @@ -1455,93 +1201,6 @@ files = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] -[[package]] -name = "sqlalchemy" -version = "2.0.31" -description = "Database Abstraction Library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f2a213c1b699d3f5768a7272de720387ae0122f1becf0901ed6eaa1abd1baf6c"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9fea3d0884e82d1e33226935dac990b967bef21315cbcc894605db3441347443"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ad7f221d8a69d32d197e5968d798217a4feebe30144986af71ada8c548e9fa"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2bee229715b6366f86a95d497c347c22ddffa2c7c96143b59a2aa5cc9eebbc"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cd5b94d4819c0c89280b7c6109c7b788a576084bf0a480ae17c227b0bc41e109"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:750900a471d39a7eeba57580b11983030517a1f512c2cb287d5ad0fcf3aebd58"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-win32.whl", hash = "sha256:7bd112be780928c7f493c1a192cd8c5fc2a2a7b52b790bc5a84203fb4381c6be"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-win_amd64.whl", hash = "sha256:5a48ac4d359f058474fadc2115f78a5cdac9988d4f99eae44917f36aa1476327"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f68470edd70c3ac3b6cd5c2a22a8daf18415203ca1b036aaeb9b0fb6f54e8298"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e2c38c2a4c5c634fe6c3c58a789712719fa1bf9b9d6ff5ebfce9a9e5b89c1ca"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd15026f77420eb2b324dcb93551ad9c5f22fab2c150c286ef1dc1160f110203"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2196208432deebdfe3b22185d46b08f00ac9d7b01284e168c212919891289396"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:352b2770097f41bff6029b280c0e03b217c2dcaddc40726f8f53ed58d8a85da4"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56d51ae825d20d604583f82c9527d285e9e6d14f9a5516463d9705dab20c3740"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-win32.whl", hash = "sha256:6e2622844551945db81c26a02f27d94145b561f9d4b0c39ce7bfd2fda5776dac"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-win_amd64.whl", hash = "sha256:ccaf1b0c90435b6e430f5dd30a5aede4764942a695552eb3a4ab74ed63c5b8d3"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3b74570d99126992d4b0f91fb87c586a574a5872651185de8297c6f90055ae42"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f77c4f042ad493cb8595e2f503c7a4fe44cd7bd59c7582fd6d78d7e7b8ec52c"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd1591329333daf94467e699e11015d9c944f44c94d2091f4ac493ced0119449"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74afabeeff415e35525bf7a4ecdab015f00e06456166a2eba7590e49f8db940e"}, - {file = 
"SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b9c01990d9015df2c6f818aa8f4297d42ee71c9502026bb074e713d496e26b67"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:66f63278db425838b3c2b1c596654b31939427016ba030e951b292e32b99553e"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-win32.whl", hash = "sha256:0b0f658414ee4e4b8cbcd4a9bb0fd743c5eeb81fc858ca517217a8013d282c96"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-win_amd64.whl", hash = "sha256:fa4b1af3e619b5b0b435e333f3967612db06351217c58bfb50cee5f003db2a5a"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f43e93057cf52a227eda401251c72b6fbe4756f35fa6bfebb5d73b86881e59b0"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d337bf94052856d1b330d5fcad44582a30c532a2463776e1651bd3294ee7e58b"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c06fb43a51ccdff3b4006aafee9fcf15f63f23c580675f7734245ceb6b6a9e05"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:b6e22630e89f0e8c12332b2b4c282cb01cf4da0d26795b7eae16702a608e7ca1"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:79a40771363c5e9f3a77f0e28b3302801db08040928146e6808b5b7a40749c88"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-win32.whl", hash = "sha256:501ff052229cb79dd4c49c402f6cb03b5a40ae4771efc8bb2bfac9f6c3d3508f"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-win_amd64.whl", hash = "sha256:597fec37c382a5442ffd471f66ce12d07d91b281fd474289356b1a0041bdf31d"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dc6d69f8829712a4fd799d2ac8d79bdeff651c2301b081fd5d3fe697bd5b4ab9"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23b9fbb2f5dd9e630db70fbe47d963c7779e9c81830869bd7d137c2dc1ad05fb"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21c97efcbb9f255d5c12a96ae14da873233597dfd00a3a0c4ce5b3e5e79704"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26a6a9837589c42b16693cf7bf836f5d42218f44d198f9343dd71d3164ceeeac"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc251477eae03c20fae8db9c1c23ea2ebc47331bcd73927cdcaecd02af98d3c3"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2fd17e3bb8058359fa61248c52c7b09a97cf3c820e54207a50af529876451808"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-win32.whl", hash = "sha256:c76c81c52e1e08f12f4b6a07af2b96b9b15ea67ccdd40ae17019f1c373faa227"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-win_amd64.whl", hash = "sha256:4b600e9a212ed59355813becbcf282cfda5c93678e15c25a0ef896b354423238"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b6cf796d9fcc9b37011d3f9936189b3c8074a02a4ed0c0fbbc126772c31a6d4"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78fe11dbe37d92667c2c6e74379f75746dc947ee505555a0197cfba9a6d4f1a4"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fc47dc6185a83c8100b37acda27658fe4dbd33b7d5e7324111f6521008ab4fe"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a41514c1a779e2aa9a19f67aaadeb5cbddf0b2b508843fcd7bafdf4c6864005"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_aarch64.whl", hash 
= "sha256:afb6dde6c11ea4525318e279cd93c8734b795ac8bb5dda0eedd9ebaca7fa23f1"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3f9faef422cfbb8fd53716cd14ba95e2ef655400235c3dfad1b5f467ba179c8c"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-win32.whl", hash = "sha256:fc6b14e8602f59c6ba893980bea96571dd0ed83d8ebb9c4479d9ed5425d562e9"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-win_amd64.whl", hash = "sha256:3cb8a66b167b033ec72c3812ffc8441d4e9f5f78f5e31e54dcd4c90a4ca5bebc"}, - {file = "SQLAlchemy-2.0.31-py3-none-any.whl", hash = "sha256:69f3e3c08867a8e4856e92d7afb618b95cdee18e0bc1647b77599722c9a28911"}, - {file = "SQLAlchemy-2.0.31.tar.gz", hash = "sha256:b607489dd4a54de56984a0c7656247504bd5523d9d0ba799aef59d4add009484"}, -] - -[package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} -typing-extensions = ">=4.6.0" - -[package.extras] -aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] -aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] -mssql = ["pyodbc"] -mssql-pymssql = ["pymssql"] -mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0)"] -mysql-connector = ["mysql-connector-python"] -oracle = ["cx_oracle (>=8)"] -oracle-oracledb = ["oracledb (>=1.0.1)"] -postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] -postgresql-pg8000 = ["pg8000 (>=1.29.1)"] -postgresql-psycopg = ["psycopg (>=3.0.7)"] -postgresql-psycopg2binary = ["psycopg2-binary"] -postgresql-psycopg2cffi = ["psycopg2cffi"] -postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] -pymysql = ["pymysql"] -sqlcipher = ["sqlcipher3_binary"] - [[package]] name = "syrupy" version = "4.6.1" @@ -1618,21 +1277,6 @@ files = [ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] -[[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." 
-optional = false
-python-versions = "*"
-files = [
-    {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
-    {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"},
-]
-
-[package.dependencies]
-mypy-extensions = ">=0.3.0"
-typing-extensions = ">=3.7.4"
-
 [[package]]
 name = "urllib3"
 version = "2.2.2"
@@ -1800,4 +1444,4 @@ multidict = ">=4.0"
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "2ed2a883a76dfb2972b6c169a46a56486834ade36f578474ddb44f018c7b628d"
+content-hash = "eaa16f38cabc695e4ef785ee4d7e87635669ef3323931d2514747a13a3db8ac8"

From 3575b16df21d5c9016def9ca00609cf7d487791d Mon Sep 17 00:00:00 2001
From: Daniel Glogowski
Date: Wed, 24 Jul 2024 21:21:00 -0700
Subject: [PATCH 56/60] chat nb update with tool calling and upstream changes

---
 .../docs/chat/nvidia_ai_endpoints.ipynb       | 61 +++++++++++--------
 1 file changed, 35 insertions(+), 26 deletions(-)

diff --git a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
index e1025759..3009be7e 100644
--- a/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
+++ b/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb
@@ -451,7 +451,7 @@
    "id": "137662a6"
   },
   "source": [
-    "## Example usage within a Conversation Chains"
+    "## Example usage with RunnableWithMessageHistory"
   ]
  },
  {
@@ -461,7 +461,7 @@
    "id": "79efa62d"
   },
   "source": [
-    "Like any other integration, ChatNVIDIA is fine to support chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/types/buffer) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
+    "Like any other integration, ChatNVIDIA supports chat utilities such as RunnableWithMessageHistory, the recommended replacement for the legacy `ConversationChain`. Below, we show the [LangChain RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
] }, { @@ -483,8 +483,19 @@ }, "outputs": [], "source": [ - "from langchain.chains import ConversationChain\n", - "from langchain.memory import ConversationBufferMemory\n", + "from langchain_core.chat_history import InMemoryChatMessageHistory\n", + "from langchain_core.runnables.history import RunnableWithMessageHistory\n", + "\n", + "# store is a dictionary that maps session IDs to their corresponding chat histories.\n", + "store = {} # memory is maintained outside the chain\n", + "\n", + "\n", + "# A function that returns the chat history for a given session ID.\n", + "def get_session_history(session_id: str) -> InMemoryChatMessageHistory:\n", + " if session_id not in store:\n", + " store[session_id] = InMemoryChatMessageHistory()\n", + " return store[session_id]\n", + "\n", "\n", "chat = ChatNVIDIA(\n", " model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n", @@ -493,24 +504,18 @@ " top_p=1.0,\n", ")\n", "\n", - "conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f644ff28", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 268 - }, - "id": "f644ff28", - "outputId": "bae354cc-2118-4e01-ce20-a717ac94d27d" - }, - "outputs": [], - "source": [ - "conversation.invoke(\"Hi there!\")[\"response\"]" + "# Define a RunnableConfig object, with a `configurable` key. session_id determines thread\n", + "config = {\"configurable\": {\"session_id\": \"1\"}}\n", + "\n", + "conversation = RunnableWithMessageHistory(\n", + " chat,\n", + " get_session_history,\n", + ")\n", + "\n", + "conversation.invoke(\n", + " \"Hi I'm Srijan Dubey.\", # input or query\n", + " config=config,\n", + ")" ] }, { @@ -527,9 +532,10 @@ }, "outputs": [], "source": [ - "conversation.invoke(\"I'm doing well! Just having a conversation with an AI.\")[\n", - " \"response\"\n", - "]" + "conversation.invoke(\n", + " \"I'm doing well! Just having a conversation with an AI.\",\n", + " config=config,\n", + ")" ] }, { @@ -546,7 +552,10 @@ }, "outputs": [], "source": [ - "conversation.invoke(\"Tell me about yourself.\")[\"response\"]" + "conversation.invoke(\n", + " \"Tell me about yourself.\",\n", + " config=config,\n", + ")" ] }, { From f9f8f7c56c39b2f2368c7b92829984e4dc8cdc23 Mon Sep 17 00:00:00 2001 From: Daniel Glogowski Date: Sat, 27 Jul 2024 13:22:28 -0700 Subject: [PATCH 57/60] update nvidia.mdx file for stale links --- libs/ai-endpoints/docs/providers/nvidia.mdx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libs/ai-endpoints/docs/providers/nvidia.mdx b/libs/ai-endpoints/docs/providers/nvidia.mdx index 70f1123c..b4f33e0e 100644 --- a/libs/ai-endpoints/docs/providers/nvidia.mdx +++ b/libs/ai-endpoints/docs/providers/nvidia.mdx @@ -78,5 +78,6 @@ A selection of NVIDIA AI Foundation models are supported directly in LangChain w The active models which are supported can be found [in API Catalog](https://build.nvidia.com/). 
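
For instance, the catalog can also be inspected programmatically — a minimal sketch, assuming `NVIDIA_API_KEY` is set in the environment and that the client's `available_models` property remains the public discovery hook:

```python
# List the models the endpoint currently advertises; a sketch, assuming
# NVIDIA_API_KEY is exported and `available_models` is the public property.
from langchain_nvidia_ai_endpoints import ChatNVIDIA

for model in ChatNVIDIA().available_models:
    # each entry is a Model with an id, model_type, client, etc.
    print(model.id, model.model_type)
```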
 **The following may be useful examples to help you get started:**
-- **[`ChatNVIDIA` Model](/docs/integrations/chat/nvidia_ai_endpoints).**
-- **[`NVIDIAEmbeddings` Model for RAG Workflows](/docs/integrations/text_embedding/nvidia_ai_endpoints).**
+- **[`ChatNVIDIA` Model](/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb).**
+- **[`NVIDIAEmbeddings` Model for retrieval in RAG Workflows](/libs/ai-endpoints/docs/text_embedding/nvidia_ai_endpoints.ipynb).**
+- **[`NVIDIARerank` and `NVIDIAEmbeddings` Models for retrieval and re-ranking](/libs/ai-endpoints/docs/retrievers/nvidia_rerank.ipynb).**
\ No newline at end of file

From 07aa3f7313e5aacb28d12137e49298c7985320dd Mon Sep 17 00:00:00 2001
From: Daniel Glogowski
Date: Sat, 27 Jul 2024 13:34:05 -0700
Subject: [PATCH 58/60] changed references to point to langchain main repo

---
 libs/ai-endpoints/docs/providers/nvidia.mdx | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/libs/ai-endpoints/docs/providers/nvidia.mdx b/libs/ai-endpoints/docs/providers/nvidia.mdx
index b4f33e0e..05975156 100644
--- a/libs/ai-endpoints/docs/providers/nvidia.mdx
+++ b/libs/ai-endpoints/docs/providers/nvidia.mdx
@@ -78,6 +78,5 @@ A selection of NVIDIA AI Foundation models are supported directly in LangChain w
 The active models which are supported can be found [in API Catalog](https://build.nvidia.com/).
 
 **The following may be useful examples to help you get started:**
-- **[`ChatNVIDIA` Model](/libs/ai-endpoints/docs/chat/nvidia_ai_endpoints.ipynb).**
-- **[`NVIDIAEmbeddings` Model for retrieval in RAG Workflows](/libs/ai-endpoints/docs/text_embedding/nvidia_ai_endpoints.ipynb).**
-- **[`NVIDIARerank` and `NVIDIAEmbeddings` Models for retrieval and re-ranking](/libs/ai-endpoints/docs/retrievers/nvidia_rerank.ipynb).**
\ No newline at end of file
+- **[`ChatNVIDIA` Model](https://github.com/langchain-ai/langchain/blob/master/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb).**
+- **[`NVIDIAEmbeddings` Model for retrieval in RAG Workflows](https://github.com/langchain-ai/langchain/blob/master/docs/docs/integrations/text_embedding/nvidia_ai_endpoints.ipynb).**
\ No newline at end of file

From 920bbb06166efd13968c2f0c3af18c78fd42a81a Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Tue, 30 Jul 2024 06:34:31 -0400
Subject: [PATCH 59/60] add support for nvidia/usdcode-llama3-70b-instruct

---
 libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
index 7c51466b..af78e07b 100644
--- a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
+++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
@@ -303,6 +303,11 @@ def validate_client(cls, client: str, values: dict) -> str:
         client="ChatNVIDIA",
         supports_tools=True,
     ),
+    "nvidia/usdcode-llama3-70b-instruct": Model(
+        id="nvidia/usdcode-llama3-70b-instruct",
+        model_type="chat",
+        client="ChatNVIDIA",
+    ),
 }
 
 QA_MODEL_TABLE = {

From 2938627ee4e6af9e09ff1262ab64c2c952d7f1fe Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Tue, 30 Jul 2024 07:24:10 -0400
Subject: [PATCH 60/60] add support for baai/bge-m3

---
 libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py
index af78e07b..b72f1236 100644
---
a/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py +++ b/libs/ai-endpoints/langchain_nvidia_ai_endpoints/_statics.py @@ -422,6 +422,11 @@ def validate_client(cls, client: str, values: dict) -> str: model_type="embedding", client="NVIDIAEmbeddings", ), + "baai/bge-m3": Model( + id="baai/bge-m3", + model_type="embedding", + client="NVIDIAEmbeddings", + ), } RANKING_MODEL_TABLE = {
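
With these two entries in the model tables, the new ids resolve like any other known model — a minimal usage sketch, assuming `NVIDIA_API_KEY` is set in the environment (the model ids are taken from the table entries added above):

```python
# Use the models registered by the two patches above; a sketch, assuming
# NVIDIA_API_KEY is exported in the environment.
from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings

# chat model added in PATCH 59
llm = ChatNVIDIA(model="nvidia/usdcode-llama3-70b-instruct")
print(llm.invoke("Write a USD snippet that adds a sphere prim.").content)

# embedding model added in PATCH 60
embedder = NVIDIAEmbeddings(model="baai/bge-m3")
vectors = embedder.embed_documents(["OpenUSD scene description", "bge-m3 is multilingual"])
print(len(vectors), len(vectors[0]))  # number of inputs, embedding dimension
```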