From 26c2487d9badeb58b7c8af6d362d14458b645a07 Mon Sep 17 00:00:00 2001
From: Holt Skinner
Date: Mon, 26 Feb 2024 12:44:57 -0600
Subject: [PATCH 01/17] feat: Add custom telemetry context upon client creation

- **Description:** Adds a custom user agent to Vertex AI SDK initialization,
  which allows API usage metrics to be collected.
  - Follow-up to https://github.com/langchain-ai/langchain/pull/12168
- **Dependencies:** `google-cloud-aiplatform` to be updated in PR
  https://github.com/googleapis/python-aiplatform/pull/3261
  - Before merging, update
    `raise_vertex_import_error(minimum_expected_version: str = "1.38.0")`
    to the actual version once the SDK is updated.
    - https://pypi.org/project/google-cloud-aiplatform/

Tested successfully locally when installing from the source PR.

DO NOT MERGE until https://github.com/googleapis/python-aiplatform/pull/3261
is released and `minimum_expected_version` is updated to the correct version.
---
 .../langchain_google_vertexai/_utils.py       | 24 ++++++++++--
 .../langchain_google_vertexai/chat_models.py  | 38 ++++++++++---------
 .../langchain_google_vertexai/embeddings.py   | 11 ++++--
 .../langchain_google_vertexai/llms.py         | 33 +++++++++-------
 .../vision_models.py                          |  5 ++-
 5 files changed, 71 insertions(+), 40 deletions(-)

diff --git a/libs/vertexai/langchain_google_vertexai/_utils.py b/libs/vertexai/langchain_google_vertexai/_utils.py
index 641721af..8444a478 100644
--- a/libs/vertexai/langchain_google_vertexai/_utils.py
+++ b/libs/vertexai/langchain_google_vertexai/_utils.py
@@ -2,7 +2,7 @@
 
 import dataclasses
 from importlib import metadata
-from typing import Any, Callable, Dict, Optional, Union
+from typing import Any, Callable, Dict, Optional, Tuple, Union
 
 import google.api_core
 import proto  # type: ignore[import-untyped]
@@ -58,7 +58,7 @@ def raise_vertex_import_error(minimum_expected_version: str = "1.38.0") -> None:
     )
 
 
-def get_client_info(module: Optional[str] = None) -> "ClientInfo":
+def get_user_agent(module: Optional[str] = None) -> Tuple[str, str]:
     r"""Returns a custom user agent header.
 
     Args:
@@ -67,13 +67,29 @@ def get_client_info(module: Optional[str] = None) -> "ClientInfo":
 
     Returns:
         google.api_core.gapic_v1.client_info.ClientInfo
     """
-    langchain_version = metadata.version("langchain")
+    try:
+        langchain_version = metadata.version("langchain")
+    except metadata.PackageNotFoundError:
+        langchain_version = "0.0.0"
     client_library_version = (
         f"{langchain_version}-{module}" if module else langchain_version
     )
+    return client_library_version, f"langchain/{client_library_version}"
+
+
+def get_client_info(module: Optional[str] = None) -> "ClientInfo":
+    r"""Returns a client info object with a custom user agent header.
+
+    Args:
+        module (Optional[str]):
+            Optional. The module for a custom user agent header. 
+ Returns: + google.api_core.gapic_v1.client_info.ClientInfo + """ + client_library_version, user_agent = get_user_agent(module) return ClientInfo( client_library_version=client_library_version, - user_agent=f"langchain/{client_library_version}", + user_agent=user_agent, ) diff --git a/libs/vertexai/langchain_google_vertexai/chat_models.py b/libs/vertexai/langchain_google_vertexai/chat_models.py index c7129a6d..6e1248fb 100644 --- a/libs/vertexai/langchain_google_vertexai/chat_models.py +++ b/libs/vertexai/langchain_google_vertexai/chat_models.py @@ -1,4 +1,5 @@ """Wrapper around Google VertexAI chat-based models.""" + from __future__ import annotations import json @@ -13,6 +14,7 @@ AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) +from google.cloud.aiplatform.telemetry import tool_context_manager from langchain_core.language_models.chat_models import ( BaseChatModel, generate_from_stream, @@ -55,6 +57,7 @@ from langchain_google_vertexai._image_utils import ImageBytesLoader from langchain_google_vertexai._utils import ( get_generation_info, + get_user_agent, is_codey_model, is_gemini_model, ) @@ -291,24 +294,25 @@ def validate_environment(cls, values: Dict) -> Dict: raise ValueError("Safety settings are only supported for Gemini models") cls._init_vertexai(values) - if is_gemini: - values["client"] = GenerativeModel( - model_name=values["model_name"], safety_settings=safety_settings - ) - values["client_preview"] = GenerativeModel( - model_name=values["model_name"], safety_settings=safety_settings - ) - else: - if is_codey_model(values["model_name"]): - model_cls = CodeChatModel - model_cls_preview = PreviewCodeChatModel + with tool_context_manager(get_user_agent("vertex-ai-llm")): + if is_gemini: + values["client"] = GenerativeModel( + model_name=values["model_name"], safety_settings=safety_settings + ) + values["client_preview"] = GenerativeModel( + model_name=values["model_name"], safety_settings=safety_settings + ) else: - model_cls = ChatModel - model_cls_preview = PreviewChatModel - values["client"] = model_cls.from_pretrained(values["model_name"]) - values["client_preview"] = model_cls_preview.from_pretrained( - values["model_name"] - ) + if is_codey_model(values["model_name"]): + model_cls = CodeChatModel + model_cls_preview = PreviewCodeChatModel + else: + model_cls = ChatModel + model_cls_preview = PreviewChatModel + values["client"] = model_cls.from_pretrained(values["model_name"]) + values["client_preview"] = model_cls_preview.from_pretrained( + values["model_name"] + ) return values def _generate( diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index f7a9b97f..ac813adc 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -12,6 +12,7 @@ ResourceExhausted, ServiceUnavailable, ) +from google.cloud.aiplatform.telemetry import tool_context_manager from langchain_core.embeddings import Embeddings from langchain_core.language_models.llms import create_base_retry_decorator from langchain_core.pydantic_v1 import root_validator @@ -21,6 +22,7 @@ ) from langchain_google_vertexai._base import _VertexAICommon +from langchain_google_vertexai._utils import get_user_agent logger = logging.getLogger(__name__) @@ -46,7 +48,8 @@ def validate_environment(cls, values: Dict) -> Dict: "textembedding-gecko@001" ) values["model_name"] = "textembedding-gecko@001" - values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"]) + 
with tool_context_manager(get_user_agent("vertex-ai-embeddings")): + values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"]) return values def __init__( @@ -79,9 +82,9 @@ def __init__( self.instance["task_executor"] = ThreadPoolExecutor( max_workers=request_parallelism ) - self.instance[ - "embeddings_task_type_supported" - ] = not self.client._endpoint_name.endswith("/textembedding-gecko@001") + self.instance["embeddings_task_type_supported"] = ( + not self.client._endpoint_name.endswith("/textembedding-gecko@001") + ) @staticmethod def _split_by_punctuation(text: str) -> List[str]: diff --git a/libs/vertexai/langchain_google_vertexai/llms.py b/libs/vertexai/langchain_google_vertexai/llms.py index 9257269c..cf077ca4 100644 --- a/libs/vertexai/langchain_google_vertexai/llms.py +++ b/libs/vertexai/langchain_google_vertexai/llms.py @@ -10,6 +10,7 @@ PredictionServiceClient, ) from google.cloud.aiplatform.models import Prediction +from google.cloud.aiplatform.telemetry import tool_context_manager from google.protobuf import json_format from google.protobuf.struct_pb2 import Value from langchain_core.callbacks.manager import ( @@ -55,6 +56,7 @@ create_retry_decorator, get_client_info, get_generation_info, + get_user_agent, is_codey_model, is_gemini_model, ) @@ -314,22 +316,25 @@ def validate_environment(cls, values: Dict) -> Dict: model_cls = TextGenerationModel preview_model_cls = PreviewTextGenerationModel - if tuned_model_name: - values["client"] = model_cls.get_tuned_model(tuned_model_name) - values["client_preview"] = preview_model_cls.get_tuned_model( - tuned_model_name - ) - else: - if is_gemini: - values["client"] = model_cls( - model_name=model_name, safety_settings=safety_settings - ) - values["client_preview"] = preview_model_cls( - model_name=model_name, safety_settings=safety_settings + with tool_context_manager(get_user_agent("vertex-ai-llm")): + if tuned_model_name: + values["client"] = model_cls.get_tuned_model(tuned_model_name) + values["client_preview"] = preview_model_cls.get_tuned_model( + tuned_model_name ) else: - values["client"] = model_cls.from_pretrained(model_name) - values["client_preview"] = preview_model_cls.from_pretrained(model_name) + if is_gemini: + values["client"] = model_cls( + model_name=model_name, safety_settings=safety_settings + ) + values["client_preview"] = preview_model_cls( + model_name=model_name, safety_settings=safety_settings + ) + else: + values["client"] = model_cls.from_pretrained(model_name) + values["client_preview"] = preview_model_cls.from_pretrained( + model_name + ) if values["streaming"] and values["n"] > 1: raise ValueError("Only one candidate can be generated with streaming!") diff --git a/libs/vertexai/langchain_google_vertexai/vision_models.py b/libs/vertexai/langchain_google_vertexai/vision_models.py index 45469800..a005fdbd 100644 --- a/libs/vertexai/langchain_google_vertexai/vision_models.py +++ b/libs/vertexai/langchain_google_vertexai/vision_models.py @@ -2,6 +2,7 @@ from typing import Any, Dict, List, Union +from google.cloud.aiplatform.telemetry import tool_context_manager from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models import BaseChatModel, BaseLLM from langchain_core.messages import AIMessage, BaseMessage @@ -22,6 +23,7 @@ get_text_str_from_content_part, image_bytes_to_b64_string, ) +from langchain_google_vertexai._utils import get_user_agent class _BaseImageTextModel(BaseModel): @@ -38,7 +40,8 @@ class _BaseImageTextModel(BaseModel): def 
_create_model(self) -> ImageTextModel: """Builds the model object from the class attributes.""" - return ImageTextModel.from_pretrained(model_name=self.model_name) + with tool_context_manager(get_user_agent("vertex-ai-imagen")): + return ImageTextModel.from_pretrained(model_name=self.model_name) def _get_image_from_message_part(self, message_part: str | Dict) -> Image | None: """Given a message part obtain a image if the part represents it. From 13283dad3f53e350ff85c5a6602c60d3d46cb34f Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Mon, 26 Feb 2024 13:12:59 -0600 Subject: [PATCH 02/17] Fix lint errors --- libs/vertexai/langchain_google_vertexai/_utils.py | 2 +- libs/vertexai/langchain_google_vertexai/chat_models.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/_utils.py b/libs/vertexai/langchain_google_vertexai/_utils.py index eda23981..adbcdb81 100644 --- a/libs/vertexai/langchain_google_vertexai/_utils.py +++ b/libs/vertexai/langchain_google_vertexai/_utils.py @@ -3,7 +3,7 @@ import dataclasses import re from importlib import metadata -from typing import Any, Callable, Dict, Optional, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, Union import google.api_core import proto # type: ignore[import-untyped] diff --git a/libs/vertexai/langchain_google_vertexai/chat_models.py b/libs/vertexai/langchain_google_vertexai/chat_models.py index 6e1248fb..cdd07e53 100644 --- a/libs/vertexai/langchain_google_vertexai/chat_models.py +++ b/libs/vertexai/langchain_google_vertexai/chat_models.py @@ -10,11 +10,12 @@ import proto # type: ignore[import-untyped] from google.cloud.aiplatform_v1beta1.types.content import Part as GapicPart from google.cloud.aiplatform_v1beta1.types.tool import FunctionCall +from google.cloud.aiplatform.telemetry import tool_context_manager + from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) -from google.cloud.aiplatform.telemetry import tool_context_manager from langchain_core.language_models.chat_models import ( BaseChatModel, generate_from_stream, From 1f6637b19c03149d44b8b5dbe2548f68d3ebdd56 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Mon, 26 Feb 2024 14:44:47 -0600 Subject: [PATCH 03/17] Change tool_context_manager context location --- .../langchain_google_vertexai/_base.py | 7 + .../langchain_google_vertexai/_utils.py | 4 +- .../langchain_google_vertexai/chat_models.py | 403 +++++++++--------- .../langchain_google_vertexai/embeddings.py | 4 +- .../langchain_google_vertexai/llms.py | 47 +- .../vision_models.py | 95 +++-- 6 files changed, 294 insertions(+), 266 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/_base.py b/libs/vertexai/langchain_google_vertexai/_base.py index 1b2a9846..4efea07b 100644 --- a/libs/vertexai/langchain_google_vertexai/_base.py +++ b/libs/vertexai/langchain_google_vertexai/_base.py @@ -27,6 +27,7 @@ from langchain_google_vertexai._enums import HarmBlockThreshold, HarmCategory from langchain_google_vertexai._utils import ( get_client_info, + get_user_agent, is_codey_model, is_gemini_model, ) @@ -142,6 +143,12 @@ def _default_params(self) -> Dict[str, Any]: ) return updated_params + @property + def _user_agent(self) -> str: + """Gets the User Agent.""" + _, user_agent = get_user_agent(f"{type(self).__name__}_{self.model_name}") + return user_agent + @classmethod def _init_vertexai(cls, values: Dict) -> None: vertexai.init( diff --git a/libs/vertexai/langchain_google_vertexai/_utils.py 
b/libs/vertexai/langchain_google_vertexai/_utils.py index adbcdb81..8e4e858b 100644 --- a/libs/vertexai/langchain_google_vertexai/_utils.py +++ b/libs/vertexai/langchain_google_vertexai/_utils.py @@ -66,7 +66,7 @@ def get_user_agent(module: Optional[str] = None) -> Tuple[str, str]: module (Optional[str]): Optional. The module for a custom user agent header. Returns: - google.api_core.gapic_v1.client_info.ClientInfo + Tuple[str, str] """ try: langchain_version = metadata.version("langchain") @@ -95,7 +95,7 @@ def get_client_info(module: Optional[str] = None) -> "ClientInfo": def load_image_from_gcs(path: str, project: Optional[str] = None) -> Image: - """Loads im Image from GCS.""" + """Loads an Image from GCS.""" gcs_client = storage.Client(project=project) pieces = path.split("/") blobs = list(gcs_client.list_blobs(pieces[2], prefix="/".join(pieces[3:]))) diff --git a/libs/vertexai/langchain_google_vertexai/chat_models.py b/libs/vertexai/langchain_google_vertexai/chat_models.py index cdd07e53..5ad8d0e2 100644 --- a/libs/vertexai/langchain_google_vertexai/chat_models.py +++ b/libs/vertexai/langchain_google_vertexai/chat_models.py @@ -1,6 +1,6 @@ """Wrapper around Google VertexAI chat-based models.""" -from __future__ import annotations +from __future__ import annotations # noqa import json import logging @@ -295,25 +295,24 @@ def validate_environment(cls, values: Dict) -> Dict: raise ValueError("Safety settings are only supported for Gemini models") cls._init_vertexai(values) - with tool_context_manager(get_user_agent("vertex-ai-llm")): - if is_gemini: - values["client"] = GenerativeModel( - model_name=values["model_name"], safety_settings=safety_settings - ) - values["client_preview"] = GenerativeModel( - model_name=values["model_name"], safety_settings=safety_settings - ) + if is_gemini: + values["client"] = GenerativeModel( + model_name=values["model_name"], safety_settings=safety_settings + ) + values["client_preview"] = GenerativeModel( + model_name=values["model_name"], safety_settings=safety_settings + ) + else: + if is_codey_model(values["model_name"]): + model_cls = CodeChatModel + model_cls_preview = PreviewCodeChatModel else: - if is_codey_model(values["model_name"]): - model_cls = CodeChatModel - model_cls_preview = PreviewCodeChatModel - else: - model_cls = ChatModel - model_cls_preview = PreviewChatModel - values["client"] = model_cls.from_pretrained(values["model_name"]) - values["client_preview"] = model_cls_preview.from_pretrained( - values["model_name"] - ) + model_cls = ChatModel + model_cls_preview = PreviewChatModel + values["client"] = model_cls.from_pretrained(values["model_name"]) + values["client_preview"] = model_cls_preview.from_pretrained( + values["model_name"] + ) return values def _generate( @@ -339,67 +338,68 @@ def _generate( Raises: ValueError: if the last message in the list is not from human. 
""" - should_stream = stream if stream is not None else self.streaming - safety_settings = kwargs.pop("safety_settings", None) - if should_stream: - stream_iter = self._stream( - messages, stop=stop, run_manager=run_manager, **kwargs - ) - return generate_from_stream(stream_iter) - - params = self._prepare_params(stop=stop, stream=False, **kwargs) - msg_params = {} - if "candidate_count" in params: - msg_params["candidate_count"] = params.pop("candidate_count") - - if self._is_gemini_model: - history_gemini = _parse_chat_history_gemini( - messages, - project=self.project, - convert_system_message_to_human=self.convert_system_message_to_human, - ) - message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) - - # set param to `functions` until core tool/function calling implemented - raw_tools = params.pop("functions") if "functions" in params else None - tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None - response = chat.send_message( - message, - generation_config=params, - tools=tools, - safety_settings=safety_settings, - ) - generations = [ - ChatGeneration( - message=_parse_response_candidate(candidate), - generation_info=get_generation_info( - candidate, - self._is_gemini_model, - usage_metadata=response.to_dict().get("usage_metadata"), - ), + with tool_context_manager(self._user_agent): + should_stream = stream if stream is not None else self.streaming + safety_settings = kwargs.pop("safety_settings", None) + if should_stream: + stream_iter = self._stream( + messages, stop=stop, run_manager=run_manager, **kwargs ) - for candidate in response.candidates - ] - else: - question = _get_question(messages) - history = _parse_chat_history(messages[:-1]) - examples = kwargs.get("examples") or self.examples - if examples: - params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - response = chat.send_message(question.content, **msg_params) - generations = [ - ChatGeneration( - message=AIMessage(content=candidate.text), - generation_info=get_generation_info( - candidate, - self._is_gemini_model, - usage_metadata=response.raw_prediction_response.metadata, - ), + return generate_from_stream(stream_iter) + + params = self._prepare_params(stop=stop, stream=False, **kwargs) + msg_params = {} + if "candidate_count" in params: + msg_params["candidate_count"] = params.pop("candidate_count") + + if self._is_gemini_model: + history_gemini = _parse_chat_history_gemini( + messages, + project=self.project, + convert_system_message_to_human=self.convert_system_message_to_human, ) - for candidate in response.candidates - ] + message = history_gemini.pop() + chat = self.client.start_chat(history=history_gemini) + + # set param to `functions` until core tool/function calling implemented + raw_tools = params.pop("functions") if "functions" in params else None + tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None + response = chat.send_message( + message, + generation_config=params, + tools=tools, + safety_settings=safety_settings, + ) + generations = [ + ChatGeneration( + message=_parse_response_candidate(candidate), + generation_info=get_generation_info( + candidate, + self._is_gemini_model, + usage_metadata=response.to_dict().get("usage_metadata"), + ), + ) + for candidate in response.candidates + ] + else: + question = _get_question(messages) + history = _parse_chat_history(messages[:-1]) + examples = kwargs.get("examples") or self.examples + if examples: + params["examples"] = _parse_examples(examples) + 
chat = self._start_chat(history, **params) + response = chat.send_message(question.content, **msg_params) + generations = [ + ChatGeneration( + message=AIMessage(content=candidate.text), + generation_info=get_generation_info( + candidate, + self._is_gemini_model, + usage_metadata=response.raw_prediction_response.metadata, + ), + ) + for candidate in response.candidates + ] return ChatResult(generations=generations) async def _agenerate( @@ -427,59 +427,60 @@ async def _agenerate( kwargs.pop("stream") logger.warning("ChatVertexAI does not currently support async streaming.") - params = self._prepare_params(stop=stop, **kwargs) - safety_settings = kwargs.pop("safety_settings", None) - msg_params = {} - if "candidate_count" in params: - msg_params["candidate_count"] = params.pop("candidate_count") - - if self._is_gemini_model: - history_gemini = _parse_chat_history_gemini( - messages, - project=self.project, - convert_system_message_to_human=self.convert_system_message_to_human, - ) - message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) - # set param to `functions` until core tool/function calling implemented - raw_tools = params.pop("functions") if "functions" in params else None - tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None - response = await chat.send_message_async( - message, - generation_config=params, - tools=tools, - safety_settings=safety_settings, - ) - generations = [ - ChatGeneration( - message=_parse_response_candidate(c), - generation_info=get_generation_info( - c, - self._is_gemini_model, - usage_metadata=response.to_dict().get("usage_metadata"), - ), + with tool_context_manager(self._user_agent): + params = self._prepare_params(stop=stop, **kwargs) + safety_settings = kwargs.pop("safety_settings", None) + msg_params = {} + if "candidate_count" in params: + msg_params["candidate_count"] = params.pop("candidate_count") + + if self._is_gemini_model: + history_gemini = _parse_chat_history_gemini( + messages, + project=self.project, + convert_system_message_to_human=self.convert_system_message_to_human, ) - for c in response.candidates - ] - else: - question = _get_question(messages) - history = _parse_chat_history(messages[:-1]) - examples = kwargs.get("examples", None) or self.examples - if examples: - params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - response = await chat.send_message_async(question.content, **msg_params) - generations = [ - ChatGeneration( - message=AIMessage(content=r.text), - generation_info=get_generation_info( - r, - self._is_gemini_model, - usage_metadata=response.raw_prediction_response.metadata, - ), + message = history_gemini.pop() + chat = self.client.start_chat(history=history_gemini) + # set param to `functions` until core tool/function calling implemented + raw_tools = params.pop("functions") if "functions" in params else None + tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None + response = await chat.send_message_async( + message, + generation_config=params, + tools=tools, + safety_settings=safety_settings, ) - for r in response.candidates - ] + generations = [ + ChatGeneration( + message=_parse_response_candidate(c), + generation_info=get_generation_info( + c, + self._is_gemini_model, + usage_metadata=response.to_dict().get("usage_metadata"), + ), + ) + for c in response.candidates + ] + else: + question = _get_question(messages) + history = _parse_chat_history(messages[:-1]) + examples = kwargs.get("examples", None) or 
self.examples + if examples: + params["examples"] = _parse_examples(examples) + chat = self._start_chat(history, **params) + response = await chat.send_message_async(question.content, **msg_params) + generations = [ + ChatGeneration( + message=AIMessage(content=r.text), + generation_info=get_generation_info( + r, + self._is_gemini_model, + usage_metadata=response.raw_prediction_response.metadata, + ), + ) + for r in response.candidates + ] return ChatResult(generations=generations) def _stream( @@ -489,8 +490,73 @@ def _stream( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: - params = self._prepare_params(stop=stop, stream=True, **kwargs) - if self._is_gemini_model: + with tool_context_manager(self._user_agent): + params = self._prepare_params(stop=stop, stream=True, **kwargs) + if self._is_gemini_model: + history_gemini = _parse_chat_history_gemini( + messages, + project=self.project, + convert_system_message_to_human=self.convert_system_message_to_human, + ) + message = history_gemini.pop() + chat = self.client.start_chat(history=history_gemini) + # set param to `functions` until core tool/function calling implemented + raw_tools = params.pop("functions") if "functions" in params else None + tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None + safety_settings = params.pop("safety_settings", None) + responses = chat.send_message( + message, + stream=True, + generation_config=params, + safety_settings=safety_settings, + tools=tools, + ) + for response in responses: + message = _parse_response_candidate(response.candidates[0]) + if run_manager: + run_manager.on_llm_new_token(message.content) + yield ChatGenerationChunk( + message=AIMessageChunk( + content=message.content, + additional_kwargs=message.additional_kwargs, + ), + generation_info=get_generation_info( + response.candidates[0], + self._is_gemini_model, + usage_metadata=response.to_dict().get("usage_metadata"), + ), + ) + else: + question = _get_question(messages) + history = _parse_chat_history(messages[:-1]) + examples = kwargs.get("examples", None) + if examples: + params["examples"] = _parse_examples(examples) + chat = self._start_chat(history, **params) + responses = chat.send_message_streaming(question.content, **params) + for response in responses: + if run_manager: + run_manager.on_llm_new_token(response.text) + yield ChatGenerationChunk( + message=AIMessageChunk(content=response.text), + generation_info=get_generation_info( + response, + self._is_gemini_model, + usage_metadata=response.raw_prediction_response.metadata, + ), + ) + + async def _astream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[ChatGenerationChunk]: + if not self._is_gemini_model: + raise NotImplementedError() + with tool_context_manager(self._user_agent): + params = self._prepare_params(stop=stop, stream=True, **kwargs) history_gemini = _parse_chat_history_gemini( messages, project=self.project, @@ -498,94 +564,31 @@ def _stream( ) message = history_gemini.pop() chat = self.client.start_chat(history=history_gemini) - # set param to `functions` until core tool/function calling implemented raw_tools = params.pop("functions") if "functions" in params else None tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None safety_settings = params.pop("safety_settings", None) - responses = chat.send_message( + async for chunk in await 
chat.send_message_async( message, stream=True, generation_config=params, safety_settings=safety_settings, tools=tools, - ) - for response in responses: - message = _parse_response_candidate(response.candidates[0]) + ): + message = _parse_response_candidate(chunk.candidates[0]) if run_manager: - run_manager.on_llm_new_token(message.content) + await run_manager.on_llm_new_token(message.content) yield ChatGenerationChunk( message=AIMessageChunk( content=message.content, additional_kwargs=message.additional_kwargs, ), generation_info=get_generation_info( - response.candidates[0], - self._is_gemini_model, - usage_metadata=response.to_dict().get("usage_metadata"), - ), - ) - else: - question = _get_question(messages) - history = _parse_chat_history(messages[:-1]) - examples = kwargs.get("examples", None) - if examples: - params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - responses = chat.send_message_streaming(question.content, **params) - for response in responses: - if run_manager: - run_manager.on_llm_new_token(response.text) - yield ChatGenerationChunk( - message=AIMessageChunk(content=response.text), - generation_info=get_generation_info( - response, + chunk.candidates[0], self._is_gemini_model, - usage_metadata=response.raw_prediction_response.metadata, + usage_metadata=chunk.to_dict().get("usage_metadata"), ), ) - async def _astream( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> AsyncIterator[ChatGenerationChunk]: - if not self._is_gemini_model: - raise NotImplementedError() - params = self._prepare_params(stop=stop, stream=True, **kwargs) - history_gemini = _parse_chat_history_gemini( - messages, - project=self.project, - convert_system_message_to_human=self.convert_system_message_to_human, - ) - message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) - raw_tools = params.pop("functions") if "functions" in params else None - tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None - safety_settings = params.pop("safety_settings", None) - async for chunk in await chat.send_message_async( - message, - stream=True, - generation_config=params, - safety_settings=safety_settings, - tools=tools, - ): - message = _parse_response_candidate(chunk.candidates[0]) - if run_manager: - await run_manager.on_llm_new_token(message.content) - yield ChatGenerationChunk( - message=AIMessageChunk( - content=message.content, - additional_kwargs=message.additional_kwargs, - ), - generation_info=get_generation_info( - chunk.candidates[0], - self._is_gemini_model, - usage_metadata=chunk.to_dict().get("usage_metadata"), - ), - ) - def _start_chat( self, history: _ChatHistory, **kwargs: Any ) -> Union[ChatSession, CodeChatSession]: diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index ac813adc..eeeb1037 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -48,7 +48,6 @@ def validate_environment(cls, values: Dict) -> Dict: "textembedding-gecko@001" ) values["model_name"] = "textembedding-gecko@001" - with tool_context_manager(get_user_agent("vertex-ai-embeddings")): values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"]) return values @@ -173,7 +172,8 @@ def _completion_with_retry(texts_to_process: List[str]) -> Any: embeddings = 
self.client.get_embeddings(requests) return [embs.values for embs in embeddings] - return _completion_with_retry(texts) + with tool_context_manager(self._user_agent): + return _completion_with_retry(texts) def _prepare_and_validate_batches( self, texts: List[str], embeddings_type: Optional[str] = None diff --git a/libs/vertexai/langchain_google_vertexai/llms.py b/libs/vertexai/langchain_google_vertexai/llms.py index cf077ca4..73361177 100644 --- a/libs/vertexai/langchain_google_vertexai/llms.py +++ b/libs/vertexai/langchain_google_vertexai/llms.py @@ -91,7 +91,8 @@ def _completion_with_retry_inner( return llm.client.predict_streaming(prompt[0], **kwargs) return llm.client.predict(prompt[0], **kwargs) - return _completion_with_retry_inner(prompt, is_gemini, **kwargs) + with tool_context_manager(llm._user_agent): + return _completion_with_retry_inner(prompt, is_gemini, **kwargs) async def _acompletion_with_retry( @@ -122,9 +123,10 @@ async def _acompletion_with_retry_inner( raise ValueError("Async streaming is supported only for Gemini family!") return await llm.client.predict_async(prompt, **kwargs) - return await _acompletion_with_retry_inner( - prompt, is_gemini, stream=stream, **kwargs - ) + with tool_context_manager(llm._user_agent): + return await _acompletion_with_retry_inner( + prompt, is_gemini, stream=stream, **kwargs + ) class _VertexAIBase(BaseModel): @@ -232,6 +234,12 @@ def _default_params(self) -> Dict[str, Any]: ) return updated_params + @property + def _user_agent(self) -> str: + """Gets the User Agent.""" + _, user_agent = get_user_agent(f"{type(self).__name__}_{self.model_name}") + return user_agent + @classmethod def _init_vertexai(cls, values: Dict) -> None: vertexai.init( @@ -316,25 +324,22 @@ def validate_environment(cls, values: Dict) -> Dict: model_cls = TextGenerationModel preview_model_cls = PreviewTextGenerationModel - with tool_context_manager(get_user_agent("vertex-ai-llm")): - if tuned_model_name: - values["client"] = model_cls.get_tuned_model(tuned_model_name) - values["client_preview"] = preview_model_cls.get_tuned_model( - tuned_model_name + if tuned_model_name: + values["client"] = model_cls.get_tuned_model(tuned_model_name) + values["client_preview"] = preview_model_cls.get_tuned_model( + tuned_model_name + ) + else: + if is_gemini: + values["client"] = model_cls( + model_name=model_name, safety_settings=safety_settings + ) + values["client_preview"] = preview_model_cls( + model_name=model_name, safety_settings=safety_settings ) else: - if is_gemini: - values["client"] = model_cls( - model_name=model_name, safety_settings=safety_settings - ) - values["client_preview"] = preview_model_cls( - model_name=model_name, safety_settings=safety_settings - ) - else: - values["client"] = model_cls.from_pretrained(model_name) - values["client_preview"] = preview_model_cls.from_pretrained( - model_name - ) + values["client"] = model_cls.from_pretrained(model_name) + values["client_preview"] = preview_model_cls.from_pretrained(model_name) if values["streaming"] and values["n"] > 1: raise ValueError("Only one candidate can be generated with streaming!") diff --git a/libs/vertexai/langchain_google_vertexai/vision_models.py b/libs/vertexai/langchain_google_vertexai/vision_models.py index a005fdbd..212c22f8 100644 --- a/libs/vertexai/langchain_google_vertexai/vision_models.py +++ b/libs/vertexai/langchain_google_vertexai/vision_models.py @@ -40,8 +40,7 @@ class _BaseImageTextModel(BaseModel): def _create_model(self) -> ImageTextModel: """Builds the model object from the 
class attributes.""" - with tool_context_manager(get_user_agent("vertex-ai-imagen")): - return ImageTextModel.from_pretrained(model_name=self.model_name) + return ImageTextModel.from_pretrained(model_name=self.model_name) def _get_image_from_message_part(self, message_part: str | Dict) -> Image | None: """Given a message part obtain a image if the part represents it. @@ -78,6 +77,12 @@ def _llm_type(self) -> str: """Returns the type of LLM""" return "vertexai-vision" + @property + def _user_agent(self) -> str: + """Gets the User Agent.""" + _, user_agent = get_user_agent(f"{type(self).__name__}_{self.model_name}") + return user_agent + class _BaseVertexAIImageCaptioning(_BaseImageTextModel): """Base class for Image Captioning models.""" @@ -91,13 +96,14 @@ def _get_captions(self, image: Image) -> List[str]: Returns: List of captions obtained from the image. """ - model = self._create_model() - captions = model.get_captions( - image=image, - number_of_results=self.number_of_results, - language=self.language, - ) - return captions + with tool_context_manager(self._user_agent): + model = self._create_model() + captions = model.get_captions( + image=image, + number_of_results=self.number_of_results, + language=self.language, + ) + return captions class VertexAIImageCaptioning(_BaseVertexAIImageCaptioning, BaseLLM): @@ -269,11 +275,12 @@ def _ask_questions(self, image: Image, query: str) -> List[str]: Returns: List of responses to the query. """ - model = self._create_model() - answers = model.ask_question( - image=image, question=query, number_of_results=self.number_of_results - ) - return answers + with tool_context_manager(self._user_agent): + model = self._create_model() + answers = model.ask_question( + image=image, question=query, number_of_results=self.number_of_results + ) + return answers class _BaseVertexAIImageGenerator(BaseModel): @@ -306,17 +313,17 @@ def _generate_images(self, prompt: str) -> List[str]: Returns: List of b64 encoded strings. """ - - model = ImageGenerationModel.from_pretrained(self.model_name) - - generation_result = model.generate_images( - prompt=prompt, - negative_prompt=self.negative_prompt, - number_of_images=self.number_of_images, - language=self.language, - guidance_scale=self.guidance_scale, - seed=self.seed, - ) + with tool_context_manager(self._user_agent): + model = ImageGenerationModel.from_pretrained(self.model_name) + + generation_result = model.generate_images( + prompt=prompt, + negative_prompt=self.negative_prompt, + number_of_images=self.number_of_images, + language=self.language, + guidance_scale=self.guidance_scale, + seed=self.seed, + ) image_str_list = [ self._to_b64_string(image) for image in generation_result.images @@ -334,22 +341,22 @@ def _edit_images(self, image_str: str, prompt: str) -> List[str]: Returns: List of b64 encoded strings. 
""" - - model = ImageGenerationModel.from_pretrained(self.model_name) - - image_loader = ImageBytesLoader(project=self.project) - image_bytes = image_loader.load_bytes(image_str) - image = Image(image_bytes=image_bytes) - - generation_result = model.edit_image( - prompt=prompt, - base_image=image, - negative_prompt=self.negative_prompt, - number_of_images=self.number_of_images, - language=self.language, - guidance_scale=self.guidance_scale, - seed=self.seed, - ) + with tool_context_manager(self._user_agent): + model = ImageGenerationModel.from_pretrained(self.model_name) + + image_loader = ImageBytesLoader(project=self.project) + image_bytes = image_loader.load_bytes(image_str) + image = Image(image_bytes=image_bytes) + + generation_result = model.edit_image( + prompt=prompt, + base_image=image, + negative_prompt=self.negative_prompt, + number_of_images=self.number_of_images, + language=self.language, + guidance_scale=self.guidance_scale, + seed=self.seed, + ) image_str_list = [ self._to_b64_string(image) for image in generation_result.images @@ -386,6 +393,12 @@ def _llm_type(self) -> str: """Returns the type of LLM""" return "vertexai-vision" + @property + def _user_agent(self) -> str: + """Gets the User Agent.""" + _, user_agent = get_user_agent(f"{type(self).__name__}_{self.model_name}") + return user_agent + class VertexAIImageGeneratorChat(_BaseVertexAIImageGenerator, BaseChatModel): """Generates an image from a prompt.""" From 2993393cd837d557aace838c5f431bfda7b31c23 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Mon, 26 Feb 2024 14:46:54 -0600 Subject: [PATCH 04/17] Remove unused imports --- libs/vertexai/langchain_google_vertexai/chat_models.py | 1 - libs/vertexai/langchain_google_vertexai/embeddings.py | 1 - 2 files changed, 2 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/chat_models.py b/libs/vertexai/langchain_google_vertexai/chat_models.py index 5ad8d0e2..4caf0f8f 100644 --- a/libs/vertexai/langchain_google_vertexai/chat_models.py +++ b/libs/vertexai/langchain_google_vertexai/chat_models.py @@ -58,7 +58,6 @@ from langchain_google_vertexai._image_utils import ImageBytesLoader from langchain_google_vertexai._utils import ( get_generation_info, - get_user_agent, is_codey_model, is_gemini_model, ) diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index eeeb1037..df393cef 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -22,7 +22,6 @@ ) from langchain_google_vertexai._base import _VertexAICommon -from langchain_google_vertexai._utils import get_user_agent logger = logging.getLogger(__name__) From ad59603a623fd8336404d18ff1e8cdcee7c42394 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Mon, 26 Feb 2024 15:27:04 -0600 Subject: [PATCH 05/17] Add tool_context_manger to more places in Embeddings and VectorSearch --- .../langchain_google_vertexai/__init__.py | 2 + .../langchain_google_vertexai/embeddings.py | 3 ++ .../vectorstores/_sdk_manager.py | 44 ++++++++++++------- 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/__init__.py b/libs/vertexai/langchain_google_vertexai/__init__.py index 858d0f43..08603556 100644 --- a/libs/vertexai/langchain_google_vertexai/__init__.py +++ b/libs/vertexai/langchain_google_vertexai/__init__.py @@ -1,8 +1,10 @@ from langchain_google_vertexai._enums import HarmBlockThreshold, HarmCategory from langchain_google_vertexai.chains 
import create_structured_runnable from langchain_google_vertexai.chat_models import ChatVertexAI +from langchain_google_vertexai.embeddings import VertexAIEmbeddings from langchain_google_vertexai.functions_utils import PydanticFunctionsOutputParser from langchain_google_vertexai.gemma import ( + GemmaChatLocalHF, GemmaChatLocalKaggle, GemmaChatVertexAIModelGarden, GemmaLocalHF, diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index df393cef..e8c95a6f 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -22,6 +22,7 @@ ) from langchain_google_vertexai._base import _VertexAICommon +from langchain_google_vertexai._utils import get_user_agent logger = logging.getLogger(__name__) @@ -47,6 +48,8 @@ def validate_environment(cls, values: Dict) -> Dict: "textembedding-gecko@001" ) values["model_name"] = "textembedding-gecko@001" + _, user_agent = get_user_agent(f"{cls.__name__}_{values['model_name']}") + with tool_context_manager(user_agent): values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"]) return values diff --git a/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py b/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py index 38ae4f76..712548c4 100644 --- a/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py +++ b/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py @@ -5,11 +5,14 @@ MatchingEngineIndex, MatchingEngineIndexEndpoint, ) +from google.cloud.aiplatform.telemetry import tool_context_manager from google.oauth2.service_account import Credentials # type: ignore if TYPE_CHECKING: from google.cloud import datastore # type: ignore[attr-defined] +from langchain_google_vertexai._utils import get_client_info, get_user_agent + class VectorSearchSDKManager: """Class in charge of building all Google Cloud SDK Objects needed to build @@ -60,7 +63,11 @@ def get_gcs_client(self) -> storage.Client: Returns: Google Cloud Storage Agent. """ - return storage.Client(project=self._project_id, credentials=self._credentials) + return storage.Client( + project=self._project_id, + credentials=self._credentials, + client_info=get_client_info(module="vertex-ai-matching-engine"), + ) def get_gcs_bucket(self, bucket_name: str) -> storage.Bucket: """Retrieves a Google Cloud Bucket by bucket name. @@ -79,12 +86,14 @@ def get_index(self, index_id: str) -> MatchingEngineIndex: Returns: MatchingEngineIndex instance. """ - return MatchingEngineIndex( - index_name=index_id, - project=self._project_id, - location=self._region, - credentials=self._credentials, - ) + _, user_agent = get_user_agent("vertex-ai-matching-engine") + with tool_context_manager(user_agent): + return MatchingEngineIndex( + index_name=index_id, + project=self._project_id, + location=self._region, + credentials=self._credentials, + ) def get_endpoint(self, endpoint_id: str) -> MatchingEngineIndexEndpoint: """Retrieves a MatchingEngineIndexEndpoint (VectorSearchIndexEndpoint) by id. @@ -93,24 +102,29 @@ def get_endpoint(self, endpoint_id: str) -> MatchingEngineIndexEndpoint: Returns: MatchingEngineIndexEndpoint instance. 
""" - return MatchingEngineIndexEndpoint( - index_endpoint_name=endpoint_id, - project=self._project_id, - location=self._region, - credentials=self._credentials, - ) + _, user_agent = get_user_agent("vertex-ai-matching-engine") + with tool_context_manager(user_agent): + return MatchingEngineIndexEndpoint( + index_endpoint_name=endpoint_id, + project=self._project_id, + location=self._region, + credentials=self._credentials, + ) def get_datastore_client(self, **kwargs: Any) -> "datastore.Client": """Gets a datastore Client. Args: - **kwargs: Keyword arguments to pass to datatastore.Client constructor. + **kwargs: Keyword arguments to pass to datastore.Client constructor. Returns: datastore Client. """ from google.cloud import datastore # type: ignore[attr-defined] ds_client = datastore.Client( - project=self._project_id, credentials=self._credentials, **kwargs + project=self._project_id, + credentials=self._credentials, + client_info=get_client_info(module="vertex-ai-matching-engine"), + **kwargs, ) return ds_client From 6d1d45071af02c66fe75aa1be3b4cc2e42d72829 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Mon, 26 Feb 2024 16:09:50 -0600 Subject: [PATCH 06/17] fix lint error --- libs/vertexai/langchain_google_vertexai/embeddings.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index e8c95a6f..241730fd 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -83,9 +83,9 @@ def __init__( self.instance["task_executor"] = ThreadPoolExecutor( max_workers=request_parallelism ) - self.instance["embeddings_task_type_supported"] = ( - not self.client._endpoint_name.endswith("/textembedding-gecko@001") - ) + self.instance[ + "embeddings_task_type_supported" + ] = not self.client._endpoint_name.endswith("/textembedding-gecko@001") @staticmethod def _split_by_punctuation(text: str) -> List[str]: From 4372a150a101cbf9726d2baf28ac7189188db75d Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Mon, 26 Feb 2024 16:16:36 -0600 Subject: [PATCH 07/17] Silence lint error in embeddings.py --- libs/vertexai/langchain_google_vertexai/embeddings.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index 241730fd..c5563597 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -48,7 +48,7 @@ def validate_environment(cls, values: Dict) -> Dict: "textembedding-gecko@001" ) values["model_name"] = "textembedding-gecko@001" - _, user_agent = get_user_agent(f"{cls.__name__}_{values['model_name']}") + _, user_agent = get_user_agent(f"{cls.__name__}_{values['model_name']}") # type: ignore with tool_context_manager(user_agent): values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"]) return values @@ -83,9 +83,9 @@ def __init__( self.instance["task_executor"] = ThreadPoolExecutor( max_workers=request_parallelism ) - self.instance[ - "embeddings_task_type_supported" - ] = not self.client._endpoint_name.endswith("/textembedding-gecko@001") + self.instance["embeddings_task_type_supported"] = ( + not self.client._endpoint_name.endswith("/textembedding-gecko@001") + ) @staticmethod def _split_by_punctuation(text: str) -> List[str]: From a80581c47f38b23e0b25d42c0f62594bfcf76490 Mon Sep 17 00:00:00 
2001 From: Holt Skinner Date: Mon, 26 Feb 2024 17:24:59 -0600 Subject: [PATCH 08/17] Fix formatting --- libs/vertexai/langchain_google_vertexai/embeddings.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index c5563597..29edab23 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -83,9 +83,9 @@ def __init__( self.instance["task_executor"] = ThreadPoolExecutor( max_workers=request_parallelism ) - self.instance["embeddings_task_type_supported"] = ( - not self.client._endpoint_name.endswith("/textembedding-gecko@001") - ) + self.instance[ + "embeddings_task_type_supported" + ] = not self.client._endpoint_name.endswith("/textembedding-gecko@001") @staticmethod def _split_by_punctuation(text: str) -> List[str]: From 1eadeca4da411d6411a45bb630618d5f116c4f8c Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Mon, 4 Mar 2024 15:15:24 -0600 Subject: [PATCH 09/17] Updated telemetry import --- .../langchain_google_vertexai/chat_models.py | 10 +++++----- .../vertexai/langchain_google_vertexai/embeddings.py | 12 ++++++------ libs/vertexai/langchain_google_vertexai/llms.py | 6 +++--- .../vectorstores/_sdk_manager.py | 6 +++--- .../langchain_google_vertexai/vision_models.py | 10 +++++----- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/chat_models.py b/libs/vertexai/langchain_google_vertexai/chat_models.py index 4caf0f8f..a3434b90 100644 --- a/libs/vertexai/langchain_google_vertexai/chat_models.py +++ b/libs/vertexai/langchain_google_vertexai/chat_models.py @@ -10,7 +10,7 @@ import proto # type: ignore[import-untyped] from google.cloud.aiplatform_v1beta1.types.content import Part as GapicPart from google.cloud.aiplatform_v1beta1.types.tool import FunctionCall -from google.cloud.aiplatform.telemetry import tool_context_manager +from google.cloud.aiplatform import telemetry from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, @@ -337,7 +337,7 @@ def _generate( Raises: ValueError: if the last message in the list is not from human. 
""" - with tool_context_manager(self._user_agent): + with telemetry.tool_context_manager(self._user_agent): should_stream = stream if stream is not None else self.streaming safety_settings = kwargs.pop("safety_settings", None) if should_stream: @@ -426,7 +426,7 @@ async def _agenerate( kwargs.pop("stream") logger.warning("ChatVertexAI does not currently support async streaming.") - with tool_context_manager(self._user_agent): + with telemetry.tool_context_manager(self._user_agent): params = self._prepare_params(stop=stop, **kwargs) safety_settings = kwargs.pop("safety_settings", None) msg_params = {} @@ -489,7 +489,7 @@ def _stream( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: - with tool_context_manager(self._user_agent): + with telemetry.tool_context_manager(self._user_agent): params = self._prepare_params(stop=stop, stream=True, **kwargs) if self._is_gemini_model: history_gemini = _parse_chat_history_gemini( @@ -554,7 +554,7 @@ async def _astream( ) -> AsyncIterator[ChatGenerationChunk]: if not self._is_gemini_model: raise NotImplementedError() - with tool_context_manager(self._user_agent): + with telemetry.tool_context_manager(self._user_agent): params = self._prepare_params(stop=stop, stream=True, **kwargs) history_gemini = _parse_chat_history_gemini( messages, diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index 29edab23..fe16a1ca 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -12,7 +12,7 @@ ResourceExhausted, ServiceUnavailable, ) -from google.cloud.aiplatform.telemetry import tool_context_manager +from google.cloud.aiplatform import telemetry from langchain_core.embeddings import Embeddings from langchain_core.language_models.llms import create_base_retry_decorator from langchain_core.pydantic_v1 import root_validator @@ -49,7 +49,7 @@ def validate_environment(cls, values: Dict) -> Dict: ) values["model_name"] = "textembedding-gecko@001" _, user_agent = get_user_agent(f"{cls.__name__}_{values['model_name']}") # type: ignore - with tool_context_manager(user_agent): + with telemetry.tool_context_manager(user_agent): values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"]) return values @@ -83,9 +83,9 @@ def __init__( self.instance["task_executor"] = ThreadPoolExecutor( max_workers=request_parallelism ) - self.instance[ - "embeddings_task_type_supported" - ] = not self.client._endpoint_name.endswith("/textembedding-gecko@001") + self.instance["embeddings_task_type_supported"] = ( + not self.client._endpoint_name.endswith("/textembedding-gecko@001") + ) @staticmethod def _split_by_punctuation(text: str) -> List[str]: @@ -174,7 +174,7 @@ def _completion_with_retry(texts_to_process: List[str]) -> Any: embeddings = self.client.get_embeddings(requests) return [embs.values for embs in embeddings] - with tool_context_manager(self._user_agent): + with telemetry.tool_context_manager(self._user_agent): return _completion_with_retry(texts) def _prepare_and_validate_batches( diff --git a/libs/vertexai/langchain_google_vertexai/llms.py b/libs/vertexai/langchain_google_vertexai/llms.py index 73361177..c9acc4ef 100644 --- a/libs/vertexai/langchain_google_vertexai/llms.py +++ b/libs/vertexai/langchain_google_vertexai/llms.py @@ -10,7 +10,7 @@ PredictionServiceClient, ) from google.cloud.aiplatform.models import Prediction -from google.cloud.aiplatform.telemetry 
import tool_context_manager
+from google.cloud.aiplatform import telemetry
 from google.protobuf import json_format
 from google.protobuf.struct_pb2 import Value
 from langchain_core.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
@@ -91,7 +91,7 @@ def _completion_with_retry_inner(
             return llm.client.predict_streaming(prompt[0], **kwargs)
         return llm.client.predict(prompt[0], **kwargs)
 
-    with tool_context_manager(llm._user_agent):
+    with telemetry.tool_context_manager(llm._user_agent):
         return _completion_with_retry_inner(prompt, is_gemini, **kwargs)
 
@@ -123,7 +123,7 @@ async def _acompletion_with_retry_inner(
             raise ValueError("Async streaming is supported only for Gemini family!")
         return await llm.client.predict_async(prompt, **kwargs)
 
-    with tool_context_manager(llm._user_agent):
+    with telemetry.tool_context_manager(llm._user_agent):
         return await _acompletion_with_retry_inner(
             prompt, is_gemini, stream=stream, **kwargs
         )
diff --git a/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py b/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py
index 712548c4..084bb086 100644
--- a/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py
+++ b/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py
@@ -5,7 +5,7 @@
     MatchingEngineIndex,
     MatchingEngineIndexEndpoint,
 )
-from google.cloud.aiplatform.telemetry import tool_context_manager
+from google.cloud.aiplatform import telemetry
 from google.oauth2.service_account import Credentials  # type: ignore
 
 if TYPE_CHECKING:
@@ -87,7 +87,7 @@ def get_index(self, index_id: str) -> MatchingEngineIndex:
             MatchingEngineIndex instance.
         """
         _, user_agent = get_user_agent("vertex-ai-matching-engine")
-        with tool_context_manager(user_agent):
+        with telemetry.tool_context_manager(user_agent):
             return MatchingEngineIndex(
                 index_name=index_id,
                 project=self._project_id,
@@ -103,7 +103,7 @@ def get_endpoint(self, endpoint_id: str) -> MatchingEngineIndexEndpoint:
             MatchingEngineIndexEndpoint instance.
         """
         _, user_agent = get_user_agent("vertex-ai-matching-engine")
-        with tool_context_manager(user_agent):
+        with telemetry.tool_context_manager(user_agent):
             return MatchingEngineIndexEndpoint(
                 index_endpoint_name=endpoint_id,
                 project=self._project_id,
diff --git a/libs/vertexai/langchain_google_vertexai/vision_models.py b/libs/vertexai/langchain_google_vertexai/vision_models.py
index 212c22f8..ba0d43bd 100644
--- a/libs/vertexai/langchain_google_vertexai/vision_models.py
+++ b/libs/vertexai/langchain_google_vertexai/vision_models.py
@@ -2,7 +2,7 @@
 
 from typing import Any, Dict, List, Union
 
-from google.cloud.aiplatform.telemetry import tool_context_manager
+from google.cloud.aiplatform import telemetry
 from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import BaseChatModel, BaseLLM
 from langchain_core.messages import AIMessage, BaseMessage
@@ -96,7 +96,7 @@ def _get_captions(self, image: Image) -> List[str]:
         Returns:
             List of captions obtained from the image.
         """
-        with tool_context_manager(self._user_agent):
+        with telemetry.tool_context_manager(self._user_agent):
             model = self._create_model()
             captions = model.get_captions(
                 image=image,
@@ -275,7 +275,7 @@ def _ask_questions(self, image: Image, query: str) -> List[str]:
         Returns:
             List of responses to the query.
         """
-        with tool_context_manager(self._user_agent):
+        with telemetry.tool_context_manager(self._user_agent):
             model = self._create_model()
             answers = model.ask_question(
                 image=image, question=query, number_of_results=self.number_of_results
@@ -313,7 +313,7 @@ def _generate_images(self, prompt: str) -> List[str]:
         Returns:
             List of b64 encoded strings.
         """
-        with tool_context_manager(self._user_agent):
+        with telemetry.tool_context_manager(self._user_agent):
             model = ImageGenerationModel.from_pretrained(self.model_name)
 
             generation_result = model.generate_images(
@@ -341,7 +341,7 @@ def _edit_images(self, image_str: str, prompt: str) -> List[str]:
         Returns:
             List of b64 encoded strings.
         """
-        with tool_context_manager(self._user_agent):
+        with telemetry.tool_context_manager(self._user_agent):
            model = ImageGenerationModel.from_pretrained(self.model_name)
 
            image_loader = ImageBytesLoader(project=self.project)

From d6f0961e80c1e905c29bc302b28de67a86ed3a38 Mon Sep 17 00:00:00 2001
From: Holt Skinner
Date: Tue, 5 Mar 2024 15:28:51 -0600
Subject: [PATCH 10/17] Fix lint errors

---
 libs/vertexai/langchain_google_vertexai/embeddings.py | 6 +++---
 libs/vertexai/langchain_google_vertexai/llms.py       | 8 --------
 .../vectorstores/_sdk_manager.py                      | 2 +-
 3 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py
index 3308825b..da2a65cc 100644
--- a/libs/vertexai/langchain_google_vertexai/embeddings.py
+++ b/libs/vertexai/langchain_google_vertexai/embeddings.py
@@ -110,9 +110,9 @@ def __init__(
         self.instance["task_executor"] = ThreadPoolExecutor(
             max_workers=request_parallelism
         )
-        self.instance["embeddings_task_type_supported"] = (
-            not self.client._endpoint_name.endswith("/textembedding-gecko@001")
-        )
+        self.instance[
+            "embeddings_task_type_supported"
+        ] = not self.client._endpoint_name.endswith("/textembedding-gecko@001")
 
         retry_errors: List[Type[BaseException]] = [
             ResourceExhausted,
diff --git a/libs/vertexai/langchain_google_vertexai/llms.py b/libs/vertexai/langchain_google_vertexai/llms.py
index 6bc5aa35..a9dd8681 100644
--- a/libs/vertexai/langchain_google_vertexai/llms.py
+++ b/libs/vertexai/langchain_google_vertexai/llms.py
@@ -4,15 +4,7 @@
 from typing import Any, AsyncIterator, ClassVar, Dict, Iterator, List, Optional, Union
 
 import vertexai  # type: ignore[import-untyped]
-from google.api_core.client_options import ClientOptions
-from google.cloud.aiplatform.gapic import (
-    PredictionServiceAsyncClient,
-    PredictionServiceClient,
-)
-from google.cloud.aiplatform.models import Prediction
 from google.cloud.aiplatform import telemetry
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
 from langchain_core.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
diff --git a/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py b/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py
index 084bb086..b36f6bf8 100644
--- a/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py
+++ b/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py
@@ -1,11 +1,11 @@
 from typing import TYPE_CHECKING, Any, Union
 
 from google.cloud import aiplatform, storage  # type: ignore[attr-defined]
+from google.cloud.aiplatform import telemetry
 from google.cloud.aiplatform.matching_engine import (
     MatchingEngineIndex,
     MatchingEngineIndexEndpoint,
 )
-from google.cloud.aiplatform import telemetry
 from google.oauth2.service_account import Credentials  # type: ignore
 
 if TYPE_CHECKING:

From cf12afd26581738fbaca9706df0b9fb8aa22448c Mon Sep 17 00:00:00 2001
From: Holt Skinner
Date: Thu, 14 Mar 2024 09:54:58 -0500
Subject: [PATCH 11/17] Updated minimum `aiplatform` client library version to
 1.44.0

---
 libs/vertexai/langchain_google_vertexai/_utils.py | 2 +-
 libs/vertexai/pyproject.toml                      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/vertexai/langchain_google_vertexai/_utils.py b/libs/vertexai/langchain_google_vertexai/_utils.py
index 8e4e858b..51d093a0 100644
--- a/libs/vertexai/langchain_google_vertexai/_utils.py
+++ b/libs/vertexai/langchain_google_vertexai/_utils.py
@@ -45,7 +45,7 @@ def create_retry_decorator(
     return decorator
 
 
-def raise_vertex_import_error(minimum_expected_version: str = "1.38.0") -> None:
+def raise_vertex_import_error(minimum_expected_version: str = "1.44.0") -> None:
     """Raise ImportError related to Vertex SDK being not available.
 
     Args:
diff --git a/libs/vertexai/pyproject.toml b/libs/vertexai/pyproject.toml
index 24e61925..bbbc97e8 100644
--- a/libs/vertexai/pyproject.toml
+++ b/libs/vertexai/pyproject.toml
@@ -13,7 +13,7 @@ license = "MIT"
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
 langchain-core = ">=0.1.27,<0.2"
-google-cloud-aiplatform = "^1.42.1"
+google-cloud-aiplatform = "^1.44.0"
 google-cloud-storage = "^2.14.0"
 types-requests = "^2.31.0"
 types-protobuf = "^4.24.0.4"

From cabbea215356df4982b87093523d4da358dc180c Mon Sep 17 00:00:00 2001
From: Holt Skinner
Date: Thu, 14 Mar 2024 16:48:33 -0500
Subject: [PATCH 12/17] Update poetry.lock

---
 libs/vertexai/poetry.lock | 433 ++++++++++++++++++++------------------
 1 file changed, 226 insertions(+), 207 deletions(-)

diff --git a/libs/vertexai/poetry.lock b/libs/vertexai/poetry.lock
index 712d7ae2..48f8c3ea 100644
--- a/libs/vertexai/poetry.lock
+++ b/libs/vertexai/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]] name = "aiohttp" @@ -486,13 +486,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] [[package]] name = "google-api-python-client" -version = "2.119.0" +version = "2.122.0" description = "Google API Client Library for Python" optional = false python-versions = ">=3.7" files = [ - {file = "google-api-python-client-2.119.0.tar.gz", hash = "sha256:ff9ef7539eaf7e088a481b25d1af4704210b07863e1d51b5ee498b910a3a46a3"}, - {file = "google_api_python_client-2.119.0-py2.py3-none-any.whl", hash = "sha256:84e43bdb58dd8d2301669513863996378ffe9a3bf6d23b5ccd4f1e021323dbeb"}, + {file = "google-api-python-client-2.122.0.tar.gz", hash = "sha256:77447bf2d6b6ea9e686fd66fc2f12ee7a63e3889b7427676429ebf09fcb5dcf9"}, + {file = "google_api_python_client-2.122.0-py2.py3-none-any.whl", hash = "sha256:a5953e60394b77b98bcc7ff7c4971ed784b3b693e9a569c176eaccb1549330f2"}, ] [package.dependencies] @@ -504,13 +504,13 @@ uritemplate = ">=3.0.1,<5" [[package]] name = "google-auth" -version = "2.28.1" +version = "2.28.2" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.28.1.tar.gz", hash = "sha256:34fc3046c257cedcf1622fc4b31fc2be7923d9b4d44973d481125ecc50d83885"}, - {file = "google_auth-2.28.1-py2.py3-none-any.whl", hash = "sha256:25141e2d7a14bfcba945f5e9827f98092716e99482562f15306e5b026e21aa72"}, + {file = "google-auth-2.28.2.tar.gz", hash = "sha256:80b8b4969aa9ed5938c7828308f20f035bc79f9d8fb8120bf9dc8db20b41ba30"}, + {file = "google_auth-2.28.2-py2.py3-none-any.whl", hash = "sha256:9fd67bbcd40f16d9d42f950228e9cf02a2ded4ae49198b27432d0cded5a74c38"}, ] [package.dependencies] @@ -542,13 +542,13 @@ httplib2 = ">=0.19.0" [[package]] name = "google-cloud-aiplatform" -version = "1.42.1" +version = "1.44.0" description = "Vertex AI API client library" optional = false python-versions = ">=3.8" files = [ - {file = "google-cloud-aiplatform-1.42.1.tar.gz", hash = "sha256:679068e068e29059d673a6410483fea762286fa07739d684fb1b4626698e0805"}, - {file = "google_cloud_aiplatform-1.42.1-py2.py3-none-any.whl", hash = "sha256:9f25ebd306807972cf05a578abc16695c4f72d4a2dd7e7b1624dbe247937ba24"}, + {file = "google-cloud-aiplatform-1.44.0.tar.gz", hash = "sha256:65876e8080ed3c4b3ec79c9a4090faab558fc369e3e4e75fa7a43d9813680cb2"}, + {file = "google_cloud_aiplatform-1.44.0-py2.py3-none-any.whl", hash = "sha256:3670d961ce8c3da3d4bc3451b0e73221a461ebc7805ca16a51df99cbfd3785b0"}, ] [package.dependencies] @@ -567,32 +567,33 @@ autologging = ["mlflow (>=1.27.0,<=2.1.1)"] cloud-profiler = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"] datasets = ["pyarrow (>=10.0.1)", "pyarrow (>=3.0.0,<8.0dev)"] endpoint = ["requests (>=2.28.1)"] -full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<0.103.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pyarrow (>=10.0.1)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (==5.3.1)", "ray[default] (>=2.4,<2.5)", "ray[default] (>=2.5,<2.5.1)", "requests (>=2.28.1)", "starlette (>=0.17.1)", "tensorflow (>=2.3.0,<2.15.0)", "tensorflow (>=2.3.0,<3.0.0dev)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)"] +full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi 
(>=0.71.0,<0.103.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (==5.3.1)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "starlette (>=0.17.1)", "tensorflow (>=2.3.0,<2.15.0)", "tensorflow (>=2.3.0,<3.0.0dev)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)"] lit = ["explainable-ai-sdk (>=1.0.0)", "lit-nlp (==0.4.0)", "pandas (>=1.0.0)", "tensorflow (>=2.3.0,<3.0.0dev)"] metadata = ["numpy (>=1.15.0)", "pandas (>=1.0.0)"] pipelines = ["pyyaml (==5.3.1)"] prediction = ["docker (>=5.0.3)", "fastapi (>=0.71.0,<0.103.1)", "httpx (>=0.23.0,<0.25.0)", "starlette (>=0.17.1)", "uvicorn[standard] (>=0.16.0)"] preview = ["cloudpickle (<3.0)", "google-cloud-logging (<4.0)"] private-endpoints = ["requests (>=2.28.1)", "urllib3 (>=1.21.1,<1.27)"] -ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "pandas (>=1.0.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5)", "ray[default] (>=2.5,<2.5.1)"] +ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)"] tensorboard = ["tensorflow (>=2.3.0,<2.15.0)"] -testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<0.103.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pyarrow (>=10.0.1)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (==5.3.1)", "ray[default] (>=2.4,<2.5)", "ray[default] (>=2.5,<2.5.1)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.3.0,<2.15.0)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<=2.12.0)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost", "xgboost-ray"] +testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<0.103.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (==5.3.1)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", 
"starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (>=2.3.0,<2.15.0)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost", "xgboost-ray"] vizier = ["google-vizier (>=0.1.6)"] xai = ["tensorflow (>=2.3.0,<3.0.0dev)"] [[package]] name = "google-cloud-bigquery" -version = "3.17.2" +version = "3.19.0" description = "Google BigQuery API client library" optional = false python-versions = ">=3.7" files = [ - {file = "google-cloud-bigquery-3.17.2.tar.gz", hash = "sha256:6e1cf669a40e567ab3289c7b5f2056363da9fcb85d9a4736ee90240d4a7d84ea"}, - {file = "google_cloud_bigquery-3.17.2-py2.py3-none-any.whl", hash = "sha256:cdadf5283dca55a1a350bacf8c8a7466169d3cf46c5a0a3abc5e9aa0b0a51dee"}, + {file = "google-cloud-bigquery-3.19.0.tar.gz", hash = "sha256:8e311dae49768e1501fcdc5e916bff4b7e169471e5707919f4a6f78a02b3b5a6"}, + {file = "google_cloud_bigquery-3.19.0-py2.py3-none-any.whl", hash = "sha256:c6b8850247a4b132066e49f6e45f850c22824482838688d744a4398eea1120ed"}, ] [package.dependencies] -google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<3.0.0dev" google-cloud-core = ">=1.6.0,<3.0.0dev" google-resumable-media = ">=0.6.0,<3.0dev" packaging = ">=20.0.0" @@ -653,36 +654,36 @@ libcst = ["libcst (>=0.2.5)"] [[package]] name = "google-cloud-resource-manager" -version = "1.12.2" +version = "1.12.3" description = "Google Cloud Resource Manager API client library" optional = false python-versions = ">=3.7" files = [ - {file = "google-cloud-resource-manager-1.12.2.tar.gz", hash = "sha256:2ede446a5087b236f0e1fb39cca3791bae97eb0d9125057401454b190d5572ee"}, - {file = "google_cloud_resource_manager-1.12.2-py2.py3-none-any.whl", hash = "sha256:45abbb8911195cc831cc77c8e3be84decc271686579b332d4142af507f423ebf"}, + {file = "google-cloud-resource-manager-1.12.3.tar.gz", hash = "sha256:809851824119834e4f2310b2c4f38621c1d16b2bb14d5b9f132e69c79d355e7f"}, + {file = "google_cloud_resource_manager-1.12.3-py2.py3-none-any.whl", hash = "sha256:92be7d6959927b76d90eafc4028985c37975a46ded5466a018f02e8649e113d4"}, ] [package.dependencies] google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} -google-auth = ">=2.14.1,<3.0.0dev" +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" proto-plus = ">=1.22.3,<2.0.0dev" protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" [[package]] name = "google-cloud-storage" -version = "2.14.0" +version = "2.15.0" description = "Google Cloud Storage API client library" optional = false python-versions = ">=3.7" files = [ - {file = "google-cloud-storage-2.14.0.tar.gz", hash = "sha256:2d23fcf59b55e7b45336729c148bb1c464468c69d5efbaee30f7201dd90eb97e"}, - {file = "google_cloud_storage-2.14.0-py2.py3-none-any.whl", hash = "sha256:8641243bbf2a2042c16a6399551fbb13f062cbc9a2de38d6c0bb5426962e9dbd"}, + {file = "google-cloud-storage-2.15.0.tar.gz", hash = "sha256:7560a3c48a03d66c553dc55215d35883c680fe0ab44c23aa4832800ccc855c74"}, + {file = "google_cloud_storage-2.15.0-py2.py3-none-any.whl", hash = 
"sha256:5d9237f88b648e1d724a0f20b5cde65996a37fe51d75d17660b1404097327dd2"}, ] [package.dependencies] -google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev" -google-auth = ">=2.23.3,<3.0dev" +google-api-core = ">=2.15.0,<3.0.0dev" +google-auth = ">=2.26.1,<3.0dev" google-cloud-core = ">=2.3.0,<3.0dev" google-crc32c = ">=1.0,<2.0dev" google-resumable-media = ">=2.6.0" @@ -791,13 +792,13 @@ requests = ["requests (>=2.18.0,<3.0.0dev)"] [[package]] name = "googleapis-common-protos" -version = "1.62.0" +version = "1.63.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" files = [ - {file = "googleapis-common-protos-1.62.0.tar.gz", hash = "sha256:83f0ece9f94e5672cced82f592d2a5edf527a96ed1794f0bab36d5735c996277"}, - {file = "googleapis_common_protos-1.62.0-py2.py3-none-any.whl", hash = "sha256:4750113612205514f9f6aa4cb00d523a94f3e8c06c5ad2fee466387dc4875f07"}, + {file = "googleapis-common-protos-1.63.0.tar.gz", hash = "sha256:17ad01b11d5f1d0171c06d3ba5c04c54474e883b66b949722b4938ee2694ef4e"}, + {file = "googleapis_common_protos-1.63.0-py2.py3-none-any.whl", hash = "sha256:ae45f75702f7c08b541f750854a678bd8f534a1a6bace6afe975f1d0a82d6632"}, ] [package.dependencies] @@ -896,84 +897,84 @@ protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4 [[package]] name = "grpcio" -version = "1.62.0" +version = "1.62.1" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.7" files = [ - {file = "grpcio-1.62.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:136ffd79791b1eddda8d827b607a6285474ff8a1a5735c4947b58c481e5e4271"}, - {file = "grpcio-1.62.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:d6a56ba703be6b6267bf19423d888600c3f574ac7c2cc5e6220af90662a4d6b0"}, - {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:4cd356211579043fce9f52acc861e519316fff93980a212c8109cca8f47366b6"}, - {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e803e9b58d8f9b4ff0ea991611a8d51b31c68d2e24572cd1fe85e99e8cc1b4f8"}, - {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4c04fe33039b35b97c02d2901a164bbbb2f21fb9c4e2a45a959f0b044c3512c"}, - {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:95370c71b8c9062f9ea033a0867c4c73d6f0ff35113ebd2618171ec1f1e903e0"}, - {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c912688acc05e4ff012c8891803659d6a8a8b5106f0f66e0aed3fb7e77898fa6"}, - {file = "grpcio-1.62.0-cp310-cp310-win32.whl", hash = "sha256:821a44bd63d0f04e33cf4ddf33c14cae176346486b0df08b41a6132b976de5fc"}, - {file = "grpcio-1.62.0-cp310-cp310-win_amd64.whl", hash = "sha256:81531632f93fece32b2762247c4c169021177e58e725494f9a746ca62c83acaa"}, - {file = "grpcio-1.62.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:3fa15850a6aba230eed06b236287c50d65a98f05054a0f01ccedf8e1cc89d57f"}, - {file = "grpcio-1.62.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:36df33080cd7897623feff57831eb83c98b84640b016ce443305977fac7566fb"}, - {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7a195531828b46ea9c4623c47e1dc45650fc7206f8a71825898dd4c9004b0928"}, - {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab140a3542bbcea37162bdfc12ce0d47a3cda3f2d91b752a124cc9fe6776a9e2"}, - {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7f9d6c3223914abb51ac564dc9c3782d23ca445d2864321b9059d62d47144021"}, - {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:fbe0c20ce9a1cff75cfb828b21f08d0a1ca527b67f2443174af6626798a754a4"}, - {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38f69de9c28c1e7a8fd24e4af4264726637b72f27c2099eaea6e513e7142b47e"}, - {file = "grpcio-1.62.0-cp311-cp311-win32.whl", hash = "sha256:ce1aafdf8d3f58cb67664f42a617af0e34555fe955450d42c19e4a6ad41c84bd"}, - {file = "grpcio-1.62.0-cp311-cp311-win_amd64.whl", hash = "sha256:eef1d16ac26c5325e7d39f5452ea98d6988c700c427c52cbc7ce3201e6d93334"}, - {file = "grpcio-1.62.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8aab8f90b2a41208c0a071ec39a6e5dbba16fd827455aaa070fec241624ccef8"}, - {file = "grpcio-1.62.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:62aa1659d8b6aad7329ede5d5b077e3d71bf488d85795db517118c390358d5f6"}, - {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0d7ae7fc7dbbf2d78d6323641ded767d9ec6d121aaf931ec4a5c50797b886532"}, - {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f359d635ee9428f0294bea062bb60c478a8ddc44b0b6f8e1f42997e5dc12e2ee"}, - {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d48e5b1f8f4204889f1acf30bb57c30378e17c8d20df5acbe8029e985f735c"}, - {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:662d3df5314ecde3184cf87ddd2c3a66095b3acbb2d57a8cada571747af03873"}, - {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92cdb616be44c8ac23a57cce0243af0137a10aa82234f23cd46e69e115071388"}, - {file = "grpcio-1.62.0-cp312-cp312-win32.whl", hash = "sha256:0b9179478b09ee22f4a36b40ca87ad43376acdccc816ce7c2193a9061bf35701"}, - {file = "grpcio-1.62.0-cp312-cp312-win_amd64.whl", hash = "sha256:614c3ed234208e76991992342bab725f379cc81c7dd5035ee1de2f7e3f7a9842"}, - {file = "grpcio-1.62.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:7e1f51e2a460b7394670fdb615e26d31d3260015154ea4f1501a45047abe06c9"}, - {file = "grpcio-1.62.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:bcff647e7fe25495e7719f779cc219bbb90b9e79fbd1ce5bda6aae2567f469f2"}, - {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:56ca7ba0b51ed0de1646f1735154143dcbdf9ec2dbe8cc6645def299bb527ca1"}, - {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e84bfb2a734e4a234b116be208d6f0214e68dcf7804306f97962f93c22a1839"}, - {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c1488b31a521fbba50ae86423f5306668d6f3a46d124f7819c603979fc538c4"}, - {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98d8f4eb91f1ce0735bf0b67c3b2a4fea68b52b2fd13dc4318583181f9219b4b"}, - {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b3d3d755cfa331d6090e13aac276d4a3fb828bf935449dc16c3d554bf366136b"}, - {file = "grpcio-1.62.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a33f2bfd8a58a02aab93f94f6c61279be0f48f99fcca20ebaee67576cd57307b"}, - {file = "grpcio-1.62.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:5e709f7c8028ce0443bddc290fb9c967c1e0e9159ef7a030e8c21cac1feabd35"}, - {file = "grpcio-1.62.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:2f3d9a4d0abb57e5f49ed5039d3ed375826c2635751ab89dcc25932ff683bbb6"}, - {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = 
"sha256:62ccb92f594d3d9fcd00064b149a0187c246b11e46ff1b7935191f169227f04c"}, - {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:921148f57c2e4b076af59a815467d399b7447f6e0ee10ef6d2601eb1e9c7f402"}, - {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f897b16190b46bc4d4aaf0a32a4b819d559a37a756d7c6b571e9562c360eed72"}, - {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1bc8449084fe395575ed24809752e1dc4592bb70900a03ca42bf236ed5bf008f"}, - {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81d444e5e182be4c7856cd33a610154fe9ea1726bd071d07e7ba13fafd202e38"}, - {file = "grpcio-1.62.0-cp38-cp38-win32.whl", hash = "sha256:88f41f33da3840b4a9bbec68079096d4caf629e2c6ed3a72112159d570d98ebe"}, - {file = "grpcio-1.62.0-cp38-cp38-win_amd64.whl", hash = "sha256:fc2836cb829895ee190813446dce63df67e6ed7b9bf76060262c55fcd097d270"}, - {file = "grpcio-1.62.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:fcc98cff4084467839d0a20d16abc2a76005f3d1b38062464d088c07f500d170"}, - {file = "grpcio-1.62.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:0d3dee701e48ee76b7d6fbbba18ba8bc142e5b231ef7d3d97065204702224e0e"}, - {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:b7a6be562dd18e5d5bec146ae9537f20ae1253beb971c0164f1e8a2f5a27e829"}, - {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29cb592c4ce64a023712875368bcae13938c7f03e99f080407e20ffe0a9aa33b"}, - {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eda79574aec8ec4d00768dcb07daba60ed08ef32583b62b90bbf274b3c279f7"}, - {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7eea57444a354ee217fda23f4b479a4cdfea35fb918ca0d8a0e73c271e52c09c"}, - {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0e97f37a3b7c89f9125b92d22e9c8323f4e76e7993ba7049b9f4ccbe8bae958a"}, - {file = "grpcio-1.62.0-cp39-cp39-win32.whl", hash = "sha256:39cd45bd82a2e510e591ca2ddbe22352e8413378852ae814549c162cf3992a93"}, - {file = "grpcio-1.62.0-cp39-cp39-win_amd64.whl", hash = "sha256:b71c65427bf0ec6a8b48c68c17356cb9fbfc96b1130d20a07cb462f4e4dcdcd5"}, - {file = "grpcio-1.62.0.tar.gz", hash = "sha256:748496af9238ac78dcd98cce65421f1adce28c3979393e3609683fcd7f3880d7"}, + {file = "grpcio-1.62.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:179bee6f5ed7b5f618844f760b6acf7e910988de77a4f75b95bbfaa8106f3c1e"}, + {file = "grpcio-1.62.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:48611e4fa010e823ba2de8fd3f77c1322dd60cb0d180dc6630a7e157b205f7ea"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b2a0e71b0a2158aa4bce48be9f8f9eb45cbd17c78c7443616d00abbe2a509f6d"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbe80577c7880911d3ad65e5ecc997416c98f354efeba2f8d0f9112a67ed65a5"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f6c693d446964e3292425e1d16e21a97a48ba9172f2d0df9d7b640acb99243"}, + {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:77c339403db5a20ef4fed02e4d1a9a3d9866bf9c0afc77a42234677313ea22f3"}, + {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b5a4ea906db7dec694098435d84bf2854fe158eb3cd51e1107e571246d4d1d70"}, + {file = "grpcio-1.62.1-cp310-cp310-win32.whl", hash = 
"sha256:4187201a53f8561c015bc745b81a1b2d278967b8de35f3399b84b0695e281d5f"}, + {file = "grpcio-1.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:844d1f3fb11bd1ed362d3fdc495d0770cfab75761836193af166fee113421d66"}, + {file = "grpcio-1.62.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:833379943d1728a005e44103f17ecd73d058d37d95783eb8f0b28ddc1f54d7b2"}, + {file = "grpcio-1.62.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:c7fcc6a32e7b7b58f5a7d27530669337a5d587d4066060bcb9dee7a8c833dfb7"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:fa7d28eb4d50b7cbe75bb8b45ed0da9a1dc5b219a0af59449676a29c2eed9698"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48f7135c3de2f298b833be8b4ae20cafe37091634e91f61f5a7eb3d61ec6f660"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71f11fd63365ade276c9d4a7b7df5c136f9030e3457107e1791b3737a9b9ed6a"}, + {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b49fd8fe9f9ac23b78437da94c54aa7e9996fbb220bac024a67469ce5d0825f"}, + {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:482ae2ae78679ba9ed5752099b32e5fe580443b4f798e1b71df412abf43375db"}, + {file = "grpcio-1.62.1-cp311-cp311-win32.whl", hash = "sha256:1faa02530b6c7426404372515fe5ddf66e199c2ee613f88f025c6f3bd816450c"}, + {file = "grpcio-1.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bd90b8c395f39bc82a5fb32a0173e220e3f401ff697840f4003e15b96d1befc"}, + {file = "grpcio-1.62.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:b134d5d71b4e0837fff574c00e49176051a1c532d26c052a1e43231f252d813b"}, + {file = "grpcio-1.62.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d1f6c96573dc09d50dbcbd91dbf71d5cf97640c9427c32584010fbbd4c0e0037"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:359f821d4578f80f41909b9ee9b76fb249a21035a061a327f91c953493782c31"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a485f0c2010c696be269184bdb5ae72781344cb4e60db976c59d84dd6354fac9"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b50b09b4dc01767163d67e1532f948264167cd27f49e9377e3556c3cba1268e1"}, + {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3227c667dccbe38f2c4d943238b887bac588d97c104815aecc62d2fd976e014b"}, + {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3952b581eb121324853ce2b191dae08badb75cd493cb4e0243368aa9e61cfd41"}, + {file = "grpcio-1.62.1-cp312-cp312-win32.whl", hash = "sha256:83a17b303425104d6329c10eb34bba186ffa67161e63fa6cdae7776ff76df73f"}, + {file = "grpcio-1.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:6696ffe440333a19d8d128e88d440f91fb92c75a80ce4b44d55800e656a3ef1d"}, + {file = "grpcio-1.62.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:e3393b0823f938253370ebef033c9fd23d27f3eae8eb9a8f6264900c7ea3fb5a"}, + {file = "grpcio-1.62.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:83e7ccb85a74beaeae2634f10eb858a0ed1a63081172649ff4261f929bacfd22"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:882020c87999d54667a284c7ddf065b359bd00251fcd70279ac486776dbf84ec"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a10383035e864f386fe096fed5c47d27a2bf7173c56a6e26cffaaa5a361addb1"}, + {file = 
"grpcio-1.62.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:960edebedc6b9ada1ef58e1c71156f28689978188cd8cff3b646b57288a927d9"}, + {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:23e2e04b83f347d0aadde0c9b616f4726c3d76db04b438fd3904b289a725267f"}, + {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:978121758711916d34fe57c1f75b79cdfc73952f1481bb9583399331682d36f7"}, + {file = "grpcio-1.62.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9084086190cc6d628f282e5615f987288b95457292e969b9205e45b442276407"}, + {file = "grpcio-1.62.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:22bccdd7b23c420a27fd28540fb5dcbc97dc6be105f7698cb0e7d7a420d0e362"}, + {file = "grpcio-1.62.1-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:8999bf1b57172dbc7c3e4bb3c732658e918f5c333b2942243f10d0d653953ba9"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:d9e52558b8b8c2f4ac05ac86344a7417ccdd2b460a59616de49eb6933b07a0bd"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1714e7bc935780bc3de1b3fcbc7674209adf5208ff825799d579ffd6cd0bd505"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8842ccbd8c0e253c1f189088228f9b433f7a93b7196b9e5b6f87dba393f5d5d"}, + {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1f1e7b36bdff50103af95a80923bf1853f6823dd62f2d2a2524b66ed74103e49"}, + {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bba97b8e8883a8038606480d6b6772289f4c907f6ba780fa1f7b7da7dfd76f06"}, + {file = "grpcio-1.62.1-cp38-cp38-win32.whl", hash = "sha256:a7f615270fe534548112a74e790cd9d4f5509d744dd718cd442bf016626c22e4"}, + {file = "grpcio-1.62.1-cp38-cp38-win_amd64.whl", hash = "sha256:e6c8c8693df718c5ecbc7babb12c69a4e3677fd11de8886f05ab22d4e6b1c43b"}, + {file = "grpcio-1.62.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:73db2dc1b201d20ab7083e7041946910bb991e7e9761a0394bbc3c2632326483"}, + {file = "grpcio-1.62.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:407b26b7f7bbd4f4751dbc9767a1f0716f9fe72d3d7e96bb3ccfc4aace07c8de"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f8de7c8cef9261a2d0a62edf2ccea3d741a523c6b8a6477a340a1f2e417658de"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd5c8a1af40ec305d001c60236308a67e25419003e9bb3ebfab5695a8d0b369"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0477cb31da67846a33b1a75c611f88bfbcd427fe17701b6317aefceee1b96f"}, + {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:60dcd824df166ba266ee0cfaf35a31406cd16ef602b49f5d4dfb21f014b0dedd"}, + {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:973c49086cabab773525f6077f95e5a993bfc03ba8fc32e32f2c279497780585"}, + {file = "grpcio-1.62.1-cp39-cp39-win32.whl", hash = "sha256:12859468e8918d3bd243d213cd6fd6ab07208195dc140763c00dfe901ce1e1b4"}, + {file = "grpcio-1.62.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7209117bbeebdfa5d898205cc55153a51285757902dd73c47de498ad4d11332"}, + {file = "grpcio-1.62.1.tar.gz", hash = "sha256:6c455e008fa86d9e9a9d85bb76da4277c0d7d9668a3bfa70dbe86e9f3c759947"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.62.0)"] +protobuf = ["grpcio-tools (>=1.62.1)"] [[package]] name = "grpcio-status" -version = "1.62.0" +version = "1.62.1" description = "Status proto mapping for gRPC" 
optional = false python-versions = ">=3.6" files = [ - {file = "grpcio-status-1.62.0.tar.gz", hash = "sha256:0d693e9c09880daeaac060d0c3dba1ae470a43c99e5d20dfeafd62cf7e08a85d"}, - {file = "grpcio_status-1.62.0-py3-none-any.whl", hash = "sha256:3baac03fcd737310e67758c4082a188107f771d32855bce203331cd4c9aa687a"}, + {file = "grpcio-status-1.62.1.tar.gz", hash = "sha256:3431c8abbab0054912c41df5c72f03ddf3b7a67be8a287bb3c18a3456f96ff77"}, + {file = "grpcio_status-1.62.1-py3-none-any.whl", hash = "sha256:af0c3ab85da31669f21749e8d53d669c061ebc6ce5637be49a46edcb7aa8ab17"}, ] [package.dependencies] googleapis-common-protos = ">=1.5.5" -grpcio = ">=1.62.0" +grpcio = ">=1.62.1" protobuf = ">=4.21.6" [[package]] @@ -1039,13 +1040,13 @@ files = [ [[package]] name = "langchain" -version = "0.1.9" +version = "0.1.12" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchain-0.1.9-py3-none-any.whl", hash = "sha256:1436a9f4e498bb9f8e01e0ab8d185771d49c0fc96b3d2da4d5bed5055015746f"}, - {file = "langchain-0.1.9.tar.gz", hash = "sha256:da1f89aeaf5cbc225eb1d6523af0f273c062fdc40a76ec455486c3c184f741b1"}, + {file = "langchain-0.1.12-py3-none-any.whl", hash = "sha256:b4dd1760e2d035daefad08af60a209b96b729ee45492d34e3e127e553a471034"}, + {file = "langchain-0.1.12.tar.gz", hash = "sha256:5f612761ba548b81748ed8dc70535e8de0531445415028a82de3fd8255bfa8a3"}, ] [package.dependencies] @@ -1053,9 +1054,10 @@ aiohttp = ">=3.8.3,<4.0.0" async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} dataclasses-json = ">=0.5.7,<0.7" jsonpatch = ">=1.33,<2.0" -langchain-community = ">=0.0.21,<0.1" -langchain-core = ">=0.1.26,<0.2" -langsmith = ">=0.1.0,<0.2.0" +langchain-community = ">=0.0.28,<0.1" +langchain-core = ">=0.1.31,<0.2.0" +langchain-text-splitters = ">=0.0.1,<0.1" +langsmith = ">=0.1.17,<0.2.0" numpy = ">=1,<2" pydantic = ">=1,<3" PyYAML = ">=5.3" @@ -1064,7 +1066,7 @@ SQLAlchemy = ">=1.4,<3" tenacity = ">=8.1.0,<9.0.0" [package.extras] -azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] +azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] clarifai = ["clarifai (>=9.1.0)"] cli = ["typer (>=0.9.0,<0.10.0)"] cohere = ["cohere (>=4,<5)"] @@ -1079,19 +1081,19 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"] [[package]] name = "langchain-community" -version = "0.0.24" +version = "0.0.28" description = "Community contributed LangChain integrations." 
optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchain_community-0.0.24-py3-none-any.whl", hash = "sha256:575e776817d6f5e3dfdff0230049de342e06aaa60fb1924316cf82b4e710fe84"}, - {file = "langchain_community-0.0.24.tar.gz", hash = "sha256:fd609f6c962cca4b7b75f2159f1fbf74b14fdd45011ee2be53e95db4e678837f"}, + {file = "langchain_community-0.0.28-py3-none-any.whl", hash = "sha256:bdb015ac455ae68432ea104628717583dce041e1abdfcefe86e39f034f5e90b8"}, + {file = "langchain_community-0.0.28.tar.gz", hash = "sha256:8664d243a90550fc5ddc137b712034e02c8d43afc8d4cc832ba5842b44c864ce"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" dataclasses-json = ">=0.5.7,<0.7" -langchain-core = ">=0.1.26,<0.2" +langchain-core = ">=0.1.31,<0.2.0" langsmith = ">=0.1.0,<0.2.0" numpy = ">=1,<2" PyYAML = ">=5.3" @@ -1101,17 +1103,17 @@ tenacity = ">=8.1.0,<9.0.0" [package.extras] cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)", "zhipuai (>=1.0.7,<2.0.0)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", 
"bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)", "zhipuai (>=1.0.7,<2.0.0)"] [[package]] name = "langchain-core" -version = "0.1.27" +version = "0.1.32" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchain_core-0.1.27-py3-none-any.whl", hash = "sha256:68eb89dc4a932baf4fb6b4b75b7119eec9e5405e892d2137e9fe0a1d24a40d0c"}, - {file = "langchain_core-0.1.27.tar.gz", hash = "sha256:698414223525c0bc130d85a614e1493905d588ab72fe0c9ad3b537b1dc62067f"}, + {file = "langchain_core-0.1.32-py3-none-any.whl", hash = "sha256:192aecdee6216af19b596ec18e7be3da0b9ecb9083eec263e02b68125737245d"}, + {file = "langchain_core-0.1.32.tar.gz", hash = "sha256:d62683becbf20f51f12875791a042320f45eaa0c87a267d30bc03bc1a07f5ec2"}, ] [package.dependencies] @@ -1127,15 +1129,32 @@ tenacity = ">=8.1.0,<9.0.0" [package.extras] extended-testing = ["jinja2 (>=3,<4)"] +[[package]] +name = "langchain-text-splitters" +version = "0.0.1" +description = "LangChain text splitting utilities" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "langchain_text_splitters-0.0.1-py3-none-any.whl", hash = "sha256:f5b802f873f5ff6a8b9259ff34d53ed989666ef4e1582e6d1adb3b5520e3839a"}, + {file = "langchain_text_splitters-0.0.1.tar.gz", hash = "sha256:ac459fa98799f5117ad5425a9330b21961321e30bc19a2a2f9f761ddadd62aa1"}, +] + +[package.dependencies] +langchain-core = ">=0.1.28,<0.2.0" + +[package.extras] +extended-testing = ["lxml 
(>=5.1.0,<6.0.0)"] + [[package]] name = "langsmith" -version = "0.1.9" +version = "0.1.26" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langsmith-0.1.9-py3-none-any.whl", hash = "sha256:f821b3cb07a87eac5cb2181ff0b61051811e4eef09ae4b46e700981f7ae5dfb9"}, - {file = "langsmith-0.1.9.tar.gz", hash = "sha256:9bd3e80607722c3d2db84cf3440005491a859b80b5e499bc988032d5c2da91f0"}, + {file = "langsmith-0.1.26-py3-none-any.whl", hash = "sha256:3a84c21c01dc9c62fe6713789fed5621ed2d6faa7ffe9b90dde1288b8d4824d3"}, + {file = "langsmith-0.1.26.tar.gz", hash = "sha256:121e5334b2267dc6e2705c2b219aeda7595b3abf004a9a83504d4c46c1894565"}, ] [package.dependencies] @@ -1145,13 +1164,13 @@ requests = ">=2,<3" [[package]] name = "marshmallow" -version = "3.21.0" +version = "3.21.1" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.21.0-py3-none-any.whl", hash = "sha256:e7997f83571c7fd476042c2c188e4ee8a78900ca5e74bd9c8097afa56624e9bd"}, - {file = "marshmallow-3.21.0.tar.gz", hash = "sha256:20f53be28c6e374a711a16165fb22a8dc6003e3f7cda1285e3ca777b9193885b"}, + {file = "marshmallow-3.21.1-py3-none-any.whl", hash = "sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633"}, + {file = "marshmallow-3.21.1.tar.gz", hash = "sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3"}, ] [package.dependencies] @@ -1263,38 +1282,38 @@ files = [ [[package]] name = "mypy" -version = "1.8.0" +version = "1.9.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"}, - {file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"}, - {file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"}, - {file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"}, - {file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"}, - {file = "mypy-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae"}, - {file = "mypy-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3"}, - {file = "mypy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817"}, - {file = "mypy-1.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d"}, - {file = "mypy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835"}, - {file = "mypy-1.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd"}, - {file = "mypy-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"}, - {file = 
"mypy-1.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218"}, - {file = "mypy-1.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3"}, - {file = "mypy-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e"}, - {file = "mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"}, - {file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"}, - {file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"}, - {file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"}, - {file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"}, - {file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"}, - {file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"}, - {file = "mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"}, - {file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"}, - {file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"}, - {file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"}, - {file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, + {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, + {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, + {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, + {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, + {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, + {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, 
+ {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, + {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, + {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, + {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, + {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, + {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, + {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, + {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, + {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, + {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, + {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, + {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, + {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, ] [package.dependencies] @@ -1588,13 +1607,13 @@ pyasn1 = ">=0.4.6,<0.6.0" [[package]] name = "pydantic" -version = "2.6.2" +version = "2.6.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.6.2-py3-none-any.whl", hash = "sha256:37a5432e54b12fecaa1049c5195f3d860a10e01bdfd24f1840ef14bd0d3aeab3"}, - {file = "pydantic-2.6.2.tar.gz", hash = "sha256:a09be1c3d28f3abe37f8a78af58284b236a92ce520105ddc91a6d29ea1176ba7"}, + {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"}, + {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"}, ] [package.dependencies] @@ -1698,13 +1717,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pyparsing" -version = "3.1.1" +version = "3.1.2" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.6.8" files = [ - {file = "pyparsing-3.1.1-py3-none-any.whl", hash = 
"sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, - {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, + {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, + {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, ] [package.extras] @@ -1784,13 +1803,13 @@ watchdog = ">=2.0.0" [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -1998,60 +2017,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.27" +version = "2.0.28" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.27-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d04e579e911562f1055d26dab1868d3e0bb905db3bccf664ee8ad109f035618a"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fa67d821c1fd268a5a87922ef4940442513b4e6c377553506b9db3b83beebbd8"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c7a596d0be71b7baa037f4ac10d5e057d276f65a9a611c46970f012752ebf2d"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:954d9735ee9c3fa74874c830d089a815b7b48df6f6b6e357a74130e478dbd951"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5cd20f58c29bbf2680039ff9f569fa6d21453fbd2fa84dbdb4092f006424c2e6"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:03f448ffb731b48323bda68bcc93152f751436ad6037f18a42b7e16af9e91c07"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-win32.whl", hash = "sha256:d997c5938a08b5e172c30583ba6b8aad657ed9901fc24caf3a7152eeccb2f1b4"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-win_amd64.whl", hash = "sha256:eb15ef40b833f5b2f19eeae65d65e191f039e71790dd565c2af2a3783f72262f"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c5bad7c60a392850d2f0fee8f355953abaec878c483dd7c3836e0089f046bf6"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3012ab65ea42de1be81fff5fb28d6db893ef978950afc8130ba707179b4284a"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbcd77c4d94b23e0753c5ed8deba8c69f331d4fd83f68bfc9db58bc8983f49cd"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d177b7e82f6dd5e1aebd24d9c3297c70ce09cd1d5d37b43e53f39514379c029c"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:680b9a36029b30cf063698755d277885d4a0eab70a2c7c6e71aab601323cba45"}, - {file = 
"SQLAlchemy-2.0.27-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1306102f6d9e625cebaca3d4c9c8f10588735ef877f0360b5cdb4fdfd3fd7131"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-win32.whl", hash = "sha256:5b78aa9f4f68212248aaf8943d84c0ff0f74efc65a661c2fc68b82d498311fd5"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-win_amd64.whl", hash = "sha256:15e19a84b84528f52a68143439d0c7a3a69befcd4f50b8ef9b7b69d2628ae7c4"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0de1263aac858f288a80b2071990f02082c51d88335a1db0d589237a3435fe71"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce850db091bf7d2a1f2fdb615220b968aeff3849007b1204bf6e3e50a57b3d32"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dfc936870507da96aebb43e664ae3a71a7b96278382bcfe84d277b88e379b18"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4fbe6a766301f2e8a4519f4500fe74ef0a8509a59e07a4085458f26228cd7cc"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4535c49d961fe9a77392e3a630a626af5baa967172d42732b7a43496c8b28876"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0fb3bffc0ced37e5aa4ac2416f56d6d858f46d4da70c09bb731a246e70bff4d5"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-win32.whl", hash = "sha256:7f470327d06400a0aa7926b375b8e8c3c31d335e0884f509fe272b3c700a7254"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-win_amd64.whl", hash = "sha256:f9374e270e2553653d710ece397df67db9d19c60d2647bcd35bfc616f1622dcd"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e97cf143d74a7a5a0f143aa34039b4fecf11343eed66538610debc438685db4a"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7b5a3e2120982b8b6bd1d5d99e3025339f7fb8b8267551c679afb39e9c7c7f1"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e36aa62b765cf9f43a003233a8c2d7ffdeb55bc62eaa0a0380475b228663a38f"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5ada0438f5b74c3952d916c199367c29ee4d6858edff18eab783b3978d0db16d"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b1d9d1bfd96eef3c3faedb73f486c89e44e64e40e5bfec304ee163de01cf996f"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-win32.whl", hash = "sha256:ca891af9f3289d24a490a5fde664ea04fe2f4984cd97e26de7442a4251bd4b7c"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-win_amd64.whl", hash = "sha256:fd8aafda7cdff03b905d4426b714601c0978725a19efc39f5f207b86d188ba01"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec1f5a328464daf7a1e4e385e4f5652dd9b1d12405075ccba1df842f7774b4fc"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ad862295ad3f644e3c2c0d8b10a988e1600d3123ecb48702d2c0f26771f1c396"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48217be1de7d29a5600b5c513f3f7664b21d32e596d69582be0a94e36b8309cb"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e56afce6431450442f3ab5973156289bd5ec33dd618941283847c9fd5ff06bf"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:611068511b5531304137bcd7fe8117c985d1b828eb86043bd944cebb7fae3910"}, - {file = 
"SQLAlchemy-2.0.27-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b86abba762ecfeea359112b2bb4490802b340850bbee1948f785141a5e020de8"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-win32.whl", hash = "sha256:30d81cc1192dc693d49d5671cd40cdec596b885b0ce3b72f323888ab1c3863d5"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-win_amd64.whl", hash = "sha256:120af1e49d614d2525ac247f6123841589b029c318b9afbfc9e2b70e22e1827d"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d07ee7793f2aeb9b80ec8ceb96bc8cc08a2aec8a1b152da1955d64e4825fcbac"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb0845e934647232b6ff5150df37ceffd0b67b754b9fdbb095233deebcddbd4a"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fc19ae2e07a067663dd24fca55f8ed06a288384f0e6e3910420bf4b1270cc51"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b90053be91973a6fb6020a6e44382c97739736a5a9d74e08cc29b196639eb979"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2f5c9dfb0b9ab5e3a8a00249534bdd838d943ec4cfb9abe176a6c33408430230"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33e8bde8fff203de50399b9039c4e14e42d4d227759155c21f8da4a47fc8053c"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-win32.whl", hash = "sha256:d873c21b356bfaf1589b89090a4011e6532582b3a8ea568a00e0c3aab09399dd"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-win_amd64.whl", hash = "sha256:ff2f1b7c963961d41403b650842dc2039175b906ab2093635d8319bef0b7d620"}, - {file = "SQLAlchemy-2.0.27-py3-none-any.whl", hash = "sha256:1ab4e0448018d01b142c916cc7119ca573803a4745cfe341b8f95657812700ac"}, - {file = "SQLAlchemy-2.0.27.tar.gz", hash = "sha256:86a6ed69a71fe6b88bf9331594fa390a2adda4a49b5c06f98e47bf0d392534f8"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0b148ab0438f72ad21cb004ce3bdaafd28465c4276af66df3b9ecd2037bf252"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbda76961eb8f27e6ad3c84d1dc56d5bc61ba8f02bd20fcf3450bd421c2fcc9c"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feea693c452d85ea0015ebe3bb9cd15b6f49acc1a31c28b3c50f4db0f8fb1e71"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5da98815f82dce0cb31fd1e873a0cb30934971d15b74e0d78cf21f9e1b05953f"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5adf383c73f2d49ad15ff363a8748319ff84c371eed59ffd0127355d6ea1da"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56856b871146bfead25fbcaed098269d90b744eea5cb32a952df00d542cdd368"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-win32.whl", hash = "sha256:943aa74a11f5806ab68278284a4ddd282d3fb348a0e96db9b42cb81bf731acdc"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-win_amd64.whl", hash = "sha256:c6c4da4843e0dabde41b8f2e8147438330924114f541949e6318358a56d1875a"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46a3d4e7a472bfff2d28db838669fc437964e8af8df8ee1e4548e92710929adc"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3dd67b5d69794cfe82862c002512683b3db038b99002171f624712fa71aeaa"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c61e2e41656a673b777e2f0cbbe545323dbe0d32312f590b1bc09da1de6c2a02"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0315d9125a38026227f559488fe7f7cee1bd2fbc19f9fd637739dc50bb6380b2"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:af8ce2d31679006e7b747d30a89cd3ac1ec304c3d4c20973f0f4ad58e2d1c4c9"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:81ba314a08c7ab701e621b7ad079c0c933c58cdef88593c59b90b996e8b58fa5"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-win32.whl", hash = "sha256:1ee8bd6d68578e517943f5ebff3afbd93fc65f7ef8f23becab9fa8fb315afb1d"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-win_amd64.whl", hash = "sha256:ad7acbe95bac70e4e687a4dc9ae3f7a2f467aa6597049eeb6d4a662ecd990bb6"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d3499008ddec83127ab286c6f6ec82a34f39c9817f020f75eca96155f9765097"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9b66fcd38659cab5d29e8de5409cdf91e9986817703e1078b2fdaad731ea66f5"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea30da1e76cb1acc5b72e204a920a3a7678d9d52f688f087dc08e54e2754c67"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:124202b4e0edea7f08a4db8c81cc7859012f90a0d14ba2bf07c099aff6e96462"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e23b88c69497a6322b5796c0781400692eca1ae5532821b39ce81a48c395aae9"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b6303bfd78fb3221847723104d152e5972c22367ff66edf09120fcde5ddc2e2"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-win32.whl", hash = "sha256:a921002be69ac3ab2cf0c3017c4e6a3377f800f1fca7f254c13b5f1a2f10022c"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-win_amd64.whl", hash = "sha256:b4a2cf92995635b64876dc141af0ef089c6eea7e05898d8d8865e71a326c0385"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e91b5e341f8c7f1e5020db8e5602f3ed045a29f8e27f7f565e0bdee3338f2c7"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c7b78dfc7278329f27be02c44abc0d69fe235495bb8e16ec7ef1b1a17952db"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eba73ef2c30695cb7eabcdb33bb3d0b878595737479e152468f3ba97a9c22a4"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5df5d1dafb8eee89384fb7a1f79128118bc0ba50ce0db27a40750f6f91aa99d5"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2858bbab1681ee5406650202950dc8f00e83b06a198741b7c656e63818633526"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-win32.whl", hash = "sha256:9461802f2e965de5cff80c5a13bc945abea7edaa1d29360b485c3d2b56cdb075"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-win_amd64.whl", hash = "sha256:a6bec1c010a6d65b3ed88c863d56b9ea5eeefdf62b5e39cafd08c65f5ce5198b"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:843a882cadebecc655a68bd9a5b8aa39b3c52f4a9a5572a3036fb1bb2ccdc197"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dbb990612c36163c6072723523d2be7c3eb1517bbdd63fe50449f56afafd1133"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:bd7e4baf9161d076b9a7e432fce06217b9bd90cfb8f1d543d6e8c4595627edb9"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0a5354cb4de9b64bccb6ea33162cb83e03dbefa0d892db88a672f5aad638a75"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fffcc8edc508801ed2e6a4e7b0d150a62196fd28b4e16ab9f65192e8186102b6"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca7b6d99a4541b2ebab4494f6c8c2f947e0df4ac859ced575238e1d6ca5716b"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-win32.whl", hash = "sha256:8c7f10720fc34d14abad5b647bc8202202f4948498927d9f1b4df0fb1cf391b7"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-win_amd64.whl", hash = "sha256:243feb6882b06a2af68ecf4bec8813d99452a1b62ba2be917ce6283852cf701b"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc4974d3684f28b61b9a90fcb4c41fb340fd4b6a50c04365704a4da5a9603b05"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87724e7ed2a936fdda2c05dbd99d395c91ea3c96f029a033a4a20e008dd876bf"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68722e6a550f5de2e3cfe9da6afb9a7dd15ef7032afa5651b0f0c6b3adb8815d"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:328529f7c7f90adcd65aed06a161851f83f475c2f664a898af574893f55d9e53"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:df40c16a7e8be7413b885c9bf900d402918cc848be08a59b022478804ea076b8"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:426f2fa71331a64f5132369ede5171c52fd1df1bd9727ce621f38b5b24f48750"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-win32.whl", hash = "sha256:33157920b233bc542ce497a81a2e1452e685a11834c5763933b440fedd1d8e2d"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-win_amd64.whl", hash = "sha256:2f60843068e432311c886c5f03c4664acaef507cf716f6c60d5fde7265be9d7b"}, + {file = "SQLAlchemy-2.0.28-py3-none-any.whl", hash = "sha256:78bb7e8da0183a8301352d569900d9d3594c48ac21dc1c2ec6b3121ed8b6c986"}, + {file = "SQLAlchemy-2.0.28.tar.gz", hash = "sha256:dd53b6c4e6d960600fd6532b79ee28e2da489322fcf6648738134587faf767b6"}, ] [package.dependencies] @@ -2124,35 +2143,35 @@ files = [ [[package]] name = "types-google-cloud-ndb" -version = "2.2.0.20240205" +version = "2.3.0.20240311" description = "Typing stubs for google-cloud-ndb" optional = false python-versions = ">=3.8" files = [ - {file = "types-google-cloud-ndb-2.2.0.20240205.tar.gz", hash = "sha256:8384b060f37cfde1786ca7bb7ba48037ef6b2e47bf29c02512cd275b92fa75fe"}, - {file = "types_google_cloud_ndb-2.2.0.20240205-py3-none-any.whl", hash = "sha256:d410fdb23085e186b2cb2501e7457fa7af2cf36ab40194b05ad15e12860a94e6"}, + {file = "types-google-cloud-ndb-2.3.0.20240311.tar.gz", hash = "sha256:c37a149f313827d9443a0f7b8dfd572292f9d9dabb8a9c4d68cdba81689a380f"}, + {file = "types_google_cloud_ndb-2.3.0.20240311-py3-none-any.whl", hash = "sha256:8209962a420d2c60615ee26bc21ad74d77a3e337045b70ed86843a974f2d2ecd"}, ] [[package]] name = "types-protobuf" -version = "4.24.0.20240129" +version = "4.24.0.20240311" description = "Typing stubs for protobuf" optional = false python-versions = ">=3.8" files = [ - {file = "types-protobuf-4.24.0.20240129.tar.gz", hash = "sha256:8a83dd3b9b76a33e08d8636c5daa212ace1396418ed91837635fcd564a624891"}, - {file = "types_protobuf-4.24.0.20240129-py3-none-any.whl", hash = 
"sha256:23be68cc29f3f5213b5c5878ac0151706182874040e220cfb11336f9ee642ead"}, + {file = "types-protobuf-4.24.0.20240311.tar.gz", hash = "sha256:c80426f9fb9b21aee514691e96ab32a5cd694a82e2ac07964b352c3e7e0182bc"}, + {file = "types_protobuf-4.24.0.20240311-py3-none-any.whl", hash = "sha256:8e039486df058141cb221ab99f88c5878c08cca4376db1d84f63279860aa09cd"}, ] [[package]] name = "types-requests" -version = "2.31.0.20240218" +version = "2.31.0.20240311" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.31.0.20240218.tar.gz", hash = "sha256:f1721dba8385958f504a5386240b92de4734e047a08a40751c1654d1ac3349c5"}, - {file = "types_requests-2.31.0.20240218-py3-none-any.whl", hash = "sha256:a82807ec6ddce8f00fe0e949da6d6bc1fbf1715420218a9640d695f70a9e5a9b"}, + {file = "types-requests-2.31.0.20240311.tar.gz", hash = "sha256:b1c1b66abfb7fa79aae09097a811c4aa97130eb8831c60e47aee4ca344731ca5"}, + {file = "types_requests-2.31.0.20240311-py3-none-any.whl", hash = "sha256:47872893d65a38e282ee9f277a4ee50d1b28bd592040df7d1fdaffdf3779937d"}, ] [package.dependencies] @@ -2359,4 +2378,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "9c5b3a08a7faba79d79a147d2ccb25848f94e92c1281687bc2e25244bbb52f86" +content-hash = "e1f5b1b60ca14f9ad4f9a839db8850fbe5f8c263ef0b4657e997546cdd1fce93" From df4e9030a0114e42edfe329e803df8e9c8c5b297 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Thu, 14 Mar 2024 16:52:04 -0500 Subject: [PATCH 13/17] fix formatting error --- libs/vertexai/langchain_google_vertexai/embeddings.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libs/vertexai/langchain_google_vertexai/embeddings.py b/libs/vertexai/langchain_google_vertexai/embeddings.py index da2a65cc..3bf658f4 100644 --- a/libs/vertexai/langchain_google_vertexai/embeddings.py +++ b/libs/vertexai/langchain_google_vertexai/embeddings.py @@ -77,7 +77,9 @@ def validate_environment(cls, values: Dict) -> Dict: values["model_name"] ) else: - values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"]) + values["client"] = TextEmbeddingModel.from_pretrained( + values["model_name"] + ) return values def __init__( From 40f4562c369ea9bf92e5804d646d17f4477fb805 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Thu, 14 Mar 2024 16:56:16 -0500 Subject: [PATCH 14/17] Fixed lint --- libs/vertexai/langchain_google_vertexai/_image_utils.py | 2 +- .../langchain_google_vertexai/vectorstores/_sdk_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/_image_utils.py b/libs/vertexai/langchain_google_vertexai/_image_utils.py index 06fb3e9e..d15ec79d 100644 --- a/libs/vertexai/langchain_google_vertexai/_image_utils.py +++ b/libs/vertexai/langchain_google_vertexai/_image_utils.py @@ -7,7 +7,7 @@ from urllib.parse import urlparse import requests -from google.cloud import storage # type: ignore[attr-defined] +from google.cloud import storage class ImageBytesLoader: diff --git a/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py b/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py index 9bf398f6..5b168f9f 100644 --- a/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py +++ b/libs/vertexai/langchain_google_vertexai/vectorstores/_sdk_manager.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING, Any, Union -from google.cloud import aiplatform, storage # type: ignore[attr-defined] +from google.cloud 
import aiplatform, storage from google.cloud.aiplatform import telemetry from google.cloud.aiplatform.matching_engine import ( MatchingEngineIndex, From 179a894c0c2cf67c4808e844d67afe68c8ff71dd Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Thu, 14 Mar 2024 17:04:46 -0500 Subject: [PATCH 15/17] Revert telemetry changes to fix issue with new library version --- .../langchain_google_vertexai/chat_models.py | 372 +++++++++--------- 1 file changed, 183 insertions(+), 189 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/chat_models.py b/libs/vertexai/langchain_google_vertexai/chat_models.py index 1d1e9c4a..e7cbe1fb 100644 --- a/libs/vertexai/langchain_google_vertexai/chat_models.py +++ b/libs/vertexai/langchain_google_vertexai/chat_models.py @@ -1,6 +1,6 @@ """Wrapper around Google VertexAI chat-based models.""" -from __future__ import annotations # noqa +from __future__ import annotations import json import logging @@ -11,8 +11,6 @@ import proto # type: ignore[import-untyped] from google.cloud.aiplatform_v1beta1.types.content import Part as GapicPart from google.cloud.aiplatform_v1beta1.types.tool import FunctionCall -from google.cloud.aiplatform import telemetry - from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -253,7 +251,7 @@ def _get_question(messages: List[BaseMessage]) -> HumanMessage: def _parse_response_candidate(response_candidate: "Candidate") -> AIMessage: try: content = response_candidate.text - except ValueError: + except AttributeError: content = "" additional_kwargs = {} @@ -345,68 +343,67 @@ def _generate( Raises: ValueError: if the last message in the list is not from human. """ - with telemetry.tool_context_manager(self._user_agent): - should_stream = stream if stream is not None else self.streaming - safety_settings = kwargs.pop("safety_settings", None) - if should_stream: - stream_iter = self._stream( - messages, stop=stop, run_manager=run_manager, **kwargs - ) - return generate_from_stream(stream_iter) - - params = self._prepare_params(stop=stop, stream=False, **kwargs) - msg_params = {} - if "candidate_count" in params: - msg_params["candidate_count"] = params.pop("candidate_count") - - if self._is_gemini_model: - history_gemini = _parse_chat_history_gemini( - messages, - project=self.project, - convert_system_message_to_human=self.convert_system_message_to_human, + should_stream = stream if stream is not None else self.streaming + safety_settings = kwargs.pop("safety_settings", None) + if should_stream: + stream_iter = self._stream( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + return generate_from_stream(stream_iter) + + params = self._prepare_params(stop=stop, stream=False, **kwargs) + msg_params = {} + if "candidate_count" in params: + msg_params["candidate_count"] = params.pop("candidate_count") + + if self._is_gemini_model: + history_gemini = _parse_chat_history_gemini( + messages, + project=self.project, + convert_system_message_to_human=self.convert_system_message_to_human, + ) + message = history_gemini.pop() + chat = self.client.start_chat(history=history_gemini) + + # set param to `functions` until core tool/function calling implemented + raw_tools = params.pop("functions") if "functions" in params else None + tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None + response = chat.send_message( + message, + generation_config=params, + tools=tools, + safety_settings=safety_settings, + ) + generations = [ + ChatGeneration( + 
message=_parse_response_candidate(candidate), + generation_info=get_generation_info( + candidate, + self._is_gemini_model, + usage_metadata=response.to_dict().get("usage_metadata"), + ), ) - message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) - - # set param to `functions` until core tool/function calling implemented - raw_tools = params.pop("functions") if "functions" in params else None - tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None - response = chat.send_message( - message, - generation_config=params, - tools=tools, - safety_settings=safety_settings, + for candidate in response.candidates + ] + else: + question = _get_question(messages) + history = _parse_chat_history(messages[:-1]) + examples = kwargs.get("examples") or self.examples + if examples: + params["examples"] = _parse_examples(examples) + chat = self._start_chat(history, **params) + response = chat.send_message(question.content, **msg_params) + generations = [ + ChatGeneration( + message=AIMessage(content=candidate.text), + generation_info=get_generation_info( + candidate, + self._is_gemini_model, + usage_metadata=response.raw_prediction_response.metadata, + ), ) - generations = [ - ChatGeneration( - message=_parse_response_candidate(candidate), - generation_info=get_generation_info( - candidate, - self._is_gemini_model, - usage_metadata=response.to_dict().get("usage_metadata"), - ), - ) - for candidate in response.candidates - ] - else: - question = _get_question(messages) - history = _parse_chat_history(messages[:-1]) - examples = kwargs.get("examples") or self.examples - if examples: - params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - response = chat.send_message(question.content, **msg_params) - generations = [ - ChatGeneration( - message=AIMessage(content=candidate.text), - generation_info=get_generation_info( - candidate, - self._is_gemini_model, - usage_metadata=response.raw_prediction_response.metadata, - ), - ) - for candidate in response.candidates - ] + for candidate in response.candidates + ] return ChatResult(generations=generations) async def _agenerate( @@ -434,60 +431,59 @@ async def _agenerate( kwargs.pop("stream") logger.warning("ChatVertexAI does not currently support async streaming.") - with telemetry.tool_context_manager(self._user_agent): - params = self._prepare_params(stop=stop, **kwargs) - safety_settings = kwargs.pop("safety_settings", None) - msg_params = {} - if "candidate_count" in params: - msg_params["candidate_count"] = params.pop("candidate_count") - - if self._is_gemini_model: - history_gemini = _parse_chat_history_gemini( - messages, - project=self.project, - convert_system_message_to_human=self.convert_system_message_to_human, + params = self._prepare_params(stop=stop, **kwargs) + safety_settings = kwargs.pop("safety_settings", None) + msg_params = {} + if "candidate_count" in params: + msg_params["candidate_count"] = params.pop("candidate_count") + + if self._is_gemini_model: + history_gemini = _parse_chat_history_gemini( + messages, + project=self.project, + convert_system_message_to_human=self.convert_system_message_to_human, + ) + message = history_gemini.pop() + chat = self.client.start_chat(history=history_gemini) + # set param to `functions` until core tool/function calling implemented + raw_tools = params.pop("functions") if "functions" in params else None + tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None + response = await chat.send_message_async( + 
message, + generation_config=params, + tools=tools, + safety_settings=safety_settings, + ) + generations = [ + ChatGeneration( + message=_parse_response_candidate(c), + generation_info=get_generation_info( + c, + self._is_gemini_model, + usage_metadata=response.to_dict().get("usage_metadata"), + ), ) - message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) - # set param to `functions` until core tool/function calling implemented - raw_tools = params.pop("functions") if "functions" in params else None - tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None - response = await chat.send_message_async( - message, - generation_config=params, - tools=tools, - safety_settings=safety_settings, + for c in response.candidates + ] + else: + question = _get_question(messages) + history = _parse_chat_history(messages[:-1]) + examples = kwargs.get("examples", None) or self.examples + if examples: + params["examples"] = _parse_examples(examples) + chat = self._start_chat(history, **params) + response = await chat.send_message_async(question.content, **msg_params) + generations = [ + ChatGeneration( + message=AIMessage(content=r.text), + generation_info=get_generation_info( + r, + self._is_gemini_model, + usage_metadata=response.raw_prediction_response.metadata, + ), ) - generations = [ - ChatGeneration( - message=_parse_response_candidate(c), - generation_info=get_generation_info( - c, - self._is_gemini_model, - usage_metadata=response.to_dict().get("usage_metadata"), - ), - ) - for c in response.candidates - ] - else: - question = _get_question(messages) - history = _parse_chat_history(messages[:-1]) - examples = kwargs.get("examples", None) or self.examples - if examples: - params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - response = await chat.send_message_async(question.content, **msg_params) - generations = [ - ChatGeneration( - message=AIMessage(content=r.text), - generation_info=get_generation_info( - r, - self._is_gemini_model, - usage_metadata=response.raw_prediction_response.metadata, - ), - ) - for r in response.candidates - ] + for r in response.candidates + ] return ChatResult(generations=generations) def _stream( @@ -497,73 +493,8 @@ def _stream( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: - with telemetry.tool_context_manager(self._user_agent): - params = self._prepare_params(stop=stop, stream=True, **kwargs) - if self._is_gemini_model: - history_gemini = _parse_chat_history_gemini( - messages, - project=self.project, - convert_system_message_to_human=self.convert_system_message_to_human, - ) - message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) - # set param to `functions` until core tool/function calling implemented - raw_tools = params.pop("functions") if "functions" in params else None - tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None - safety_settings = params.pop("safety_settings", None) - responses = chat.send_message( - message, - stream=True, - generation_config=params, - safety_settings=safety_settings, - tools=tools, - ) - for response in responses: - message = _parse_response_candidate(response.candidates[0]) - if run_manager: - run_manager.on_llm_new_token(message.content) - yield ChatGenerationChunk( - message=AIMessageChunk( - content=message.content, - additional_kwargs=message.additional_kwargs, - ), - generation_info=get_generation_info( - 
response.candidates[0], - self._is_gemini_model, - usage_metadata=response.to_dict().get("usage_metadata"), - ), - ) - else: - question = _get_question(messages) - history = _parse_chat_history(messages[:-1]) - examples = kwargs.get("examples", None) - if examples: - params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - responses = chat.send_message_streaming(question.content, **params) - for response in responses: - if run_manager: - run_manager.on_llm_new_token(response.text) - yield ChatGenerationChunk( - message=AIMessageChunk(content=response.text), - generation_info=get_generation_info( - response, - self._is_gemini_model, - usage_metadata=response.raw_prediction_response.metadata, - ), - ) - - async def _astream( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> AsyncIterator[ChatGenerationChunk]: - if not self._is_gemini_model: - raise NotImplementedError() - with telemetry.tool_context_manager(self._user_agent): - params = self._prepare_params(stop=stop, stream=True, **kwargs) + params = self._prepare_params(stop=stop, stream=True, **kwargs) + if self._is_gemini_model: history_gemini = _parse_chat_history_gemini( messages, project=self.project, @@ -571,30 +502,93 @@ async def _astream( ) message = history_gemini.pop() chat = self.client.start_chat(history=history_gemini) + # set param to `functions` until core tool/function calling implemented raw_tools = params.pop("functions") if "functions" in params else None tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None safety_settings = params.pop("safety_settings", None) - async for chunk in await chat.send_message_async( + responses = chat.send_message( message, stream=True, generation_config=params, safety_settings=safety_settings, tools=tools, - ): - message = _parse_response_candidate(chunk.candidates[0]) + ) + for response in responses: + message = _parse_response_candidate(response.candidates[0]) if run_manager: - await run_manager.on_llm_new_token(message.content) + run_manager.on_llm_new_token(message.content) yield ChatGenerationChunk( message=AIMessageChunk( content=message.content, additional_kwargs=message.additional_kwargs, ), generation_info=get_generation_info( - chunk.candidates[0], + response.candidates[0], self._is_gemini_model, - usage_metadata=chunk.to_dict().get("usage_metadata"), + usage_metadata=response.to_dict().get("usage_metadata"), ), ) + else: + question = _get_question(messages) + history = _parse_chat_history(messages[:-1]) + examples = kwargs.get("examples", None) + if examples: + params["examples"] = _parse_examples(examples) + chat = self._start_chat(history, **params) + responses = chat.send_message_streaming(question.content, **params) + for response in responses: + if run_manager: + run_manager.on_llm_new_token(response.text) + yield ChatGenerationChunk( + message=AIMessageChunk(content=response.text), + generation_info=get_generation_info( + response, + self._is_gemini_model, + usage_metadata=response.raw_prediction_response.metadata, + ), + ) + + async def _astream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[ChatGenerationChunk]: + if not self._is_gemini_model: + raise NotImplementedError() + params = self._prepare_params(stop=stop, stream=True, **kwargs) + history_gemini = 
_parse_chat_history_gemini( + messages, + project=self.project, + convert_system_message_to_human=self.convert_system_message_to_human, + ) + message = history_gemini.pop() + chat = self.client.start_chat(history=history_gemini) + raw_tools = params.pop("functions") if "functions" in params else None + tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None + safety_settings = params.pop("safety_settings", None) + async for chunk in await chat.send_message_async( + message, + stream=True, + generation_config=params, + safety_settings=safety_settings, + tools=tools, + ): + message = _parse_response_candidate(chunk.candidates[0]) + if run_manager: + await run_manager.on_llm_new_token(message.content) + yield ChatGenerationChunk( + message=AIMessageChunk( + content=message.content, + additional_kwargs=message.additional_kwargs, + ), + generation_info=get_generation_info( + chunk.candidates[0], + self._is_gemini_model, + usage_metadata=chunk.to_dict().get("usage_metadata"), + ), + ) def with_structured_output( self, From d4dde5861cf6d4734390cfd55c1ba5d4e07cff4c Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Thu, 14 Mar 2024 17:23:06 -0500 Subject: [PATCH 16/17] Re-added telemetry to chat_models --- .../langchain_google_vertexai/chat_models.py | 172 ++++++++++-------- 1 file changed, 93 insertions(+), 79 deletions(-) diff --git a/libs/vertexai/langchain_google_vertexai/chat_models.py b/libs/vertexai/langchain_google_vertexai/chat_models.py index e7cbe1fb..1f92a3de 100644 --- a/libs/vertexai/langchain_google_vertexai/chat_models.py +++ b/libs/vertexai/langchain_google_vertexai/chat_models.py @@ -11,6 +11,8 @@ import proto # type: ignore[import-untyped] from google.cloud.aiplatform_v1beta1.types.content import Part as GapicPart from google.cloud.aiplatform_v1beta1.types.tool import FunctionCall +from google.cloud.aiplatform import telemetry + from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -346,10 +348,11 @@ def _generate( should_stream = stream if stream is not None else self.streaming safety_settings = kwargs.pop("safety_settings", None) if should_stream: - stream_iter = self._stream( - messages, stop=stop, run_manager=run_manager, **kwargs - ) - return generate_from_stream(stream_iter) + with telemetry.tool_context_manager(self._user_agent): + stream_iter = self._stream( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + return generate_from_stream(stream_iter) params = self._prepare_params(stop=stop, stream=False, **kwargs) msg_params = {} @@ -363,17 +366,19 @@ def _generate( convert_system_message_to_human=self.convert_system_message_to_human, ) message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) + with telemetry.tool_context_manager(self._user_agent): + chat = self.client.start_chat(history=history_gemini) # set param to `functions` until core tool/function calling implemented raw_tools = params.pop("functions") if "functions" in params else None tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None - response = chat.send_message( - message, - generation_config=params, - tools=tools, - safety_settings=safety_settings, - ) + with telemetry.tool_context_manager(self._user_agent): + response = chat.send_message( + message, + generation_config=params, + tools=tools, + safety_settings=safety_settings, + ) generations = [ ChatGeneration( message=_parse_response_candidate(candidate), @@ -391,8 +396,9 @@ def _generate( examples = kwargs.get("examples") or 
self.examples if examples: params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - response = chat.send_message(question.content, **msg_params) + with telemetry.tool_context_manager(self._user_agent): + chat = self._start_chat(history, **params) + response = chat.send_message(question.content, **msg_params) generations = [ ChatGeneration( message=AIMessage(content=candidate.text), @@ -444,16 +450,18 @@ async def _agenerate( convert_system_message_to_human=self.convert_system_message_to_human, ) message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) + with telemetry.tool_context_manager(self._user_agent): + chat = self.client.start_chat(history=history_gemini) # set param to `functions` until core tool/function calling implemented raw_tools = params.pop("functions") if "functions" in params else None tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None - response = await chat.send_message_async( - message, - generation_config=params, - tools=tools, - safety_settings=safety_settings, - ) + with telemetry.tool_context_manager(self._user_agent): + response = await chat.send_message_async( + message, + generation_config=params, + tools=tools, + safety_settings=safety_settings, + ) generations = [ ChatGeneration( message=_parse_response_candidate(c), @@ -471,8 +479,9 @@ async def _agenerate( examples = kwargs.get("examples", None) or self.examples if examples: params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - response = await chat.send_message_async(question.content, **msg_params) + with telemetry.tool_context_manager(self._user_agent): + chat = self._start_chat(history, **params) + response = await chat.send_message_async(question.content, **msg_params) generations = [ ChatGeneration( message=AIMessage(content=r.text), @@ -501,52 +510,55 @@ def _stream( convert_system_message_to_human=self.convert_system_message_to_human, ) message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) + with telemetry.tool_context_manager(self._user_agent): + chat = self.client.start_chat(history=history_gemini) # set param to `functions` until core tool/function calling implemented raw_tools = params.pop("functions") if "functions" in params else None tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None safety_settings = params.pop("safety_settings", None) - responses = chat.send_message( - message, - stream=True, - generation_config=params, - safety_settings=safety_settings, - tools=tools, - ) - for response in responses: - message = _parse_response_candidate(response.candidates[0]) - if run_manager: - run_manager.on_llm_new_token(message.content) - yield ChatGenerationChunk( - message=AIMessageChunk( - content=message.content, - additional_kwargs=message.additional_kwargs, - ), - generation_info=get_generation_info( - response.candidates[0], - self._is_gemini_model, - usage_metadata=response.to_dict().get("usage_metadata"), - ), + with telemetry.tool_context_manager(self._user_agent): + responses = chat.send_message( + message, + stream=True, + generation_config=params, + safety_settings=safety_settings, + tools=tools, ) + for response in responses: + message = _parse_response_candidate(response.candidates[0]) + if run_manager: + run_manager.on_llm_new_token(message.content) + yield ChatGenerationChunk( + message=AIMessageChunk( + content=message.content, + additional_kwargs=message.additional_kwargs, + ), + 
generation_info=get_generation_info( + response.candidates[0], + self._is_gemini_model, + usage_metadata=response.to_dict().get("usage_metadata"), + ), + ) else: question = _get_question(messages) history = _parse_chat_history(messages[:-1]) examples = kwargs.get("examples", None) if examples: params["examples"] = _parse_examples(examples) - chat = self._start_chat(history, **params) - responses = chat.send_message_streaming(question.content, **params) - for response in responses: - if run_manager: - run_manager.on_llm_new_token(response.text) - yield ChatGenerationChunk( - message=AIMessageChunk(content=response.text), - generation_info=get_generation_info( - response, - self._is_gemini_model, - usage_metadata=response.raw_prediction_response.metadata, - ), - ) + with telemetry.tool_context_manager(self._user_agent): + chat = self._start_chat(history, **params) + responses = chat.send_message_streaming(question.content, **params) + for response in responses: + if run_manager: + run_manager.on_llm_new_token(response.text) + yield ChatGenerationChunk( + message=AIMessageChunk(content=response.text), + generation_info=get_generation_info( + response, + self._is_gemini_model, + usage_metadata=response.raw_prediction_response.metadata, + ), + ) async def _astream( self, @@ -564,31 +576,33 @@ async def _astream( convert_system_message_to_human=self.convert_system_message_to_human, ) message = history_gemini.pop() - chat = self.client.start_chat(history=history_gemini) + with telemetry.tool_context_manager(self._user_agent): + chat = self.client.start_chat(history=history_gemini) raw_tools = params.pop("functions") if "functions" in params else None tools = _format_tools_to_vertex_tool(raw_tools) if raw_tools else None safety_settings = params.pop("safety_settings", None) - async for chunk in await chat.send_message_async( - message, - stream=True, - generation_config=params, - safety_settings=safety_settings, - tools=tools, - ): - message = _parse_response_candidate(chunk.candidates[0]) - if run_manager: - await run_manager.on_llm_new_token(message.content) - yield ChatGenerationChunk( - message=AIMessageChunk( - content=message.content, - additional_kwargs=message.additional_kwargs, - ), - generation_info=get_generation_info( - chunk.candidates[0], - self._is_gemini_model, - usage_metadata=chunk.to_dict().get("usage_metadata"), - ), - ) + with telemetry.tool_context_manager(self._user_agent): + async for chunk in await chat.send_message_async( + message, + stream=True, + generation_config=params, + safety_settings=safety_settings, + tools=tools, + ): + message = _parse_response_candidate(chunk.candidates[0]) + if run_manager: + await run_manager.on_llm_new_token(message.content) + yield ChatGenerationChunk( + message=AIMessageChunk( + content=message.content, + additional_kwargs=message.additional_kwargs, + ), + generation_info=get_generation_info( + chunk.candidates[0], + self._is_gemini_model, + usage_metadata=chunk.to_dict().get("usage_metadata"), + ), + ) def with_structured_output( self, From 37edebd082b7d2272d6cb974d195ad021ca29bec Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Thu, 14 Mar 2024 17:26:16 -0500 Subject: [PATCH 17/17] Re-add noqa comment --- libs/vertexai/langchain_google_vertexai/chat_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/vertexai/langchain_google_vertexai/chat_models.py b/libs/vertexai/langchain_google_vertexai/chat_models.py index 1f92a3de..c97e08e7 100644 --- a/libs/vertexai/langchain_google_vertexai/chat_models.py +++ 
b/libs/vertexai/langchain_google_vertexai/chat_models.py @@ -1,6 +1,6 @@ """Wrapper around Google VertexAI chat-based models.""" -from __future__ import annotations +from __future__ import annotations # noqa import json import logging
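
Note: a minimal, self-contained sketch of the telemetry pattern this series converges on — wrapping both client construction and each request in google.cloud.aiplatform.telemetry.tool_context_manager so the custom user agent is attached to every API call. This is a sketch under stated assumptions, not code from the series: it assumes a google-cloud-aiplatform release that provides telemetry.tool_context_manager, and the project ID, model name, and user-agent label below are illustrative placeholders.

    # Sketch only: "my-project", "gemini-pro", and the user-agent label are
    # hypothetical stand-ins, not values taken from this patch series.
    import vertexai
    from google.cloud.aiplatform import telemetry
    from vertexai.preview.generative_models import GenerativeModel

    vertexai.init(project="my-project", location="us-central1")

    # Custom label used to attribute API usage metrics.
    user_agent = "langchain/0.1.0-vertex-ai-llm"

    # tool_context_manager appends the label to the user-agent header of
    # API calls issued inside the block, so both the chat session creation
    # and the send_message request carry the custom agent.
    with telemetry.tool_context_manager(user_agent):
        chat = GenerativeModel("gemini-pro").start_chat()
        response = chat.send_message("Say hello.")

    print(response.text)

As the later patches in the series show, the context manager is re-entered around each individual SDK touchpoint (client construction, send_message, streaming iteration) rather than held open across unrelated code, which keeps the telemetry context scoped to the actual API calls.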