diff --git a/libs/core/langchain_core/_api/deprecation.py b/libs/core/langchain_core/_api/deprecation.py
index 58a97a416af84..ab6e47d31590a 100644
--- a/libs/core/langchain_core/_api/deprecation.py
+++ b/libs/core/langchain_core/_api/deprecation.py
@@ -51,15 +51,18 @@ def _validate_deprecation_params(
 ) -> None:
     """Validate the deprecation parameters."""
     if pending and removal:
-        raise ValueError("A pending deprecation cannot have a scheduled removal")
+        msg = "A pending deprecation cannot have a scheduled removal"
+        raise ValueError(msg)
     if alternative and alternative_import:
-        raise ValueError("Cannot specify both alternative and alternative_import")
+        msg = "Cannot specify both alternative and alternative_import"
+        raise ValueError(msg)
     if alternative_import and "." not in alternative_import:
-        raise ValueError(
+        msg = (
             "alternative_import must be a fully qualified module path. Got "
             f" {alternative_import}"
         )
+        raise ValueError(msg)


 def deprecated(
@@ -222,7 +225,8 @@ def warn_if_direct_instance(
             if not _obj_type:
                 _obj_type = "attribute"
             if not _name:
-                raise ValueError(f"Field {obj} must have a name to be deprecated.")
+                msg = f"Field {obj} must have a name to be deprecated."
+                raise ValueError(msg)
             old_doc = obj.description

             def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
@@ -241,7 +245,8 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
             if not _obj_type:
                 _obj_type = "attribute"
             if not _name:
-                raise ValueError(f"Field {obj} must have a name to be deprecated.")
+                msg = f"Field {obj} must have a name to be deprecated."
+                raise ValueError(msg)
             old_doc = obj.description

             def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
@@ -428,10 +433,11 @@ def warn_deprecated(
     if not pending:
         if not removal:
             removal = f"in {removal}" if removal else "within ?? minor releases"
-            raise NotImplementedError(
+            msg = (
                 f"Need to determine which default deprecation schedule to use. "
                 f"{removal}"
             )
+            raise NotImplementedError(msg)
     else:
         removal = f"in {removal}"
@@ -523,9 +529,8 @@ def decorator(f: Callable[_P, _R]) -> Callable[_P, _R]:
         @functools.wraps(f)
         def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R:
             if new in kwargs and old in kwargs:
-                raise TypeError(
-                    f"{f.__name__}() got multiple values for argument {new!r}"
-                )
+                msg = f"{f.__name__}() got multiple values for argument {new!r}"
+                raise TypeError(msg)
             if old in kwargs:
                 warn_deprecated(
                     since,
diff --git a/libs/core/langchain_core/beta/runnables/context.py b/libs/core/langchain_core/beta/runnables/context.py
index 739798eb1720c..a53e8fdf57969 100644
--- a/libs/core/langchain_core/beta/runnables/context.py
+++ b/libs/core/langchain_core/beta/runnables/context.py
@@ -59,7 +59,8 @@ def _key_from_id(id_: str) -> str:
     elif wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_SET):
         return wout_prefix[: -len(CONTEXT_CONFIG_SUFFIX_SET)]
     else:
-        raise ValueError(f"Invalid context config id {id_}")
+        msg = f"Invalid context config id {id_}"
+        raise ValueError(msg)


 def _config_with_context(
@@ -103,16 +104,15 @@ def _config_with_context(
         for dep in deps_by_key[key]:
             if key in deps_by_key[dep]:
-                raise ValueError(
-                    f"Deadlock detected between context keys {key} and {dep}"
-                )
+                msg = f"Deadlock detected between context keys {key} and {dep}"
+                raise ValueError(msg)
         if len(setters) != 1:
-            raise ValueError(f"Expected exactly one setter for context key {key}")
+            msg = f"Expected exactly one setter for context key {key}"
+            raise ValueError(msg)
         setter_idx = setters[0][1]
         if any(getter_idx < setter_idx for _, getter_idx in getters):
-            raise ValueError(
-                f"Context setter for key {key} must be defined after all getters."
-            )
+            msg = f"Context setter for key {key} must be defined after all getters."
+            raise ValueError(msg)

         if getters:
             context_funcs[getters[0][0].id] = partial(getter, events[key], values)
@@ -271,9 +271,8 @@ def config_specs(self) -> list[ConfigurableFieldSpec]:
             if spec.id.endswith(CONTEXT_CONFIG_SUFFIX_GET):
                 getter_key = spec.id.split("/")[1]
                 if getter_key in self.keys:
-                    raise ValueError(
-                        f"Circular reference in context setter for key {getter_key}"
-                    )
+                    msg = f"Circular reference in context setter for key {getter_key}"
+                    raise ValueError(msg)
         return super().config_specs + [
             ConfigurableFieldSpec(
                 id=id_,
diff --git a/libs/core/langchain_core/caches.py b/libs/core/langchain_core/caches.py
index e93ce79694003..d534d70d25ffc 100644
--- a/libs/core/langchain_core/caches.py
+++ b/libs/core/langchain_core/caches.py
@@ -160,7 +160,8 @@ def __init__(self, *, maxsize: Optional[int] = None) -> None:
         """
         self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
         if maxsize is not None and maxsize <= 0:
-            raise ValueError("maxsize must be greater than 0")
+            msg = "maxsize must be greater than 0"
+            raise ValueError(msg)
         self._maxsize = maxsize

     def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
diff --git a/libs/core/langchain_core/callbacks/base.py b/libs/core/langchain_core/callbacks/base.py
index c6e9090f78963..b3a61aa9a2a20 100644
--- a/libs/core/langchain_core/callbacks/base.py
+++ b/libs/core/langchain_core/callbacks/base.py
@@ -275,9 +275,8 @@ def on_chat_model_start(
         """
         # NotImplementedError is thrown intentionally
         # Callback handler will fall back to on_llm_start if this is exception is thrown
-        raise NotImplementedError(
-            f"{self.__class__.__name__} does not implement `on_chat_model_start`"
-        )
+        msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
+        raise NotImplementedError(msg)

     def on_retriever_start(
         self,
@@ -523,9 +522,8 @@ async def on_chat_model_start(
         """
         # NotImplementedError is thrown intentionally
         # Callback handler will fall back to on_llm_start if this is exception is thrown
-        raise NotImplementedError(
-            f"{self.__class__.__name__} does not implement `on_chat_model_start`"
-        )
+        msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
+        raise NotImplementedError(msg)

     async def on_llm_new_token(
         self,
diff --git a/libs/core/langchain_core/callbacks/manager.py b/libs/core/langchain_core/callbacks/manager.py
index 604e17c10af5d..58340be1ed9a0 100644
--- a/libs/core/langchain_core/callbacks/manager.py
+++ b/libs/core/langchain_core/callbacks/manager.py
@@ -1510,11 +1510,12 @@ def on_custom_event(
         .. versionadded:: 0.2.14
         """
         if kwargs:
-            raise ValueError(
+            msg = (
                 "The dispatcher API does not accept additional keyword arguments."
                 "Please do not pass any additional keyword arguments, instead "
                 "include them in the data field."
             )
+            raise ValueError(msg)
         if run_id is None:
             run_id = uuid.uuid4()
@@ -1989,11 +1990,12 @@ async def on_custom_event(
             run_id = uuid.uuid4()

         if kwargs:
-            raise ValueError(
+            msg = (
                 "The dispatcher API does not accept additional keyword arguments."
                 "Please do not pass any additional keyword arguments, instead "
                 "include them in the data field."
             )
+            raise ValueError(msg)
         await ahandle_event(
             self.handlers,
             "on_custom_event",
@@ -2336,11 +2338,12 @@ def _configure(

         if v1_tracing_enabled_ and not tracing_v2_enabled_:
             # if both are enabled, can silently ignore the v1 tracer
-            raise RuntimeError(
+            msg = (
                 "Tracing using LangChainTracerV1 is no longer supported. "
                 "Please set the LANGCHAIN_TRACING_V2 environment variable to enable "
                 "tracing instead."
             )
+            raise RuntimeError(msg)
     tracer_project = _get_tracer_project()
     debug = _get_debug()
@@ -2519,13 +2522,14 @@ async def foo(inputs):
     # within a tool or a lambda and have the metadata events associated
     # with the parent run rather than have a new run id generated for each.
     if callback_manager.parent_run_id is None:
-        raise RuntimeError(
+        msg = (
             "Unable to dispatch an adhoc event without a parent run id."
             "This function can only be called from within an existing run (e.g.,"
             "inside a tool or a RunnableLambda or a RunnableGenerator.)"
             "If you are doing that and still seeing this error, try explicitly"
             "passing the config parameter to this function."
         )
+        raise RuntimeError(msg)

     await callback_manager.on_custom_event(
         name,
@@ -2588,13 +2592,14 @@ def foo(inputs):
     # within a tool or a lambda and have the metadata events associated
     # with the parent run rather than have a new run id generated for each.
     if callback_manager.parent_run_id is None:
-        raise RuntimeError(
+        msg = (
             "Unable to dispatch an adhoc event without a parent run id."
             "This function can only be called from within an existing run (e.g.,"
             "inside a tool or a RunnableLambda or a RunnableGenerator.)"
             "If you are doing that and still seeing this error, try explicitly"
             "passing the config parameter to this function."
         )
+        raise RuntimeError(msg)
     callback_manager.on_custom_event(
         name,
         data,
diff --git a/libs/core/langchain_core/chat_history.py b/libs/core/langchain_core/chat_history.py
index 9b3eb09b03a05..4a579b4a985d2 100644
--- a/libs/core/langchain_core/chat_history.py
+++ b/libs/core/langchain_core/chat_history.py
@@ -157,10 +157,11 @@ def add_message(self, message: BaseMessage) -> None:
             # method, so we should use it.
             self.add_messages([message])
         else:
-            raise NotImplementedError(
+            msg = (
                 "add_message is not implemented for this class. "
                 "Please implement add_message or add_messages."
             )
+            raise NotImplementedError(msg)

     def add_messages(self, messages: Sequence[BaseMessage]) -> None:
         """Add a list of messages.
diff --git a/libs/core/langchain_core/document_loaders/base.py b/libs/core/langchain_core/document_loaders/base.py
index 540ef888c3afe..2f458731916ce 100644
--- a/libs/core/langchain_core/document_loaders/base.py
+++ b/libs/core/langchain_core/document_loaders/base.py
@@ -53,11 +53,12 @@ def load_and_split(
             try:
                 from langchain_text_splitters import RecursiveCharacterTextSplitter
             except ImportError as e:
-                raise ImportError(
+                msg = (
                     "Unable to import from langchain_text_splitters. Please specify "
                     "text_splitter or install langchain_text_splitters with "
                     "`pip install -U langchain-text-splitters`."
-                ) from e
+                )
+                raise ImportError(msg) from e

             _text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
         else:
@@ -71,9 +72,8 @@ def lazy_load(self) -> Iterator[Document]:
         """A lazy loader for Documents."""
         if type(self).load != BaseLoader.load:
             return iter(self.load())
-        raise NotImplementedError(
-            f"{self.__class__.__name__} does not implement lazy_load()"
-        )
+        msg = f"{self.__class__.__name__} does not implement lazy_load()"
+        raise NotImplementedError(msg)

     async def alazy_load(self) -> AsyncIterator[Document]:
         """A lazy loader for Documents."""
diff --git a/libs/core/langchain_core/documents/base.py b/libs/core/langchain_core/documents/base.py
index e83e012a54589..2adfe1a718397 100644
--- a/libs/core/langchain_core/documents/base.py
+++ b/libs/core/langchain_core/documents/base.py
@@ -142,7 +142,8 @@ def source(self) -> Optional[str]:
     def check_blob_is_valid(cls, values: dict[str, Any]) -> Any:
         """Verify that either data or path is provided."""
         if "data" not in values and "path" not in values:
-            raise ValueError("Either data or path must be provided")
+            msg = "Either data or path must be provided"
+            raise ValueError(msg)
         return values

     def as_string(self) -> str:
@@ -155,7 +156,8 @@ def as_string(self) -> str:
         elif isinstance(self.data, str):
             return self.data
         else:
-            raise ValueError(f"Unable to get string for blob {self}")
+            msg = f"Unable to get string for blob {self}"
+            raise ValueError(msg)

     def as_bytes(self) -> bytes:
         """Read data as bytes."""
@@ -167,7 +169,8 @@ def as_bytes(self) -> bytes:
             with open(str(self.path), "rb") as f:
                 return f.read()
         else:
-            raise ValueError(f"Unable to get bytes for blob {self}")
+            msg = f"Unable to get bytes for blob {self}"
+            raise ValueError(msg)

     @contextlib.contextmanager
     def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
@@ -178,7 +181,8 @@ def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
             with open(str(self.path), "rb") as f:
                 yield f
         else:
-            raise NotImplementedError(f"Unable to convert blob {self}")
+            msg = f"Unable to convert blob {self}"
+            raise NotImplementedError(msg)

     @classmethod
     def from_path(
diff --git a/libs/core/langchain_core/exceptions.py b/libs/core/langchain_core/exceptions.py
index c7ab7419f2a6b..f10197b04662a 100644
--- a/libs/core/langchain_core/exceptions.py
+++ b/libs/core/langchain_core/exceptions.py
@@ -41,10 +41,11 @@ def __init__(
     ):
         super().__init__(error)
         if send_to_llm and (observation is None or llm_output is None):
-            raise ValueError(
+            msg = (
                 "Arguments 'observation' & 'llm_output'"
                 " are required if 'send_to_llm' is True"
             )
+            raise ValueError(msg)
         self.observation = observation
         self.llm_output = llm_output
         self.send_to_llm = send_to_llm
diff --git a/libs/core/langchain_core/indexing/api.py b/libs/core/langchain_core/indexing/api.py
index 26566b1be8041..63226ba366a6f 100644
--- a/libs/core/langchain_core/indexing/api.py
+++ b/libs/core/langchain_core/indexing/api.py
@@ -73,20 +73,22 @@ def calculate_hashes(cls, values: dict[str, Any]) -> Any:

         for key in forbidden_keys:
             if key in metadata:
-                raise ValueError(
+                msg = (
                     f"Metadata cannot contain key {key} as it "
                     f"is reserved for internal use."
                 )
+                raise ValueError(msg)

         content_hash = str(_hash_string_to_uuid(content))

         try:
             metadata_hash = str(_hash_nested_dict_to_uuid(metadata))
         except Exception as e:
-            raise ValueError(
+            msg = (
                 f"Failed to hash metadata: {e}. "
                 f"Please use a dict that can be serialized using json."
-            ) from e
+            )
+            raise ValueError(msg) from e

         values["content_hash"] = content_hash
         values["metadata_hash"] = metadata_hash
@@ -154,10 +156,11 @@ def _get_source_id_assigner(
     elif callable(source_id_key):
         return source_id_key
     else:
-        raise ValueError(
+        msg = (
             f"source_id_key should be either None, a string or a callable. "
             f"Got {source_id_key} of type {type(source_id_key)}."
         )
+        raise ValueError(msg)


 def _deduplicate_in_order(
@@ -269,13 +272,15 @@ def index(
         ValueError: If source_id_key is not None, but is not a string or callable.
     """
     if cleanup not in {"incremental", "full", None}:
-        raise ValueError(
+        msg = (
            f"cleanup should be one of 'incremental', 'full' or None. "
            f"Got {cleanup}."
        )
+        raise ValueError(msg)

     if cleanup == "incremental" and source_id_key is None:
-        raise ValueError("Source id key is required when cleanup mode is incremental.")
+        msg = "Source id key is required when cleanup mode is incremental."
+        raise ValueError(msg)

     destination = vector_store  # Renaming internally for clarity
@@ -286,21 +291,24 @@ def index(

         for method in methods:
             if not hasattr(destination, method):
-                raise ValueError(
+                msg = (
                     f"Vectorstore {destination} does not have required method {method}"
                 )
+                raise ValueError(msg)

         if type(destination).delete == VectorStore.delete:
             # Checking if the vectorstore has overridden the default delete method
             # implementation which just raises a NotImplementedError
-            raise ValueError("Vectorstore has not implemented the delete method")
+            msg = "Vectorstore has not implemented the delete method"
+            raise ValueError(msg)
     elif isinstance(destination, DocumentIndex):
         pass
     else:
-        raise TypeError(
+        msg = (
             f"Vectorstore should be either a VectorStore or a DocumentIndex. "
             f"Got {type(destination)}."
         )
+        raise TypeError(msg)

     if isinstance(docs_source, BaseLoader):
         try:
@@ -334,12 +342,13 @@ def index(
         # If the cleanup mode is incremental, source ids are required.
         for source_id, hashed_doc in zip(source_ids, hashed_docs):
             if source_id is None:
-                raise ValueError(
+                msg = (
                     "Source ids are required when cleanup mode is incremental. "
                     f"Document that starts with "
                     f"content: {hashed_doc.page_content[:100]} was not assigned "
                     f"as source id."
                 )
+                raise ValueError(msg)
         # source ids cannot be None after for loop above.
         source_ids = cast(Sequence[str], source_ids)  # type: ignore[assignment]
@@ -400,7 +409,8 @@ def index(
             # mypy isn't good enough to determine that source ids cannot be None
             # here due to a check that's happening above, so we check again.
             if any(source_id is None for source_id in source_ids):
-                raise AssertionError("Source ids cannot be if cleanup=='incremental'.")
+                msg = "Source ids cannot be None if cleanup=='incremental'."
+                raise AssertionError(msg)

             indexed_source_ids = cast(
                 Sequence[str], [source_id_assigner(doc) for doc in docs_to_index]
@@ -514,13 +524,15 @@ async def aindex(
     """
     if cleanup not in {"incremental", "full", None}:
-        raise ValueError(
+        msg = (
             f"cleanup should be one of 'incremental', 'full' or None. "
             f"Got {cleanup}."
         )
+        raise ValueError(msg)

     if cleanup == "incremental" and source_id_key is None:
-        raise ValueError("Source id key is required when cleanup mode is incremental.")
+        msg = "Source id key is required when cleanup mode is incremental."
+        raise ValueError(msg)

     destination = vector_store  # Renaming internally for clarity
@@ -532,21 +544,24 @@ async def aindex(

         for method in methods:
             if not hasattr(destination, method):
-                raise ValueError(
+                msg = (
                     f"Vectorstore {destination} does not have required method {method}"
                 )
+                raise ValueError(msg)

         if type(destination).adelete == VectorStore.adelete:
             # Checking if the vectorstore has overridden the default delete method
             # implementation which just raises a NotImplementedError
-            raise ValueError("Vectorstore has not implemented the delete method")
+            msg = "Vectorstore has not implemented the delete method"
+            raise ValueError(msg)
     elif isinstance(destination, DocumentIndex):
         pass
     else:
-        raise TypeError(
+        msg = (
             f"Vectorstore should be either a VectorStore or a DocumentIndex. "
             f"Got {type(destination)}."
         )
+        raise TypeError(msg)

     async_doc_iterator: AsyncIterator[Document]
     if isinstance(docs_source, BaseLoader):
         try:
@@ -588,12 +603,13 @@ async def aindex(
         # If the cleanup mode is incremental, source ids are required.
         for source_id, hashed_doc in zip(source_ids, hashed_docs):
             if source_id is None:
-                raise ValueError(
+                msg = (
                     "Source ids are required when cleanup mode is incremental. "
                     f"Document that starts with "
                     f"content: {hashed_doc.page_content[:100]} was not assigned "
                     f"as source id."
                 )
+                raise ValueError(msg)
         # source ids cannot be None after for loop above.
         source_ids = cast(Sequence[str], source_ids)
@@ -654,7 +670,8 @@ async def aindex(
             # mypy isn't good enough to determine that source ids cannot be None
             # here due to a check that's happening above, so we check again.
             if any(source_id is None for source_id in source_ids):
-                raise AssertionError("Source ids cannot be if cleanup=='incremental'.")
+                msg = "Source ids cannot be None if cleanup=='incremental'."
+                raise AssertionError(msg)

             indexed_source_ids = cast(
                 Sequence[str], [source_id_assigner(doc) for doc in docs_to_index]
diff --git a/libs/core/langchain_core/indexing/base.py b/libs/core/langchain_core/indexing/base.py
index 824263415bd58..d9418de7408d5 100644
--- a/libs/core/langchain_core/indexing/base.py
+++ b/libs/core/langchain_core/indexing/base.py
@@ -290,11 +290,13 @@ def update(
         """
         if group_ids and len(keys) != len(group_ids):
-            raise ValueError("Length of keys must match length of group_ids")
+            msg = "Length of keys must match length of group_ids"
+            raise ValueError(msg)
         for index, key in enumerate(keys):
             group_id = group_ids[index] if group_ids else None
             if time_at_least and time_at_least > self.get_time():
-                raise ValueError("time_at_least must be in the past")
+                msg = "time_at_least must be in the past"
+                raise ValueError(msg)
             self.records[key] = {"group_id": group_id, "updated_at": self.get_time()}

     async def aupdate(
diff --git a/libs/core/langchain_core/indexing/in_memory.py b/libs/core/langchain_core/indexing/in_memory.py
index 4983ecfe1bf66..7fd7adec6646e 100644
--- a/libs/core/langchain_core/indexing/in_memory.py
+++ b/libs/core/langchain_core/indexing/in_memory.py
@@ -47,7 +47,8 @@ def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
     def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
         """Delete by ID."""
         if ids is None:
-            raise ValueError("IDs must be provided for deletion")
+            msg = "IDs must be provided for deletion"
+            raise ValueError(msg)

         ok_ids = []
diff --git a/libs/core/langchain_core/language_models/base.py b/libs/core/langchain_core/language_models/base.py
index 93ebe523d8968..d1ac7891d85b0 100644
--- a/libs/core/langchain_core/language_models/base.py
+++ b/libs/core/langchain_core/language_models/base.py
@@ -60,11 +60,12 @@ def get_tokenizer() -> Any:
     try:
         from transformers import GPT2TokenizerFast  # type: ignore[import]
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "Could not import transformers python package. "
             "This is needed in order to calculate get_token_ids. "
             "Please install it with `pip install transformers`."
-        ) from e
+        )
+        raise ImportError(msg) from e
     # create a GPT-2 tokenizer instance
     return GPT2TokenizerFast.from_pretrained("gpt2")
diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index 05af1f9ac60ed..ad238eda45940 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -89,7 +89,8 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
     if generation:
         generation += list(stream)
     if generation is None:
-        raise ValueError("No generations found in stream.")
+        msg = "No generations found in stream."
+        raise ValueError(msg)
     return ChatResult(
         generations=[
             ChatGeneration(
@@ -265,10 +266,11 @@ def _convert_input(self, input: LanguageModelInput) -> PromptValue:
         elif isinstance(input, Sequence):
             return ChatPromptValue(messages=convert_to_messages(input))
         else:
-            raise ValueError(
+            msg = (
                 f"Invalid input type {type(input)}. "
                 "Must be a PromptValue, str, or list of BaseMessages."
             )
+            raise ValueError(msg)

     def invoke(
         self,
@@ -817,9 +819,8 @@ def _generate_with_cache(
         elif self.cache is None:
             pass
         else:
-            raise ValueError(
-                "Asked to cache, but no cache found at `langchain.cache`."
-            )
+            msg = "Asked to cache, but no cache found at `langchain.cache`."
+            raise ValueError(msg)

         # Apply the rate limiter after checking the cache, since
         # we usually don't want to rate limit cache lookups, but
@@ -891,9 +892,8 @@ async def _agenerate_with_cache(
         elif self.cache is None:
             pass
         else:
-            raise ValueError(
-                "Asked to cache, but no cache found at `langchain.cache`."
-            )
+            msg = "Asked to cache, but no cache found at `langchain.cache`."
+            raise ValueError(msg)

         # Apply the rate limiter after checking the cache, since
         # we usually don't want to rate limit cache lookups, but
@@ -1020,7 +1020,8 @@ def __call__(
         if isinstance(generation, ChatGeneration):
             return generation.message
         else:
-            raise ValueError("Unexpected generation type")
+            msg = "Unexpected generation type"
+            raise ValueError(msg)

     async def _call_async(
         self,
@@ -1036,7 +1037,8 @@ async def _call_async(
         if isinstance(generation, ChatGeneration):
             return generation.message
         else:
-            raise ValueError("Unexpected generation type")
+            msg = "Unexpected generation type"
+            raise ValueError(msg)

     @deprecated("0.1.7", alternative="invoke", removal="1.0")
     def call_as_llm(
@@ -1053,7 +1055,8 @@ def predict(
         if isinstance(result.content, str):
             return result.content
         else:
-            raise ValueError("Cannot use predict when output is not a string.")
+            msg = "Cannot use predict when output is not a string."
+            raise ValueError(msg)

     @deprecated("0.1.7", alternative="invoke", removal="1.0")
     def predict_messages(
@@ -1077,7 +1080,8 @@ async def apredict(
         if isinstance(result.content, str):
             return result.content
         else:
-            raise ValueError("Cannot use predict when output is not a string.")
+            msg = "Cannot use predict when output is not a string."
+            raise ValueError(msg)

     @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
     async def apredict_messages(
@@ -1220,7 +1224,8 @@ class AnswerWithJustification(BaseModel):
         #     }
         """  # noqa: E501
         if kwargs:
-            raise ValueError(f"Received unsupported arguments {kwargs}")
+            msg = f"Received unsupported arguments {kwargs}"
+            raise ValueError(msg)
         from langchain_core.output_parsers.openai_tools import (
             JsonOutputKeyToolsParser,
             PydanticToolsParser,
         )

         if self.bind_tools is BaseChatModel.bind_tools:
-            raise NotImplementedError(
-                "with_structured_output is not implemented for this model."
-            )
+            msg = "with_structured_output is not implemented for this model."
+            raise NotImplementedError(msg)
         llm = self.bind_tools([schema], tool_choice="any")
         if isinstance(schema, type) and is_basemodel_subclass(schema):
             output_parser: OutputParserLike = PydanticToolsParser(
diff --git a/libs/core/langchain_core/language_models/fake_chat_models.py b/libs/core/langchain_core/language_models/fake_chat_models.py
index 8dc4e55695c86..95d605b7cb4e5 100644
--- a/libs/core/langchain_core/language_models/fake_chat_models.py
+++ b/libs/core/langchain_core/language_models/fake_chat_models.py
@@ -238,18 +238,20 @@ def _stream(
             messages, stop=stop, run_manager=run_manager, **kwargs
         )
         if not isinstance(chat_result, ChatResult):
-            raise ValueError(
+            msg = (
                 f"Expected generate to return a ChatResult, "
                 f"but got {type(chat_result)} instead."
             )
+            raise ValueError(msg)

         message = chat_result.generations[0].message

         if not isinstance(message, AIMessage):
-            raise ValueError(
+            msg = (
                 f"Expected invoke to return an AIMessage, "
                 f"but got {type(message)} instead."
             )
+            raise ValueError(msg)

         content = message.content
diff --git a/libs/core/langchain_core/language_models/llms.py b/libs/core/langchain_core/language_models/llms.py
index 44f7afd60ff81..c247078a3a832 100644
--- a/libs/core/langchain_core/language_models/llms.py
+++ b/libs/core/langchain_core/language_models/llms.py
@@ -135,15 +135,17 @@ def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
     elif cache is True:
         llm_cache = get_llm_cache()
         if llm_cache is None:
-            raise ValueError(
+            msg = (
                 "No global cache was configured. Use `set_llm_cache`."
                 "to set a global cache if you want to use a global cache."
                 "Otherwise either pass a cache object or set cache to False/None"
             )
+            raise ValueError(msg)
     elif cache is False:
         llm_cache = None
     else:
-        raise ValueError(f"Unsupported cache value {cache}")
+        msg = f"Unsupported cache value {cache}"
+        raise ValueError(msg)
     return llm_cache
@@ -332,10 +334,11 @@ def _convert_input(self, input: LanguageModelInput) -> PromptValue:
         elif isinstance(input, Sequence):
             return ChatPromptValue(messages=convert_to_messages(input))
         else:
-            raise ValueError(
+            msg = (
                 f"Invalid input type {type(input)}. "
                 "Must be a PromptValue, str, or list of BaseMessages."
             )
+            raise ValueError(msg)

     def _get_ls_params(
         self,
@@ -842,10 +845,11 @@ def generate(
             prompt and additional model provider-specific output.
         """
         if not isinstance(prompts, list):
-            raise ValueError(
+            msg = (
                 "Argument 'prompts' is expected to be of type List[str], received"
                 f" argument of type {type(prompts)}."
             )
+            raise ValueError(msg)
         # Create callback managers
         if isinstance(metadata, list):
             metadata = [
@@ -989,10 +993,11 @@ def _get_run_ids_list(
         return [None] * len(prompts)
     if isinstance(run_id, list):
         if len(run_id) != len(prompts):
-            raise ValueError(
+            msg = (
                 "Number of manually provided run_id's does not match batch length."
                 f" {len(run_id)} != {len(prompts)}"
             )
+            raise ValueError(msg)
         return run_id
     return [run_id] + [None] * (len(prompts) - 1)
@@ -1262,11 +1267,12 @@ def __call__(
             ValueError: If the prompt is not a string.
         """
         if not isinstance(prompt, str):
-            raise ValueError(
+            msg = (
                 "Argument `prompt` is expected to be a string. Instead found "
                 f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
                 "`generate` instead."
             )
+            raise ValueError(msg)
         return (
             self.generate(
                 [prompt],
@@ -1387,7 +1393,8 @@ def save(self, file_path: Union[Path, str]) -> None:
             with open(file_path, "w") as f:
                 yaml.dump(prompt_dict, f, default_flow_style=False)
         else:
-            raise ValueError(f"{save_path} must be json or yaml")
+            msg = f"{save_path} must be json or yaml"
+            raise ValueError(msg)


 class LLM(BaseLLM):
diff --git a/libs/core/langchain_core/load/dump.py b/libs/core/langchain_core/load/dump.py
index dbc723d41e099..00fae99d5287f 100644
--- a/libs/core/langchain_core/load/dump.py
+++ b/libs/core/langchain_core/load/dump.py
@@ -37,7 +37,8 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
         ValueError: If `default` is passed as a kwarg.
     """
     if "default" in kwargs:
-        raise ValueError("`default` should not be passed to dumps")
+        msg = "`default` should not be passed to dumps"
+        raise ValueError(msg)
     try:
         if pretty:
             indent = kwargs.pop("indent", 2)
diff --git a/libs/core/langchain_core/load/load.py b/libs/core/langchain_core/load/load.py
index 875ddf92f90f6..80f13f2539457 100644
--- a/libs/core/langchain_core/load/load.py
+++ b/libs/core/langchain_core/load/load.py
@@ -96,17 +96,19 @@ def __call__(self, value: dict[str, Any]) -> Any:
             else:
                 if self.secrets_from_env and key in os.environ and os.environ[key]:
                     return os.environ[key]
-                raise KeyError(f'Missing key "{key}" in load(secrets_map)')
+                msg = f'Missing key "{key}" in load(secrets_map)'
+                raise KeyError(msg)

         if (
             value.get("lc") == 1
             and value.get("type") == "not_implemented"
             and value.get("id") is not None
         ):
-            raise NotImplementedError(
+            msg = (
                 "Trying to load an object that doesn't implement "
                 f"serialization: {value}"
             )
+            raise NotImplementedError(msg)

         if (
             value.get("lc") == 1
@@ -121,7 +123,8 @@ def __call__(self, value: dict[str, Any]) -> Any:
                 # The root namespace ["langchain"] is not a valid identifier.
                 or namespace == ["langchain"]
             ):
-                raise ValueError(f"Invalid namespace: {value}")
+                msg = f"Invalid namespace: {value}"
+                raise ValueError(msg)
             # Has explicit import path.
             elif mapping_key in self.import_mappings:
                 import_path = self.import_mappings[mapping_key]
@@ -130,11 +133,12 @@ def __call__(self, value: dict[str, Any]) -> Any:
                 # Import module
                 mod = importlib.import_module(".".join(import_dir))
             elif namespace[0] in DISALLOW_LOAD_FROM_PATH:
-                raise ValueError(
+                msg = (
                     "Trying to deserialize something that cannot "
                     "be deserialized in current version of langchain-core: "
                     f"{mapping_key}."
                 )
+                raise ValueError(msg)
             # Otherwise, treat namespace as path.
             else:
                 mod = importlib.import_module(".".join(namespace))
@@ -143,7 +147,8 @@ def __call__(self, value: dict[str, Any]) -> Any:

             # The class must be a subclass of Serializable.
             if not issubclass(cls, Serializable):
-                raise ValueError(f"Invalid namespace: {value}")
+                msg = f"Invalid namespace: {value}"
+                raise ValueError(msg)

             # We don't need to recurse on kwargs
             # as json.loads will do that for us.
diff --git a/libs/core/langchain_core/load/serializable.py b/libs/core/langchain_core/load/serializable.py
index 02d410adb8544..b5e7d8b9150af 100644
--- a/libs/core/langchain_core/load/serializable.py
+++ b/libs/core/langchain_core/load/serializable.py
@@ -215,11 +215,12 @@ def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]:

             for attr in deprecated_attributes:
                 if hasattr(cls, attr):
-                    raise ValueError(
+                    msg = (
                         f"Class {self.__class__} has a deprecated "
                         f"attribute {attr}. Please use the corresponding "
                         f"classmethod instead."
                     )
+                    raise ValueError(msg)

         # Get a reference to self bound to each class in the MRO
         this = cast(Serializable, self if cls is None else super(cls, self))
diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py
index dece1e575eeb6..63036baf04833 100644
--- a/libs/core/langchain_core/messages/ai.py
+++ b/libs/core/langchain_core/messages/ai.py
@@ -373,7 +373,8 @@ def init_tool_calls(self) -> Self:
                         )
                     )
                 else:
-                    raise ValueError("Malformed args.")
+                    msg = "Malformed args."
+                    raise ValueError(msg)
             except Exception:
                 invalid_tool_calls.append(
                     create_invalid_tool_call(
@@ -402,9 +403,8 @@ def add_ai_message_chunks(
 ) -> AIMessageChunk:
     """Add multiple AIMessageChunks together."""
     if any(left.example != o.example for o in others):
-        raise ValueError(
-            "Cannot concatenate AIMessageChunks with different example values."
-        )
+        msg = "Cannot concatenate AIMessageChunks with different example values."
+        raise ValueError(msg)

     content = merge_content(left.content, *(o.content for o in others))
     additional_kwargs = merge_dicts(
diff --git a/libs/core/langchain_core/messages/base.py b/libs/core/langchain_core/messages/base.py
index bf020f3fbff9f..9eab1ed431af2 100644
--- a/libs/core/langchain_core/messages/base.py
+++ b/libs/core/langchain_core/messages/base.py
@@ -223,11 +223,12 @@ def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
                 response_metadata=response_metadata,
             )
         else:
-            raise TypeError(
+            msg = (
                 'unsupported operand type(s) for +: "'
                 f"{self.__class__.__name__}"
                 f'" and "{other.__class__.__name__}"'
             )
+            raise TypeError(msg)


 def message_to_dict(message: BaseMessage) -> dict:
diff --git a/libs/core/langchain_core/messages/chat.py b/libs/core/langchain_core/messages/chat.py
index 8bfbcc51536a6..73aafd8834ee6 100644
--- a/libs/core/langchain_core/messages/chat.py
+++ b/libs/core/langchain_core/messages/chat.py
@@ -48,9 +48,8 @@ def get_lc_namespace(cls) -> list[str]:
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
         if isinstance(other, ChatMessageChunk):
             if self.role != other.role:
-                raise ValueError(
-                    "Cannot concatenate ChatMessageChunks with different roles."
-                )
+                msg = "Cannot concatenate ChatMessageChunks with different roles."
+                raise ValueError(msg)

             return self.__class__(
                 role=self.role,
diff --git a/libs/core/langchain_core/messages/function.py b/libs/core/langchain_core/messages/function.py
index f06fd4f3b653f..080f580778819 100644
--- a/libs/core/langchain_core/messages/function.py
+++ b/libs/core/langchain_core/messages/function.py
@@ -54,9 +54,8 @@ def get_lc_namespace(cls) -> list[str]:
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
         if isinstance(other, FunctionMessageChunk):
             if self.name != other.name:
-                raise ValueError(
-                    "Cannot concatenate FunctionMessageChunks with different names."
-                )
+                msg = "Cannot concatenate FunctionMessageChunks with different names."
+                raise ValueError(msg)

             return self.__class__(
                 name=self.name,
diff --git a/libs/core/langchain_core/messages/modifier.py b/libs/core/langchain_core/messages/modifier.py
index 8a5fb6860d868..a7face34fe95a 100644
--- a/libs/core/langchain_core/messages/modifier.py
+++ b/libs/core/langchain_core/messages/modifier.py
@@ -20,7 +20,8 @@ def __init__(self, id: str, **kwargs: Any) -> None:
             ValueError: If the 'content' field is passed in kwargs.
         """
         if kwargs.pop("content", None):
-            raise ValueError("RemoveMessage does not support 'content' field.")
+            msg = "RemoveMessage does not support 'content' field."
+            raise ValueError(msg)
         return super().__init__("", id=id, **kwargs)
diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py
index 31d3f8b705635..653dd838f860e 100644
--- a/libs/core/langchain_core/messages/tool.py
+++ b/libs/core/langchain_core/messages/tool.py
@@ -94,11 +94,12 @@ def coerce_args(cls, values: dict) -> dict:
             try:
                 values["content"] = str(content)
             except ValueError as e:
-                raise ValueError(
+                msg = (
                     "ToolMessage content should be a string or a list of string/dicts. "
                     f"Received:\n\n{content=}\n\n which could not be coerced into a "
                     "string."
-                ) from e
+                )
+                raise ValueError(msg) from e
         elif isinstance(content, list):
             values["content"] = []
             for i, x in enumerate(content):
@@ -106,12 +107,13 @@ def coerce_args(cls, values: dict) -> dict:
                     try:
                         values["content"].append(str(x))
                     except ValueError as e:
-                        raise ValueError(
+                        msg = (
                             "ToolMessage content should be a string or a list of "
                             "string/dicts. Received a list but "
                             f"element ToolMessage.content[{i}] is not a dict and could "
                             f"not be coerced to a string.:\n\n{x}"
-                        ) from e
+                        )
+                        raise ValueError(msg) from e
                 else:
                     values["content"].append(x)
         else:
@@ -147,9 +149,8 @@ def get_lc_namespace(cls) -> list[str]:
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
         if isinstance(other, ToolMessageChunk):
             if self.tool_call_id != other.tool_call_id:
-                raise ValueError(
-                    "Cannot concatenate ToolMessageChunks with different names."
-                )
+                msg = "Cannot concatenate ToolMessageChunks with different names."
+                raise ValueError(msg)

             return self.__class__(
                 tool_call_id=self.tool_call_id,
diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py
index d6092530831ad..8f88502c1df30 100644
--- a/libs/core/langchain_core/messages/utils.py
+++ b/libs/core/langchain_core/messages/utils.py
@@ -51,10 +51,11 @@ def _get_type(v: Any) -> str:
     elif hasattr(v, "type"):
         return v.type
     else:
-        raise TypeError(
+        msg = (
             f"Expected either a dictionary with a 'type' key or an object "
             f"with a 'type' attribute. Instead got type {type(v)}."
         )
+        raise TypeError(msg)


 AnyMessage = Annotated[
@@ -120,7 +121,8 @@ def get_buffer_string(
         elif isinstance(m, ChatMessage):
             role = m.role
         else:
-            raise ValueError(f"Got unsupported message type: {m}")
+            msg = f"Got unsupported message type: {m}"
+            raise ValueError(msg)
         message = f"{role}: {m.content}"
         if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
             message += f"{m.additional_kwargs['function_call']}"
@@ -158,7 +160,8 @@ def _message_from_dict(message: dict) -> BaseMessage:
     elif _type == "ChatMessageChunk":
         return ChatMessageChunk(**message["data"])
     else:
-        raise ValueError(f"Got unexpected message type: {_type}")
+        msg = f"Got unexpected message type: {_type}"
+        raise ValueError(msg)


 def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:
@@ -266,10 +269,11 @@ def _create_message_from_message_type(
     elif message_type == "remove":
         message = RemoveMessage(**kwargs)
     else:
-        raise ValueError(
+        msg = (
             f"Unexpected message type: '{message_type}'. Use one of 'human',"
             f" 'user', 'ai', 'assistant', 'function', 'tool', or 'system'."
         )
+        raise ValueError(msg)
     return message
@@ -312,14 +316,14 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
             # None msg content is not allowed
             msg_content = msg_kwargs.pop("content") or ""
         except KeyError as e:
-            raise ValueError(
-                f"Message dict must contain 'role' and 'content' keys, got {message}"
-            ) from e
+            msg = f"Message dict must contain 'role' and 'content' keys, got {message}"
+            raise ValueError(msg) from e
         _message = _create_message_from_message_type(
             msg_type, msg_content, **msg_kwargs
         )
     else:
-        raise NotImplementedError(f"Unsupported message type: {type(message)}")
+        msg = f"Unsupported message type: {type(message)}"
+        raise NotImplementedError(msg)

     return _message
@@ -820,11 +824,12 @@ def list_token_counter(messages: Sequence[BaseMessage]) -> int:
         else:
             list_token_counter = token_counter  # type: ignore[assignment]
     else:
-        raise ValueError(
+        msg = (
             f"'token_counter' expected to be a model that implements "
             f"'get_num_tokens_from_messages()' or a function. Received object of type "
             f"{type(token_counter)}."
         )
+        raise ValueError(msg)

     try:
         from langchain_text_splitters import TextSplitter
@@ -859,9 +864,8 @@ def list_token_counter(messages: Sequence[BaseMessage]) -> int:
             text_splitter=text_splitter_fn,
         )
     else:
-        raise ValueError(
-            f"Unrecognized {strategy=}. Supported strategies are 'last' and 'first'."
-        )
+        msg = f"Unrecognized {strategy=}. Supported strategies are 'last' and 'first'."
+        raise ValueError(msg)


 def _first_max_tokens(
@@ -995,10 +999,11 @@ def _msg_to_chunk(message: BaseMessage) -> BaseMessageChunk:
         if isinstance(message, msg_cls):
             return chunk_cls(**message.model_dump(exclude={"type"}))

-    raise ValueError(
+    msg = (
         f"Unrecognized message class {message.__class__}. Supported classes are "
         f"{list(_MSG_CHUNK_MAP.keys())}"
     )
+    raise ValueError(msg)


 def _chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage:
@@ -1010,10 +1015,11 @@ def _chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage:
         if isinstance(chunk, chunk_cls):
             return msg_cls(**chunk.model_dump(exclude={"type", "tool_call_chunks"}))

-    raise ValueError(
+    msg = (
         f"Unrecognized message chunk class {chunk.__class__}. Supported classes are "
         f"{list(_CHUNK_MSG_MAP.keys())}"
     )
+    raise ValueError(msg)


 def _default_text_splitter(text: str) -> list[str]:
diff --git a/libs/core/langchain_core/output_parsers/base.py b/libs/core/langchain_core/output_parsers/base.py
index 9c132b4a0bf48..9d080cef300bc 100644
--- a/libs/core/langchain_core/output_parsers/base.py
+++ b/libs/core/langchain_core/output_parsers/base.py
@@ -177,10 +177,11 @@ def OutputType(self) -> type[T]:
             if "args" in metadata and len(metadata["args"]) > 0:
                 return metadata["args"][0]

-        raise TypeError(
+        msg = (
             f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. "
             "Override the OutputType property to specify the output type."
         )
+        raise TypeError(msg)

     def invoke(
         self,
@@ -310,10 +311,11 @@ def get_format_instructions(self) -> str:
     @property
     def _type(self) -> str:
         """Return the output parser type for serialization."""
-        raise NotImplementedError(
+        msg = (
             f"_type property is not implemented in class {self.__class__.__name__}."
             " This is required for serialization."
         )
+        raise NotImplementedError(msg)

     def dict(self, **kwargs: Any) -> dict:
         """Return dictionary representation of output parser."""
diff --git a/libs/core/langchain_core/output_parsers/openai_functions.py b/libs/core/langchain_core/output_parsers/openai_functions.py
index 460333953ae8f..d61533855fd7c 100644
--- a/libs/core/langchain_core/output_parsers/openai_functions.py
+++ b/libs/core/langchain_core/output_parsers/openai_functions.py
@@ -36,16 +36,14 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
-            raise OutputParserException(
-                "This output parser can only be used with a chat generation."
-            )
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
         message = generation.message
         try:
             func_call = copy.deepcopy(message.additional_kwargs["function_call"])
         except KeyError as exc:
-            raise OutputParserException(
-                f"Could not parse function call: {exc}"
-            ) from exc
+            msg = f"Could not parse function call: {exc}"
+            raise OutputParserException(msg) from exc

         if self.args_only:
             return func_call["arguments"]
@@ -88,14 +86,12 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """
         if len(result) != 1:
-            raise OutputParserException(
-                f"Expected exactly one result, but got {len(result)}"
-            )
+            msg = f"Expected exactly one result, but got {len(result)}"
+            raise OutputParserException(msg)
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
-            raise OutputParserException(
-                "This output parser can only be used with a chat generation."
-            )
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
         message = generation.message
         try:
             function_call = message.additional_kwargs["function_call"]
@@ -103,9 +99,8 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
             if partial:
                 return None
             else:
-                raise OutputParserException(
-                    f"Could not parse function call: {exc}"
-                ) from exc
+                msg = f"Could not parse function call: {exc}"
+                raise OutputParserException(msg) from exc
         try:
             if partial:
                 try:
@@ -129,9 +124,8 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
                         function_call["arguments"], strict=self.strict
                     )
                 except (json.JSONDecodeError, TypeError) as exc:
-                    raise OutputParserException(
-                        f"Could not parse function call data: {exc}"
-                    ) from exc
+                    msg = f"Could not parse function call data: {exc}"
+                    raise OutputParserException(msg) from exc
             else:
                 try:
                     return {
@@ -141,9 +135,8 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
                         ),
                     }
                 except (json.JSONDecodeError, TypeError) as exc:
-                    raise OutputParserException(
-                        f"Could not parse function call data: {exc}"
-                    ) from exc
+                    msg = f"Could not parse function call data: {exc}"
+                    raise OutputParserException(msg) from exc
         except KeyError:
             return None
@@ -253,10 +246,11 @@ def validate_schema(cls, values: dict) -> Any:
             and issubclass(schema, BaseModel)
         )
         elif values["args_only"] and isinstance(schema, dict):
-            raise ValueError(
+            msg = (
                 "If multiple pydantic schemas are provided then args_only should be"
                 " False."
             )
+            raise ValueError(msg)
         return values

     def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
diff --git a/libs/core/langchain_core/output_parsers/openai_tools.py b/libs/core/langchain_core/output_parsers/openai_tools.py
index a51f0d776572d..9e768e5da44e4 100644
--- a/libs/core/langchain_core/output_parsers/openai_tools.py
+++ b/libs/core/langchain_core/output_parsers/openai_tools.py
@@ -52,11 +52,12 @@ def parse_tool_call(
             raw_tool_call["function"]["arguments"], strict=strict
         )
     except JSONDecodeError as e:
-        raise OutputParserException(
+        msg = (
             f"Function {raw_tool_call['function']['name']} arguments:\n\n"
             f"{raw_tool_call['function']['arguments']}\n\nare not valid JSON. "
             f"Received JSONDecodeError {e}"
-        ) from e
+        )
+        raise OutputParserException(msg) from e
     parsed = {
         "name": raw_tool_call["function"]["name"] or "",
         "args": function_args or {},
@@ -170,9 +171,8 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:

         generation = result[0]
         if not isinstance(generation, ChatGeneration):
-            raise OutputParserException(
-                "This output parser can only be used with a chat generation."
-            )
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
         message = generation.message
         if isinstance(message, AIMessage) and message.tool_calls:
             tool_calls = [dict(tc) for tc in message.tool_calls]
@@ -285,10 +285,11 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         for res in json_results:
             try:
                 if not isinstance(res["args"], dict):
-                    raise ValueError(
+                    msg = (
                         f"Tool arguments must be specified as a dict, received: "
                         f"{res['args']}"
                     )
+                    raise ValueError(msg)
                 pydantic_objects.append(name_dict[res["type"]](**res["args"]))
             except (ValidationError, ValueError) as e:
                 if partial:
diff --git a/libs/core/langchain_core/output_parsers/pydantic.py b/libs/core/langchain_core/output_parsers/pydantic.py
index fb6e3dcd71786..844fc9a703821 100644
--- a/libs/core/langchain_core/output_parsers/pydantic.py
+++ b/libs/core/langchain_core/output_parsers/pydantic.py
@@ -29,10 +29,9 @@ def _parse_obj(self, obj: dict) -> TBaseModel:
             elif issubclass(self.pydantic_object, pydantic.v1.BaseModel):
                 return self.pydantic_object.parse_obj(obj)
             else:
-                raise OutputParserException(
-                    f"Unsupported model version for PydanticOutputParser: \
-                    {self.pydantic_object.__class__}"
-                )
+                msg = f"Unsupported model version for PydanticOutputParser: \
+                    {self.pydantic_object.__class__}"
+                raise OutputParserException(msg)
         except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
             raise self._parser_exception(e, obj) from e
     else:  # pydantic v1
diff --git a/libs/core/langchain_core/output_parsers/xml.py b/libs/core/langchain_core/output_parsers/xml.py
index f476faf313731..3df9750f8e0fa 100644
--- a/libs/core/langchain_core/output_parsers/xml.py
+++ b/libs/core/langchain_core/output_parsers/xml.py
@@ -49,11 +49,12 @@ def __init__(self, parser: Literal["defusedxml", "xml"]) -> None:
             try:
                 import defusedxml  # type: ignore
             except ImportError as e:
-                raise ImportError(
+                msg = (
                     "defusedxml is not installed. "
                     "Please install it to use the defusedxml parser."
                     "You can install it with `pip install defusedxml` "
-                ) from e
+                )
+                raise ImportError(msg) from e
             _parser = defusedxml.ElementTree.DefusedXMLParser(target=TreeBuilder())
         else:
             _parser = None
@@ -190,12 +191,13 @@ def parse(self, text: str) -> dict[str, Union[str, list[Any]]]:
             try:
                 from defusedxml import ElementTree  # type: ignore
             except ImportError as e:
-                raise ImportError(
+                msg = (
                     "defusedxml is not installed. "
                     "Please install it to use the defusedxml parser."
                     "You can install it with `pip install defusedxml`"
                     "See https://github.com/tiran/defusedxml for more details"
-                ) from e
+                )
+                raise ImportError(msg) from e
             _et = ElementTree  # Use the defusedxml parser
         else:
             _et = ET  # Use the standard library parser
diff --git a/libs/core/langchain_core/outputs/chat_generation.py b/libs/core/langchain_core/outputs/chat_generation.py
index 599283e60e5eb..d40e1fd5362b4 100644
--- a/libs/core/langchain_core/outputs/chat_generation.py
+++ b/libs/core/langchain_core/outputs/chat_generation.py
@@ -65,7 +65,8 @@ def set_text(self) -> Self:
                 pass
             self.text = text
         except (KeyError, AttributeError) as e:
-            raise ValueError("Error while initializing ChatGeneration") from e
+            msg = "Error while initializing ChatGeneration"
+            raise ValueError(msg) from e
         return self

     @classmethod
@@ -114,6 +115,7 @@ def __add__(
             generation_info=generation_info or None,
         )
         else:
-            raise TypeError(
+            msg = (
                 f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
             )
+            raise TypeError(msg)
diff --git a/libs/core/langchain_core/outputs/generation.py b/libs/core/langchain_core/outputs/generation.py
index bfd0cd70d75a5..ce2dd36390281 100644
--- a/libs/core/langchain_core/outputs/generation.py
+++ b/libs/core/langchain_core/outputs/generation.py
@@ -64,6 +64,7 @@ def __add__(self, other: GenerationChunk) -> GenerationChunk:
             generation_info=generation_info or None,
         )
         else:
-            raise TypeError(
+            msg = (
                 f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
             )
+            raise TypeError(msg)
diff --git a/libs/core/langchain_core/prompts/base.py b/libs/core/langchain_core/prompts/base.py
index f6932d6060a86..e399974a5b0ef 100644
--- a/libs/core/langchain_core/prompts/base.py
+++ b/libs/core/langchain_core/prompts/base.py
@@ -70,21 +70,22 @@ class BasePromptTemplate(
     def validate_variable_names(self) -> Self:
         """Validate variable names do not include restricted names."""
         if "stop" in self.input_variables:
-            raise ValueError(
+            msg = (
                 "Cannot have an input variable named 'stop', as it is used internally,"
                 " please rename."
             )
+            raise ValueError(msg)
         if "stop" in self.partial_variables:
-            raise ValueError(
+            msg = (
                 "Cannot have an partial variable named 'stop', as it is used "
                 "internally, please rename."
             )
+            raise ValueError(msg)

         overall = set(self.input_variables).intersection(self.partial_variables)
         if overall:
-            raise ValueError(
-                f"Found overlapping input and partial variables: {overall}"
-            )
+            msg = f"Found overlapping input and partial variables: {overall}"
+            raise ValueError(msg)
         return self

     @classmethod
@@ -143,10 +144,11 @@ def _validate_input(self, inner_input: Any) -> dict:
                 inner_input = {var_name: inner_input}

             else:
-                raise TypeError(
+                msg = (
                     f"Expected mapping type as input to {self.__class__.__name__}. "
                     f"Received {type(inner_input)}."
                 )
+                raise TypeError(msg)
         missing = set(self.input_variables).difference(inner_input)
         if missing:
             msg = (
@@ -341,12 +343,14 @@ def save(self, file_path: Union[Path, str]) -> None:
                 prompt.save(file_path="path/prompt.yaml")
         """
         if self.partial_variables:
-            raise ValueError("Cannot save prompt with partial variables.")
+            msg = "Cannot save prompt with partial variables."
+            raise ValueError(msg)

         # Fetch dictionary to save
         prompt_dict = self.dict()
         if "_type" not in prompt_dict:
-            raise NotImplementedError(f"Prompt {self} does not support saving.")
+            msg = f"Prompt {self} does not support saving."
+            raise NotImplementedError(msg)

         # Convert file to Path object.
         save_path = Path(file_path) if isinstance(file_path, str) else file_path
@@ -361,7 +365,8 @@ def save(self, file_path: Union[Path, str]) -> None:
             with open(file_path, "w") as f:
                 yaml.dump(prompt_dict, f, default_flow_style=False)
         else:
-            raise ValueError(f"{save_path} must be json or yaml")
+            msg = f"{save_path} must be json or yaml"
+            raise ValueError(msg)


 def _get_document_info(doc: Document, prompt: BasePromptTemplate[str]) -> dict:
@@ -371,11 +376,12 @@ def _get_document_info(doc: Document, prompt: BasePromptTemplate[str]) -> dict:
         required_metadata = [
             iv for iv in prompt.input_variables if iv != "page_content"
         ]
-        raise ValueError(
+        msg = (
             f"Document prompt requires documents to have metadata variables: "
             f"{required_metadata}. Received document with missing metadata: "
             f"{list(missing_metadata)}."
         )
+        raise ValueError(msg)
     return {k: base_info[k] for k in prompt.input_variables}
diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py
index 201f916ff0704..e3771ab40a6f5 100644
--- a/libs/core/langchain_core/prompts/chat.py
+++ b/libs/core/langchain_core/prompts/chat.py
@@ -236,10 +236,11 @@ def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
             else kwargs[self.variable_name]
         )
         if not isinstance(value, list):
-            raise ValueError(
+            msg = (
                 f"variable {self.variable_name} should be a list of base messages, "
                 f"got {value} of type {type(value)}"
             )
+            raise ValueError(msg)
         value = convert_to_messages(value)
         if self.n_messages:
             value = value[-self.n_messages :]
@@ -514,9 +515,8 @@ def from_template(
             return cls(prompt=prompt, **kwargs)
         elif isinstance(template, list):
             if (partial_variables is not None) and len(partial_variables) > 0:
-                raise ValueError(
-                    "Partial variables are not supported for list of templates."
-                )
+                msg = "Partial variables are not supported for list of templates."
f" Got: {var_name_wrapped}" ) + raise ValueError(msg) if var_name_wrapped[0] != "{" or var_name_wrapped[-1] != "}": - raise ValueError( + msg = ( f"Invalid placeholder template: {var_name_wrapped}." " Expected a variable name surrounded by curly braces." ) + raise ValueError(msg) var_name = var_name_wrapped[1:-1] message = MessagesPlaceholder(variable_name=var_name, optional=is_optional) else: - raise ValueError( + msg = ( "Unexpected arguments for placeholder message type." " Expected either a single string variable name" " or a list of [variable_name: str, is_optional: bool]." f" Got: {template}" ) + raise ValueError(msg) else: - raise ValueError( + msg = ( f"Unexpected message type: {message_type}. Use one of 'human'," f" 'user', 'ai', 'assistant', or 'system'." ) + raise ValueError(msg) return message @@ -1448,7 +1461,8 @@ def _convert_to_message( ) elif isinstance(message, tuple): if len(message) != 2: - raise ValueError(f"Expected 2-tuple of (role, template), got {message}") + msg = f"Expected 2-tuple of (role, template), got {message}" + raise ValueError(msg) message_type_str, template = message if isinstance(message_type_str, str): _message = _create_template_from_message_type( @@ -1461,6 +1475,7 @@ def _convert_to_message( ) ) else: - raise NotImplementedError(f"Unsupported message type: {type(message)}") + msg = f"Unsupported message type: {type(message)}" + raise NotImplementedError(msg) return _message diff --git a/libs/core/langchain_core/prompts/few_shot.py b/libs/core/langchain_core/prompts/few_shot.py index d02c885aa8599..54c004c23d662 100644 --- a/libs/core/langchain_core/prompts/few_shot.py +++ b/libs/core/langchain_core/prompts/few_shot.py @@ -62,14 +62,12 @@ def check_examples_and_selector(cls, values: dict) -> Any: examples = values.get("examples") example_selector = values.get("example_selector") if examples and example_selector: - raise ValueError( - "Only one of 'examples' and 'example_selector' should be provided" - ) + msg = "Only one of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) if examples is None and example_selector is None: - raise ValueError( - "One of 'examples' and 'example_selector' should be provided" - ) + msg = "One of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) return values @@ -90,9 +88,8 @@ def _get_examples(self, **kwargs: Any) -> list[dict]: elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: - raise ValueError( - "One of 'examples' and 'example_selector' should be provided" - ) + msg = "One of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) async def _aget_examples(self, **kwargs: Any) -> list[dict]: """Async get the examples to use for formatting the prompt. @@ -111,9 +108,8 @@ async def _aget_examples(self, **kwargs: Any) -> list[dict]: elif self.example_selector is not None: return await self.example_selector.aselect_examples(kwargs) else: - raise ValueError( - "One of 'examples' and 'example_selector' should be provided" - ) + msg = "One of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate): @@ -243,7 +239,8 @@ def save(self, file_path: Union[Path, str]) -> None: ValueError: If example_selector is provided. 
""" if self.example_selector: - raise ValueError("Saving an example selector is not currently supported") + msg = "Saving an example selector is not currently supported" + raise ValueError(msg) return super().save(file_path) diff --git a/libs/core/langchain_core/prompts/few_shot_with_templates.py b/libs/core/langchain_core/prompts/few_shot_with_templates.py index 75e6344aa7a23..f293aae1c80f8 100644 --- a/libs/core/langchain_core/prompts/few_shot_with_templates.py +++ b/libs/core/langchain_core/prompts/few_shot_with_templates.py @@ -54,14 +54,12 @@ def check_examples_and_selector(cls, values: dict) -> Any: examples = values.get("examples") example_selector = values.get("example_selector") if examples and example_selector: - raise ValueError( - "Only one of 'examples' and 'example_selector' should be provided" - ) + msg = "Only one of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) if examples is None and example_selector is None: - raise ValueError( - "One of 'examples' and 'example_selector' should be provided" - ) + msg = "One of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) return values @@ -76,10 +74,11 @@ def template_is_valid(self) -> Self: expected_input_variables |= set(self.prefix.input_variables) missing_vars = expected_input_variables.difference(input_variables) if missing_vars: - raise ValueError( + msg = ( f"Got input_variables={input_variables}, but based on " f"prefix/suffix expected {expected_input_variables}" ) + raise ValueError(msg) else: self.input_variables = sorted( set(self.suffix.input_variables) @@ -216,5 +215,6 @@ def save(self, file_path: Union[Path, str]) -> None: ValueError: If example_selector is provided. """ if self.example_selector: - raise ValueError("Saving an example selector is not currently supported") + msg = "Saving an example selector is not currently supported" + raise ValueError(msg) return super().save(file_path) diff --git a/libs/core/langchain_core/prompts/image.py b/libs/core/langchain_core/prompts/image.py index c898dac31cf3f..5e76cea59debb 100644 --- a/libs/core/langchain_core/prompts/image.py +++ b/libs/core/langchain_core/prompts/image.py @@ -20,11 +20,12 @@ def __init__(self, **kwargs: Any) -> None: overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"} if overlap: - raise ValueError( + msg = ( "input_variables for the image template cannot contain" " any of 'url', 'path', or 'detail'." f" Found: {overlap}" ) + raise ValueError(msg) super().__init__(**kwargs) @property @@ -91,13 +92,16 @@ def format( path = kwargs.get("path") or formatted.get("path") detail = kwargs.get("detail") or formatted.get("detail") if not url and not path: - raise ValueError("Must provide either url or path.") + msg = "Must provide either url or path." + raise ValueError(msg) if not url: if not isinstance(path, str): - raise ValueError("path must be a string.") + msg = "path must be a string." + raise ValueError(msg) url = image_utils.image_to_data_url(path) if not isinstance(url, str): - raise ValueError("url must be a string.") + msg = "url must be a string." 
+            raise ValueError(msg)
         output: ImageURL = {"url": url}
         if detail:
             # Don't check literal values here: let the API check them
diff --git a/libs/core/langchain_core/prompts/loading.py b/libs/core/langchain_core/prompts/loading.py
index 39fce9fc301e4..3d65f8323f1b8 100644
--- a/libs/core/langchain_core/prompts/loading.py
+++ b/libs/core/langchain_core/prompts/loading.py
@@ -34,7 +34,8 @@ def load_prompt_from_config(config: dict) -> BasePromptTemplate:
     config_type = config.pop("_type", "prompt")

     if config_type not in type_to_loader_dict:
-        raise ValueError(f"Loading {config_type} prompt not supported")
+        msg = f"Loading {config_type} prompt not supported"
+        raise ValueError(msg)

     prompt_loader = type_to_loader_dict[config_type]
     return prompt_loader(config)
@@ -46,9 +47,8 @@ def _load_template(var_name: str, config: dict) -> dict:
     if f"{var_name}_path" in config:
         # If it does, make sure template variable doesn't also exist.
         if var_name in config:
-            raise ValueError(
-                f"Both `{var_name}_path` and `{var_name}` cannot be provided."
-            )
+            msg = f"Both `{var_name}_path` and `{var_name}` cannot be provided."
+            raise ValueError(msg)
         # Pop the template path from the config.
         template_path = Path(config.pop(f"{var_name}_path"))
         # Load the template.
@@ -73,12 +73,12 @@ def _load_examples(config: dict) -> dict:
             elif config["examples"].endswith((".yaml", ".yml")):
                 examples = yaml.safe_load(f)
             else:
-                raise ValueError(
-                    "Invalid file format. Only json or yaml formats are supported."
-                )
+                msg = "Invalid file format. Only json or yaml formats are supported."
+                raise ValueError(msg)
         config["examples"] = examples
     else:
-        raise ValueError("Invalid examples format. Only list or string are supported.")
+        msg = "Invalid examples format. Only list or string are supported."
+        raise ValueError(msg)
     return config
@@ -90,7 +90,8 @@ def _load_output_parser(config: dict) -> dict:
         if output_parser_type == "default":
             output_parser = StrOutputParser(**_config)
         else:
-            raise ValueError(f"Unsupported output parser {output_parser_type}")
+            msg = f"Unsupported output parser {output_parser_type}"
+            raise ValueError(msg)
         config["output_parser"] = output_parser
     return config
@@ -103,10 +104,11 @@ def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
     # Load the example prompt.
     if "example_prompt_path" in config:
         if "example_prompt" in config:
-            raise ValueError(
+            msg = (
                 "Only one of example_prompt and example_prompt_path should "
                 "be specified."
             )
+            raise ValueError(msg)
         config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
     else:
         config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
@@ -126,11 +128,12 @@ def _load_prompt(config: dict) -> PromptTemplate:
     if template_format == "jinja2":
         # Disabled due to:
        # https://github.com/langchain-ai/langchain/issues/4394
-        raise ValueError(
+        msg = (
             f"Loading templates with '{template_format}' format is no longer supported "
             f"since it can lead to arbitrary code execution. Please migrate to using "
             f"the 'f-string' template format, which does not suffer from this issue."
         )
+        raise ValueError(msg)

     return PromptTemplate(**config)
@@ -151,11 +154,12 @@ def load_prompt(
         RuntimeError: If the path is a Lang Chain Hub path.
     """
     if isinstance(path, str) and path.startswith("lc://"):
-        raise RuntimeError(
+        msg = (
             "Loading from the deprecated github-based Hub is no longer supported. "
             "Please use the new LangChain Hub at https://smith.langchain.com/hub "
             "instead."
         )
+        raise RuntimeError(msg)
     return _load_prompt_from_file(path, encoding)
@@ -173,7 +177,8 @@ def _load_prompt_from_file(
         with open(file_path, encoding=encoding) as f:
             config = yaml.safe_load(f)
     else:
-        raise ValueError(f"Got unsupported file type {file_path.suffix}")
+        msg = f"Got unsupported file type {file_path.suffix}"
+        raise ValueError(msg)
     # Load the prompt from the config now.
     return load_prompt_from_config(config)
@@ -186,7 +191,8 @@ def _load_chat_prompt(config: dict) -> ChatPromptTemplate:
     config.pop("input_variables")

     if not template:
-        raise ValueError("Can't load chat prompt without template")
+        msg = "Can't load chat prompt without template"
+        raise ValueError(msg)

     return ChatPromptTemplate.from_template(template=template, **config)
diff --git a/libs/core/langchain_core/prompts/prompt.py b/libs/core/langchain_core/prompts/prompt.py
index 7008be5883938..5c52ef36d076c 100644
--- a/libs/core/langchain_core/prompts/prompt.py
+++ b/libs/core/langchain_core/prompts/prompt.py
@@ -89,12 +89,12 @@ def pre_init_validation(cls, values: dict) -> Any:

         if values.get("validate_template"):
             if values["template_format"] == "mustache":
-                raise ValueError("Mustache templates cannot be validated.")
+                msg = "Mustache templates cannot be validated."
+                raise ValueError(msg)

             if "input_variables" not in values:
-                raise ValueError(
-                    "Input variables must be provided to validate the template."
-                )
+                msg = "Input variables must be provided to validate the template."
+                raise ValueError(msg)

             all_inputs = values["input_variables"] + list(values["partial_variables"])
             check_valid_template(
@@ -131,13 +131,11 @@ def __add__(self, other: Any) -> PromptTemplate:
         # Allow for easy combining
         if isinstance(other, PromptTemplate):
             if self.template_format != "f-string":
-                raise ValueError(
-                    "Adding prompt templates only supported for f-strings."
-                )
+                msg = "Adding prompt templates only supported for f-strings."
+                raise ValueError(msg)
             if other.template_format != "f-string":
-                raise ValueError(
-                    "Adding prompt templates only supported for f-strings."
-                )
+                msg = "Adding prompt templates only supported for f-strings."
+                raise ValueError(msg)
             input_variables = list(
                 set(self.input_variables) | set(other.input_variables)
             )
@@ -147,7 +145,8 @@ def __add__(self, other: Any) -> PromptTemplate:
             partial_variables = dict(self.partial_variables.items())
             for k, v in other.partial_variables.items():
                 if k in partial_variables:
-                    raise ValueError("Cannot have same variable partialed twice.")
+                    msg = "Cannot have same variable partialed twice."
+                    raise ValueError(msg)
                 else:
                     partial_variables[k] = v
             return PromptTemplate(
@@ -161,7 +160,8 @@ def __add__(self, other: Any) -> PromptTemplate:
             prompt = PromptTemplate.from_template(other)
             return self + prompt
         else:
-            raise NotImplementedError(f"Unsupported operand type for +: {type(other)}")
+            msg = f"Unsupported operand type for +: {type(other)}"
+            raise NotImplementedError(msg)

     @property
     def _prompt_type(self) -> str:
diff --git a/libs/core/langchain_core/prompts/string.py b/libs/core/langchain_core/prompts/string.py
index cb46d452aac25..01c19486b6760 100644
--- a/libs/core/langchain_core/prompts/string.py
+++ b/libs/core/langchain_core/prompts/string.py
@@ -42,13 +42,14 @@ def jinja2_formatter(template: str, /, **kwargs: Any) -> str:
     try:
         from jinja2.sandbox import SandboxedEnvironment
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "jinja2 not installed, which is needed to use the jinja2_formatter. "
             "Please install it with `pip install jinja2`."
"Please be cautious when using jinja2 templates. " "Do not expand jinja2 templates using unverified or user-controlled " "inputs as that can result in arbitrary Python code execution." - ) from e + ) + raise ImportError(msg) from e # This uses a sandboxed environment to prevent arbitrary code execution. # Jinja2 uses an opt-out rather than opt-in approach for sand-boxing. @@ -89,10 +90,11 @@ def _get_jinja2_variables_from_template(template: str) -> set[str]: try: from jinja2 import Environment, meta except ImportError as e: - raise ImportError( + msg = ( "jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." - ) from e + ) + raise ImportError(msg) from e env = Environment() ast = env.parse(template) variables = meta.find_undeclared_variables(ast) @@ -217,17 +219,19 @@ def check_valid_template( try: validator_func = DEFAULT_VALIDATOR_MAPPING[template_format] except KeyError as exc: - raise ValueError( + msg = ( f"Invalid template format {template_format!r}, should be one of" f" {list(DEFAULT_FORMATTER_MAPPING)}." - ) from exc + ) + raise ValueError(msg) from exc try: validator_func(template, input_variables) except (KeyError, IndexError) as exc: - raise ValueError( + msg = ( "Invalid prompt schema; check for mismatched or missing input parameters" f" from {input_variables}." - ) from exc + ) + raise ValueError(msg) from exc def get_template_variables(template: str, template_format: str) -> list[str]: @@ -253,7 +257,8 @@ def get_template_variables(template: str, template_format: str) -> list[str]: elif template_format == "mustache": input_variables = mustache_template_vars(template) else: - raise ValueError(f"Unsupported template format: {template_format}") + msg = f"Unsupported template format: {template_format}" + raise ValueError(msg) return sorted(input_variables) diff --git a/libs/core/langchain_core/prompts/structured.py b/libs/core/langchain_core/prompts/structured.py index 03fefcee02161..360f14ccf2fde 100644 --- a/libs/core/langchain_core/prompts/structured.py +++ b/libs/core/langchain_core/prompts/structured.py @@ -156,6 +156,5 @@ def pipe( name=name, ) else: - raise NotImplementedError( - "Structured prompts need to be piped to a language model." - ) + msg = "Structured prompts need to be piped to a language model." + raise NotImplementedError(msg) diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 5dd5d35d1d09b..9f9a77bc70515 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -292,10 +292,11 @@ def InputType(self) -> type[Input]: # noqa: N802 if type_args and len(type_args) == 2: return type_args[0] - raise TypeError( + msg = ( f"Runnable {self.get_name()} doesn't have an inferable InputType. " "Override the InputType property to specify the input type." ) + raise TypeError(msg) @property def OutputType(self) -> type[Output]: # noqa: N802 @@ -313,10 +314,11 @@ def OutputType(self) -> type[Output]: # noqa: N802 if type_args and len(type_args) == 2: return type_args[1] - raise TypeError( + msg = ( f"Runnable {self.get_name()} doesn't have an inferable OutputType. " "Override the OutputType property to specify the output type." ) + raise TypeError(msg) @property def input_schema(self) -> type[BaseModel]: @@ -1379,9 +1381,8 @@ async def slow_thing(some_input: str, config: RunnableConfig) -> str: **kwargs, ) else: - raise NotImplementedError( - 'Only versions "v1" and "v2" of the schema is currently supported.' 
-            )
+            msg = 'Only versions "v1" and "v2" of the schema is currently supported.'
+            raise NotImplementedError(msg)

         async with aclosing(event_stream):
             async for event in event_stream:
@@ -2513,10 +2514,11 @@ def configurable_fields(
         for key in kwargs:
             if key not in self.model_fields:
-                raise ValueError(
+                msg = (
                     f"Configuration key {key} not found in {self}: "
                     f"available keys are {self.model_fields.keys()}"
                 )
+                raise ValueError(msg)

         return RunnableConfigurableFields(default=self, fields=kwargs)
@@ -2772,9 +2774,8 @@ def __init__(
             else:
                 steps_flat.append(coerce_to_runnable(step))
         if len(steps_flat) < 2:
-            raise ValueError(
-                f"RunnableSequence must have at least 2 steps, got {len(steps_flat)}"
-            )
+            msg = f"RunnableSequence must have at least 2 steps, got {len(steps_flat)}"
+            raise ValueError(msg)
         super().__init__(  # type: ignore[call-arg]
             first=steps_flat[0],
             middle=list(steps_flat[1:-1]),
@@ -2923,7 +2924,8 @@ def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph:
                 step_graph.trim_last_node()
             step_first_node, _ = graph.extend(step_graph)
             if not step_first_node:
-                raise ValueError(f"Runnable {step} has no first node")
+                msg = f"Runnable {step} has no first node"
+                raise ValueError(msg)
             if current_last_node:
                 graph.add_edge(current_last_node, step_first_node)
@@ -3655,9 +3657,11 @@ def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph:
             else:
                 step_first_node, step_last_node = graph.extend(step_graph)
                 if not step_first_node:
-                    raise ValueError(f"Runnable {step} has no first node")
+                    msg = f"Runnable {step} has no first node"
+                    raise ValueError(msg)
                 if not step_last_node:
-                    raise ValueError(f"Runnable {step} has no last node")
+                    msg = f"Runnable {step} has no last node"
+                    raise ValueError(msg)
                 graph.add_edge(input_node, step_first_node)
                 graph.add_edge(step_last_node, output_node)
@@ -4049,10 +4053,11 @@ def __init__(
             self._transform = transform
             func_for_name = transform
         else:
-            raise TypeError(
+            msg = (
                 "Expected a generator function type for `transform`."
                 f"Instead got an unsupported type: {type(transform)}"
             )
+            raise TypeError(msg)

         try:
             self.name = name or func_for_name.__name__
@@ -4161,7 +4166,8 @@ def transform(
         **kwargs: Any,
     ) -> Iterator[Output]:
         if not hasattr(self, "_transform"):
-            raise NotImplementedError(f"{repr(self)} only supports async methods.")
+            msg = f"{repr(self)} only supports async methods."
+            raise NotImplementedError(msg)
         return self._transform_stream_with_config(
             input,
             self._transform,  # type: ignore[arg-type]
@@ -4192,7 +4198,8 @@ def atransform(
         **kwargs: Any,
     ) -> AsyncIterator[Output]:
         if not hasattr(self, "_atransform"):
-            raise NotImplementedError(f"{repr(self)} only supports sync methods.")
+            msg = f"{repr(self)} only supports sync methods."
+            raise NotImplementedError(msg)

         return self._atransform_stream_with_config(
             input, self._atransform, config, **kwargs
@@ -4320,21 +4327,23 @@ def __init__(
         if is_async_callable(func) or is_async_generator(func):
             if afunc is not None:
-                raise TypeError(
+                msg = (
                     "Func was provided as a coroutine function, but afunc was "
                     "also provided. If providing both, func should be a regular "
                     "function to avoid ambiguity."
                 )
+                raise TypeError(msg)
             self.afunc = func
             func_for_name = func
         elif callable(func):
             self.func = cast(Callable[[Input], Output], func)
             func_for_name = func
         else:
-            raise TypeError(
+            msg = (
                 "Expected a callable type for `func`."
f"Instead got an unsupported type: {type(func)}" ) + raise TypeError(msg) try: if name is not None: @@ -4497,9 +4506,11 @@ def get_graph(self, config: RunnableConfig | None = None) -> Graph: else: dep_first_node, dep_last_node = graph.extend(dep_graph) if not dep_first_node: - raise ValueError(f"Runnable {dep} has no first node") + msg = f"Runnable {dep} has no first node" + raise ValueError(msg) if not dep_last_node: - raise ValueError(f"Runnable {dep} has no last node") + msg = f"Runnable {dep} has no last node" + raise ValueError(msg) graph.add_edge(input_node, dep_first_node) graph.add_edge(dep_last_node, output_node) else: @@ -4560,9 +4571,10 @@ def _invoke( if isinstance(output, Runnable): recursion_limit = config["recursion_limit"] if recursion_limit <= 0: - raise RecursionError( + msg = ( f"Recursion limit reached when invoking {self} with input {input}." ) + raise RecursionError(msg) output = output.invoke( input, patch_config( @@ -4659,9 +4671,10 @@ async def f(*args, **kwargs): # type: ignore[no-untyped-def] if isinstance(output, Runnable): recursion_limit = config["recursion_limit"] if recursion_limit <= 0: - raise RecursionError( + msg = ( f"Recursion limit reached when invoking {self} with input {input}." ) + raise RecursionError(msg) output = await output.ainvoke( input, patch_config( @@ -4704,10 +4717,11 @@ def invoke( **kwargs, ) else: - raise TypeError( + msg = ( "Cannot invoke a coroutine function synchronously." "Use `ainvoke` instead." ) + raise TypeError(msg) async def ainvoke( self, @@ -4778,10 +4792,11 @@ def _transform( if isinstance(output, Runnable): recursion_limit = config["recursion_limit"] if recursion_limit <= 0: - raise RecursionError( + msg = ( f"Recursion limit reached when invoking " f"{self} with input {final}." ) + raise RecursionError(msg) for chunk in output.stream( final, patch_config( @@ -4809,10 +4824,11 @@ def transform( **kwargs, ) else: - raise TypeError( + msg = ( "Cannot stream a coroutine function synchronously." "Use `astream` instead." ) + raise TypeError(msg) def stream( self, @@ -4849,10 +4865,11 @@ async def _atransform( afunc = self.afunc else: if inspect.isgeneratorfunction(self.func): - raise TypeError( + msg = ( "Cannot stream from a generator function asynchronously." "Use .stream() instead." ) + raise TypeError(msg) def func( input: Input, @@ -4899,10 +4916,11 @@ async def f(*args, **kwargs): # type: ignore[no-untyped-def] if isinstance(output, Runnable): recursion_limit = config["recursion_limit"] if recursion_limit <= 0: - raise RecursionError( + msg = ( f"Recursion limit reached when invoking " f"{self} with input {final}." ) + raise RecursionError(msg) async for chunk in output.astream( final, patch_config( @@ -5061,9 +5079,8 @@ async def astream_events( **kwargs: Optional[Any], ) -> AsyncIterator[StreamEvent]: for _ in range(1): - raise NotImplementedError( - "RunnableEach does not support astream_events yet." - ) + msg = "RunnableEach does not support astream_events yet." + raise NotImplementedError(msg) yield @@ -5819,10 +5836,11 @@ def coerce_to_runnable(thing: RunnableLike) -> Runnable[Input, Output]: elif isinstance(thing, dict): return cast(Runnable[Input, Output], RunnableParallel(thing)) else: - raise TypeError( + msg = ( f"Expected a Runnable, callable or dict." 
f"Instead got an unsupported type: {type(thing)}" ) + raise TypeError(msg) @overload diff --git a/libs/core/langchain_core/runnables/branch.py b/libs/core/langchain_core/runnables/branch.py index b12fd416e0345..56c438861899e 100644 --- a/libs/core/langchain_core/runnables/branch.py +++ b/libs/core/langchain_core/runnables/branch.py @@ -92,7 +92,8 @@ def __init__( ValueError: If a branch is not of length 2. """ if len(branches) < 2: - raise ValueError("RunnableBranch requires at least two branches") + msg = "RunnableBranch requires at least two branches" + raise ValueError(msg) default = branches[-1] @@ -100,9 +101,8 @@ def __init__( default, (Runnable, Callable, Mapping), # type: ignore[arg-type] ): - raise TypeError( - "RunnableBranch default must be Runnable, callable or mapping." - ) + msg = "RunnableBranch default must be Runnable, callable or mapping." + raise TypeError(msg) default_ = cast( Runnable[Input, Output], coerce_to_runnable(cast(RunnableLike, default)) @@ -112,16 +112,18 @@ def __init__( for branch in branches[:-1]: if not isinstance(branch, (tuple, list)): # type: ignore[arg-type] - raise TypeError( + msg = ( f"RunnableBranch branches must be " f"tuples or lists, not {type(branch)}" ) + raise TypeError(msg) - if not len(branch) == 2: - raise ValueError( + if len(branch) != 2: + msg = ( f"RunnableBranch branches must be " f"tuples or lists of length 2, not {len(branch)}" ) + raise ValueError(msg) condition, runnable = branch condition = cast(Runnable[Input, bool], coerce_to_runnable(condition)) runnable = coerce_to_runnable(runnable) @@ -185,7 +187,8 @@ def config_specs(self) -> list[ConfigurableFieldSpec]: and s.id.endswith(CONTEXT_CONFIG_SUFFIX_SET) for s in specs ): - raise ValueError("RunnableBranch cannot contain context setters.") + msg = "RunnableBranch cannot contain context setters." 
+            raise ValueError(msg)

         return specs

     def invoke(
diff --git a/libs/core/langchain_core/runnables/config.py b/libs/core/langchain_core/runnables/config.py
index 15868dc269f7b..7c285137fafe3 100644
--- a/libs/core/langchain_core/runnables/config.py
+++ b/libs/core/langchain_core/runnables/config.py
@@ -219,12 +219,14 @@ def get_config_list(
     """
     if length < 0:
-        raise ValueError(f"length must be >= 0, but got {length}")
+        msg = f"length must be >= 0, but got {length}"
+        raise ValueError(msg)
     if isinstance(config, Sequence) and len(config) != length:
-        raise ValueError(
+        msg = (
             f"config must be a list of the same length as inputs, "
             f"but got {len(config)} configs for {length} inputs"
         )
+        raise ValueError(msg)

     if isinstance(config, Sequence):
         return list(map(ensure_config, config))
diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py
index 8dc9495be27f2..35f976340cab7 100644
--- a/libs/core/langchain_core/runnables/configurable.py
+++ b/libs/core/langchain_core/runnables/configurable.py
@@ -632,7 +632,8 @@ def _prepare(
             else:
                 return (alt(), config)
         else:
-            raise ValueError(f"Unknown alternative: {which}")
+            msg = f"Unknown alternative: {which}"
+            raise ValueError(msg)


 def _strremoveprefix(s: str, prefix: str) -> str:
diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py
index a08dc0b24f4b0..97d450015eae1 100644
--- a/libs/core/langchain_core/runnables/fallbacks.py
+++ b/libs/core/langchain_core/runnables/fallbacks.py
@@ -152,10 +152,11 @@ def invoke(
         self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
     ) -> Output:
         if self.exception_key is not None and not isinstance(input, dict):
-            raise ValueError(
+            msg = (
                 "If 'exception_key' is specified then input must be a dictionary."
                 f"However found a type of {type(input)} for input"
             )
+            raise ValueError(msg)
         # setup callbacks
         config = ensure_config(config)
         callback_manager = get_callback_manager_for_config(config)
@@ -192,7 +193,8 @@ def invoke(
                     run_manager.on_chain_end(output)
                     return output
         if first_error is None:
-            raise ValueError("No error stored at end of fallbacks.")
+            msg = "No error stored at end of fallbacks."
+            raise ValueError(msg)
         run_manager.on_chain_error(first_error)
         raise first_error

@@ -203,10 +205,11 @@ async def ainvoke(
         **kwargs: Optional[Any],
     ) -> Output:
         if self.exception_key is not None and not isinstance(input, dict):
-            raise ValueError(
+            msg = (
                 "If 'exception_key' is specified then input must be a dictionary."
                 f"However found a type of {type(input)} for input"
             )
+            raise ValueError(msg)
         # setup callbacks
         config = ensure_config(config)
         callback_manager = get_async_callback_manager_for_config(config)
@@ -243,7 +246,8 @@ async def ainvoke(
                     await run_manager.on_chain_end(output)
                     return output
         if first_error is None:
-            raise ValueError("No error stored at end of fallbacks.")
+            msg = "No error stored at end of fallbacks."
+            raise ValueError(msg)
         await run_manager.on_chain_error(first_error)
         raise first_error

@@ -260,10 +264,11 @@ def batch(
         if self.exception_key is not None and not all(
             isinstance(input, dict) for input in inputs
         ):
-            raise ValueError(
+            msg = (
                 "If 'exception_key' is specified then inputs must be dictionaries."
f"However found a type of {type(inputs[0])} for input" ) + raise ValueError(msg) if not inputs: return [] @@ -352,10 +357,11 @@ async def abatch( if self.exception_key is not None and not all( isinstance(input, dict) for input in inputs ): - raise ValueError( + msg = ( "If 'exception_key' is specified then inputs must be dictionaries." f"However found a type of {type(inputs[0])} for input" ) + raise ValueError(msg) if not inputs: return [] @@ -447,10 +453,11 @@ def stream( ) -> Iterator[Output]: """""" if self.exception_key is not None and not isinstance(input, dict): - raise ValueError( + msg = ( "If 'exception_key' is specified then input must be a dictionary." f"However found a type of {type(input)} for input" ) + raise ValueError(msg) # setup callbacks config = ensure_config(config) callback_manager = get_callback_manager_for_config(config) @@ -510,10 +517,11 @@ async def astream( **kwargs: Optional[Any], ) -> AsyncIterator[Output]: if self.exception_key is not None and not isinstance(input, dict): - raise ValueError( + msg = ( "If 'exception_key' is specified then input must be a dictionary." f"However found a type of {type(input)} for input" ) + raise ValueError(msg) # setup callbacks config = ensure_config(config) callback_manager = get_async_callback_manager_for_config(config) diff --git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py index baec005e5ac23..fd2c8adb98b23 100644 --- a/libs/core/langchain_core/runnables/graph.py +++ b/libs/core/langchain_core/runnables/graph.py @@ -330,7 +330,8 @@ def add_node( ValueError: If a node with the same id already exists. """ if id is not None and id in self.nodes: - raise ValueError(f"Node with id {id} already exists") + msg = f"Node with id {id} already exists" + raise ValueError(msg) id = id or self.next_id() node = Node(id=id, data=data, metadata=metadata, name=node_data_str(id, data)) self.nodes[node.id] = node @@ -371,9 +372,11 @@ def add_edge( ValueError: If the source or target node is not in the graph. """ if source.id not in self.nodes: - raise ValueError(f"Source node {source.id} not in graph") + msg = f"Source node {source.id} not in graph" + raise ValueError(msg) if target.id not in self.nodes: - raise ValueError(f"Target node {target.id} not in graph") + msg = f"Target node {target.id} not in graph" + raise ValueError(msg) edge = Edge( source=source.id, target=target.id, data=data, conditional=conditional ) diff --git a/libs/core/langchain_core/runnables/graph_ascii.py b/libs/core/langchain_core/runnables/graph_ascii.py index bdd1b5e2e4dfe..f2a031ba43dbb 100644 --- a/libs/core/langchain_core/runnables/graph_ascii.py +++ b/libs/core/langchain_core/runnables/graph_ascii.py @@ -161,9 +161,8 @@ def _build_sugiyama_layout( route_with_lines, ) except ImportError as exc: - raise ImportError( - "Install grandalf to draw graphs: `pip install grandalf`." - ) from exc + msg = "Install grandalf to draw graphs: `pip install grandalf`." 
+        raise ImportError(msg) from exc

     #
     # Just a reminder about naming conventions:
diff --git a/libs/core/langchain_core/runnables/graph_mermaid.py b/libs/core/langchain_core/runnables/graph_mermaid.py
index 175c53eb6a4f3..70b8e4865b5a6 100644
--- a/libs/core/langchain_core/runnables/graph_mermaid.py
+++ b/libs/core/langchain_core/runnables/graph_mermaid.py
@@ -104,11 +104,12 @@ def add_subgraph(edges: list[Edge], prefix: str) -> None:
         if prefix and not self_loop:
             subgraph = prefix.split(":")[-1]
             if subgraph in seen_subgraphs:
-                raise ValueError(
+                msg = (
                     f"Found duplicate subgraph '{subgraph}' -- this likely means that "
                     "you're reusing a subgraph node with the same name. "
                     "Please adjust your graph to have subgraph nodes with unique names."
                 )
+                raise ValueError(msg)
             seen_subgraphs.add(subgraph)

             mermaid_graph += f"\tsubgraph {subgraph}\n"
@@ -214,10 +215,11 @@ def draw_mermaid_png(
         )
     else:
         supported_methods = ", ".join([m.value for m in MermaidDrawMethod])
-        raise ValueError(
+        msg = (
             f"Invalid draw method: {draw_method}. "
             f"Supported draw methods are: {supported_methods}"
         )
+        raise ValueError(msg)

     return img_bytes
@@ -233,9 +235,8 @@ async def _render_mermaid_using_pyppeteer(
     try:
        from pyppeteer import launch  # type: ignore[import]
     except ImportError as e:
-        raise ImportError(
-            "Install Pyppeteer to use the Pyppeteer method: `pip install pyppeteer`."
-        ) from e
+        msg = "Install Pyppeteer to use the Pyppeteer method: `pip install pyppeteer`."
+        raise ImportError(msg) from e

     browser = await launch()
     page = await browser.newPage()
@@ -304,10 +305,11 @@ def _render_mermaid_using_api(
     try:
         import requests  # type: ignore[import]
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "Install the `requests` module to use the Mermaid.INK API: "
             "`pip install requests`."
-        ) from e
+        )
+        raise ImportError(msg) from e

     # Use Mermaid API to render the image
     mermaid_syntax_encoded = base64.b64encode(mermaid_syntax.encode("utf8")).decode(
@@ -332,7 +334,8 @@ def _render_mermaid_using_api(

         return img_bytes
     else:
-        raise ValueError(
+        msg = (
             f"Failed to render the graph using the Mermaid.INK API. "
             f"Status code: {response.status_code}."
         )
+        raise ValueError(msg)
diff --git a/libs/core/langchain_core/runnables/graph_png.py b/libs/core/langchain_core/runnables/graph_png.py
index 9cd48b459492c..4ce64ca6c1fc0 100644
--- a/libs/core/langchain_core/runnables/graph_png.py
+++ b/libs/core/langchain_core/runnables/graph_png.py
@@ -136,9 +136,8 @@ def draw(self, graph: Graph, output_path: Optional[str] = None) -> Optional[byte
         try:
             import pygraphviz as pgv  # type: ignore[import]
         except ImportError as exc:
-            raise ImportError(
-                "Install pygraphviz to draw graphs: `pip install pygraphviz`."
-            ) from exc
+            msg = "Install pygraphviz to draw graphs: `pip install pygraphviz`."
+            raise ImportError(msg) from exc

         # Create a directed graph
         viz = pgv.AGraph(directed=True, nodesep=0.9, ranksep=1.0)
diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py
index 71135513c5a34..3d13da5a66d89 100644
--- a/libs/core/langchain_core/runnables/history.py
+++ b/libs/core/langchain_core/runnables/history.py
@@ -472,16 +472,16 @@ def _get_input_messages(
             # This occurs for chat models - since we batch inputs
             if isinstance(input_val[0], list):
                 if len(input_val) != 1:
-                    raise ValueError(
-                        f"Expected a single list of messages. Got {input_val}."
-                    )
+                    msg = f"Expected a single list of messages. Got {input_val}."
+                    raise ValueError(msg)
                 return input_val[0]
             return list(input_val)
         else:
-            raise ValueError(
+            msg = (
                 f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
                 f"Got {input_val}."
             )
+            raise ValueError(msg)

     def _get_output_messages(
         self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
@@ -513,10 +513,11 @@ def _get_output_messages(
         elif isinstance(output_val, (list, tuple)):
             return list(output_val)
         else:
-            raise ValueError(
+            msg = (
                 f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
                 f"Got {output_val}."
             )
+            raise ValueError(msg)

     def _enter_history(self, input: Any, config: RunnableConfig) -> list[BaseMessage]:
         hist: BaseChatMessageHistory = config["configurable"]["message_history"]
@@ -593,12 +594,13 @@ def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:
                 missing_key: "[your-value-here]" for missing_key in missing_keys
             }
             example_config = {"configurable": example_configurable}
-            raise ValueError(
+            msg = (
                 f"Missing keys {sorted(missing_keys)} in config['configurable'] "
                 f"Expected keys are {sorted(expected_keys)}."
                 f"When using via .invoke() or .stream(), pass in a config; "
                 f"e.g., chain.invoke({example_input}, {example_config})"
             )
+            raise ValueError(msg)

         if len(expected_keys) == 1:
             if parameter_names:
@@ -613,10 +615,11 @@ def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:
         else:
             # otherwise verify that names of keys patch and invoke by named arguments
             if set(expected_keys) != set(parameter_names):
-                raise ValueError(
+                msg = (
                     f"Expected keys {sorted(expected_keys)} do not match parameter "
                     f"names {sorted(parameter_names)} of get_session_history."
                 )
+                raise ValueError(msg)

             message_history = self.get_session_history(
                 **{key: configurable[key] for key in expected_keys}
diff --git a/libs/core/langchain_core/runnables/router.py b/libs/core/langchain_core/runnables/router.py
index c71cb85e4f78c..8d353648cd928 100644
--- a/libs/core/langchain_core/runnables/router.py
+++ b/libs/core/langchain_core/runnables/router.py
@@ -101,7 +101,8 @@ def invoke(
         key = input["key"]
         actual_input = input["input"]
         if key not in self.runnables:
-            raise ValueError(f"No runnable associated with key '{key}'")
+            msg = f"No runnable associated with key '{key}'"
+            raise ValueError(msg)

         runnable = self.runnables[key]
         return runnable.invoke(actual_input, config)
@@ -115,7 +116,8 @@ async def ainvoke(
         key = input["key"]
         actual_input = input["input"]
         if key not in self.runnables:
-            raise ValueError(f"No runnable associated with key '{key}'")
+            msg = f"No runnable associated with key '{key}'"
+            raise ValueError(msg)

         runnable = self.runnables[key]
         return await runnable.ainvoke(actual_input, config)
@@ -134,7 +136,8 @@ def batch(
         keys = [input["key"] for input in inputs]
         actual_inputs = [input["input"] for input in inputs]
         if any(key not in self.runnables for key in keys):
-            raise ValueError("One or more keys do not have a corresponding runnable")
+            msg = "One or more keys do not have a corresponding runnable"
+            raise ValueError(msg)

         def invoke(
             runnable: Runnable, input: Input, config: RunnableConfig
@@ -169,7 +172,8 @@ async def abatch(
         keys = [input["key"] for input in inputs]
         actual_inputs = [input["input"] for input in inputs]
         if any(key not in self.runnables for key in keys):
-            raise ValueError("One or more keys do not have a corresponding runnable")
+            msg = "One or more keys do not have a corresponding runnable"
+            raise ValueError(msg)

         async def ainvoke(
             runnable: Runnable, input: Input, config: RunnableConfig
@@ -201,7 +205,8 @@ def stream(
         key = input["key"]
         actual_input = input["input"]
         if key not in self.runnables:
-            raise ValueError(f"No runnable associated with key '{key}'")
+            msg = f"No runnable associated with key '{key}'"
+            raise ValueError(msg)

         runnable = self.runnables[key]
         yield from runnable.stream(actual_input, config)
@@ -215,7 +220,8 @@ async def astream(
         key = input["key"]
         actual_input = input["input"]
         if key not in self.runnables:
-            raise ValueError(f"No runnable associated with key '{key}'")
+            msg = f"No runnable associated with key '{key}'"
+            raise ValueError(msg)

         runnable = self.runnables[key]
         async for output in runnable.astream(actual_input, config):
diff --git a/libs/core/langchain_core/runnables/utils.py b/libs/core/langchain_core/runnables/utils.py
index 189207ff2e454..06168a176d0a3 100644
--- a/libs/core/langchain_core/runnables/utils.py
+++ b/libs/core/langchain_core/runnables/utils.py
@@ -639,10 +639,11 @@ def get_unique_config_specs(
         if len(others) == 0 or all(o == first for o in others):
             unique.append(first)
         else:
-            raise ValueError(
+            msg = (
                 "RunnableSequence contains conflicting config specs"
                 f"for {id}: {[first] + others}"
             )
+            raise ValueError(msg)
     return unique
diff --git a/libs/core/langchain_core/structured_query.py b/libs/core/langchain_core/structured_query.py
index 9c46278b77c72..8aacbfbcc60a6 100644
--- a/libs/core/langchain_core/structured_query.py
+++ b/libs/core/langchain_core/structured_query.py
@@ -24,19 +24,21 @@ def _validate_func(self, func: Union[Operator, Comparator]) -> None:
             and self.allowed_operators is not None
             and func not in self.allowed_operators
         ):
-            raise ValueError(
+            msg = (
                 f"Received disallowed operator {func}. Allowed "
                 f"comparators are {self.allowed_operators}"
             )
+            raise ValueError(msg)
         if (
             isinstance(func, Comparator)
             and self.allowed_comparators is not None
             and func not in self.allowed_comparators
         ):
-            raise ValueError(
+            msg = (
                 f"Received disallowed comparator {func}. Allowed "
                 f"comparators are {self.allowed_comparators}"
             )
+            raise ValueError(msg)

     @abstractmethod
     def visit_operation(self, operation: Operation) -> Any:
diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py
index 85360f4171d2d..88c787558ffd0 100644
--- a/libs/core/langchain_core/tools/base.py
+++ b/libs/core/langchain_core/tools/base.py
@@ -127,9 +127,8 @@ def _validate_docstring_args_against_annotations(
     """Raise error if docstring arg is not in type annotations."""
     for docstring_arg in arg_descriptions:
         if docstring_arg not in annotations:
-            raise ValueError(
-                f"Arg {docstring_arg} in docstring not found in function signature."
-            )
+            msg = f"Arg {docstring_arg} in docstring not found in function signature."
+            raise ValueError(msg)


 def _infer_arg_descriptions(
@@ -183,10 +182,11 @@ def _function_annotations_are_pydantic_v1(
         for parameter in signature.parameters.values()
     )
     if any_v1_annotations and any_v2_annotations:
-        raise NotImplementedError(
+        msg = (
             f"Function {func} contains a mix of Pydantic v1 and v2 annotations. "
             "Only one version of Pydantic annotations per function is supported."
         )
+        raise NotImplementedError(msg)
     return any_v1_annotations and not any_v2_annotations
@@ -335,7 +335,7 @@ class ChildTool(BaseTool):
     args_schema: Type[BaseModel] = SchemaClass
     ..."""
             name = cls.__name__
-            raise SchemaAnnotationError(
+            msg = (
                 f"Tool definition for {name} must include valid type annotations"
                 f" for argument 'args_schema' to behave as expected.\n"
                 f"Expected annotation of 'Type[BaseModel]'"
@@ -343,6 +343,7 @@ class ChildTool(BaseTool):
                 f"Expected class looks like:\n"
                 f"{typehint_mandate}"
             )
+            raise SchemaAnnotationError(msg)

     name: str
     """The unique name of the tool that clearly communicates its purpose."""
@@ -422,10 +423,11 @@ def __init__(self, **kwargs: Any) -> None:
             and kwargs["args_schema"] is not None
             and not is_basemodel_subclass(kwargs["args_schema"])
         ):
-            raise TypeError(
+            msg = (
                 f"args_schema must be a subclass of pydantic BaseModel. "
                 f"Got: {kwargs['args_schema']}."
             )
+            raise TypeError(msg)
         super().__init__(**kwargs)

     model_config = ConfigDict(
@@ -515,10 +517,11 @@ def _parse_input(self, tool_input: Union[str, dict]) -> Union[str, dict[str, Any
                 result = input_args.parse_obj(tool_input)
                 result_dict = result.dict()
             else:
-                raise NotImplementedError(
+                msg = (
                     "args_schema must be a Pydantic BaseModel, "
                     f"got {self.args_schema}"
                 )
+                raise NotImplementedError(msg)
             return {
                 k: getattr(result, k)
                 for k, v in result_dict.items()
@@ -653,12 +656,13 @@ def run(
                 response = context.run(self._run, *tool_args, **tool_kwargs)
             if self.response_format == "content_and_artifact":
                 if not isinstance(response, tuple) or len(response) != 2:
-                    raise ValueError(
+                    msg = (
                         "Since response_format='content_and_artifact' "
                         "a two-tuple of the message content and raw tool output is "
                         f"expected. Instead generated response of type: "
                         f"{type(response)}."
                     )
+                    raise ValueError(msg)
                 content, artifact = response
             else:
                 content = response
@@ -769,12 +773,13 @@ async def arun(
                 response = await coro
             if self.response_format == "content_and_artifact":
                 if not isinstance(response, tuple) or len(response) != 2:
-                    raise ValueError(
+                    msg = (
                         "Since response_format='content_and_artifact' "
                         "a two-tuple of the message content and raw tool output is "
                         f"expected. Instead generated response of type: "
                         f"{type(response)}."
                     )
+                    raise ValueError(msg)
                 content, artifact = response
             else:
                 content = response
@@ -825,10 +830,11 @@ def _handle_validation_error(
     elif callable(flag):
         content = flag(e)
     else:
-        raise ValueError(
+        msg = (
             f"Got unexpected type of `handle_validation_error`. Expected bool, "
             f"str or callable. Received: {flag}"
         )
+        raise ValueError(msg)
     return content


@@ -844,10 +850,11 @@ def _handle_tool_error(
     elif callable(flag):
         content = flag(e)
     else:
-        raise ValueError(
+        msg = (
             f"Got unexpected type of `handle_tool_error`. Expected bool, str "
             f"or callable. Received: {flag}"
         )
+        raise ValueError(msg)
     return content
diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py
index b419a595559a5..e85435a86df69 100644
--- a/libs/core/langchain_core/tools/convert.py
+++ b/libs/core/langchain_core/tools/convert.py
@@ -146,7 +146,8 @@ def _make_tool(dec_func: Union[Callable, Runnable]) -> BaseTool:
             runnable = dec_func

             if runnable.input_schema.model_json_schema().get("type") != "object":
-                raise ValueError("Runnable must have an object schema.")
+                msg = "Runnable must have an object schema."
+                raise ValueError(msg)

             async def ainvoke_wrapper(
                 callbacks: Optional[Callbacks] = None, **kwargs: Any
@@ -189,10 +190,11 @@ def invoke_wrapper(
             # If someone doesn't want a schema applied, we must treat it as
             # a simple string->string function
             if dec_func.__doc__ is None:
-                raise ValueError(
+                msg = (
                     "Function must have a docstring if "
                     "description not provided and infer_schema is False."
                 )
+                raise ValueError(msg)
             return Tool(
                 name=tool_name,
                 func=func,
@@ -222,7 +224,8 @@ def _partial(func: Callable[[str], str]) -> BaseTool:

         return _partial
     else:
-        raise ValueError("Too many arguments for tool decorator")
+        msg = "Too many arguments for tool decorator"
+        raise ValueError(msg)


 def _get_description_from_runnable(runnable: Runnable) -> str:
@@ -241,11 +244,12 @@ def _get_schema_from_runnable_and_arg_types(
     try:
         arg_types = get_type_hints(runnable.InputType)
     except TypeError as e:
-        raise TypeError(
+        msg = (
             "Tool input must be str or dict. If dict, dict arguments must be "
             "typed. Either annotate types (e.g., with TypedDict) or pass "
             f"arg_types into `.as_tool` to specify. {str(e)}"
-        ) from e
+        )
+        raise TypeError(msg) from e
     fields = {key: (key_type, Field(...)) for key, key_type in arg_types.items()}
     return create_model(name, **fields)  # type: ignore
diff --git a/libs/core/langchain_core/tools/simple.py b/libs/core/langchain_core/tools/simple.py
index 6f0bdb516fc6c..118c8b39f6db3 100644
--- a/libs/core/langchain_core/tools/simple.py
+++ b/libs/core/langchain_core/tools/simple.py
@@ -68,11 +68,12 @@ def _to_args_and_kwargs(self, tool_input: Union[str, dict]) -> tuple[tuple, dict
         # For backwards compatibility. The tool must be run with a single input
         all_args = list(args) + list(kwargs.values())
         if len(all_args) != 1:
-            raise ToolException(
+            msg = (
                 f"""Too many arguments to single-input tool {self.name}.
                Consider using StructuredTool instead."""
                 f" Args: {all_args}"
             )
+            raise ToolException(msg)
         return tuple(all_args), {}

     def _run(
@@ -89,7 +90,8 @@ def _run(
             if config_param := _get_runnable_config_param(self.func):
                 kwargs[config_param] = config
             return self.func(*args, **kwargs)
-        raise NotImplementedError("Tool does not support sync invocation.")
+        msg = "Tool does not support sync invocation."
+        raise NotImplementedError(msg)

     async def _arun(
         self,
@@ -152,7 +154,8 @@ def from_function(
             ValueError: If the function is not provided.
         """
         if func is None and coroutine is None:
-            raise ValueError("Function and/or coroutine must be provided")
+            msg = "Function and/or coroutine must be provided"
+            raise ValueError(msg)
         return cls(
             name=name,
             func=func,
diff --git a/libs/core/langchain_core/tools/structured.py b/libs/core/langchain_core/tools/structured.py
index bf645265b457c..174e7b2f53704 100644
--- a/libs/core/langchain_core/tools/structured.py
+++ b/libs/core/langchain_core/tools/structured.py
@@ -78,7 +78,8 @@ def _run(
             if config_param := _get_runnable_config_param(self.func):
                 kwargs[config_param] = config
             return self.func(*args, **kwargs)
-        raise NotImplementedError("StructuredTool does not support sync invocation.")
+        msg = "StructuredTool does not support sync invocation."
+        raise NotImplementedError(msg)

     async def _arun(
         self,
@@ -167,7 +168,8 @@ def add(a: int, b: int) -> int:
         elif coroutine is not None:
             source_function = coroutine
         else:
-            raise ValueError("Function and/or coroutine must be provided")
+            msg = "Function and/or coroutine must be provided"
+            raise ValueError(msg)
         name = name or source_function.__name__
         if args_schema is None and infer_schema:
             # schema name is appended within function
@@ -184,9 +186,8 @@ def add(a: int, b: int) -> int:
         if description_ is None and args_schema:
             description_ = args_schema.__doc__ or None
             if description_ is None:
-                raise ValueError(
-                    "Function must have a docstring if description not provided."
-                )
+                msg = "Function must have a docstring if description not provided."
+                raise ValueError(msg)
         if description is None:
             # Only apply if using the function's docstring
             description_ = textwrap.dedent(description_).strip()
diff --git a/libs/core/langchain_core/tracers/context.py b/libs/core/langchain_core/tracers/context.py
index f2f05849d6265..295c68552a3a8 100644
--- a/libs/core/langchain_core/tracers/context.py
+++ b/libs/core/langchain_core/tracers/context.py
@@ -40,9 +40,10 @@ def tracing_enabled(
     session_name: str = "default",
 ) -> Generator[TracerSessionV1, None, None]:
     """Throw an error because this has been replaced by tracing_v2_enabled."""
-    raise RuntimeError(
+    msg = (
         "tracing_enabled is no longer supported. Please use tracing_enabled_v2 instead."
     )
+    raise RuntimeError(msg)


 @contextmanager
@@ -196,9 +197,8 @@ def register_configure_hook(
         to a non-None value.
     """
     if env_var is not None and handle_class is None:
-        raise ValueError(
-            "If env_var is set, handle_class must also be set to a non-None value."
-        )
+        msg = "If env_var is set, handle_class must also be set to a non-None value."
+        raise ValueError(msg)
     from langchain_core.callbacks.base import BaseCallbackHandler

     _configure_hooks.append(
diff --git a/libs/core/langchain_core/tracers/core.py b/libs/core/langchain_core/tracers/core.py
index 0cadbf44babcf..696d35150ff92 100644
--- a/libs/core/langchain_core/tracers/core.py
+++ b/libs/core/langchain_core/tracers/core.py
@@ -137,17 +137,19 @@ def _get_run(
         try:
             run = self.run_map[str(run_id)]
         except KeyError as exc:
-            raise TracerException(f"No indexed run ID {run_id}.") from exc
+            msg = f"No indexed run ID {run_id}."
+            raise TracerException(msg) from exc

         if isinstance(run_type, str):
             run_types: Union[set[str], None] = {run_type}
         else:
             run_types = run_type
         if run_types is not None and run.run_type not in run_types:
-            raise TracerException(
+            msg = (
                 f"Found {run.run_type} run at ID {run_id}, "
                 f"but expected {run_types} run."
             )
+            raise TracerException(msg)
         return run

     def _create_chat_model_run(
@@ -170,10 +172,11 @@ def _create_chat_model_run(
             # This can eventually be cleaned up by writing a "modern" tracer
             # that has all the updated schema changes corresponding to
             # the "streaming_events" format.
-            raise NotImplementedError(
+            msg = (
                 f"Chat model tracing is not supported in "
                 f"for {self._schema_format} format."
             )
+            raise NotImplementedError(msg)
         start_time = datetime.now(timezone.utc)
         if metadata:
             kwargs.update({"metadata": metadata})
@@ -338,7 +341,8 @@ def _get_chain_inputs(self, inputs: Any) -> Any:
                 "input": inputs,
             }
         else:
-            raise ValueError(f"Invalid format: {self._schema_format}")
+            msg = f"Invalid format: {self._schema_format}"
+            raise ValueError(msg)

     def _get_chain_outputs(self, outputs: Any) -> Any:
         """Get the outputs for a chain run."""
@@ -349,7 +353,8 @@ def _get_chain_outputs(self, outputs: Any) -> Any:
                 "output": outputs,
             }
         else:
-            raise ValueError(f"Invalid format: {self._schema_format}")
+            msg = f"Invalid format: {self._schema_format}"
+            raise ValueError(msg)

     def _complete_chain_run(
         self,
@@ -404,7 +409,8 @@ def _create_tool_run(
         elif self._schema_format == "streaming_events":
             inputs = {"input": inputs}
         else:
-            raise AssertionError(f"Invalid format: {self._schema_format}")
+            msg = f"Invalid format: {self._schema_format}"
+            raise AssertionError(msg)

         return Run(
             id=run_id,
diff --git a/libs/core/langchain_core/tracers/evaluation.py b/libs/core/langchain_core/tracers/evaluation.py
index c41fa2d7f5816..d74c5d86baedf 100644
--- a/libs/core/langchain_core/tracers/evaluation.py
+++ b/libs/core/langchain_core/tracers/evaluation.py
@@ -159,10 +159,11 @@ def _select_eval_results(
         elif isinstance(results, dict) and "results" in results:
             results_ = cast(list[EvaluationResult], results["results"])
         else:
-            raise TypeError(
+            msg = (
                 f"Invalid evaluation result type {type(results)}."
                 " Expected EvaluationResult or EvaluationResults."
             )
+            raise TypeError(msg)
         return results_

     def _log_evaluation_feedback(
diff --git a/libs/core/langchain_core/tracers/event_stream.py b/libs/core/langchain_core/tracers/event_stream.py
index 57cd0dc5f42fd..b7a1ddc853f67 100644
--- a/libs/core/langchain_core/tracers/event_stream.py
+++ b/libs/core/langchain_core/tracers/event_stream.py
@@ -136,10 +136,11 @@ def _get_parent_ids(self, run_id: UUID) -> list[str]:
         while parent_id := self.parent_map.get(run_id):
             str_parent_id = str(parent_id)
             if str_parent_id in parent_ids:
-                raise AssertionError(
+                msg = (
                     f"Parent ID {parent_id} is already in the parent_ids list. "
                     f"This should never happen."
                 )
+                raise AssertionError(msg)
             parent_ids.append(str_parent_id)
             run_id = parent_id
@@ -411,7 +412,8 @@ async def on_llm_new_token(
         chunk_: Union[GenerationChunk, BaseMessageChunk]

         if run_info is None:
-            raise AssertionError(f"Run ID {run_id} not found in run map.")
+            msg = f"Run ID {run_id} not found in run map."
+            raise AssertionError(msg)
         if self.is_tapped.get(run_id):
             return
         if run_info["run_type"] == "chat_model":
@@ -429,7 +431,8 @@ async def on_llm_new_token(
             else:
                 chunk_ = cast(GenerationChunk, chunk)
         else:
-            raise ValueError(f"Unexpected run type: {run_info['run_type']}")
+            msg = f"Unexpected run type: {run_info['run_type']}"
+            raise ValueError(msg)

         self._send(
             {
@@ -484,7 +487,8 @@ async def on_llm_end(
             }
             event = "on_llm_end"
         else:
-            raise ValueError(f"Unexpected run type: {run_info['run_type']}")
+            msg = f"Unexpected run type: {run_info['run_type']}"
+            raise ValueError(msg)

         self._send(
             {
@@ -626,10 +630,11 @@ async def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> None
         """End a trace for a tool run."""
         run_info = self.run_map.pop(run_id)
         if "inputs" not in run_info:
-            raise AssertionError(
+            msg = (
                 f"Run ID {run_id} is a tool call and is expected to have "
                 f"inputs associated with it."
             )
+            raise AssertionError(msg)
         inputs = run_info["inputs"]

         self._send(
@@ -839,11 +844,12 @@ async def _astream_events_implementation_v1(
                     if event_type == "stream":
                         num_chunks = len(log_entry["streamed_output"])
                         if num_chunks != 1:
-                            raise AssertionError(
+                            msg = (
                                 f"Expected exactly one chunk of streamed output, "
                                 f"got {num_chunks} instead. This is impossible. "
                                 f"Encountered in: {log_entry['name']}"
                             )
+                            raise AssertionError(msg)

                         data = {"chunk": log_entry["streamed_output"][0]}
                         # Clean up the stream, we don't need it anymore.
@@ -866,11 +872,12 @@ async def _astream_events_implementation_v1(
                 if state["streamed_output"]:
                     num_chunks = len(state["streamed_output"])
                     if num_chunks != 1:
-                        raise AssertionError(
+                        msg = (
                             f"Expected exactly one chunk of streamed output, "
                             f"got {num_chunks} instead. This is impossible. "
                             f"Encountered in: {state['name']}"
                         )
+                        raise AssertionError(msg)

                     data = {"chunk": state["streamed_output"][0]}
                     # Clean up the stream, we don't need it anymore.
@@ -945,10 +952,11 @@ async def _astream_events_implementation_v2(
         callbacks.add_handler(event_streamer, inherit=True)
         config["callbacks"] = callbacks
     else:
-        raise ValueError(
+        msg = (
             f"Unexpected type for callbacks: {callbacks}."
             "Expected None, list or AsyncCallbackManager."
         )
+        raise ValueError(msg)

     # Call the runnable in streaming mode,
     # add each chunk to the output stream
diff --git a/libs/core/langchain_core/tracers/langchain.py b/libs/core/langchain_core/tracers/langchain.py
index e04fccaa103d3..0183adb260402 100644
--- a/libs/core/langchain_core/tracers/langchain.py
+++ b/libs/core/langchain_core/tracers/langchain.py
@@ -191,7 +191,8 @@ def get_run_url(self) -> str:
             ValueError: If the run URL cannot be found.
         """
         if not self.latest_run:
-            raise ValueError("No traced run found.")
+            msg = "No traced run found."
+            raise ValueError(msg)
         # If this is the first run in a project, the project may not yet be created.
         # This method is only really useful for debugging flows, so we will assume
         # there is some tolerace for latency.
@@ -204,7 +205,8 @@ def get_run_url(self) -> str:
                 return self.client.get_run_url(
                     run=self.latest_run, project_name=self.project_name
                 )
-        raise ValueError("Failed to get run URL.")
+        msg = "Failed to get run URL."
+        raise ValueError(msg)

     def _get_tags(self, run: Run) -> list[str]:
         """Get combined tags for a run."""
diff --git a/libs/core/langchain_core/tracers/langchain_v1.py b/libs/core/langchain_core/tracers/langchain_v1.py
index ea1c882ea67da..706dfb02edf57 100644
--- a/libs/core/langchain_core/tracers/langchain_v1.py
+++ b/libs/core/langchain_core/tracers/langchain_v1.py
@@ -3,14 +3,16 @@
 def get_headers(*args: Any, **kwargs: Any) -> Any:
     """Throw an error because this has been replaced by get_headers."""
-    raise RuntimeError(
+    msg = (
         "get_headers for LangChainTracerV1 is no longer supported. "
         "Please use LangChainTracer instead."
     )
+    raise RuntimeError(msg)


 def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any:  # noqa: N802
     """Throw an error because this has been replaced by LangChainTracer."""
-    raise RuntimeError(
+    msg = (
         "LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
     )
+    raise RuntimeError(msg)
diff --git a/libs/core/langchain_core/tracers/log_stream.py b/libs/core/langchain_core/tracers/log_stream.py
index 439d2a45381ac..2284ff7022f42 100644
--- a/libs/core/langchain_core/tracers/log_stream.py
+++ b/libs/core/langchain_core/tracers/log_stream.py
@@ -104,9 +104,8 @@ def __add__(self, other: Union[RunLogPatch, Any]) -> RunLog:
             state = jsonpatch.apply_patch(None, copy.deepcopy(ops))
             return RunLog(*ops, state=state)

-        raise TypeError(
-            f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
-        )
+        msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
+        raise TypeError(msg)

     def __repr__(self) -> str:
         from pprint import pformat
@@ -134,9 +133,8 @@ def __add__(self, other: Union[RunLogPatch, Any]) -> RunLog:
             state = jsonpatch.apply_patch(self.state, other.ops)
             return RunLog(*ops, state=state)

-        raise TypeError(
-            f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
-        )
+        msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
+        raise TypeError(msg)

     def __repr__(self) -> str:
         from pprint import pformat
@@ -197,10 +195,11 @@ def __init__(
             ValueError: If an invalid schema format is provided (internal use only).
         """
         if _schema_format not in {"original", "streaming_events"}:
-            raise ValueError(
+            msg = (
                 f"Invalid schema format: {_schema_format}. "
                 f"Expected one of 'original', 'streaming_events'."
             )
+            raise ValueError(msg)
         super().__init__(_schema_format=_schema_format)

         self.auto_close = auto_close
@@ -496,11 +495,12 @@ def _get_standardized_inputs(
         None means that the input is not yet known!
     """
     if schema_format == "original":
-        raise NotImplementedError(
+        msg = (
             "Do not assign inputs with original schema drop the key for now."
             "When inputs are added to astream_log they should be added with "
             "standardized schema for streaming events."
         )
+        raise NotImplementedError(msg)

     inputs = load(run.inputs)
@@ -613,10 +613,11 @@ async def _astream_log_implementation(
         callbacks.add_handler(stream, inherit=True)
         config["callbacks"] = callbacks
     else:
-        raise ValueError(
+        msg = (
             f"Unexpected type for callbacks: {callbacks}."
             "Expected None, list or AsyncCallbackManager."
         )
+        raise ValueError(msg)

     # Call the runnable in streaming mode,
     # add each chunk to the output stream
diff --git a/libs/core/langchain_core/utils/_merge.py b/libs/core/langchain_core/utils/_merge.py
index 32261399d0012..be8f829387454 100644
--- a/libs/core/langchain_core/utils/_merge.py
+++ b/libs/core/langchain_core/utils/_merge.py
@@ -34,10 +34,11 @@ def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]
             elif right_v is None:
                 continue
             elif type(merged[right_k]) is not type(right_v):
-                raise TypeError(
+                msg = (
                     f'additional_kwargs["{right_k}"] already exists in this message,'
                     " but with a different type."
                 )
+                raise TypeError(msg)
             elif isinstance(merged[right_k], str):
                 # TODO: Add below special handling for 'type' key in 0.3 and remove
                 # merge_lists 'type' logic.
@@ -60,10 +61,11 @@ def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]
             elif merged[right_k] == right_v:
                 continue
             else:
-                raise TypeError(
+                msg = (
                     f"Additional kwargs key {right_k} already exists in left dict and "
                     f"value has unsupported type {type(merged[right_k])}."
             )
+            raise TypeError(msg)
     return merged

@@ -125,10 +127,11 @@ def merge_obj(left: Any, right: Any) -> Any:
     if left is None or right is None:
         return left if left is not None else right
     elif type(left) is not type(right):
-        raise TypeError(
+        msg = (
             f"left and right are of different types. Left type: {type(left)}. Right "
             f"type: {type(right)}."
         )
+        raise TypeError(msg)
     elif isinstance(left, str):
         return left + right
     elif isinstance(left, dict):
@@ -138,7 +141,8 @@ def merge_obj(left: Any, right: Any) -> Any:
     elif left == right:
         return left
     else:
-        raise ValueError(
+        msg = (
             f"Unable to merge {left=} and {right=}. Both must be of type str, dict, or "
             f"list, or else be two equal objects."
         )
+        raise ValueError(msg)
diff --git a/libs/core/langchain_core/utils/aiter.py b/libs/core/langchain_core/utils/aiter.py
index 135b116a419c1..5d4ef032efaaa 100644
--- a/libs/core/langchain_core/utils/aiter.py
+++ b/libs/core/langchain_core/utils/aiter.py
@@ -60,7 +60,8 @@ def py_anext(
             Callable[[AsyncIterator[T]], Awaitable[T]], type(iterator).__anext__
         )
     except AttributeError as e:
-        raise TypeError(f"{iterator!r} is not an async iterator") from e
+        msg = f"{iterator!r} is not an async iterator"
+        raise TypeError(msg) from e

     if default is _no_default:
         return __anext__(iterator)
diff --git a/libs/core/langchain_core/utils/env.py b/libs/core/langchain_core/utils/env.py
index 8319c9abb982b..ac018b0208a70 100644
--- a/libs/core/langchain_core/utils/env.py
+++ b/libs/core/langchain_core/utils/env.py
@@ -73,8 +73,9 @@ def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
     elif default is not None:
         return default
     else:
-        raise ValueError(
+        msg = (
             f"Did not find {key}, please add an environment variable"
             f" `{env_key}` which contains it, or pass"
             f" `{key}` as a named parameter."
         )
+        raise ValueError(msg)
diff --git a/libs/core/langchain_core/utils/formatting.py b/libs/core/langchain_core/utils/formatting.py
index 1e71cdf28ce7a..d00431be85b1e 100644
--- a/libs/core/langchain_core/utils/formatting.py
+++ b/libs/core/langchain_core/utils/formatting.py
@@ -25,10 +25,11 @@ def vformat(
             ValueError: If any arguments are provided.
         """
         if len(args) > 0:
-            raise ValueError(
+            msg = (
                 "No arguments should be provided, "
                 "everything should be passed as keyword arguments."
             )
+            raise ValueError(msg)
         return super().vformat(format_string, args, kwargs)

     def validate_input_variables(
diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py
index d93f5cb0098e6..9bb62c92b9e20 100644
--- a/libs/core/langchain_core/utils/function_calling.py
+++ b/libs/core/langchain_core/utils/function_calling.py
@@ -105,7 +105,8 @@ def convert_pydantic_to_openai_function(
     elif hasattr(model, "schema"):
         schema = model.schema()  # Pydantic 1
     else:
-        raise TypeError("Model must be a Pydantic model.")
+        msg = "Model must be a Pydantic model."
+        raise TypeError(msg)
     schema = dereference_refs(schema)
     if "definitions" in schema:  # pydantic 1
         schema.pop("definitions", None)
@@ -237,11 +238,12 @@ def _convert_any_typed_dicts_to_pydantic(
                 if (field_desc := field_kwargs.get("description")) and not isinstance(
                     field_desc, str
                 ):
-                    raise ValueError(
+                    msg = (
                         f"Invalid annotation for field {arg}. Third argument to "
                         f"Annotated must be a string description, received value of "
                         f"type {type(field_desc)}."
                    )
+                    raise ValueError(msg)
                 elif arg_desc := arg_descriptions.get(arg):
                     field_kwargs["description"] = arg_desc
                 else:
@@ -387,12 +389,13 @@ def convert_to_openai_function(
     elif callable(function):
         oai_function = cast(dict, convert_python_function_to_openai_function(function))
     else:
-        raise ValueError(
+        msg = (
             f"Unsupported function\n\n{function}\n\nFunctions must be passed in"
             " as Dict, pydantic.BaseModel, or Callable. If they're a dict they must"
             " either be in OpenAI function format or valid JSON schema with top-level"
             " 'title' and 'description' keys."
         )
+        raise ValueError(msg)

     if strict is not None:
         oai_function["strict"] = strict
@@ -553,7 +556,8 @@ def _parse_google_docstring(
     if filtered_annotations and (
         len(docstring_blocks) < 2 or not docstring_blocks[1].startswith("Args:")
     ):
-        raise ValueError("Found invalid Google-Style docstring.")
+        msg = "Found invalid Google-Style docstring."
+        raise ValueError(msg)
     descriptors = []
     args_block = None
     past_descriptors = False
@@ -571,7 +575,8 @@ def _parse_google_docstring(
         description = " ".join(descriptors)
     else:
         if error_on_invalid_docstring:
-            raise ValueError("Found invalid Google-Style docstring.")
+            msg = "Found invalid Google-Style docstring."
+            raise ValueError(msg)
         description = ""
         args_block = None
     arg_descriptions = {}
diff --git a/libs/core/langchain_core/utils/json.py b/libs/core/langchain_core/utils/json.py
index 0ef93c05abe96..88e91899b9122 100644
--- a/libs/core/langchain_core/utils/json.py
+++ b/libs/core/langchain_core/utils/json.py
@@ -179,11 +179,13 @@ def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
     try:
         json_obj = parse_json_markdown(text)
     except json.JSONDecodeError as e:
-        raise OutputParserException(f"Got invalid JSON object. Error: {e}") from e
+        msg = f"Got invalid JSON object. Error: {e}"
+        raise OutputParserException(msg) from e
     for key in expected_keys:
         if key not in json_obj:
-            raise OutputParserException(
+            msg = (
                 f"Got invalid return object. Expected key `{key}` "
                 f"to be present, but got {json_obj}"
             )
+            raise OutputParserException(msg)
     return json_obj
diff --git a/libs/core/langchain_core/utils/json_schema.py b/libs/core/langchain_core/utils/json_schema.py
index be9642808c1a7..8ac177e342cc2 100644
--- a/libs/core/langchain_core/utils/json_schema.py
+++ b/libs/core/langchain_core/utils/json_schema.py
@@ -8,10 +8,11 @@ def _retrieve_ref(path: str, schema: dict) -> dict:
     components = path.split("/")
     if components[0] != "#":
-        raise ValueError(
+        msg = (
             "ref paths are expected to be URI fragments, meaning they should start "
             "with #."
         )
+        raise ValueError(msg)

     out = schema
     for component in components[1:]:
         if component in out:
@@ -19,7 +20,8 @@ def _retrieve_ref(path: str, schema: dict) -> dict:
         elif component.isdigit() and int(component) in out:
             out = out[int(component)]
         else:
-            raise KeyError(f"Reference '{path}' not found.")
+            msg = f"Reference '{path}' not found."
+            raise KeyError(msg)

     return deepcopy(out)
diff --git a/libs/core/langchain_core/utils/mustache.py b/libs/core/langchain_core/utils/mustache.py
index 56538d945f642..89d5d9fbbf144 100644
--- a/libs/core/langchain_core/utils/mustache.py
+++ b/libs/core/langchain_core/utils/mustache.py
@@ -144,7 +144,8 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
     try:
         tag, template = template.split(r_del, 1)
     except ValueError as e:
-        raise ChevronError("unclosed tag " f"at line {_CURRENT_LINE}") from e
+        msg = "unclosed tag " f"at line {_CURRENT_LINE}"
+        raise ChevronError(msg) from e

     # Find the type meaning of the first character
     tag_type = tag_types.get(tag[0], "variable")
@@ -164,9 +165,8 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
             # Otherwise we should complain
             else:
-                raise ChevronError(
-                    "unclosed set delimiter tag\n" f"at line {_CURRENT_LINE}"
-                )
+                msg = "unclosed set delimiter tag\n" f"at line {_CURRENT_LINE}"
+                raise ChevronError(msg)

     elif (
         # If we might be a no html escape tag
@@ -275,18 +275,20 @@ def tokenize(
             try:
                 last_section = open_sections.pop()
             except IndexError as e:
-                raise ChevronError(
+                msg = (
                     f'Trying to close tag "{tag_key}"\n'
                     "Looks like it was not opened.\n"
                     f"line {_CURRENT_LINE + 1}"
-                ) from e
+                )
+                raise ChevronError(msg) from e
             if tag_key != last_section:
                 # Otherwise we need to complain
-                raise ChevronError(
+                msg = (
                     f'Trying to close tag "{tag_key}"\n'
                     f'last open tag is "{last_section}"\n'
                     f"line {_CURRENT_LINE + 1}"
                 )
+                raise ChevronError(msg)

             # Do the second check to see if we're a standalone
             is_standalone = r_sa_check(template, tag_type, is_standalone)
@@ -313,11 +315,12 @@ def tokenize(
     # If there are any open sections when we're done
     if open_sections:
         # Then we need to complain
-        raise ChevronError(
+        msg = (
             "Unexpected EOF\n"
             f'the tag "{open_sections[-1]}" was never closed\n'
             f"was opened at line {_LAST_TAG_LINE}"
         )
+        raise ChevronError(msg)

 #
diff --git a/libs/core/langchain_core/utils/pydantic.py b/libs/core/langchain_core/utils/pydantic.py
index 93375e09f348b..7d81b8f70f03c 100644
--- a/libs/core/langchain_core/utils/pydantic.py
+++ b/libs/core/langchain_core/utils/pydantic.py
@@ -63,7 +63,8 @@ def get_pydantic_major_version() -> int:
     PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]  # type: ignore
     TypeBaseModel = Union[type[BaseModel], type[pydantic.BaseModel]]  # type: ignore
 else:
-    raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+    msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+    raise ValueError(msg)


 TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
@@ -116,7 +117,8 @@ def is_basemodel_subclass(cls: type) -> bool:
         if issubclass(cls, BaseModelV1):
             return True
     else:
-        raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+        msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise ValueError(msg)
     return False
@@ -144,7 +146,8 @@ def is_basemodel_instance(obj: Any) -> bool:
         if isinstance(obj, BaseModelV1):
             return True
     else:
-        raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+        msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise ValueError(msg)
     return False
@@ -233,9 +236,8 @@ def _create_subset_model_v1(
     elif PYDANTIC_MAJOR_VERSION == 2:
         from pydantic.v1 import create_model  # type: ignore
     else:
-        raise NotImplementedError(
-            f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
-        )
+        msg = f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise NotImplementedError(msg)

     fields = {}
@@ -339,9 +341,8 @@ def _create_subset_model(
             fn_description=fn_description,
         )
     else:
-        raise NotImplementedError(
-            f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
-        )
+        msg = f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise NotImplementedError(msg)


 if PYDANTIC_MAJOR_VERSION == 2:
@@ -376,7 +377,8 @@ def get_fields(
         elif hasattr(model, "__fields__"):
             return model.__fields__  # type: ignore
         else:
-            raise TypeError(f"Expected a Pydantic model. Got {type(model)}")
+            msg = f"Expected a Pydantic model. Got {type(model)}"
+            raise TypeError(msg)

 elif PYDANTIC_MAJOR_VERSION == 1:
     from pydantic import BaseModel as BaseModelV1_
@@ -386,7 +388,8 @@ def get_fields(  # type: ignore[no-redef]
         """Get the field names of a Pydantic model."""
         return model.__fields__  # type: ignore
 else:
-    raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+    msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+    raise ValueError(msg)

 _SchemaConfig = ConfigDict(
     arbitrary_types_allowed=True, frozen=True, protected_namespaces=()
 )
@@ -536,11 +539,12 @@ def _remap_field_definitions(field_definitions: dict[str, Any]) -> dict[str, Any]
         if key.startswith("_") or key in _RESERVED_NAMES:
             # Let's add a prefix to avoid colliding with internal pydantic fields
             if isinstance(value, FieldInfo):
-                raise NotImplementedError(
+                msg = (
                     f"Remapping for fields starting with '_' or fields with a name "
                     f"matching a reserved name {_RESERVED_NAMES} is not supported if "
                     f" the field is a pydantic Field instance. Got {key}."
                 )
+                raise NotImplementedError(msg)
             type_, default_ = value
             remapped[f"private_{key}"] = (
                 type_,
@@ -583,10 +587,11 @@ def create_model_v2(
     if root:
         if field_definitions:
-            raise NotImplementedError(
+            msg = (
                 "When specifying __root__ no other "
                 f"fields should be provided. Got {field_definitions}"
             )
+            raise NotImplementedError(msg)

         if isinstance(root, tuple):
             kwargs = {"type_": root[0], "default_": root[1]}
diff --git a/libs/core/langchain_core/utils/utils.py b/libs/core/langchain_core/utils/utils.py
index 7bbea2d4e0cac..94f356da33116 100644
--- a/libs/core/langchain_core/utils/utils.py
+++ b/libs/core/langchain_core/utils/utils.py
@@ -44,11 +44,12 @@ def wrapper(*args: Any, **kwargs: Any) -> Any:
         invalid_groups = [i for i, count in enumerate(counts) if count != 1]
         if invalid_groups:
             invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups]
-            raise ValueError(
+            msg = (
                 "Exactly one argument in each of the following"
                 " groups must be defined:"
                 f" {', '.join(invalid_group_names)}"
             )
+            raise ValueError(msg)
         return func(*args, **kwargs)

     return wrapper
@@ -134,10 +135,11 @@ def guard_import(
         module = importlib.import_module(module_name, package)
     except (ImportError, ModuleNotFoundError) as e:
         pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
-        raise ImportError(
+        msg = (
             f"Could not import {module_name} python package. "
             f"Please install it with `pip install {pip_name}`."
-        ) from e
+        )
+        raise ImportError(msg) from e
     return module
@@ -166,25 +168,29 @@ def check_package_version(
     """
     imported_version = parse(version(package))
     if lt_version is not None and imported_version >= parse(lt_version):
-        raise ValueError(
+        msg = (
             f"Expected {package} version to be < {lt_version}. Received "
             f"{imported_version}."
         )
+        raise ValueError(msg)
     if lte_version is not None and imported_version > parse(lte_version):
-        raise ValueError(
+        msg = (
             f"Expected {package} version to be <= {lte_version}. Received "
             f"{imported_version}."
         )
+        raise ValueError(msg)
     if gt_version is not None and imported_version <= parse(gt_version):
-        raise ValueError(
+        msg = (
             f"Expected {package} version to be > {gt_version}. Received "
             f"{imported_version}."
         )
+        raise ValueError(msg)
     if gte_version is not None and imported_version < parse(gte_version):
-        raise ValueError(
+        msg = (
             f"Expected {package} version to be >= {gte_version}. Received "
             f"{imported_version}."
         )
+        raise ValueError(msg)


 def get_pydantic_field_names(pydantic_cls: Any) -> set[str]:
@@ -230,7 +236,8 @@ def _build_model_kwargs(
     extra_kwargs = values.get("model_kwargs", {})
     for field_name in list(values):
         if field_name in extra_kwargs:
-            raise ValueError(f"Found {field_name} supplied twice.")
+            msg = f"Found {field_name} supplied twice."
+            raise ValueError(msg)
         if field_name not in all_required_field_names:
             warnings.warn(
                 f"""WARNING! {field_name} is not default parameter.
@@ -276,7 +283,8 @@ def build_extra_kwargs(
     """
     for field_name in list(values):
         if field_name in extra_kwargs:
-            raise ValueError(f"Found {field_name} supplied twice.")
+            msg = f"Found {field_name} supplied twice."
+            raise ValueError(msg)
         if field_name not in all_required_field_names:
             warnings.warn(
                 f"""WARNING! {field_name} is not default parameter.
@@ -288,10 +296,11 @@ def build_extra_kwargs(
     invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys())
     if invalid_model_kwargs:
-        raise ValueError(
+        msg = (
             f"Parameters {invalid_model_kwargs} should be specified explicitly. "
             f"Instead they were passed in as part of `model_kwargs` parameter."
         )
+        raise ValueError(msg)

     return extra_kwargs
@@ -386,11 +395,12 @@ def get_from_env_fn() -> Optional[str]:
         if error_message:
             raise ValueError(error_message)
         else:
-            raise ValueError(
+            msg = (
                 f"Did not find {key}, please add an environment variable"
                 f" `{key}` which contains it, or pass"
                 f" `{key}` as a named parameter."
             )
+            raise ValueError(msg)

     return get_from_env_fn
@@ -449,10 +459,11 @@ def get_secret_from_env() -> Optional[SecretStr]:
         if error_message:
             raise ValueError(error_message)
         else:
-            raise ValueError(
+            msg = (
                 f"Did not find {key}, please add an environment variable"
                 f" `{key}` which contains it, or pass"
                 f" `{key}` as a named parameter."
             )
+            raise ValueError(msg)

     return get_secret_from_env
diff --git a/libs/core/langchain_core/vectorstores/base.py b/libs/core/langchain_core/vectorstores/base.py
index ccc966d9f2aff..c87da224acd41 100644
--- a/libs/core/langchain_core/vectorstores/base.py
+++ b/libs/core/langchain_core/vectorstores/base.py
@@ -93,10 +93,11 @@ def add_texts(
             texts if isinstance(texts, (list, tuple)) else list(texts)
         )
         if metadatas and len(metadatas) != len(texts_):
-            raise ValueError(
+            msg = (
                 "The number of metadatas must match the number of texts."
                 f"Got {len(metadatas)} metadatas and {len(texts_)} texts."
             )
+            raise ValueError(msg)
         metadatas_ = iter(metadatas) if metadatas else cycle([{}])
         ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None])
         docs = [
@@ -108,9 +109,8 @@ def add_texts(
                 kwargs["ids"] = ids

             return self.add_documents(docs, **kwargs)
-        raise NotImplementedError(
-            f"`add_texts` has not been implemented for {self.__class__.__name__} "
-        )
+        msg = f"`add_texts` has not been implemented for {self.__class__.__name__} "
+        raise NotImplementedError(msg)

     @property
     def embeddings(self) -> Optional[Embeddings]:
@@ -133,7 +133,8 @@ def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> Optional[boo
             False otherwise, None if not implemented.
         """

-        raise NotImplementedError("delete method must be implemented by subclass.")
+        msg = "delete method must be implemented by subclass."
+        raise NotImplementedError(msg)

     def get_by_ids(self, ids: Sequence[str], /) -> list[Document]:
         """Get documents by their IDs.
@@ -159,9 +160,8 @@ def get_by_ids(self, ids: Sequence[str], /) -> list[Document]:

         .. versionadded:: 0.2.11
         """
-        raise NotImplementedError(
-            f"{self.__class__.__name__} does not yet support get_by_ids."
-        )
+        msg = f"{self.__class__.__name__} does not yet support get_by_ids."
+        raise NotImplementedError(msg)

     # Implementations should override this method to provide an async native version.
     async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]:
@@ -243,10 +243,11 @@ async def aadd_texts(
             texts if isinstance(texts, (list, tuple)) else list(texts)
         )
         if metadatas and len(metadatas) != len(texts_):
-            raise ValueError(
+            msg = (
                 "The number of metadatas must match the number of texts."
                 f"Got {len(metadatas)} metadatas and {len(texts_)} texts."
             )
+            raise ValueError(msg)
         metadatas_ = iter(metadatas) if metadatas else cycle([{}])
         ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None])
@@ -284,10 +285,11 @@ def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]:
             texts = [doc.page_content for doc in documents]
             metadatas = [doc.metadata for doc in documents]
             return self.add_texts(texts, metadatas, **kwargs)
-        raise NotImplementedError(
+        msg = (
             f"`add_documents` and `add_texts` has not been implemented "
             f"for {self.__class__.__name__} "
         )
+        raise NotImplementedError(msg)

     async def aadd_documents(
         self, documents: list[Document], **kwargs: Any
@@ -347,11 +349,12 @@ def search(self, query: str, search_type: str, **kwargs: Any) -> list[Document]:
         elif search_type == "mmr":
             return self.max_marginal_relevance_search(query, **kwargs)
         else:
-            raise ValueError(
+            msg = (
                 f"search_type of {search_type} not allowed. Expected "
                 "search_type to be 'similarity', 'similarity_score_threshold'"
                 " or 'mmr'."
             )
+            raise ValueError(msg)

     async def asearch(
         self, query: str, search_type: str, **kwargs: Any
@@ -381,10 +384,11 @@ async def asearch(
         elif search_type == "mmr":
             return await self.amax_marginal_relevance_search(query, **kwargs)
         else:
-            raise ValueError(
+            msg = (
                 f"search_type of {search_type} not allowed. Expected "
                 "search_type to be 'similarity', 'similarity_score_threshold' or 'mmr'."
             )
+            raise ValueError(msg)

     @abstractmethod
     def similarity_search(
@@ -1035,17 +1039,19 @@ def validate_search_type(cls, values: dict) -> Any:
         """
         search_type = values.get("search_type", "similarity")
         if search_type not in cls.allowed_search_types:
-            raise ValueError(
+            msg = (
                 f"search_type of {search_type} not allowed. Valid values are: "
                 f"{cls.allowed_search_types}"
             )
+            raise ValueError(msg)
         if search_type == "similarity_score_threshold":
             score_threshold = values.get("search_kwargs", {}).get("score_threshold")
             if (score_threshold is None) or (not isinstance(score_threshold, float)):
-                raise ValueError(
+                msg = (
                     "`score_threshold` is not specified with a float value(0~1) "
                     "in `search_kwargs`."
                 )
+                raise ValueError(msg)
         return values

     def _get_ls_params(self, **kwargs: Any) -> LangSmithRetrieverParams:
@@ -1084,7 +1090,8 @@ def _get_relevant_documents(
                 query, **self.search_kwargs
             )
         else:
-            raise ValueError(f"search_type of {self.search_type} not allowed.")
+            msg = f"search_type of {self.search_type} not allowed."
+            raise ValueError(msg)
         return docs

     async def _aget_relevant_documents(
@@ -1106,7 +1113,8 @@ async def _aget_relevant_documents(
                 query, **self.search_kwargs
             )
         else:
-            raise ValueError(f"search_type of {self.search_type} not allowed.")
+            msg = f"search_type of {self.search_type} not allowed."
+            raise ValueError(msg)
         return docs

     def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]:
diff --git a/libs/core/langchain_core/vectorstores/in_memory.py b/libs/core/langchain_core/vectorstores/in_memory.py
index 0b0a7c08653f0..06aa13f785a24 100644
--- a/libs/core/langchain_core/vectorstores/in_memory.py
+++ b/libs/core/langchain_core/vectorstores/in_memory.py
@@ -175,10 +175,11 @@ def add_documents(
         vectors = self.embedding.embed_documents(texts)

         if ids and len(ids) != len(texts):
-            raise ValueError(
+            msg = (
                 f"ids must be the same length as texts. "
                 f"Got {len(ids)} ids and {len(texts)} texts."
             )
+            raise ValueError(msg)

         id_iterator: Iterator[Optional[str]] = (
             iter(ids) if ids else iter(doc.id for doc in documents)
@@ -207,10 +208,11 @@ async def aadd_documents(
         vectors = await self.embedding.aembed_documents(texts)

         if ids and len(ids) != len(texts):
-            raise ValueError(
+            msg = (
                 f"ids must be the same length as texts. "
                 f"Got {len(ids)} ids and {len(texts)} texts."
             )
+            raise ValueError(msg)

         id_iterator: Iterator[Optional[str]] = (
             iter(ids) if ids else iter(doc.id for doc in documents)
@@ -432,10 +434,11 @@ def max_marginal_relevance_search_by_vector(
         try:
             import numpy as np
         except ImportError as e:
-            raise ImportError(
+            msg = (
                 "numpy must be installed to use max_marginal_relevance_search "
                 "pip install numpy"
-            ) from e
+            )
+            raise ImportError(msg) from e

         mmr_chosen_indices = maximal_marginal_relevance(
             np.array(embedding, dtype=np.float32),
diff --git a/libs/core/langchain_core/vectorstores/utils.py b/libs/core/langchain_core/vectorstores/utils.py
index 777bb68b68de7..6a8f6758f999a 100644
--- a/libs/core/langchain_core/vectorstores/utils.py
+++ b/libs/core/langchain_core/vectorstores/utils.py
@@ -35,10 +35,11 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
     try:
         import numpy as np
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "cosine_similarity requires numpy to be installed. "
             "Please install numpy with `pip install numpy`."
-        ) from e
+        )
+        raise ImportError(msg) from e

     if len(x) == 0 or len(y) == 0:
         return np.array([])
@@ -46,10 +47,11 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
     x = np.array(x)
     y = np.array(y)
     if x.shape[1] != y.shape[1]:
-        raise ValueError(
+        msg = (
             f"Number of columns in X and Y must be the same. X has shape {x.shape} "
             f"and Y has shape {y.shape}."
         )
+        raise ValueError(msg)

     try:
         import simsimd as simd  # type: ignore[import-not-found]
@@ -94,10 +96,11 @@ def maximal_marginal_relevance(
     try:
         import numpy as np
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "maximal_marginal_relevance requires numpy to be installed. "
             "Please install numpy with `pip install numpy`."
-        ) from e
+        )
+        raise ImportError(msg) from e

     if min(k, len(embedding_list)) <= 0:
         return []
diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml
index 36a6cb9f593ef..365db50c53be1 100644
--- a/libs/core/pyproject.toml
+++ b/libs/core/pyproject.toml
@@ -44,7 +44,7 @@ python = ">=3.12.4"
 [tool.poetry.extras]

 [tool.ruff.lint]
-select = [ "B", "C4", "E", "F", "I", "N", "PIE", "SIM", "T201", "UP", "W",]
+select = [ "B", "C4", "E", "EM", "F", "I", "N", "PIE", "SIM", "T201", "UP", "W",]
 ignore = [ "UP007", "W293",]

 [tool.coverage.run]
diff --git a/libs/core/tests/unit_tests/conftest.py b/libs/core/tests/unit_tests/conftest.py
index 53104b12b2afa..29819a8066958 100644
--- a/libs/core/tests/unit_tests/conftest.py
+++ b/libs/core/tests/unit_tests/conftest.py
@@ -47,7 +47,8 @@ def test_something():
     only_core = config.getoption("--only-core") or False

     if only_extended and only_core:
-        raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")
+        msg = "Cannot specify both `--only-extended` and `--only-core`."
+        raise ValueError(msg)

     for item in items:
         requires_marker = item.get_closest_marker("requires")
diff --git a/libs/core/tests/unit_tests/language_models/llms/test_cache.py b/libs/core/tests/unit_tests/language_models/llms/test_cache.py
index 4c5eb04cb9cce..6894328724b0d 100644
--- a/libs/core/tests/unit_tests/language_models/llms/test_cache.py
+++ b/libs/core/tests/unit_tests/language_models/llms/test_cache.py
@@ -66,11 +66,13 @@ def __init__(self) -> None:

     def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
         """Look up based on prompt and llm_string."""
-        raise NotImplementedError("This code should not be triggered")
+        msg = "This code should not be triggered"
+        raise NotImplementedError(msg)

     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
         """Update cache based on prompt and llm_string."""
-        raise NotImplementedError("This code should not be triggered")
+        msg = "This code should not be triggered"
+        raise NotImplementedError(msg)

     def clear(self, **kwargs: Any) -> None:
         """Clear cache."""
diff --git a/libs/core/tests/unit_tests/load/test_serializable.py b/libs/core/tests/unit_tests/load/test_serializable.py
index a8b03a801e6d8..65040a6841b2a 100644
--- a/libs/core/tests/unit_tests/load/test_serializable.py
+++ b/libs/core/tests/unit_tests/load/test_serializable.py
@@ -76,17 +76,20 @@ def lc_secrets(self) -> dict[str, str]:
 def test__is_field_useful() -> None:
     class ArrayObj:
         def __bool__(self) -> bool:
-            raise ValueError("Truthiness can't be determined")
+            msg = "Truthiness can't be determined"
+            raise ValueError(msg)

         def __eq__(self, other: object) -> bool:
             return self  # type: ignore[return-value]

     class NonBoolObj:
         def __bool__(self) -> bool:
-            raise ValueError("Truthiness can't be determined")
+            msg = "Truthiness can't be determined"
+            raise ValueError(msg)

         def __eq__(self, other: object) -> bool:
-            raise ValueError("Equality can't be determined")
+            msg = "Equality can't be determined"
+            raise ValueError(msg)

     default_x = ArrayObj()
     default_y = NonBoolObj()
diff --git a/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py b/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py
index a883eabc35a99..c4c97292b0197 100644
--- a/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py
+++ b/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py
@@ -32,15 +32,13 @@ def parse_result(
             that support streaming
         """
         if len(result) != 1:
-            raise NotImplementedError(
-                "This output parser can only be used with a single generation."
-            )
+            msg = "This output parser can only be used with a single generation."
+            raise NotImplementedError(msg)
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
             # Say that this one only works with chat generations
-            raise OutputParserException(
-                "This output parser can only be used with a chat generation."
-            )
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)

         content = generation.message.content
         assert isinstance(content, str)
@@ -77,15 +75,13 @@ def parse_result(
             that support streaming
         """
         if len(result) != 1:
-            raise NotImplementedError(
-                "This output parser can only be used with a single generation."
-            )
+            msg = "This output parser can only be used with a single generation."
+            raise NotImplementedError(msg)
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
             # Say that this one only works with chat generations
-            raise OutputParserException(
-                "This output parser can only be used with a chat generation."
-            )
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
         content = generation.message.content
         assert isinstance(content, str)
         return content.swapcase()  # type: ignore
diff --git a/libs/core/tests/unit_tests/prompts/test_prompt.py b/libs/core/tests/unit_tests/prompts/test_prompt.py
index 3e3452f78b4e3..cf256a8ae1d30 100644
--- a/libs/core/tests/unit_tests/prompts/test_prompt.py
+++ b/libs/core/tests/unit_tests/prompts/test_prompt.py
@@ -618,7 +618,8 @@ def test_prompt_falsy_vars(
     elif template_format == "mustache":
         template = "{{my_var}}"
     else:
-        raise ValueError(f"Invalid template format: {template_format}")
+        msg = f"Invalid template format: {template_format}"
+        raise ValueError(msg)

     prompt = PromptTemplate.from_template(template, template_format=template_format)
diff --git a/libs/core/tests/unit_tests/pydantic_utils.py b/libs/core/tests/unit_tests/pydantic_utils.py
index e64e1e24dfedb..e7e03750fd2df 100644
--- a/libs/core/tests/unit_tests/pydantic_utils.py
+++ b/libs/core/tests/unit_tests/pydantic_utils.py
@@ -77,9 +77,8 @@ def _schema(obj: Any) -> dict:
     """Return the schema of the object."""
     if not is_basemodel_subclass(obj):
-        raise TypeError(
-            f"Object must be a Pydantic BaseModel subclass. Got {type(obj)}"
-        )
+        msg = f"Object must be a Pydantic BaseModel subclass. Got {type(obj)}"
+        raise TypeError(msg)

     # Remap to old style schema
     if not hasattr(obj, "model_json_schema"):  # V1 model
         return obj.schema()
diff --git a/libs/core/tests/unit_tests/runnables/test_configurable.py b/libs/core/tests/unit_tests/runnables/test_configurable.py
index 99f10f6c60060..9467d6dd0397f 100644
--- a/libs/core/tests/unit_tests/runnables/test_configurable.py
+++ b/libs/core/tests/unit_tests/runnables/test_configurable.py
@@ -23,7 +23,8 @@ class MyRunnable(RunnableSerializable[str, str]):
     @classmethod
     def my_error(cls, values: dict[str, Any]) -> Any:
         if "_my_hidden_property" in values:
-            raise ValueError("Cannot set _my_hidden_property")
+            msg = "Cannot set _my_hidden_property"
+            raise ValueError(msg)
         return values

     @model_validator(mode="after")
diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py
index f9dff49c0b99d..6fdf76bf03f4f 100644
--- a/libs/core/tests/unit_tests/runnables/test_runnable.py
+++ b/libs/core/tests/unit_tests/runnables/test_runnable.py
@@ -2824,7 +2824,8 @@ def router(input: dict[str, Any]) -> Runnable:
         elif input["key"] == "english":
             return itemgetter("input") | english_chain
         else:
-            raise ValueError(f"Unknown key: {input['key']}")
+            msg = f"Unknown key: {input['key']}"
+            raise ValueError(msg)

     chain: Runnable = input_map | router
     assert dumps(chain, pretty=True) == snapshot
@@ -2873,7 +2874,8 @@ async def arouter(input: dict[str, Any]) -> Runnable:
         elif input["key"] == "english":
             return itemgetter("input") | english_chain
         else:
-            raise ValueError(f"Unknown key: {input['key']}")
+            msg = f"Unknown key: {input['key']}"
+            raise ValueError(msg)

     achain: Runnable = input_map | arouter
     math_spy = mocker.spy(math_chain.__class__, "ainvoke")
@@ -3065,7 +3067,8 @@ def test_map_stream() -> None:
         streamed_chunks[0] == {"llm": "i"}
         or {"chat": _any_id_ai_message_chunk(content="i")}
     ):
-        raise AssertionError(f"Got an unexpected chunk: {streamed_chunks[0]}")
+        msg = f"Got an unexpected chunk: {streamed_chunks[0]}"
+        raise AssertionError(msg)

     assert len(streamed_chunks) == len(llm_res) + len(chat_res)

@@ -3714,9 +3717,11 @@ def _simple_recursion(x: int) -> Union[int, Runnable]:
 def test_retrying(mocker: MockerFixture) -> None:
     def _lambda(x: int) -> Union[int, Runnable]:
         if x == 1:
-            raise ValueError("x is 1")
+            msg = "x is 1"
+            raise ValueError(msg)
         elif x == 2:
-            raise RuntimeError("x is 2")
+            msg = "x is 2"
+            raise RuntimeError(msg)
         else:
             return x
@@ -3777,9 +3782,11 @@ def _lambda(x: int) -> Union[int, Runnable]:
 async def test_async_retrying(mocker: MockerFixture) -> None:
     def _lambda(x: int) -> Union[int, Runnable]:
         if x == 1:
-            raise ValueError("x is 1")
+            msg = "x is 1"
+            raise ValueError(msg)
         elif x == 2:
-            raise RuntimeError("x is 2")
+            msg = "x is 2"
+            raise RuntimeError(msg)
         else:
             return x
@@ -3872,7 +3879,8 @@ def test_runnable_lambda_stream_with_callbacks() -> None:
     def raise_value_error(x: int) -> int:
         """Raise a value error."""
-        raise ValueError("x is too large")
+        msg = "x is too large"
+        raise ValueError(msg)

     # Check that the chain on error is invoked
     with pytest.raises(ValueError):
@@ -3950,7 +3958,8 @@ async def test_runnable_lambda_astream_with_callbacks() -> None:
     def raise_value_error(x: int) -> int:
         """Raise a value error."""
-        raise ValueError("x is too large")
+        msg = "x is too large"
+        raise ValueError(msg)

     # Check that the chain on error is invoked
     with pytest.raises(ValueError):
@@ -4285,7 +4294,8 @@ def test_runnable_branch_invoke() -> None:
     # Test with single branch
     def raise_value_error(x: int) -> int:
         """Raise a value error."""
-        raise ValueError("x is too large")
+        msg = "x is too large"
+        raise ValueError(msg)

     branch = RunnableBranch[int, int](
         (lambda x: x > 100, raise_value_error),
@@ -4349,7 +4359,8 @@ def test_runnable_branch_invoke_callbacks() -> None:

     def raise_value_error(x: int) -> int:
         """Raise a value error."""
-        raise ValueError("x is too large")
+        msg = "x is too large"
+        raise ValueError(msg)

     branch = RunnableBranch[int, int](
         (lambda x: x > 100, raise_value_error),
@@ -4376,7 +4387,8 @@ async def test_runnable_branch_ainvoke_callbacks() -> None:

     async def raise_value_error(x: int) -> int:
         """Raise a value error."""
-        raise ValueError("x is too large")
+        msg = "x is too large"
+        raise ValueError(msg)

     branch = RunnableBranch[int, int](
         (lambda x: x > 100, raise_value_error),
@@ -4430,7 +4442,8 @@ def test_runnable_branch_stream_with_callbacks() -> None:

     def raise_value_error(x: str) -> Any:
         """Raise a value error."""
-        raise ValueError(f"x is {x}")
+        msg = f"x is {x}"
+        raise ValueError(msg)

     llm_res = "i'm a textbot"
     # sleep to better simulate a real stream
@@ -4507,7 +4520,8 @@ async def test_runnable_branch_astream_with_callbacks() -> None:

     def raise_value_error(x: str) -> Any:
         """Raise a value error."""
-        raise ValueError(f"x is {x}")
+        msg = f"x is {x}"
+        raise ValueError(msg)

     llm_res = "i'm a textbot"
     # sleep to better simulate a real stream
diff --git a/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py
index 67b303b3518e4..7389d887769d8 100644
--- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py
+++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py
@@ -1596,7 +1596,8 @@ def success(inputs: str) -> str:

     def fail(inputs: str) -> None:
         """Simple func."""
-        raise Exception("fail")
+        msg = "fail"
+        raise Exception(msg)

     chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
         stop_after_attempt=1,
diff --git a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
index 4014c9687e9d0..afc05ae39f61d 100644
--- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
+++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
@@ -1552,7 +1552,8 @@ def success(inputs: str) -> str:

     def fail(inputs: str) -> None:
         """Simple func."""
-        raise Exception("fail")
+        msg = "fail"
+        raise Exception(msg)

     chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
         stop_after_attempt=1,
@@ -2057,7 +2058,8 @@ def invoke(
         self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
     ) -> Output:
         """Invoke the runnable."""
-        raise ValueError("Server side error")
+        msg = "Server side error"
+        raise ValueError(msg)

     def stream(
         self,
diff --git a/libs/core/tests/unit_tests/runnables/test_tracing_interops.py b/libs/core/tests/unit_tests/runnables/test_tracing_interops.py
index 6cf8b8ea612ce..af6a5e84ce9ee 100644
--- a/libs/core/tests/unit_tests/runnables/test_tracing_interops.py
+++ b/libs/core/tests/unit_tests/runnables/test_tracing_interops.py
@@ -293,7 +293,8 @@ def parent(a: int) -> int:
     elif method == "abatch":
         res = (await parent.abatch([1], {"callbacks": cb}))[0]  # type: ignore
     else:
-        raise ValueError(f"Unknown method {method}")
+        msg = f"Unknown method {method}"
+        raise ValueError(msg)
     assert res == 3
     posts = _get_posts(mock_client_)
     name_order = [
@@ -345,7 +346,8 @@ def parent(a: int) -> int:
         ), f"{name} not after {name_order[i-1]}"
         prev_dotted_order = dotted_order
         if name in dotted_order_map:
-            raise ValueError(f"Duplicate name {name}")
+            msg = f"Duplicate name {name}"
+            raise ValueError(msg)
         dotted_order_map[name] = dotted_order
         id_map[name] = posts[i]["id"]
         parent_id_map[name] = posts[i].get("parent_run_id")
diff --git a/libs/core/tests/unit_tests/test_imports.py b/libs/core/tests/unit_tests/test_imports.py
index ed336df0c3120..d046976e8e53b 100644
--- a/libs/core/tests/unit_tests/test_imports.py
+++ b/libs/core/tests/unit_tests/test_imports.py
@@ -52,4 +52,5 @@ def test_importable_all_via_subprocess() -> None:
         result = future.result()  # Will raise an exception if the callable raised
         code, module_name = result
         if code != 0:
-            raise ValueError(f"Failed to import {module_name}.")
+            msg = f"Failed to import {module_name}."
+            raise ValueError(msg)
diff --git a/libs/core/tests/unit_tests/utils/test_pydantic.py b/libs/core/tests/unit_tests/utils/test_pydantic.py
index 8ba4beeab7674..01e5657866f72 100644
--- a/libs/core/tests/unit_tests/utils/test_pydantic.py
+++ b/libs/core/tests/unit_tests/utils/test_pydantic.py
@@ -106,7 +106,8 @@ def test_is_basemodel_subclass() -> None:
         assert is_basemodel_subclass(BaseModelV1)

     else:
-        raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+        msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise ValueError(msg)


 def test_is_basemodel_instance() -> None:
@@ -132,7 +133,8 @@ class Bar(BaseModelV1):
         assert is_basemodel_instance(Bar(x=5))

     else:
-        raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+        msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise ValueError(msg)


 @pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Only tests Pydantic v2")
diff --git a/libs/core/tests/unit_tests/utils/test_utils.py b/libs/core/tests/unit_tests/utils/test_utils.py
index 68ad11b3e963b..806ace226e5d0 100644
--- a/libs/core/tests/unit_tests/utils/test_utils.py
+++ b/libs/core/tests/unit_tests/utils/test_utils.py
@@ -177,7 +177,8 @@ def test_guard_import(
     elif package is not None and pip_name is not None:
         ret = guard_import(module_name, pip_name=pip_name, package=package)
     else:
-        raise ValueError("Invalid test case")
+        msg = "Invalid test case"
+        raise ValueError(msg)
     assert ret == expected
@@ -204,7 +205,8 @@ def test_guard_import_failure(
     elif package is not None and pip_name is not None:
         guard_import(module_name, pip_name=pip_name, package=package)
     else:
-        raise ValueError("Invalid test case")
+        msg = "Invalid test case"
+        raise ValueError(msg)
     pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
     err_msg = (
         f"Could not import {module_name} python package. "