diff --git a/integrations/amazon_bedrock/src/haystack_integrations/common/amazon_bedrock/utils.py b/integrations/amazon_bedrock/src/haystack_integrations/common/amazon_bedrock/utils.py
index e1683e3b3..3148818c1 100644
--- a/integrations/amazon_bedrock/src/haystack_integrations/common/amazon_bedrock/utils.py
+++ b/integrations/amazon_bedrock/src/haystack_integrations/common/amazon_bedrock/utils.py
@@ -34,7 +34,7 @@ def get_aws_session(
     :param kwargs: The kwargs passed down to the service client. Supported kwargs depend on the model chosen.
         See https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html.
     :raises AWSConfigurationError: If the provided AWS credentials are invalid.
-    :return: The created AWS session.
+    :returns: The created AWS session.
     """
     try:
         return boto3.Session(
@@ -54,7 +54,7 @@ def aws_configured(**kwargs) -> bool:
     """
     Checks whether AWS configuration is provided.
     :param kwargs: The kwargs passed down to the generator.
-    :return: True if AWS configuration is provided, False otherwise.
+    :returns: True if AWS configuration is provided, False otherwise.
     """
     aws_config_provided = any(key in kwargs for key in AWS_CONFIGURATION_KEYS)
     return aws_config_provided
diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py b/integrations/amazon_bedrock/src/haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py
index 5a82821e3..8cf98cd45 100644
--- a/integrations/amazon_bedrock/src/haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py
+++ b/integrations/amazon_bedrock/src/haystack_integrations/components/embedders/amazon_bedrock/document_embedder.py
@@ -235,7 +235,7 @@ def run(self, documents: List[Document]):
     def to_dict(self) -> Dict[str, Any]:
         """
         Serialize this component to a dictionary.
-        :return: The serialized component as a dictionary.
+        :returns: The serialized component as a dictionary.
         """
         return default_to_dict(
             self,
diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py b/integrations/amazon_bedrock/src/haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py
index 8804702a0..ed6768737 100644
--- a/integrations/amazon_bedrock/src/haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py
+++ b/integrations/amazon_bedrock/src/haystack_integrations/components/embedders/amazon_bedrock/text_embedder.py
@@ -154,7 +154,7 @@ def run(self, text: str):
     def to_dict(self) -> Dict[str, Any]:
         """
         Serialize this component to a dictionary.
-        :return: The serialized component as a dictionary.
+        :returns: The serialized component as a dictionary.
         """
         return default_to_dict(
             self,
@@ -172,7 +172,7 @@ def from_dict(cls, data: Dict[str, Any]) -> "AmazonBedrockTextEmbedder":
         """
         Deserialize this component from a dictionary.
         :param data: The dictionary representation of this component.
-        :return: The deserialized component instance.
+        :returns: The deserialized component instance.
""" deserialize_secrets_inplace( data["init_parameters"], diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py index a1704ef13..f842f0ef5 100644 --- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py @@ -25,7 +25,7 @@ def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]: :param prompt: The prompt to be sent to the model. :param inference_kwargs: Additional keyword arguments passed to the handler. - :return: A dictionary containing the body for the request. + :returns: A dictionary containing the body for the request. """ def get_responses(self, response_body: Dict[str, Any]) -> List[str]: @@ -33,7 +33,7 @@ def get_responses(self, response_body: Dict[str, Any]) -> List[str]: Extracts the responses from the Amazon Bedrock response. :param response_body: The response body from the Amazon Bedrock request. - :return: A list of responses. + :returns: A list of responses. """ completions = self._extract_completions_from_response(response_body) responses = [completion.lstrip() for completion in completions] @@ -45,7 +45,7 @@ def get_stream_responses(self, stream, stream_handler: TokenStreamingHandler) -> :param stream: The streaming response from the Amazon Bedrock request. :param stream_handler: The handler for the streaming response. - :return: A list of string responses. + :returns: A list of string responses. """ tokens: List[str] = [] for event in stream: @@ -64,7 +64,7 @@ def _get_params(self, inference_kwargs: Dict[str, Any], default_params: Dict[str Includes param if it's in kwargs or its default is not None (i.e. it is actually defined). :param inference_kwargs: The inference kwargs. :param default_params: The default params. - :return: A dictionary containing the merged params. + :returns: A dictionary containing the merged params. """ kwargs = self.model_kwargs.copy() kwargs.update(inference_kwargs) @@ -80,7 +80,7 @@ def _extract_completions_from_response(self, response_body: Dict[str, Any]) -> L Extracts the responses from the Amazon Bedrock response. :param response_body: The response body from the Amazon Bedrock request. - :return: A list of string responses. + :returns: A list of string responses. """ @abstractmethod @@ -89,7 +89,7 @@ def _extract_token_from_stream(self, chunk: Dict[str, Any]) -> str: Extracts the token from a streaming chunk. :param chunk: The streaming chunk. - :return: A string token. + :returns: A string token. """ @@ -121,7 +121,7 @@ def _extract_completions_from_response(self, response_body: Dict[str, Any]) -> L Extracts the responses from the Amazon Bedrock response. :param response_body: The response body from the Amazon Bedrock request. - :return: A list of string responses. + :returns: A list of string responses. """ return [response_body["completion"]] @@ -130,7 +130,7 @@ def _extract_token_from_stream(self, chunk: Dict[str, Any]) -> str: Extracts the token from a streaming chunk. :param chunk: The streaming chunk. - :return: A string token. + :returns: A string token. """ return chunk.get("completion", "") @@ -146,7 +146,7 @@ def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]: :param prompt: The prompt to be sent to the model. 
         :param inference_kwargs: Additional keyword arguments passed to the handler.
-        :return: A dictionary containing the body for the request.
+        :returns: A dictionary containing the body for the request.
         """
         default_params = {
             "max_tokens": self.max_length,
@@ -170,7 +170,7 @@ def _extract_completions_from_response(self, response_body: Dict[str, Any]) -> L
         Extracts the responses from the Cohere Command model response.
 
         :param response_body: The response body from the Amazon Bedrock request.
-        :return: A list of string responses.
+        :returns: A list of string responses.
         """
         responses = [generation["text"] for generation in response_body["generations"]]
         return responses
@@ -180,7 +180,7 @@ def _extract_token_from_stream(self, chunk: Dict[str, Any]) -> str:
         Extracts the token from a streaming chunk.
 
         :param chunk: The streaming chunk.
-        :return: A string token.
+        :returns: A string token.
         """
         return chunk.get("text", "")
 
@@ -226,7 +226,7 @@ def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
 
         :param prompt: The prompt to be sent to the model.
         :param inference_kwargs: Additional keyword arguments passed to the handler.
-        :return: A dictionary containing the body for the request.
+        :returns: A dictionary containing the body for the request.
         """
         default_params = {
             "maxTokenCount": self.max_length,
@@ -244,7 +244,7 @@ def _extract_completions_from_response(self, response_body: Dict[str, Any]) -> L
         Extracts the responses from the Titan model response.
 
         :param response_body: The response body for Titan model response.
-        :return: A list of string responses.
+        :returns: A list of string responses.
         """
         responses = [result["outputText"] for result in response_body["results"]]
         return responses
@@ -254,7 +254,7 @@ def _extract_token_from_stream(self, chunk: Dict[str, Any]) -> str:
         Extracts the token from a streaming chunk.
 
         :param chunk: The streaming chunk.
-        :return: A string token.
+        :returns: A string token.
         """
         return chunk.get("outputText", "")
 
@@ -270,7 +270,7 @@ def prepare_body(self, prompt: str, **inference_kwargs) -> Dict[str, Any]:
 
         :param prompt: The prompt to be sent to the model.
         :param inference_kwargs: Additional keyword arguments passed to the handler.
-        :return: A dictionary containing the body for the request.
+        :returns: A dictionary containing the body for the request.
         """
         default_params = {
             "max_gen_len": self.max_length,
@@ -287,7 +287,7 @@ def _extract_completions_from_response(self, response_body: Dict[str, Any]) -> L
         Extracts the responses from the Llama2 model response.
 
         :param response_body: The response body from the Llama2 model request.
-        :return: A list of string responses.
+        :returns: A list of string responses.
         """
         return [response_body["generation"]]
 
@@ -296,6 +296,6 @@ def _extract_token_from_stream(self, chunk: Dict[str, Any]) -> str:
         Extracts the token from a streaming chunk.
 
         :param chunk: The streaming chunk.
-        :return: A string token.
+        :returns: A string token.
""" return chunk.get("generation", "") diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/adapters.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/adapters.py index d5dc100f9..196a55743 100644 --- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/adapters.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/adapters.py @@ -34,7 +34,7 @@ def prepare_body(self, messages: List[ChatMessage], **inference_kwargs) -> Dict[ :param messages: The chat messages to package into the request. :param inference_kwargs: Additional inference kwargs to use. - :return: The prepared body. + :returns: The prepared body. """ def get_responses(self, response_body: Dict[str, Any]) -> List[ChatMessage]: @@ -42,7 +42,7 @@ def get_responses(self, response_body: Dict[str, Any]) -> List[ChatMessage]: Extracts the responses from the Amazon Bedrock response. :param response_body: The response body. - :return: The extracted responses. + :returns: The extracted responses. """ return self._extract_messages_from_response(self.response_body_message_key(), response_body) @@ -85,7 +85,7 @@ def _get_params(self, inference_kwargs: Dict[str, Any], default_params: Dict[str :param inference_kwargs: The inference kwargs to merge. :param default_params: The default params to start with. - :return: The merged params. + :returns: The merged params. """ # Start with a copy of default_params kwargs = default_params.copy() @@ -100,7 +100,7 @@ def _ensure_token_limit(self, prompt: str) -> str: """ Ensures that the prompt is within the token limit for the model. :param prompt: The prompt to check. - :return: The resized prompt. + :returns: The resized prompt. """ resize_info = self.check_prompt(prompt) if resize_info["prompt_length"] != resize_info["new_prompt_length"]: @@ -121,7 +121,7 @@ def check_prompt(self, prompt: str) -> Dict[str, Any]: Checks the prompt length and resizes it if necessary. If the prompt is too long, it will be truncated. :param prompt: The prompt to check. - :return: A dictionary containing the resized prompt and additional information. + :returns: A dictionary containing the resized prompt and additional information. """ def _extract_messages_from_response(self, message_tag: str, response_body: Dict[str, Any]) -> List[ChatMessage]: @@ -130,7 +130,7 @@ def _extract_messages_from_response(self, message_tag: str, response_body: Dict[ :param message_tag: The key for the message in the response body. :param response_body: The response body. - :return: The extracted ChatMessage list. + :returns: The extracted ChatMessage list. """ metadata = {k: v for (k, v) in response_body.items() if k != message_tag} return [ChatMessage.from_assistant(response_body[message_tag], meta=metadata)] @@ -141,7 +141,7 @@ def response_body_message_key(self) -> str: Returns the key for the message in the response body. Subclasses should override this method to return the correct message key - where the response is located. - :return: The key for the message in the response body. + :returns: The key for the message in the response body. """ @abstractmethod @@ -150,7 +150,7 @@ def _extract_token_from_stream(self, chunk: Dict[str, Any]) -> str: Extracts the token from a streaming chunk. :param chunk: The streaming chunk. - :return: The extracted token. + :returns: The extracted token. 
""" @@ -192,7 +192,7 @@ def prepare_body(self, messages: List[ChatMessage], **inference_kwargs) -> Dict[ :param messages: The chat messages to package into the request. :param inference_kwargs: Additional inference kwargs to use. - :return: The prepared body. + :returns: The prepared body. """ default_params = { "max_tokens_to_sample": self.generation_kwargs.get("max_tokens_to_sample") or 512, @@ -212,7 +212,7 @@ def prepare_chat_messages(self, messages: List[ChatMessage]) -> str: Prepares the chat messages for the Anthropic Claude request. :param messages: The chat messages to prepare. - :return: The prepared chat messages as a string. + :returns: The prepared chat messages as a string. """ conversation = [] for index, message in enumerate(messages): @@ -241,7 +241,7 @@ def check_prompt(self, prompt: str) -> Dict[str, Any]: Checks the prompt length and resizes it if necessary. If the prompt is too long, it will be truncated. :param prompt: The prompt to check. - :return: A dictionary containing the resized prompt and additional information. + :returns: A dictionary containing the resized prompt and additional information. """ return self.prompt_handler(prompt) @@ -249,7 +249,7 @@ def response_body_message_key(self) -> str: """ Returns the key for the message in the response body for Anthropic Claude i.e. "completion". - :return: The key for the message in the response body. + :returns: The key for the message in the response body. """ return "completion" @@ -258,7 +258,7 @@ def _extract_token_from_stream(self, chunk: Dict[str, Any]) -> str: Extracts the token from a streaming chunk. :param chunk: The streaming chunk. - :return: The extracted token. + :returns: The extracted token. """ return chunk.get("completion", "") @@ -340,7 +340,7 @@ def prepare_chat_messages(self, messages: List[ChatMessage]) -> str: Prepares the chat messages for the Meta Llama 2 request. :param messages: The chat messages to prepare. - :return: The prepared chat messages as a string ready for the model. + :returns: The prepared chat messages as a string ready for the model. """ prepared_prompt: str = self.prompt_handler.tokenizer.apply_chat_template( conversation=messages, tokenize=False, chat_template=self.chat_template @@ -352,7 +352,7 @@ def check_prompt(self, prompt: str) -> Dict[str, Any]: Checks the prompt length and resizes it if necessary. If the prompt is too long, it will be truncated. :param prompt: The prompt to check. - :return: A dictionary containing the resized prompt and additional information. + :returns: A dictionary containing the resized prompt and additional information. """ return self.prompt_handler(prompt) @@ -361,7 +361,7 @@ def response_body_message_key(self) -> str: """ Returns the key for the message in the response body for Meta Llama 2 i.e. "generation". - :return: The key for the message in the response body. + :returns: The key for the message in the response body. """ return "generation" @@ -370,6 +370,6 @@ def _extract_token_from_stream(self, chunk: Dict[str, Any]) -> str: Extracts the token from a streaming chunk. :param chunk: The streaming chunk. - :return: The extracted token. + :returns: The extracted token. 
""" return chunk.get("generation", "") diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py index 3b5a8f6cc..bea6924f6 100644 --- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py @@ -140,7 +140,7 @@ def invoke(self, *args, **kwargs): :param args: The positional arguments passed to the generator. :param kwargs: The keyword arguments passed to the generator. - :return: List of `ChatMessage` generated by LLM. + :returns: List of `ChatMessage` generated by LLM. """ kwargs = kwargs.copy() @@ -183,7 +183,7 @@ def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str, :param messages: The messages to generate a response to. :param generation_kwargs: Additional generation keyword arguments passed to the model. - :return: A dictionary with the following keys: + :returns: A dictionary with the following keys: - `replies`: The generated List of `ChatMessage` objects. """ return {"replies": self.invoke(messages=messages, **(generation_kwargs or {}))} @@ -194,7 +194,7 @@ def get_model_adapter(cls, model: str) -> Optional[Type[BedrockModelChatAdapter] Returns the model adapter for the given model. :param model: The model to get the adapter for. - :return: The model adapter for the given model, or None if the model is not supported. + :returns: The model adapter for the given model, or None if the model is not supported. """ for pattern, adapter in cls.SUPPORTED_MODEL_PATTERNS.items(): if re.fullmatch(pattern, model): diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py index 706d29c98..f6af48ae1 100644 --- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py @@ -142,7 +142,7 @@ def _ensure_token_limit(self, prompt: Union[str, List[Dict[str, str]]]) -> Union the initialization of the component. :param prompt: The prompt to be sent to the model. - :return: The resized prompt. + :returns: The resized prompt. """ # the prompt for this model will be of the type str if isinstance(prompt, List): @@ -171,7 +171,7 @@ def invoke(self, *args, **kwargs): :param args: Additional positional arguments passed to the generator. :param kwargs: Additional keyword arguments passed to the generator. - :return: A list of generated responses (strings). + :returns: A list of generated responses (strings). """ kwargs = kwargs.copy() prompt: str = kwargs.pop("prompt", None) @@ -225,7 +225,7 @@ def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None): :param prompt: The prompt to generate a response for. :param generation_kwargs: Additional keyword arguments passed to the generator. - :return: A dictionary with the following keys: + :returns: A dictionary with the following keys: - `replies`: A list of generated responses (strings). 
""" return {"replies": self.invoke(prompt=prompt, **(generation_kwargs or {}))} @@ -236,7 +236,7 @@ def get_model_adapter(cls, model: str) -> Optional[Type[BedrockModelAdapter]]: Gets the model adapter for the given model. :param model: The model name. - :return: The model adapter class, or None if no adapter is found. + :returns: The model adapter class, or None if no adapter is found. """ for pattern, adapter in cls.SUPPORTED_MODEL_PATTERNS.items(): if re.fullmatch(pattern, model): diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/handlers.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/handlers.py index b7b555ec0..ddc276264 100644 --- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/handlers.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/handlers.py @@ -34,7 +34,7 @@ def __call__(self, prompt: str, **kwargs) -> Dict[str, Union[str, int]]: :param prompt: the prompt to be sent to the model. :param kwargs: Additional keyword arguments passed to the handler. - :return: A dictionary containing the resized prompt and additional information. + :returns: A dictionary containing the resized prompt and additional information. """ resized_prompt = prompt prompt_length = 0 @@ -75,7 +75,7 @@ def __call__(self, token_received: str, **kwargs) -> str: :param token_received: The token received from the stream. :param kwargs: Additional keyword arguments passed to the handler. - :return: The token to be sent to the stream. + :returns: The token to be sent to the stream. """ pass @@ -87,7 +87,7 @@ def __call__(self, token_received, **kwargs) -> str: :param token_received: The token received from the stream. :param kwargs: Additional keyword arguments passed to the handler. - :return: The token to be sent to the stream. + :returns: The token to be sent to the stream. """ print(token_received, flush=True, end="") # noqa: T201 return token_received diff --git a/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/sagemaker.py b/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/sagemaker.py index 106698558..c171ccdf6 100644 --- a/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/sagemaker.py +++ b/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/sagemaker.py @@ -179,7 +179,7 @@ def _get_aws_session( :param aws_profile_name: AWS profile name. :raises AWSConfigurationError: If the provided AWS credentials are invalid. - :return: The created AWS session. + :returns: The created AWS session. """ try: return boto3.Session( @@ -202,7 +202,7 @@ def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None): :param generation_kwargs: Additional keyword arguments for text generation. These parameters will potentially override the parameters passed in the `__init__` method. - :return: A dictionary with the following keys: + :returns: A dictionary with the following keys: - `replies`: A list of strings containing the generated responses - `meta`: A list of dictionaries containing the metadata for each response. 
""" diff --git a/integrations/chroma/src/haystack_integrations/components/retrievers/chroma/retriever.py b/integrations/chroma/src/haystack_integrations/components/retrievers/chroma/retriever.py index b91dd8627..7138eff88 100644 --- a/integrations/chroma/src/haystack_integrations/components/retrievers/chroma/retriever.py +++ b/integrations/chroma/src/haystack_integrations/components/retrievers/chroma/retriever.py @@ -64,7 +64,7 @@ def run( :param query: The input data for the retriever. In this case, a plain-text query. :param top_k: The maximum number of documents to retrieve. If not specified, the default value from the constructor is used. - :return: A dictionary with the following keys: + :returns: A dictionary with the following keys: - `documents`: List of documents returned by the search engine. :raises ValueError: If the specified document store is not found or is not a MemoryDocumentStore instance. @@ -119,7 +119,7 @@ def run( Run the retriever on the given input data. :param query_embedding: the query embeddings. - :return: a dictionary with the following keys: + :returns: a dictionary with the following keys: - `documents`: List of documents returned by the search engine. """ top_k = top_k or self.top_k diff --git a/integrations/chroma/src/haystack_integrations/document_stores/chroma/document_store.py b/integrations/chroma/src/haystack_integrations/document_stores/chroma/document_store.py index 4201de23b..0db9f832a 100644 --- a/integrations/chroma/src/haystack_integrations/document_stores/chroma/document_store.py +++ b/integrations/chroma/src/haystack_integrations/document_stores/chroma/document_store.py @@ -206,7 +206,7 @@ def search(self, queries: List[str], top_k: int) -> List[List[Document]]: :param queries: the list of queries to search for. :param top_k: top_k documents to return for each query. - :return: matching documents for each query. + :returns: matching documents for each query. """ results = self._collection.query( query_texts=queries, n_results=top_k, include=["embeddings", "documents", "metadatas", "distances"] diff --git a/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py index b8db2d542..980441009 100644 --- a/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py @@ -186,7 +186,7 @@ def _build_chunk(self, chunk) -> StreamingChunk: Converts the response from the Cohere API to a StreamingChunk. :param chunk: The chunk returned by the OpenAI API. :param choice: The choice returned by the OpenAI API. - :return: The StreamingChunk. + :returns: The StreamingChunk. """ chat_message = StreamingChunk(content=chunk.text, meta={"index": chunk.index, "event_type": chunk.event_type}) return chat_message @@ -195,7 +195,7 @@ def _build_message(self, cohere_response): """ Converts the non-streaming response from the Cohere API to a ChatMessage. :param cohere_response: The completion returned by the Cohere API. - :return: The ChatMessage. + :returns: The ChatMessage. 
""" content = cohere_response.text message = ChatMessage.from_assistant(content=content) diff --git a/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py index cb9486c1b..2c9a97478 100644 --- a/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py @@ -153,7 +153,7 @@ def _build_chunk(self, chunk) -> StreamingChunk: """ Converts the response from the Cohere API to a StreamingChunk. :param chunk: The chunk returned by the OpenAI API. - :return: The StreamingChunk. + :returns: The StreamingChunk. """ streaming_chunk = StreamingChunk(content=chunk.text, meta={"index": chunk.index}) return streaming_chunk diff --git a/integrations/fastembed/src/haystack_integrations/components/embedders/fastembed/fastembed_document_embedder.py b/integrations/fastembed/src/haystack_integrations/components/embedders/fastembed/fastembed_document_embedder.py index f08ff1adc..4af8e1bbe 100644 --- a/integrations/fastembed/src/haystack_integrations/components/embedders/fastembed/fastembed_document_embedder.py +++ b/integrations/fastembed/src/haystack_integrations/components/embedders/fastembed/fastembed_document_embedder.py @@ -144,7 +144,7 @@ def run(self, documents: List[Document]): Embeds a list of Documents. :param documents: List of Documents to embed. - :return: A dictionary with the following keys: + :returns: A dictionary with the following keys: - `documents`: List of Documents with each Document's `embedding` field set to the computed embeddings. """ if not isinstance(documents, list) or documents and not isinstance(documents[0], Document): diff --git a/integrations/fastembed/src/haystack_integrations/components/embedders/fastembed/fastembed_text_embedder.py b/integrations/fastembed/src/haystack_integrations/components/embedders/fastembed/fastembed_text_embedder.py index ffba6a902..13a89d1ce 100644 --- a/integrations/fastembed/src/haystack_integrations/components/embedders/fastembed/fastembed_text_embedder.py +++ b/integrations/fastembed/src/haystack_integrations/components/embedders/fastembed/fastembed_text_embedder.py @@ -101,7 +101,7 @@ def run(self, text: str): Embeds text using the Fastembed model. :param text: A string to embed. - :return: A dictionary with the following keys: + :returns: A dictionary with the following keys: - `embedding`: A list of floats representing the embedding of the input text. :raises TypeError: If the input is not a string. :raises RuntimeError: If the embedding model has not been loaded. diff --git a/integrations/mongodb_atlas/src/haystack_integrations/components/retrievers/mongodb_atlas/embedding_retriever.py b/integrations/mongodb_atlas/src/haystack_integrations/components/retrievers/mongodb_atlas/embedding_retriever.py index a4ef3b497..e3f5062fe 100644 --- a/integrations/mongodb_atlas/src/haystack_integrations/components/retrievers/mongodb_atlas/embedding_retriever.py +++ b/integrations/mongodb_atlas/src/haystack_integrations/components/retrievers/mongodb_atlas/embedding_retriever.py @@ -75,7 +75,7 @@ def run( :param query_embedding: Embedding of the query. :param filters: Filters applied to the retrieved Documents. Overrides the value specified at initialization. :param top_k: Maximum number of Documents to return. Overrides the value specified at initialization. - :return: List of Documents similar to `query_embedding`. 
+        :returns: List of Documents similar to `query_embedding`.
         """
         filters = filters or self.filters
         top_k = top_k or self.top_k
diff --git a/integrations/mongodb_atlas/src/haystack_integrations/document_stores/mongodb_atlas/document_store.py b/integrations/mongodb_atlas/src/haystack_integrations/document_stores/mongodb_atlas/document_store.py
index e2f2534f5..f76a31eb0 100644
--- a/integrations/mongodb_atlas/src/haystack_integrations/document_stores/mongodb_atlas/document_store.py
+++ b/integrations/mongodb_atlas/src/haystack_integrations/document_stores/mongodb_atlas/document_store.py
@@ -98,7 +98,7 @@ def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Doc
         refer to the [documentation](https://docs.haystack.deepset.ai/v2.0/docs/metadata-filtering).
 
         :param filters: The filters to apply. It returns only the documents that match the filters.
-        :return: A list of Documents that match the given filters.
+        :returns: A list of Documents that match the given filters.
         """
         mongo_filters = haystack_filters_to_mongo(filters)
         documents = list(self.collection.find(mongo_filters))
@@ -114,7 +114,7 @@ def write_documents(self, documents: List[Document], policy: DuplicatePolicy = D
         :param policy: The duplicate policy to use when writing documents.
         :raises DuplicateDocumentError: If a document with the same id already exists in the document store
            and the policy is set to DuplicatePolicy.FAIL (or not specified).
-        :return: The number of documents written to the document store.
+        :returns: The number of documents written to the document store.
         """
 
         if len(documents) > 0:
@@ -211,7 +211,7 @@ def mongo_doc_to_haystack_doc(self, mongo_doc: Dict[str, Any]) -> Document:
         Converts the dictionary coming out of MongoDB into a Haystack document
 
         :param mongo_doc: A dictionary representing a document as stored in MongoDB
-        :return: A Haystack Document object
+        :returns: A Haystack Document object
         """
         mongo_doc.pop("_id", None)
         return Document.from_dict(mongo_doc)
diff --git a/integrations/opensearch/src/haystack_integrations/document_stores/opensearch/document_store.py b/integrations/opensearch/src/haystack_integrations/document_stores/opensearch/document_store.py
index e91347728..e9c88274c 100644
--- a/integrations/opensearch/src/haystack_integrations/document_stores/opensearch/document_store.py
+++ b/integrations/opensearch/src/haystack_integrations/document_stores/opensearch/document_store.py
@@ -257,7 +257,7 @@ def _bm25_retrieval(
         :param scale_score: If `True` scales the Document`s scores between 0 and 1, defaults to False
         :param all_terms_must_match: If `True` all terms in `query` must be present in the Document, defaults to False
         :raises ValueError: If `query` is an empty string
-        :return: List of Document that match `query`
+        :returns: List of Document that match `query`
         """
 
         if not query:
@@ -314,7 +314,7 @@ def _embedding_retrieval(
         Filters are applied during the approximate kNN search to ensure that top_k matching documents are returned.
         :param top_k: Maximum number of Documents to return, defaults to 10
         :raises ValueError: If `query_embedding` is an empty list
-        :return: List of Document that are most similar to `query_embedding`
+        :returns: List of Document that are most similar to `query_embedding`
         """
 
         if not query_embedding:
diff --git a/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/document_store.py b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/document_store.py
index 4a47bf59e..645db88ae 100644
--- a/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/document_store.py
+++ b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/document_store.py
@@ -420,7 +420,7 @@ def _handle_duplicate_documents(
            overwrite: Update any existing documents with the same ID when adding documents.
            fail: an error is raised if the document ID of the document being added already exists.
 
-        :return: A list of Haystack Document objects.
+        :returns: A list of Haystack Document objects.
         """
         index = index or self.index
 
@@ -443,7 +443,7 @@ def _drop_duplicate_documents(self, documents: List[Document], index: Optional[s
 
         :param documents: A list of Haystack Document objects.
         :param index: name of the index
-        :return: A list of Haystack Document objects.
+        :returns: A list of Haystack Document objects.
         """
         _hash_ids: Set = set()
         _documents: List[Document] = []
diff --git a/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py
index 77d800853..fc18a3ab2 100644
--- a/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py
+++ b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py
@@ -186,7 +186,7 @@ def _squeeze_filter(self, payload_filter: models.Filter) -> models.Filter:
         Simplify given payload filter, if the nested structure might be unnested.
         That happens if there is a single clause in that filter.
         :param payload_filter:
-        :return:
+        :returns:
         """
         filter_parts = {
             "must": payload_filter.must,
diff --git a/nodes/text2speech/text2speech/utils/text_to_speech.py b/nodes/text2speech/text2speech/utils/text_to_speech.py
index 84c08e90f..e7d22cc11 100644
--- a/nodes/text2speech/text2speech/utils/text_to_speech.py
+++ b/nodes/text2speech/text2speech/utils/text_to_speech.py
@@ -104,7 +104,7 @@ def text_to_audio_file(
            leaves it untouched.
         :param audio_naming_function: A function mapping the input text into the audio file name.
            By default, the audio file gets the name from the MD5 sum of the input text.
-        :return: The path to the generated file.
+        :returns: The path to the generated file.
         """
         if not os.path.exists(generated_audio_dir):
             os.mkdir(generated_audio_dir)
@@ -140,7 +140,7 @@ def text_to_audio_data(self, text: str, _models_output_key: str = "wav") -> np.a
         :param text: The text to convert into audio.
         :param _models_output_key: The key in the prediction dictionary that contains the audio data.
            Defaults to 'wav'.
-        :return: A numpy array representing the audio generated by the model.
+        :returns: A numpy array representing the audio generated by the model.
         """
         prediction = self.model(text)
         if not prediction: