diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py
index 206cb0b9a..f7bb0ba23 100644
--- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py
+++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/chat_generator.py
@@ -1,7 +1,6 @@
 import json
 import logging
 import re
-import warnings
 from typing import Any, Callable, ClassVar, Dict, List, Optional, Type
 
 from botocore.exceptions import ClientError
@@ -150,12 +149,6 @@ def resolve_secret(secret: Optional[Secret]) -> Optional[str]:
         self.stop_words = stop_words or []
         self.streaming_callback = streaming_callback
 
-        warnings.warn(
-            "The `meta` output of the AmazonBedrockChatGenerator will change in the next release to be inline with "
-            "OpenAI `meta`output keys.",
-            stacklevel=2,
-        )
-
     @component.output_types(replies=List[ChatMessage])
     def run(
         self,
@@ -210,6 +203,12 @@ def run(
             msg = f"Could not inference Amazon Bedrock model {self.model} due: {exception}"
             raise AmazonBedrockInferenceError(msg) from exception
 
+        # rename the usage meta keys to be in line with OpenAI meta output keys
+        for response in replies:
+            if response.meta is not None and "usage" in response.meta:
+                response.meta["usage"]["prompt_tokens"] = response.meta["usage"].pop("input_tokens")
+                response.meta["usage"]["completion_tokens"] = response.meta["usage"].pop("output_tokens")
+
         return {"replies": replies}
 
     @classmethod
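For reference, here is what the Bedrock change looks like from a caller's perspective: the `usage` entry in each reply's `meta` now carries OpenAI-style key names. A minimal usage sketch, not part of the patch; the model ID is illustrative and the call assumes valid AWS credentials:

```python
from haystack.dataclasses import ChatMessage
from haystack_integrations.components.generators.amazon_bedrock import AmazonBedrockChatGenerator

# Model ID is an example; any Bedrock chat model your account can access works.
generator = AmazonBedrockChatGenerator(model="anthropic.claude-3-sonnet-20240229-v1:0")
result = generator.run(messages=[ChatMessage.from_user("What is Natural Language Processing?")])

# With this patch, the usage keys match OpenAI's naming:
usage = result["replies"][0].meta["usage"]
print(usage["prompt_tokens"], usage["completion_tokens"])  # formerly input_tokens / output_tokens
```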
diff --git a/integrations/anthropic/src/haystack_integrations/components/generators/anthropic/chat/chat_generator.py b/integrations/anthropic/src/haystack_integrations/components/generators/anthropic/chat/chat_generator.py
index 06b3dc353..9954f08c5 100644
--- a/integrations/anthropic/src/haystack_integrations/components/generators/anthropic/chat/chat_generator.py
+++ b/integrations/anthropic/src/haystack_integrations/components/generators/anthropic/chat/chat_generator.py
@@ -1,6 +1,5 @@
 import dataclasses
 import json
-import warnings
 from typing import Any, Callable, ClassVar, Dict, List, Optional, Union
 
 from haystack import component, default_from_dict, default_to_dict, logging
@@ -115,12 +114,6 @@ def __init__(
         self.client = Anthropic(api_key=self.api_key.resolve_value())
         self.ignore_tools_thinking_messages = ignore_tools_thinking_messages
 
-        warnings.warn(
-            "The `meta` output of the AnthropicChatGenerator will change in the next release to be inline with "
-            "OpenAI `meta`output keys.",
-            stacklevel=2,
-        )
-
     def _get_telemetry_data(self) -> Dict[str, Any]:
         """
         Data that is sent to Posthog for usage analytics.
@@ -220,6 +213,7 @@ def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str,
                     # capture stop reason and stop sequence
                     delta = stream_event
             completions = [self._connect_chunks(chunks, start_event, delta)]
+
         # if streaming is disabled, the response is an Anthropic Message
         elif isinstance(response, Message):
             has_tools_msgs = any(isinstance(content_block, ToolUseBlock) for content_block in response.content)
@@ -227,6 +221,12 @@ def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str,
                 response.content = [block for block in response.content if isinstance(block, ToolUseBlock)]
             completions = [self._build_message(content_block, response) for content_block in response.content]
 
+        # rename the usage meta keys to be in line with OpenAI meta output keys
+        for response in completions:
+            if response.meta is not None and "usage" in response.meta:
+                response.meta["usage"]["prompt_tokens"] = response.meta["usage"].pop("input_tokens")
+                response.meta["usage"]["completion_tokens"] = response.meta["usage"].pop("output_tokens")
+
         return {"replies": completions}
 
     def _build_message(self, content_block: Union[TextBlock, ToolUseBlock], message: Message) -> ChatMessage:
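Both generators apply the same transformation to each reply's usage metadata. As a standalone sketch of what the added loop does (the token counts here are made-up values):

```python
# Rename Anthropic/Bedrock-style usage keys to OpenAI-style ones, as the patch does per reply.
usage = {"input_tokens": 25, "output_tokens": 120}  # illustrative counts
usage["prompt_tokens"] = usage.pop("input_tokens")
usage["completion_tokens"] = usage.pop("output_tokens")
assert usage == {"prompt_tokens": 25, "completion_tokens": 120}
```

Note that `dict.pop` both renames and removes the old key, so callers still reading `input_tokens`/`output_tokens` will hit a `KeyError` after this change, which is the breaking change the removed `warnings.warn` calls announced.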