From 740f062a2d50928bf06594c593318269a694445d Mon Sep 17 00:00:00 2001
From: Vishal
Date: Tue, 21 May 2024 00:10:49 +0530
Subject: [PATCH] Fix: max_tokens typo

---
 .../components/generators/amazon_bedrock/chat/adapters.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/adapters.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/adapters.py
index f0a2ea368..9d33a682d 100644
--- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/adapters.py
+++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/chat/adapters.py
@@ -336,7 +336,7 @@ def __init__(self, generation_kwargs: Dict[str, Any]):
         self.prompt_handler = DefaultPromptHandler(
             tokenizer=tokenizer,
             model_max_length=model_max_length,
-            max_length=self.generation_kwargs.get("max_gen_len") or 512,
+            max_length=self.generation_kwargs.get("max_tokens") or 512,
         )
 
     def prepare_body(self, messages: List[ChatMessage], **inference_kwargs) -> Dict[str, Any]: