multiple: add stop attribute (#22573)
ccurme authored and hinthornw committed Jun 20, 2024
1 parent 912ffb4 commit 1e7ce45
Showing 5 changed files with 26 additions and 5 deletions.
4 changes: 4 additions & 0 deletions libs/partners/ai21/langchain_ai21/chat_models.py
@@ -35,6 +35,8 @@ class ChatAI21(BaseChatModel, AI21Base):
     You can view the options at https://github.com/AI21Labs/ai21-python?tab=readme-ov-file#model-types"""
     num_results: int = 1
     """The number of responses to generate for a given prompt."""
+    stop: Optional[List[str]] = None
+    """Default stop sequences."""
 
     max_tokens: int = 16
     """The maximum number of tokens to generate for each response."""
@@ -97,6 +99,8 @@ def _default_params(self) -> Mapping[str, Any]:
             "top_k_return": self.top_k_return,
             "n": self.n,
         }
+        if self.stop:
+            base_params["stop_sequences"] = self.stop
 
         if self.count_penalty is not None:
             base_params["count_penalty"] = self.count_penalty.to_dict()
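With this change, ChatAI21 accepts a default stop list at construction time, which _default_params forwards to the API as "stop_sequences" on every call. A minimal usage sketch; the model name and environment setup are illustrative assumptions, not part of this diff:

from langchain_ai21 import ChatAI21

# Assumes AI21_API_KEY is set in the environment; model name is illustrative.
model = ChatAI21(model="jamba-instruct", stop=["\nHuman:"])

# The default stop list is merged into the request by _default_params,
# so every invocation truncates at "\nHuman:" unless overridden per call.
result = model.invoke("Write a short greeting.")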
6 changes: 5 additions & 1 deletion libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -492,6 +492,9 @@ class Config:
     max_retries: int = 2
     """Number of retries allowed for requests sent to the Anthropic Completion API."""
 
+    stop: Optional[List[str]] = Field(None, alias="stop_sequences")
+    """Default stop sequences."""
+
     anthropic_api_url: Optional[str] = Field(None, alias="base_url")
     """Base URL for API requests. Only specify if using a proxy or service emulator.
@@ -611,14 +614,15 @@ def _format_params(
     ) -> Dict:
         # get system prompt if any
         system, formatted_messages = _format_messages(messages)
+        stop_sequences = stop or self.stop
         rtn = {
             "model": self.model,
             "max_tokens": self.max_tokens,
             "messages": formatted_messages,
             "temperature": self.temperature,
             "top_k": self.top_k,
             "top_p": self.top_p,
-            "stop_sequences": stop,
+            "stop_sequences": stop_sequences,
             "system": system,
             **self.model_kwargs,
             **kwargs,
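In _format_params, the expression `stop or self.stop` gives call-time stop sequences precedence over the constructor default. The merge rule in isolation, as a standalone sketch rather than code from this diff:

from typing import List, Optional

def resolve_stop(
    call_stop: Optional[List[str]], default_stop: Optional[List[str]]
) -> Optional[List[str]]:
    # Per-call value wins; otherwise fall back to the default set on the model.
    # Note: an empty list is falsy, so [] also falls through to the default.
    return call_stop or default_stop

assert resolve_stop(["you"], None) == ["you"]     # call-time only
assert resolve_stop(None, ["###"]) == ["###"]     # default only
assert resolve_stop(["you"], ["###"]) == ["you"]  # call-time overrides default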
5 changes: 3 additions & 2 deletions libs/partners/fireworks/langchain_fireworks/chat_models.py
@@ -300,6 +300,8 @@ def is_lc_serializable(cls) -> bool:
     """Number of chat completions to generate for each prompt."""
     max_tokens: Optional[int] = None
     """Maximum number of tokens to generate."""
+    stop: Optional[List[str]] = Field(None, alias="stop_sequences")
+    """Default stop sequences."""
 
     class Config:
         """Configuration for this pydantic object."""
@@ -354,6 +356,7 @@ def _default_params(self) -> Dict[str, Any]:
             "stream": self.streaming,
             "n": self.n,
             "temperature": self.temperature,
+            "stop": self.stop,
             **self.model_kwargs,
         }
         if self.max_tokens is not None:
@@ -443,8 +446,6 @@ def _create_message_dicts(
     ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
         params = self._default_params
         if stop is not None:
-            if "stop" in params:
-                raise ValueError("`stop` found in both the input and default params.")
             params["stop"] = stop
         message_dicts = [_convert_message_to_dict(m) for m in messages]
         return message_dicts, params
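Removing the ValueError is a deliberate behavior change: now that `stop` is always present in _default_params, a call-time `stop` silently overrides the default rather than raising. A hedged sketch of the resulting behavior; the model id and API key setup are illustrative:

from langchain_fireworks import ChatFireworks

# Assumes FIREWORKS_API_KEY is set in the environment; model id is illustrative.
model = ChatFireworks(
    model="accounts/fireworks/models/firefunction-v1",
    stop=["###"],  # default, now emitted by _default_params
)

# Previously this raised: ValueError("`stop` found in both the input and
# default params."). After this commit, the per-call list replaces the default.
result = model.invoke("hi", stop=["you"])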
5 changes: 3 additions & 2 deletions libs/partners/groq/langchain_groq/chat_models.py
@@ -123,6 +123,8 @@ class ChatGroq(BaseChatModel):
     """Number of chat completions to generate for each prompt."""
     max_tokens: Optional[int] = None
     """Maximum number of tokens to generate."""
+    stop: Optional[List[str]] = Field(None, alias="stop_sequences")
+    """Default stop sequences."""
     default_headers: Union[Mapping[str, str], None] = None
     default_query: Union[Mapping[str, object], None] = None
     # Configure a custom httpx client. See the
@@ -428,6 +430,7 @@ def _default_params(self) -> Dict[str, Any]:
             "stream": self.streaming,
             "n": self.n,
             "temperature": self.temperature,
+            "stop": self.stop,
             **self.model_kwargs,
         }
         if self.max_tokens is not None:
@@ -461,8 +464,6 @@ def _create_message_dicts(
     ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
         params = self._default_params
         if stop is not None:
-            if "stop" in params:
-                raise ValueError("`stop` found in both the input and default params.")
             params["stop"] = stop
         message_dicts = [_convert_message_to_dict(m) for m in messages]
         return message_dicts, params
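Because the attribute is declared as Field(None, alias="stop_sequences"), it can be populated under either name at construction, assuming the model's pydantic Config permits population by field name (the new standard test below constructs models with the field name `stop`, which suggests it does). A sketch; the model name is illustrative and GROQ_API_KEY is assumed to be set:

from langchain_groq import ChatGroq

m1 = ChatGroq(model="llama3-8b-8192", stop=["###"])            # field name
m2 = ChatGroq(model="llama3-8b-8192", stop_sequences=["###"])  # alias

# Both spellings populate the same attribute.
assert m1.stop == m2.stop == ["###"]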
@@ -144,6 +144,17 @@ def test_usage_metadata(
         assert isinstance(result.usage_metadata["output_tokens"], int)
         assert isinstance(result.usage_metadata["total_tokens"], int)
 
+    def test_stop_sequence(
+        self, chat_model_class: Type[BaseChatModel], chat_model_params: dict
+    ) -> None:
+        model = chat_model_class(**chat_model_params)
+        result = model.invoke("hi", stop=["you"])
+        assert isinstance(result, AIMessage)
+
+        model = chat_model_class(**chat_model_params, stop=["you"])
+        result = model.invoke("hi")
+        assert isinstance(result, AIMessage)
+
     def test_tool_message_histories_string_content(
         self,
         chat_model_class: Type[BaseChatModel],
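The new standard test exercises both code paths introduced above: stop sequences supplied at invocation time and stop sequences set as a constructor default, asserting in each case that the model still returns an AIMessage.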
