From f76f05320df3059d57ed57153f30be3a8d91fddf Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 5 Sep 2023 13:11:56 -0400
Subject: [PATCH] feat(types): de-duplicate nested streaming params types (#141)

---
 src/anthropic/resources/completions.py        |  12 +-
 .../types/completion_create_params.py         | 104 +++--------------
 2 files changed, 24 insertions(+), 92 deletions(-)

diff --git a/src/anthropic/resources/completions.py b/src/anthropic/resources/completions.py
index a06dda29..8dbe79b9 100644
--- a/src/anthropic/resources/completions.py
+++ b/src/anthropic/resources/completions.py
@@ -23,7 +23,7 @@ def create(
         max_tokens_to_sample: int,
         model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
-        metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
+        metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
         stop_sequences: List[str] | NotGiven = NOT_GIVEN,
         stream: Literal[False] | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
@@ -116,7 +116,7 @@ def create(
         model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
         stream: Literal[True],
-        metadata: completion_create_params.CompletionRequestStreamingMetadata | NotGiven = NOT_GIVEN,
+        metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
         stop_sequences: List[str] | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
         top_k: int | NotGiven = NOT_GIVEN,
@@ -207,7 +207,7 @@ def create(
         max_tokens_to_sample: int,
         model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
-        metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
+        metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
         stop_sequences: List[str] | NotGiven = NOT_GIVEN,
         stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
@@ -253,7 +253,7 @@ async def create(
         max_tokens_to_sample: int,
         model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
-        metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
+        metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
         stop_sequences: List[str] | NotGiven = NOT_GIVEN,
         stream: Literal[False] | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
@@ -346,7 +346,7 @@ async def create(
         model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
         stream: Literal[True],
-        metadata: completion_create_params.CompletionRequestStreamingMetadata | NotGiven = NOT_GIVEN,
+        metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
         stop_sequences: List[str] | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
         top_k: int | NotGiven = NOT_GIVEN,
@@ -437,7 +437,7 @@ async def create(
         max_tokens_to_sample: int,
         model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
-        metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
+        metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
         stop_sequences: List[str] | NotGiven = NOT_GIVEN,
         stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
diff --git a/src/anthropic/types/completion_create_params.py b/src/anthropic/types/completion_create_params.py
index 17b6f7ec..2022daa2 100644
--- a/src/anthropic/types/completion_create_params.py
+++ b/src/anthropic/types/completion_create_params.py
@@ -6,15 +6,16 @@
 from typing_extensions import Literal, Required, TypedDict
 
 __all__ = [
-    "CompletionCreateParams",
-    "CompletionRequestNonStreaming",
+    "CompletionRequestStreamingMetadata",
     "CompletionRequestNonStreamingMetadata",
+    "CompletionCreateParamsBase",
+    "Metadata",
+    "CompletionRequestNonStreaming",
     "CompletionRequestStreaming",
-    "CompletionRequestStreamingMetadata",
 ]
 
 
-class CompletionRequestNonStreaming(TypedDict, total=False):
+class CompletionCreateParamsBase(TypedDict, total=False):
     max_tokens_to_sample: Required[int]
     """The maximum number of tokens to generate before stopping.
 
@@ -48,7 +49,7 @@ class CompletionRequestNonStreaming(TypedDict, total=False):
     for more context.
     """
 
-    metadata: CompletionRequestNonStreamingMetadata
+    metadata: Metadata
     """An object describing metadata about the request."""
 
     stop_sequences: List[str]
@@ -59,14 +60,6 @@ class CompletionRequestNonStreaming(TypedDict, total=False):
     include additional strings that will cause the model to stop generating.
     """
 
-    stream: Literal[False]
-    """Whether to incrementally stream the response using server-sent events.
-
-    See
-    [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
-    for details.
-    """
-
     temperature: float
     """Amount of randomness injected into the response.
 
@@ -91,7 +84,7 @@ class CompletionRequestNonStreaming(TypedDict, total=False):
     """
 
 
-class CompletionRequestNonStreamingMetadata(TypedDict, total=False):
+class Metadata(TypedDict, total=False):
     user_id: str
     """An external identifier for the user who is associated with the request.
 
@@ -101,40 +94,17 @@ class CompletionRequestNonStreamingMetadata(TypedDict, total=False):
     """
 
 
-class CompletionRequestStreaming(TypedDict, total=False):
-    max_tokens_to_sample: Required[int]
-    """The maximum number of tokens to generate before stopping.
-
-    Note that our models may stop _before_ reaching this maximum. This parameter
-    only specifies the absolute maximum number of tokens to generate.
-    """
-
-    model: Required[Union[str, Literal["claude-2", "claude-instant-1"]]]
-    """The model that will complete your prompt.
+class CompletionRequestNonStreaming(CompletionCreateParamsBase):
+    stream: Literal[False]
+    """Whether to incrementally stream the response using server-sent events.
 
-    As we improve Claude, we develop new versions of it that you can query. This
-    parameter controls which version of Claude answers your request. Right now we
-    are offering two model families: Claude, and Claude Instant. You can use them by
-    setting `model` to `"claude-2"` or `"claude-instant-1"`, respectively. See
-    [models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
-    additional details.
+    See
+    [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
+    for details.
     """
 
-    prompt: Required[str]
-    """The prompt that you want Claude to complete.
-
-    For proper response generation you will need to format your prompt as follows:
-
-    ```javascript
-    const userQuestion = r"Why is the sky blue?";
-    const prompt = `\n\nHuman: ${userQuestion}\n\nAssistant:`;
-    ```
-
-    See our
-    [comments on prompts](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
-    for more context.
-    """
 
+class CompletionRequestStreaming(CompletionCreateParamsBase):
     stream: Required[Literal[True]]
     """Whether to incrementally stream the response using server-sent events.
 
@@ -143,49 +113,11 @@ class CompletionRequestStreaming(TypedDict, total=False):
     for details.
     """
 
-    metadata: CompletionRequestStreamingMetadata
-    """An object describing metadata about the request."""
-
-    stop_sequences: List[str]
-    """Sequences that will cause the model to stop generating completion text.
-
-    Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
-    sequences in the future. By providing the stop_sequences parameter, you may
-    include additional strings that will cause the model to stop generating.
-    """
-
-    temperature: float
-    """Amount of randomness injected into the response.
-
-    Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
-    multiple choice, and closer to 1 for creative and generative tasks.
-    """
-
-    top_k: int
-    """Only sample from the top K options for each subsequent token.
-
-    Used to remove "long tail" low probability responses.
-    [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-    """
-
-    top_p: float
-    """Use nucleus sampling.
-
-    In nucleus sampling, we compute the cumulative distribution over all the options
-    for each subsequent token in decreasing probability order and cut it off once it
-    reaches a particular probability specified by `top_p`. You should either alter
-    `temperature` or `top_p`, but not both.
-    """
-
-
-class CompletionRequestStreamingMetadata(TypedDict, total=False):
-    user_id: str
-    """An external identifier for the user who is associated with the request.
-
-    This should be a uuid, hash value, or other opaque identifier. Anthropic may use
-    this id to help detect abuse. Do not include any identifying information such as
-    name, email address, or phone number.
-    """
+CompletionRequestStreamingMetadata = Metadata
+"""This is deprecated, `Metadata` should be used instead"""
 
+CompletionRequestNonStreamingMetadata = Metadata
+"""This is deprecated, `Metadata` should be used instead"""
 
 CompletionCreateParams = Union[CompletionRequestNonStreaming, CompletionRequestStreaming]
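
Note (illustration, not part of the patch): after this change a single `Metadata` TypedDict annotates the `metadata` parameter of every `create()` overload, streaming and non-streaming alike, and the old per-variant names survive as plain aliases. The sketch below shows what that means for callers; it assumes `ANTHROPIC_API_KEY` is set in the environment, and the `user_id` value is just a placeholder.

```python
from anthropic import Anthropic
from anthropic.types import completion_create_params

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

# One TypedDict now covers both call styles; the user_id is a placeholder.
metadata: completion_create_params.Metadata = {"user_id": "example-user-uuid"}

# Non-streaming call annotated with the shared Metadata type.
completion = client.completions.create(
    model="claude-2",
    max_tokens_to_sample=256,
    prompt="\n\nHuman: Why is the sky blue?\n\nAssistant:",
    metadata=metadata,
)

# The streaming overload accepts the exact same object.
stream = client.completions.create(
    model="claude-2",
    max_tokens_to_sample=256,
    prompt="\n\nHuman: Why is the sky blue?\n\nAssistant:",
    metadata=metadata,
    stream=True,
)

# The deprecated names still import, but are now aliases of Metadata.
assert completion_create_params.CompletionRequestStreamingMetadata is completion_create_params.Metadata
assert completion_create_params.CompletionRequestNonStreamingMetadata is completion_create_params.Metadata
```

Because both deprecated names resolve to the same class, existing code that annotated with `CompletionRequestNonStreamingMetadata` or `CompletionRequestStreamingMetadata` should keep type-checking unchanged.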