feat(types): de-duplicate nested streaming params types #141

Merged · 1 commit · Sep 5, 2023
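The gist of the change: the streaming and non-streaming request TypedDicts previously each carried a full copy of every field, including their own single-field metadata types (`CompletionRequestStreamingMetadata` / `CompletionRequestNonStreamingMetadata`). The merged code hoists the shared fields into a `CompletionCreateParamsBase` TypedDict and a single `Metadata` type, leaving each request variant to declare only `stream`. A condensed sketch of the resulting pattern (field docstrings trimmed; the `total=False` on the non-streaming subclass is my reading of how `stream` stays optional there, not a line from this diff):

```python
from typing import List, Union

from typing_extensions import Literal, Required, TypedDict


class Metadata(TypedDict, total=False):
    user_id: str  # opaque external identifier for the end user


class CompletionCreateParamsBase(TypedDict, total=False):
    # Fields shared by both request variants (trimmed for illustration).
    max_tokens_to_sample: Required[int]
    model: Required[Union[str, Literal["claude-2", "claude-instant-1"]]]
    prompt: Required[str]
    metadata: Metadata
    stop_sequences: List[str]


class CompletionRequestNonStreaming(CompletionCreateParamsBase, total=False):
    stream: Literal[False]  # optional; omitting it means non-streaming


class CompletionRequestStreaming(CompletionCreateParamsBase):
    stream: Required[Literal[True]]  # streaming must be requested explicitly


CompletionCreateParams = Union[CompletionRequestNonStreaming, CompletionRequestStreaming]
```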
12 changes: 6 additions & 6 deletions src/anthropic/resources/completions.py
@@ -23,7 +23,7 @@ def create(
max_tokens_to_sample: int,
model: Union[str, Literal["claude-2", "claude-instant-1"]],
prompt: str,
-metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
+metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
stream: Literal[False] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
@@ -116,7 +116,7 @@ def create(
model: Union[str, Literal["claude-2", "claude-instant-1"]],
prompt: str,
stream: Literal[True],
-metadata: completion_create_params.CompletionRequestStreamingMetadata | NotGiven = NOT_GIVEN,
+metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
@@ -207,7 +207,7 @@ def create(
max_tokens_to_sample: int,
model: Union[str, Literal["claude-2", "claude-instant-1"]],
prompt: str,
-metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
+metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
@@ -253,7 +253,7 @@ async def create(
max_tokens_to_sample: int,
model: Union[str, Literal["claude-2", "claude-instant-1"]],
prompt: str,
-metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
+metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
stream: Literal[False] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
@@ -346,7 +346,7 @@ async def create(
model: Union[str, Literal["claude-2", "claude-instant-1"]],
prompt: str,
stream: Literal[True],
-metadata: completion_create_params.CompletionRequestStreamingMetadata | NotGiven = NOT_GIVEN,
+metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
@@ -437,7 +437,7 @@ async def create(
max_tokens_to_sample: int,
model: Union[str, Literal["claude-2", "claude-instant-1"]],
prompt: str,
-metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
+metadata: completion_create_params.Metadata | NotGiven = NOT_GIVEN,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
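All six `create` overloads (sync and async; streaming, non-streaming, and the `Literal[False] | Literal[True]` fallback) now annotate `metadata` with the one shared `completion_create_params.Metadata`, so a single metadata value type-checks in every call style. A usage sketch, assuming an `ANTHROPIC_API_KEY` in the environment and a hypothetical opaque user id:

```python
from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
from anthropic.types import completion_create_params

client = Anthropic()

# One metadata value for both call styles (the user id is a placeholder).
metadata: completion_create_params.Metadata = {"user_id": "some-opaque-uuid"}

# Non-streaming call.
completion = client.completions.create(
    model="claude-2",
    prompt=f"{HUMAN_PROMPT} Why is the sky blue?{AI_PROMPT}",
    max_tokens_to_sample=300,
    metadata=metadata,
)
print(completion.completion)

# Streaming call: before this change the parameter was typed as the separate
# CompletionRequestStreamingMetadata; now it is the same Metadata type.
stream = client.completions.create(
    model="claude-2",
    prompt=f"{HUMAN_PROMPT} Why is the sky blue?{AI_PROMPT}",
    max_tokens_to_sample=300,
    metadata=metadata,
    stream=True,
)
for chunk in stream:
    print(chunk.completion, end="")
```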
104 changes: 18 additions & 86 deletions src/anthropic/types/completion_create_params.py
@@ -6,15 +6,16 @@
from typing_extensions import Literal, Required, TypedDict

__all__ = [
-"CompletionCreateParams",
-"CompletionRequestNonStreaming",
-"CompletionRequestStreamingMetadata",
-"CompletionRequestNonStreamingMetadata",
+"CompletionCreateParamsBase",
+"Metadata",
+"CompletionRequestNonStreaming",
+"CompletionRequestStreaming",
+"CompletionRequestStreamingMetadata",
]


-class CompletionRequestNonStreaming(TypedDict, total=False):
+class CompletionCreateParamsBase(TypedDict, total=False):
max_tokens_to_sample: Required[int]
"""The maximum number of tokens to generate before stopping.

@@ -48,7 +48,7 @@ class CompletionRequestNonStreaming(TypedDict, total=False):
for more context.
"""

-metadata: CompletionRequestNonStreamingMetadata
+metadata: Metadata
"""An object describing metadata about the request."""

stop_sequences: List[str]
@@ -59,14 +59,6 @@ class CompletionRequestNonStreaming(TypedDict, total=False):
include additional strings that will cause the model to stop generating.
"""

-stream: Literal[False]
-"""Whether to incrementally stream the response using server-sent events.
-
-See
-[this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
-for details.
-"""
-
temperature: float
"""Amount of randomness injected into the response.

@@ -91,7 +84,7 @@ class CompletionRequestNonStreaming(TypedDict, total=False):
"""


-class CompletionRequestNonStreamingMetadata(TypedDict, total=False):
+class Metadata(TypedDict, total=False):
user_id: str
"""An external identifier for the user who is associated with the request.

@@ -101,40 +94,17 @@ class CompletionRequestNonStreamingMetadata(TypedDict, total=False):
"""


-class CompletionRequestStreaming(TypedDict, total=False):
-max_tokens_to_sample: Required[int]
-"""The maximum number of tokens to generate before stopping.
-
-Note that our models may stop _before_ reaching this maximum. This parameter
-only specifies the absolute maximum number of tokens to generate.
-"""
-
-model: Required[Union[str, Literal["claude-2", "claude-instant-1"]]]
-"""The model that will complete your prompt.
+class CompletionRequestNonStreaming(CompletionCreateParamsBase):
+stream: Literal[False]
+"""Whether to incrementally stream the response using server-sent events.

-As we improve Claude, we develop new versions of it that you can query. This
-parameter controls which version of Claude answers your request. Right now we
-are offering two model families: Claude, and Claude Instant. You can use them by
-setting `model` to `"claude-2"` or `"claude-instant-1"`, respectively. See
-[models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
-additional details.
+See
+[this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
+for details.
"""

-prompt: Required[str]
-"""The prompt that you want Claude to complete.
-
-For proper response generation you will need to format your prompt as follows:
-
-```javascript
-const userQuestion = r"Why is the sky blue?";
-const prompt = `\n\nHuman: ${userQuestion}\n\nAssistant:`;
-```
-
-See our
-[comments on prompts](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
-for more context.
-"""

+class CompletionRequestStreaming(CompletionCreateParamsBase):
stream: Required[Literal[True]]
"""Whether to incrementally stream the response using server-sent events.

Expand All @@ -143,49 +113,11 @@ class CompletionRequestStreaming(TypedDict, total=False):
for details.
"""

-metadata: CompletionRequestStreamingMetadata
-"""An object describing metadata about the request."""

-stop_sequences: List[str]
-"""Sequences that will cause the model to stop generating completion text.
-
-Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
-sequences in the future. By providing the stop_sequences parameter, you may
-include additional strings that will cause the model to stop generating.
-"""

-temperature: float
-"""Amount of randomness injected into the response.
-
-Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
-multiple choice, and closer to 1 for creative and generative tasks.
-"""

-top_k: int
-"""Only sample from the top K options for each subsequent token.
-
-Used to remove "long tail" low probability responses.
-[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-"""

-top_p: float
-"""Use nucleus sampling.
-
-In nucleus sampling, we compute the cumulative distribution over all the options
-for each subsequent token in decreasing probability order and cut it off once it
-reaches a particular probability specified by `top_p`. You should either alter
-`temperature` or `top_p`, but not both.
-"""


-class CompletionRequestStreamingMetadata(TypedDict, total=False):
-user_id: str
-"""An external identifier for the user who is associated with the request.
-
-This should be a uuid, hash value, or other opaque identifier. Anthropic may use
-this id to help detect abuse. Do not include any identifying information such as
-name, email address, or phone number.
-"""
+CompletionRequestStreamingMetadata = Metadata
+"""This is deprecated, `Metadata` should be used instead"""
+
+CompletionRequestNonStreamingMetadata = Metadata
+"""This is deprecated, `Metadata` should be used instead"""

CompletionCreateParams = Union[CompletionRequestNonStreaming, CompletionRequestStreaming]
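Because the old metadata names survive as plain aliases of `Metadata`, annotations written against the pre-existing types keep type-checking without casts, and the names are interchangeable at runtime. A small illustrative check:

```python
from anthropic.types import completion_create_params

# Old-style annotation; the deprecated alias resolves to Metadata.
legacy: completion_create_params.CompletionRequestStreamingMetadata = {
    "user_id": "some-opaque-uuid",
}

# Interchangeable with the new name, at type-check time and at runtime.
current: completion_create_params.Metadata = legacy
assert (
    completion_create_params.CompletionRequestStreamingMetadata
    is completion_create_params.Metadata
)
```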