# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional

from typing_extensions import Literal

from .model import Model
from .usage import Usage
from .._models import BaseModel
from .content_block import ContentBlock, ContentBlock as ContentBlock

__all__ = ["Message"]

class Message(BaseModel):
    id: str
    """Unique object identifier.

    The format and length of IDs may change over time.
    """

    content: List[ContentBlock]
    """Content generated by the model.

    This is an array of content blocks, each of which has a `type` that determines
    its shape.

    Example:

    ```json
    [{ "type": "text", "text": "Hi, I'm Claude." }]
    ```

    If the request input `messages` ended with an `assistant` turn, then the
    response `content` will continue directly from that last turn. You can use this
    to constrain the model's output.

    For example, if the input `messages` were:

    ```json
    [
      {
        "role": "user",
        "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
      },
      { "role": "assistant", "content": "The best answer is (" }
    ]
    ```

    Then the response `content` might be:

    ```json
    [{ "type": "text", "text": "B)" }]
    ```
    """

    model: Model
    """The model that will complete your prompt.

    See [models](https://docs.anthropic.com/en/docs/models-overview) for
    additional details and options.
    """

    role: Literal["assistant"]
    """Conversational role of the generated message.

    This will always be `"assistant"`.
    """
    stop_reason: Optional[Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"]] = None
    """The reason that we stopped.

    This may be one of the following values:

    - `"end_turn"`: the model reached a natural stopping point
    - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
    - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
    - `"tool_use"`: the model invoked one or more tools

    In non-streaming mode this value is always non-null. In streaming mode, it is
    null in the `message_start` event and non-null otherwise.
    """
    stop_sequence: Optional[str] = None
    """Which custom stop sequence was generated, if any.

    This value will be a non-null string if one of your custom stop sequences was
    generated.
    """

    type: Literal["message"]
    """Object type.

    For Messages, this is always `"message"`.
    """

    usage: Usage
    """Billing and rate-limit usage.

    Anthropic's API bills and rate-limits by token counts, as tokens represent the
    underlying cost to our systems.

    Under the hood, the API transforms requests into a format suitable for the
    model. The model's output then goes through a parsing stage before becoming an
    API response. As a result, the token counts in `usage` will not match one-to-one
    with the exact visible content of an API request or response.

    For example, `output_tokens` will be non-zero, even for an empty string response
    from Claude.
    """