
Commit

Merge branch 'main' into add-partial-mixin
ivanleomk authored Nov 8, 2024
2 parents b52ec6a + 4327e14 commit 83970c6
Showing 5 changed files with 52 additions and 33 deletions.
19 changes: 10 additions & 9 deletions .github/workflows/ai-label.yml

@@ -1,20 +1,21 @@
 name: AI Labeler
 
 on:
-  issues:
+  issues:
     types: [opened, reopened]
-  pull_request:
+  pull_request:
     types: [opened, reopened]
 
 jobs:
-  ai-labeler:
+  ai-labeler:
     runs-on: ubuntu-latest
     permissions:
-      contents: read
-      issues: write
-      pull-requests: write
+      contents: read
+      issues: write
+      pull-requests: write
     steps:
-      - uses: actions/checkout@v4
-      - uses: jlowin/ai-labeler@v0.2.0
+      - uses: actions/checkout@v4
+      - uses: jlowin/ai-labeler@v0.4.0
         with:
-          openai-api-key: ${{ secrets.OPENAI_API_KEY }}
+          include-repo-labels: true
+          openai-api-key: ${{ secrets.OPENAI_API_KEY }}
40 changes: 22 additions & 18 deletions instructor/client.py

@@ -13,6 +13,10 @@
     Literal,
     Any,
 )
+from tenacity import (
+    AsyncRetrying,
+    Retrying,
+)
 from collections.abc import Generator, Iterable, Awaitable, AsyncGenerator
 from typing_extensions import Self
 from pydantic import BaseModel
@@ -114,7 +118,7 @@ def create(
     self: AsyncInstructor,
     response_model: type[T],
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,
     context: dict[str, Any] | None = None,  # {{ edit_1 }}
     strict: bool = True,
@@ -126,7 +130,7 @@ def create(
     self: Self,
     response_model: type[T],
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | Retrying = 3,
     validation_context: dict[str, Any] | None = None,
     context: dict[str, Any] | None = None,  # {{ edit_1 }}
     strict: bool = True,
@@ -138,7 +142,7 @@ def create(
     self: AsyncInstructor,
     response_model: None,
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,
     context: dict[str, Any] | None = None,  # {{ edit_1 }}
     strict: bool = True,
@@ -150,7 +154,7 @@ def create(
     self: Self,
     response_model: None,
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | Retrying = 3,
     validation_context: dict[str, Any] | None = None,
     context: dict[str, Any] | None = None,  # {{ edit_1 }}
     strict: bool = True,
@@ -161,7 +165,7 @@ def create(
     self,
     response_model: type[T] | None,
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | Retrying | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -185,7 +189,7 @@ def create_partial(
     self: AsyncInstructor,
     response_model: type[T],
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,
     context: dict[str, Any] | None = None,  # {{ edit_1 }}
     strict: bool = True,
@@ -197,7 +201,7 @@ def create_partial(
     self: Self,
     response_model: type[T],
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | Retrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -208,7 +212,7 @@ def create_partial(
     self,
     response_model: type[T],
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | Retrying | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -235,7 +239,7 @@ def create_iterable(
     self: AsyncInstructor,
     messages: list[ChatCompletionMessageParam],
     response_model: type[T],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -247,7 +251,7 @@ def create_iterable(
     self: Self,
     messages: list[ChatCompletionMessageParam],
     response_model: type[T],
-    max_retries: int = 3,
+    max_retries: int | Retrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -258,7 +262,7 @@ def create_iterable(
     self,
     messages: list[ChatCompletionMessageParam],
     response_model: type[T],
-    max_retries: int = 3,
+    max_retries: int | Retrying | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -284,7 +288,7 @@ def create_with_completion(
     self: AsyncInstructor,
     messages: list[ChatCompletionMessageParam],
     response_model: type[T],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -296,7 +300,7 @@ def create_with_completion(
     self: Self,
     messages: list[ChatCompletionMessageParam],
     response_model: type[T],
-    max_retries: int = 3,
+    max_retries: int | Retrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -307,7 +311,7 @@ def create_with_completion(
     self,
     messages: list[ChatCompletionMessageParam],
     response_model: type[T],
-    max_retries: int = 3,
+    max_retries: int | Retrying | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -373,7 +377,7 @@ async def create(
     self,
     response_model: type[T] | None,
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -395,7 +399,7 @@ async def create_partial(
     self,
     response_model: type[T],
     messages: list[ChatCompletionMessageParam],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -419,7 +423,7 @@ async def create_iterable(
     self,
     messages: list[ChatCompletionMessageParam],
     response_model: type[T],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
@@ -443,7 +447,7 @@ async def create_with_completion(
     self,
     messages: list[ChatCompletionMessageParam],
     response_model: type[T],
-    max_retries: int = 3,
+    max_retries: int | AsyncRetrying = 3,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
     strict: bool = True,
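
The widened max_retries annotations let callers pass a fully configured tenacity controller in place of a bare attempt count: Retrying on the sync client, AsyncRetrying on the async one. A minimal sketch of what this enables (the model name and retry settings here are illustrative, not taken from the diff):

import instructor
from openai import OpenAI
from pydantic import BaseModel
from tenacity import Retrying, stop_after_attempt, wait_fixed

class UserInfo(BaseModel):
    name: str
    age: int

client = instructor.from_openai(OpenAI())

# Instead of max_retries=3, hand over a tenacity Retrying object for
# full control over stop, wait, and reraise behaviour.
user = client.chat.completions.create(
    model="gpt-4o-mini",
    response_model=UserInfo,
    messages=[{"role": "user", "content": "John is 25 years old."}],
    max_retries=Retrying(stop=stop_after_attempt(3), wait=wait_fixed(1)),
)

An AsyncInstructor call looks the same, except the controller would be an AsyncRetrying instance.
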
13 changes: 9 additions & 4 deletions instructor/patch.py

@@ -22,6 +22,11 @@
 from instructor.mode import Mode
 import logging
 
+from tenacity import (
+    AsyncRetrying,
+    Retrying,
+)
+
 logger = logging.getLogger("instructor")
 
 T_Model = TypeVar("T_Model", bound=BaseModel)
@@ -35,7 +40,7 @@ def __call__(
     response_model: type[T_Model] | None = None,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
-    max_retries: int = 1,
+    max_retries: int | Retrying = 1,
     *args: Any,
     **kwargs: Any,
 ) -> T_Model: ...
@@ -47,7 +52,7 @@ async def __call__(
     response_model: type[T_Model] | None = None,
     validation_context: dict[str, Any] | None = None,  # Deprecate in 2.0
     context: dict[str, Any] | None = None,
-    max_retries: int = 1,
+    max_retries: int | AsyncRetrying = 1,
     *args: Any,
     **kwargs: Any,
 ) -> T_Model: ...
@@ -140,7 +145,7 @@ async def new_create_async(
     response_model: type[T_Model] | None = None,
     validation_context: dict[str, Any] | None = None,
     context: dict[str, Any] | None = None,
-    max_retries: int = 1,
+    max_retries: int | AsyncRetrying = 1,
     strict: bool = True,
     hooks: Hooks | None = None,
     *args: T_ParamSpec.args,
@@ -171,7 +176,7 @@ def new_create_sync(
     response_model: type[T_Model] | None = None,
     validation_context: dict[str, Any] | None = None,
     context: dict[str, Any] | None = None,
-    max_retries: int = 1,
+    max_retries: int | Retrying = 1,
     strict: bool = True,
     hooks: Hooks | None = None,
     *args: T_ParamSpec.args,
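
With both the sync and async call paths accepting either form, somewhere a plain int has to be normalised into a retryer. A hypothetical helper sketching that normalisation (the name normalize_retries is mine, not the library's; instructor's actual logic lives in its retry module and may differ in detail):

from tenacity import AsyncRetrying, Retrying, stop_after_attempt

def normalize_retries(
    max_retries: int | Retrying | AsyncRetrying,
    is_async: bool,
) -> Retrying | AsyncRetrying:
    # A caller-supplied controller is used as-is.
    if isinstance(max_retries, (Retrying, AsyncRetrying)):
        return max_retries
    retry_cls = AsyncRetrying if is_async else Retrying
    # reraise=True surfaces the last underlying error instead of a RetryError.
    return retry_cls(stop=stop_after_attempt(max_retries), reraise=True)
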
7 changes: 5 additions & 2 deletions instructor/retry.py

@@ -14,7 +14,7 @@
 from instructor.utils import update_total_usage
 from instructor.validators import AsyncValidationError
 from openai.types.chat import ChatCompletion
-from openai.types.completion_usage import CompletionUsage
+from openai.types.completion_usage import CompletionUsage, CompletionTokensDetails, PromptTokensDetails
 from pydantic import BaseModel, ValidationError
 from tenacity import (
     AsyncRetrying,
@@ -71,7 +71,10 @@ def initialize_usage(mode: Mode) -> CompletionUsage | Any:
     Returns:
         CompletionUsage | Any: Initialized usage object.
     """
-    total_usage = CompletionUsage(completion_tokens=0, prompt_tokens=0, total_tokens=0)
+    total_usage = CompletionUsage(completion_tokens=0, prompt_tokens=0, total_tokens=0,
+        completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0),
+        prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
+    )
     if mode in {Mode.ANTHROPIC_TOOLS, Mode.ANTHROPIC_JSON}:
         from anthropic.types import Usage as AnthropicUsage
 
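
Zero-initialising the nested detail objects means the running total starts from a fully populated structure instead of None fields. A quick, self-contained illustration of the resulting shape (the values are just the zeroed defaults):

from openai.types.completion_usage import (
    CompletionTokensDetails,
    CompletionUsage,
    PromptTokensDetails,
)

total_usage = CompletionUsage(
    completion_tokens=0,
    prompt_tokens=0,
    total_tokens=0,
    completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0),
    prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
)

# Both detail objects exist from the start, so accumulation code can add
# into them without creating them on the fly.
print(total_usage.model_dump())
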
6 changes: 6 additions & 0 deletions instructor/utils.py

@@ -142,6 +142,12 @@ def update_total_usage(
     total_usage.completion_tokens += response_usage.completion_tokens or 0
     total_usage.prompt_tokens += response_usage.prompt_tokens or 0
     total_usage.total_tokens += response_usage.total_tokens or 0
+    if (rtd := response_usage.completion_tokens_details) and (ttd := total_usage.completion_tokens_details):
+        ttd.audio_tokens = (ttd.audio_tokens or 0) + (rtd.audio_tokens or 0)
+        ttd.reasoning_tokens = (ttd.reasoning_tokens or 0) + (rtd.reasoning_tokens or 0)
+    if (rpd := response_usage.prompt_tokens_details) and (tpd := total_usage.prompt_tokens_details):
+        tpd.audio_tokens = (tpd.audio_tokens or 0) + (rpd.audio_tokens or 0)
+        tpd.cached_tokens = (tpd.cached_tokens or 0) + (rpd.cached_tokens or 0)
     response.usage = total_usage  # Replace each response usage with the total usage
     return response
 
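
The walrus-operator guards keep the accumulation tolerant of providers that omit token details, and the "or 0" fallbacks cover detail objects whose individual counts are None. A self-contained sketch of the same merging logic applied twice, as it would be across retries (the numbers are made up):

from openai.types.completion_usage import (
    CompletionTokensDetails,
    CompletionUsage,
    PromptTokensDetails,
)

def merge_usage(total: CompletionUsage, step: CompletionUsage) -> None:
    # Mirrors the diff: guard against missing details, default counts to 0.
    total.completion_tokens += step.completion_tokens or 0
    total.prompt_tokens += step.prompt_tokens or 0
    total.total_tokens += step.total_tokens or 0
    if (rtd := step.completion_tokens_details) and (ttd := total.completion_tokens_details):
        ttd.audio_tokens = (ttd.audio_tokens or 0) + (rtd.audio_tokens or 0)
        ttd.reasoning_tokens = (ttd.reasoning_tokens or 0) + (rtd.reasoning_tokens or 0)
    if (rpd := step.prompt_tokens_details) and (tpd := total.prompt_tokens_details):
        tpd.audio_tokens = (tpd.audio_tokens or 0) + (rpd.audio_tokens or 0)
        tpd.cached_tokens = (tpd.cached_tokens or 0) + (rpd.cached_tokens or 0)

total = CompletionUsage(
    completion_tokens=0, prompt_tokens=0, total_tokens=0,
    completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0),
    prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
)
step = CompletionUsage(
    completion_tokens=42, prompt_tokens=100, total_tokens=142,
    completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=30),
    prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=64),
)
merge_usage(total, step)
merge_usage(total, step)
assert total.total_tokens == 284
assert total.completion_tokens_details.reasoning_tokens == 60
assert total.prompt_tokens_details.cached_tokens == 128
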
