-
Notifications
You must be signed in to change notification settings - Fork 233
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #1025 from Agenta-AI/gh/sdk-output-format-change
Enhancement - Improve SDK output format
- Loading branch information
Showing
7 changed files
with
241 additions
and
7 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,166 @@ | ||
# Cost table adapted from langchain-community's openai_info callback helpers.
# Prices are USD per 1K tokens. Prompt (input) rates use the bare model name;
# completion (output) rates use the same name with a "-completion" suffix,
# as produced by standardize_model_name(..., is_completion=True).
MODEL_COST_PER_1K_TOKENS = {
    # --- GPT-4 prompt rates ---
    "gpt-4": 0.03,
    "gpt-4-0314": 0.03,
    "gpt-4-0613": 0.03,
    "gpt-4-32k": 0.06,
    "gpt-4-32k-0314": 0.06,
    "gpt-4-32k-0613": 0.06,
    "gpt-4-vision-preview": 0.01,
    "gpt-4-1106-preview": 0.01,
    # --- GPT-4 completion rates ---
    "gpt-4-completion": 0.06,
    "gpt-4-0314-completion": 0.06,
    "gpt-4-0613-completion": 0.06,
    "gpt-4-32k-completion": 0.12,
    "gpt-4-32k-0314-completion": 0.12,
    "gpt-4-32k-0613-completion": 0.12,
    "gpt-4-vision-preview-completion": 0.03,
    "gpt-4-1106-preview-completion": 0.03,
    # --- GPT-3.5 prompt rates ---
    "gpt-3.5-turbo": 0.0015,
    "gpt-3.5-turbo-0301": 0.0015,
    "gpt-3.5-turbo-0613": 0.0015,
    "gpt-3.5-turbo-1106": 0.001,
    "gpt-3.5-turbo-instruct": 0.0015,
    "gpt-3.5-turbo-16k": 0.003,
    "gpt-3.5-turbo-16k-0613": 0.003,
    # --- GPT-3.5 completion rates ---
    "gpt-3.5-turbo-completion": 0.002,
    "gpt-3.5-turbo-0301-completion": 0.002,
    "gpt-3.5-turbo-0613-completion": 0.002,
    "gpt-3.5-turbo-1106-completion": 0.002,
    "gpt-3.5-turbo-instruct-completion": 0.002,
    "gpt-3.5-turbo-16k-completion": 0.004,
    "gpt-3.5-turbo-16k-0613-completion": 0.004,
    # --- Azure GPT-35 prompt rates (Azure spells "3.5" as "35") ---
    "gpt-35-turbo": 0.0015,  # Azure OpenAI version of ChatGPT
    "gpt-35-turbo-0301": 0.0015,  # Azure OpenAI version of ChatGPT
    "gpt-35-turbo-0613": 0.0015,
    "gpt-35-turbo-instruct": 0.0015,
    "gpt-35-turbo-16k": 0.003,
    "gpt-35-turbo-16k-0613": 0.003,
    # --- Azure GPT-35 completion rates ---
    "gpt-35-turbo-completion": 0.002,  # Azure OpenAI version of ChatGPT
    "gpt-35-turbo-0301-completion": 0.002,  # Azure OpenAI version of ChatGPT
    "gpt-35-turbo-0613-completion": 0.002,
    "gpt-35-turbo-instruct-completion": 0.002,
    "gpt-35-turbo-16k-completion": 0.004,
    "gpt-35-turbo-16k-0613-completion": 0.004,
    # --- Legacy completion-era models (same rate both directions) ---
    "text-ada-001": 0.0004,
    "ada": 0.0004,
    "text-babbage-001": 0.0005,
    "babbage": 0.0005,
    "text-curie-001": 0.002,
    "curie": 0.002,
    "text-davinci-003": 0.02,
    "text-davinci-002": 0.02,
    "code-davinci-002": 0.02,
    # --- Fine-tuned models, prompt rates ---
    "babbage-002-finetuned": 0.0016,
    "davinci-002-finetuned": 0.012,
    "gpt-3.5-turbo-0613-finetuned": 0.012,
    # --- Fine-tuned models, completion rates ---
    "babbage-002-finetuned-completion": 0.0016,
    "davinci-002-finetuned-completion": 0.012,
    "gpt-3.5-turbo-0613-finetuned-completion": 0.016,
    # --- Azure fine-tuned models, prompt rates ---
    "babbage-002-azure-finetuned": 0.0004,
    "davinci-002-azure-finetuned": 0.002,
    "gpt-35-turbo-0613-azure-finetuned": 0.0015,
    # --- Azure fine-tuned models, completion rates ---
    "babbage-002-azure-finetuned-completion": 0.0004,
    "davinci-002-azure-finetuned-completion": 0.002,
    "gpt-35-turbo-0613-azure-finetuned-completion": 0.002,
    # --- Legacy fine-tuned models (":ft-" naming scheme) ---
    "ada-finetuned-legacy": 0.0016,
    "babbage-finetuned-legacy": 0.0024,
    "curie-finetuned-legacy": 0.012,
    "davinci-finetuned-legacy": 0.12,
}
|
||
|
||
def standardize_model_name(
    model_name: str,
    is_completion: bool = False,
) -> str:
    """Normalize a model name into a key of ``MODEL_COST_PER_1K_TOKENS``.

    Lowercases the name, rewrites the three fine-tune naming schemes
    (Azure ``.ft-``, legacy ``:ft-``, current ``ft:``) into their
    ``-finetuned`` / ``-finetuned-legacy`` / ``-azure-finetuned`` forms,
    and appends ``-completion`` for chat/fine-tuned models when the name
    is being used to price output tokens.

    Args:
        model_name: Model name to standardize.
        is_completion: Whether the model is used for completion or not.
            Defaults to False.

    Returns:
        Standardized model name.
    """
    name = model_name.lower()

    # Azure fine-tune deployments look like "<base>.ft-<id>".
    if ".ft-" in name:
        name = name.split(".ft-")[0] + "-azure-finetuned"
    # Legacy OpenAI fine-tunes look like "<base>:ft-<org>-<date>".
    if ":ft-" in name:
        name = name.split(":")[0] + "-finetuned-legacy"
    # Current OpenAI fine-tunes look like "ft:<base>:<org>::<id>".
    if "ft:" in name:
        name = name.split(":")[1] + "-finetuned"

    # Only chat-family and (non-legacy) fine-tuned models have a separate
    # completion price entry; other models share one rate for both sides.
    is_chat_family = name.startswith(("gpt-4", "gpt-3.5", "gpt-35"))
    is_priced_finetune = "finetuned" in name and "legacy" not in name
    if is_completion and (is_chat_family or is_priced_finetune):
        name += "-completion"
    return name
|
||
|
||
def get_openai_token_cost_for_model(
    model_name: str, num_tokens: int, is_completion: bool = False
) -> float:
    """Get the cost in USD for a given model and number of tokens.

    The model name is first standardized (lowercased, fine-tune schemes
    normalized, ``-completion`` suffix applied for output pricing) and then
    looked up in ``MODEL_COST_PER_1K_TOKENS``, which holds USD-per-1K-token
    rates.

    Args:
        model_name: Name of the model.
        num_tokens: Number of tokens.
        is_completion: Whether the model is used for completion or not.
            Defaults to False.

    Returns:
        Cost in USD.

    Raises:
        ValueError: If the standardized model name has no cost entry.
    """
    model_name = standardize_model_name(model_name, is_completion=is_completion)
    if model_name not in MODEL_COST_PER_1K_TOKENS:
        # Fix: the original implicit string concatenation produced
        # "...model name.Known models are..." with no separator.
        raise ValueError(
            f"Unknown model: {model_name}. Please provide a valid OpenAI model name. "
            "Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys())
        )
    return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000)
|
||
|
||
def calculate_token_usage(model_name: str, token_usage: dict) -> float:
    """Calculate the total USD cost of an LLM call from its token counts.

    Prompt and completion tokens are priced separately via
    ``get_openai_token_cost_for_model`` and summed.

    Args:
        model_name: The name of the model used to determine the cost per token.
        token_usage: Usage dict with optional ``prompt_tokens`` and
            ``completion_tokens`` keys (missing keys count as zero).

    Returns:
        Total cost of using the model, or 0 if the model has no cost entry.
    """
    normalized_name = standardize_model_name(model_name)
    # Unknown models are priced at zero rather than raising.
    if normalized_name not in MODEL_COST_PER_1K_TOKENS:
        return 0

    prompt_tokens = token_usage.get("prompt_tokens", 0)
    completion_tokens = token_usage.get("completion_tokens", 0)
    prompt_cost = get_openai_token_cost_for_model(normalized_name, prompt_tokens)
    completion_cost = get_openai_token_cost_for_model(
        normalized_name, completion_tokens, is_completion=True
    )
    return prompt_cost + completion_cost
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,41 @@ | ||
import agenta as ag
from agenta import FloatParam, MessagesInput, MultipleChoiceParam
from openai import AsyncOpenAI

# Async client; reads OPENAI_API_KEY from the environment.
client = AsyncOpenAI()

SYSTEM_PROMPT = "You have expertise in offering technical ideas to startups."
# Chat models the playground can select between.
CHAT_LLM_GPT = [
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k-0613",
    "gpt-4",
]

ag.init()
# Playground-tunable defaults; max_tokens of -1 means "no limit".
ag.config.default(
    temperature=FloatParam(0.2),
    model=MultipleChoiceParam("gpt-3.5-turbo", CHAT_LLM_GPT),
    max_tokens=ag.IntParam(-1, -1, 4000),
    prompt_system=ag.TextParam(SYSTEM_PROMPT),
)


@ag.entrypoint
async def chat(inputs: MessagesInput = MessagesInput()):
    """Run one chat completion and report the reply, token usage, and cost."""
    system_message = {"role": "system", "content": ag.config.prompt_system}
    messages = [system_message] + inputs
    # The OpenAI API expects None (not -1) for "unlimited" output tokens.
    token_limit = None if ag.config.max_tokens == -1 else ag.config.max_tokens
    chat_completion = await client.chat.completions.create(
        model=ag.config.model,
        messages=messages,
        temperature=ag.config.temperature,
        max_tokens=token_limit,
    )
    token_usage = chat_completion.usage.dict()
    reply = chat_completion.choices[0].message.content
    return {
        "message": reply,
        "usage": token_usage,
        "cost": ag.calculate_token_usage(ag.config.model, token_usage),
    }
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
agenta | ||
openai |