diff --git a/.all-contributorsrc b/.all-contributorsrc
index 5bd7afafd8..dcb1ee33c9 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -363,6 +363,15 @@
"contributions": [
"code"
]
+ },
+ {
+ "login": "romainrbr",
+ "name": "Romain Brucker",
+ "avatar_url": "https://avatars.githubusercontent.com/u/10381609?v=4",
+ "profile": "https://github.com/romainrbr",
+ "contributions": [
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
diff --git a/README.md b/README.md
index 9e938564d2..ffa56e2b10 100644
--- a/README.md
+++ b/README.md
@@ -20,12 +20,24 @@
+
+
+
+
-
+
+
+
+
+
+
+
+
+
@@ -71,12 +83,11 @@
@@ -85,54 +96,26 @@
# ℹ️ About
-Building production-ready LLM-powered applications is currently very difficult. It involves countless iterations of prompt engineering, parameter tuning, and architectures.
-
-Agenta provides you with the tools to quickly do prompt engineering and 🧪 **experiment**, ⚖️ **evaluate**, and :rocket: **deploy** your LLM apps. All without imposing any restrictions on your choice of framework, library, or model.
-
-
-
-
-
-
-
-
+Agenta is an end-to-end LLMOps platform. It provides the tools for **prompt engineering and management**, ⚖️ **evaluation**, and :rocket: **deployment**. All without imposing any restrictions on your choice of framework, library, or model.
+Agenta allows developers and product teams to collaborate and build robust AI applications in less time.
-# Demo
-https://github.com/Agenta-AI/agenta/assets/57623556/99733147-2b78-4b95-852f-67475e4ce9ed
+## 🔨 How does it work?
-# Quick Start
+| Using an LLM App Template (For Non-Technical Users) | Starting from Code |
+| ------------- | ------------- |
+|1. [Create an application using a pre-built template from our UI](https://cloud.agenta.ai?utm_source=github&utm_medium=readme&utm_campaign=github) 2. Access a playground where you can test and compare different prompts and configurations side-by-side. 3. Systematically evaluate your application using pre-built or custom evaluators. 4. Deploy the application to production with one click. |1. [Add a few lines to any LLM application code to automatically create a playground for it](https://docs.agenta.ai/tutorials/first-app-with-langchain) 2. Experiment with prompts and configurations, and compare them side-by-side in the playground. 3. Systematically evaluate your application using pre-built or custom evaluators. 4. Deploy the application to production with one click. |
+
-
+### [Try the cloud version](https://cloud.agenta.ai?utm_source=github&utm_medium=readme&utm_campaign=github)
### [Create your first application in one minute](https://docs.agenta.ai/quickstart/getting-started-ui)
+### [Create an application using Langchain](https://docs.agenta.ai/tutorials/first-app-with-langchain)
+### [Self-host agenta](https://docs.agenta.ai/self-host/host-locally)
+### [Read the Documentation](https://docs.agenta.ai)
+### [Check the Cookbook](https://docs.agenta.ai/cookbook)
# Features
@@ -207,8 +190,8 @@ Now your team can 🔄 iterate, 🧪 experiment, and ⚖️ evaluate different v
-# Support
-Talk with the founders for any commercial inquiries.
+# Enterprise Support
+Contact us here for enterprise support and early access to agenta self-managed enterprise with Kubernetes support.
# Disabling Anonymized Tracking
@@ -231,7 +214,7 @@ Check out our [Contributing Guide](https://docs.agenta.ai/contributing/getting-s
## Contributors ✨
-[![All Contributors](https://img.shields.io/badge/all_contributors-38-orange.svg?style=flat-square)](#contributors-)
+[![All Contributors](https://img.shields.io/badge/all_contributors-39-orange.svg?style=flat-square)](#contributors-)
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
@@ -290,6 +273,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
diego 💻
brockWith 💻
Dennis Zelada 💻
+ Romain Brucker 💻
diff --git a/agenta-backend/agenta_backend/main.py b/agenta-backend/agenta_backend/main.py
index 568895aa10..f16f7e8a50 100644
--- a/agenta-backend/agenta_backend/main.py
+++ b/agenta-backend/agenta_backend/main.py
@@ -88,3 +88,8 @@ async def lifespan(application: FastAPI, cache=True):
app.include_router(organization_router.router, prefix="/organizations")
app.include_router(bases_router.router, prefix="/bases")
app.include_router(configs_router.router, prefix="/configs")
+
+if os.environ["FEATURE_FLAG"] in ["cloud", "ee"]:
+ import agenta_backend.cloud.main as cloud
+
+ app = cloud.extend_app_schema(app)
diff --git a/agenta-backend/agenta_backend/routers/app_router.py b/agenta-backend/agenta_backend/routers/app_router.py
index d67775e4a7..857e4ce181 100644
--- a/agenta-backend/agenta_backend/routers/app_router.py
+++ b/agenta-backend/agenta_backend/routers/app_router.py
@@ -4,7 +4,8 @@
from fastapi.responses import JSONResponse
from agenta_backend.config import settings
from typing import List, Optional
-from fastapi import APIRouter, HTTPException, Request
+from fastapi import HTTPException, Request
+from agenta_backend.utils.common import APIRouter
from agenta_backend.services.selectors import get_user_own_org
from agenta_backend.services import (
app_manager,
@@ -49,7 +50,11 @@
logger.setLevel(logging.DEBUG)
-@router.get("/{app_id}/variants/", response_model=List[AppVariantOutput])
+@router.get(
+ "/{app_id}/variants/",
+ response_model=List[AppVariantOutput],
+ operation_id="list_app_variants",
+)
async def list_app_variants(
app_id: str,
request: Request,
@@ -90,7 +95,11 @@ async def list_app_variants(
raise HTTPException(status_code=500, detail=str(e))
-@router.get("/get_variant_by_env/", response_model=AppVariantOutput)
+@router.get(
+ "/get_variant_by_env/",
+ response_model=AppVariantOutput,
+ operation_id="get_variant_by_env",
+)
async def get_variant_by_env(
app_id: str,
environment: str,
@@ -134,7 +143,7 @@ async def get_variant_by_env(
raise HTTPException(status_code=500, detail=str(e))
-@router.post("/", response_model=CreateAppOutput)
+@router.post("/", response_model=CreateAppOutput, operation_id="create_app")
async def create_app(
payload: CreateApp,
request: Request,
@@ -180,7 +189,7 @@ async def create_app(
raise HTTPException(status_code=500, detail=str(e))
-@router.get("/", response_model=List[App])
+@router.get("/", response_model=List[App], operation_id="list_apps")
async def list_apps(
request: Request,
app_name: Optional[str] = None,
@@ -209,7 +218,7 @@ async def list_apps(
raise HTTPException(status_code=500, detail=str(e))
-@router.post("/{app_id}/variant/from-image/")
+@router.post("/{app_id}/variant/from-image/", operation_id="add_variant_from_image")
async def add_variant_from_image(
app_id: str,
payload: AddVariantFromImagePayload,
@@ -271,7 +280,7 @@ async def add_variant_from_image(
raise HTTPException(status_code=500, detail=str(e))
-@router.delete("/{app_id}/")
+@router.delete("/{app_id}/", operation_id="remove_app")
async def remove_app(app_id: str, request: Request):
"""Remove app, all its variant, containers and images
@@ -301,7 +310,10 @@ async def remove_app(app_id: str, request: Request):
raise HTTPException(status_code=500, detail=detail)
-@router.post("/app_and_variant_from_template/")
+@router.post(
+ "/app_and_variant_from_template/",
+ operation_id="create_app_and_variant_from_template",
+)
async def create_app_and_variant_from_template(
payload: CreateAppVariant,
request: Request,
@@ -406,7 +418,11 @@ async def create_app_and_variant_from_template(
raise HTTPException(status_code=500, detail=str(e))
-@router.get("/{app_id}/environments/", response_model=List[EnvironmentOutput])
+@router.get(
+ "/{app_id}/environments/",
+ response_model=List[EnvironmentOutput],
+ operation_id="list_environments",
+)
async def list_environments(
app_id: str,
request: Request,
diff --git a/agenta-backend/agenta_backend/routers/bases_router.py b/agenta-backend/agenta_backend/routers/bases_router.py
index b9a57767c3..ae58937800 100644
--- a/agenta-backend/agenta_backend/routers/bases_router.py
+++ b/agenta-backend/agenta_backend/routers/bases_router.py
@@ -1,6 +1,7 @@
import os
from typing import List, Optional
-from fastapi import APIRouter, Request, HTTPException
+from fastapi import Request, HTTPException
+from agenta_backend.utils.common import APIRouter
from agenta_backend.models.api.api_models import BaseOutput
from fastapi.responses import JSONResponse
from agenta_backend.services import db_manager
@@ -22,7 +23,7 @@
router = APIRouter()
-@router.get("/", response_model=List[BaseOutput])
+@router.get("/", response_model=List[BaseOutput], operation_id="list_bases")
async def list_bases(
request: Request,
app_id: Optional[str] = None,
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index 719a62f175..bb58060fcd 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -1,6 +1,7 @@
import os
from typing import Optional
-from fastapi import APIRouter, Request, HTTPException
+from fastapi import Request, HTTPException
+from agenta_backend.utils.common import APIRouter
import logging
from agenta_backend.models.api.api_models import (
@@ -26,7 +27,7 @@
router = APIRouter()
-@router.post("/")
+@router.post("/", operation_id="save_config")
async def save_config(
payload: SaveConfigPayload,
request: Request,
@@ -73,7 +74,7 @@ async def save_config(
raise HTTPException(status_code=500, detail=str(e)) from e
-@router.get("/", response_model=GetConfigReponse)
+@router.get("/", response_model=GetConfigReponse, operation_id="get_config")
async def get_config(
request: Request,
base_id: str,
diff --git a/agenta-backend/agenta_backend/routers/container_router.py b/agenta-backend/agenta_backend/routers/container_router.py
index e8a5261542..f0fc073154 100644
--- a/agenta-backend/agenta_backend/routers/container_router.py
+++ b/agenta-backend/agenta_backend/routers/container_router.py
@@ -7,7 +7,8 @@
Template,
)
from agenta_backend.services import db_manager
-from fastapi import APIRouter, Request, UploadFile, HTTPException
+from fastapi import Request, UploadFile, HTTPException
+from agenta_backend.utils.common import APIRouter
from fastapi.responses import JSONResponse
if os.environ["FEATURE_FLAG"] in ["cloud", "ee"]:
@@ -35,7 +36,7 @@
# TODO: We need to improve this to use the introduced abstraction to also use start and stop service
-@router.post("/build_image/")
+@router.post("/build_image/", operation_id="build_image")
async def build_image(
app_id: str,
base_name: str,
@@ -71,7 +72,7 @@ async def build_image(
return image_result
-@router.post("/restart_container/")
+@router.post("/restart_container/", operation_id="restart_container")
async def restart_docker_container(
payload: RestartAppContainer,
request: Request,
@@ -100,7 +101,7 @@ async def restart_docker_container(
return JSONResponse({"message": str(ex)}, status_code=500)
-@router.get("/templates/")
+@router.get("/templates/", operation_id="container_templates")
async def container_templates(
request: Request,
) -> Union[List[Template], str]:
@@ -121,7 +122,7 @@ async def container_templates(
return templates
-@router.get("/container_url/")
+@router.get("/container_url/", operation_id="construct_app_container_url")
async def construct_app_container_url(
request: Request,
base_id: Optional[str] = None,
diff --git a/agenta-backend/agenta_backend/routers/environment_router.py b/agenta-backend/agenta_backend/routers/environment_router.py
index b843c39f22..a64b11027e 100644
--- a/agenta-backend/agenta_backend/routers/environment_router.py
+++ b/agenta-backend/agenta_backend/routers/environment_router.py
@@ -3,7 +3,8 @@
from fastapi.responses import JSONResponse
from agenta_backend.services import db_manager
-from fastapi import APIRouter, Request, HTTPException
+from fastapi import Request, HTTPException
+from agenta_backend.utils.common import APIRouter
from agenta_backend.utils.common import check_access_to_app, check_access_to_variant
from agenta_backend.models.api.api_models import (
EnvironmentOutput,
@@ -24,7 +25,7 @@
router = APIRouter()
-@router.post("/deploy/")
+@router.post("/deploy/", operation_id="deploy_to_environment")
async def deploy_to_environment(
payload: DeployToEnvironmentPayload,
request: Request,
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index 3cf1b45494..732632ae78 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -1,44 +1,23 @@
import os
import secrets
-from typing import List, Dict
+from typing import List
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
-from fastapi import HTTPException, APIRouter, Body, Request, status, Response
+from fastapi import HTTPException, Request, status, Response
-from agenta_backend.services.helpers import format_inputs, format_outputs
+from agenta_backend.utils.common import APIRouter
from agenta_backend.models.api.evaluation_model import (
- AICritiqueCreate,
- CustomEvaluationNames,
Evaluation,
EvaluationScenario,
- CustomEvaluationOutput,
- CustomEvaluationDetail,
- ExecuteCustomEvaluationCode,
NewEvaluation,
DeleteEvaluation,
- EvaluationType,
- CreateCustomEvaluation,
EvaluationWebhook,
- SimpleEvaluationOutput,
)
-from agenta_backend.services.evaluation_service import (
- UpdateEvaluationScenarioError,
- fetch_custom_evaluation_names,
- fetch_custom_evaluations,
- fetch_custom_evaluation_detail,
- get_evaluation_scenario_score,
- update_evaluation_scenario_score,
- create_custom_code_evaluation,
- update_custom_code_evaluation,
- execute_custom_code_evaluation,
-)
-from agenta_backend.services import evaluation_service
-from agenta_backend.utils.common import check_access_to_app
from agenta_backend.services import db_manager
-from agenta_backend.models import converters
-from agenta_backend.services import results_service
from agenta_backend.tasks.evaluations import evaluate
+from agenta_backend.services import evaluation_service
+from agenta_backend.utils.common import check_access_to_app
if os.environ["FEATURE_FLAG"] in ["cloud", "ee"]:
@@ -48,10 +27,14 @@
else:
from agenta_backend.services.selectors import get_user_and_org_id
+
+# Initialize api router
router = APIRouter()
-@router.post("/")
+@router.post(
+ "/", response_model=List[Evaluation], operation_id="create_evaluation"
+)
async def create_evaluation(
payload: NewEvaluation,
request: Request,
@@ -109,7 +92,7 @@ async def create_evaluation(
)
-@router.get("/{evaluation_id}/status/")
+@router.get("/{evaluation_id}/status/", operation_id="fetch_evaluation_status")
async def fetch_evaluation_status(evaluation_id: str, request: Request):
"""Fetches the status of the evaluation.
@@ -132,7 +115,7 @@ async def fetch_evaluation_status(evaluation_id: str, request: Request):
raise HTTPException(status_code=500, detail=str(exc))
-@router.get("/{evaluation_id}/results/")
+@router.get("/{evaluation_id}/results/", operation_id="fetch_evaluation_results")
async def fetch_evaluation_results(evaluation_id: str, request: Request):
"""Fetches the results of the evaluation
@@ -158,6 +141,7 @@ async def fetch_evaluation_results(evaluation_id: str, request: Request):
@router.get(
"/{evaluation_id}/evaluation_scenarios/",
response_model=List[EvaluationScenario],
+ operation_id="fetch_evaluation_scenarios",
)
async def fetch_evaluation_scenarios(
evaluation_id: str,
@@ -183,64 +167,6 @@ async def fetch_evaluation_scenarios(
return eval_scenarios
-@router.post("/{evaluation_id}/evaluation_scenario/")
-async def create_evaluation_scenario(
- evaluation_id: str,
- evaluation_scenario: EvaluationScenario,
- request: Request,
-):
- """Create a new evaluation scenario for a given evaluation ID.
-
- Raises:
- HTTPException: If evaluation not found or access denied.
-
- Returns:
- None: 204 No Content status code upon success.
- """
- user_org_data = await get_user_and_org_id(request.state.user_id)
- await evaluation_service.create_evaluation_scenario(
- evaluation_id, evaluation_scenario, **user_org_data
- )
- return Response(status_code=status.HTTP_204_NO_CONTENT)
-
-
-@router.post("/evaluation_scenario/ai_critique/", response_model=str)
-async def evaluate_ai_critique(
- payload: AICritiqueCreate,
- request: Request,
-) -> str:
- """
- Evaluate AI critique based on the given payload.
-
- Args:
- payload (AICritiqueCreate): The payload containing data for AI critique evaluation.
- stoken_session (SessionContainer): The session container verified by `verify_session`.
-
- Returns:
- str: The output of the AI critique evaluation.
-
- Raises:
- HTTPException: If any exception occurs during the evaluation.
- """
- try:
- # Extract data from the payload
- payload_dict = payload.dict()
-
- # Run AI critique evaluation
- output = evaluate_with_ai_critique(
- llm_app_prompt_template=payload_dict["llm_app_prompt_template"],
- llm_app_inputs=payload_dict["inputs"],
- correct_answer=payload_dict["correct_answer"],
- app_variant_output=payload_dict["outputs"][0]["variant_output"],
- evaluation_prompt_template=payload_dict["evaluation_prompt_template"],
- open_ai_key=payload_dict["open_ai_key"],
- )
- return output
-
- except Exception as e:
- raise HTTPException(400, f"Failed to evaluate AI critique: {str(e)}")
-
-
@router.get("/", response_model=List[Evaluation])
async def fetch_list_evaluations(
app_id: str,
@@ -260,7 +186,9 @@ async def fetch_list_evaluations(
)
-@router.get("/{evaluation_id}/", response_model=Evaluation)
+@router.get(
+ "/{evaluation_id}/", response_model=Evaluation, operation_id="fetch_evaluation"
+)
async def fetch_evaluation(
evaluation_id: str,
request: Request,
@@ -277,7 +205,7 @@ async def fetch_evaluation(
return await evaluation_service.fetch_evaluation(evaluation_id, **user_org_data)
-@router.delete("/", response_model=List[str])
+@router.delete("/", response_model=List[str], operation_id="delete_evaluations")
async def delete_evaluations(
delete_evaluations: DeleteEvaluation,
request: Request,
@@ -300,176 +228,11 @@ async def delete_evaluations(
return Response(status_code=status.HTTP_204_NO_CONTENT)
-@router.post("/custom_evaluation/")
-async def create_custom_evaluation(
- custom_evaluation_payload: CreateCustomEvaluation,
- request: Request,
-):
- """Create evaluation with custom python code.
-
- Args:
- \n custom_evaluation_payload (CreateCustomEvaluation): the required payload
- """
-
- # Get user and organization id
- user_org_data: dict = await get_user_and_org_id(request.state.user_id)
-
- # create custom evaluation in database
- evaluation_id = await create_custom_code_evaluation(
- custom_evaluation_payload, **user_org_data
- )
-
- return JSONResponse(
- {
- "status": "success",
- "message": "Evaluation created successfully.",
- "evaluation_id": evaluation_id,
- },
- status_code=200,
- )
-
-
-@router.put("/custom_evaluation/{id}")
-async def update_custom_evaluation(
- id: str,
- updated_data: CreateCustomEvaluation,
- request: Request,
-):
- """Update a custom code evaluation.
- Args:
- id (str): the ID of the custom evaluation to update
- updated_data (CreateCustomEvaluation): the payload with updated data
- stoken_session (SessionContainer): session container for authentication
- """
-
- # Get user and organization id
- kwargs: dict = await get_user_and_org_id(request.state.user_id)
-
- # Update the evaluation with the provided data
- updated_evaluation_id = await update_custom_code_evaluation(
- id, updated_data, **kwargs
- )
-
- return JSONResponse(
- {
- "status": "success",
- "message": "Evaluation edited successfully.",
- "evaluation_id": updated_evaluation_id,
- },
- status_code=200,
- )
-
-
-@router.get(
- "/custom_evaluation/list/{app_id}/",
- response_model=List[CustomEvaluationOutput],
-)
-async def list_custom_evaluations(
- app_id: str,
- request: Request,
-):
- """List the custom code evaluations for a given app.
-
- Args:
- app_id (str): the id of the app
-
- Returns:
- List[CustomEvaluationOutput]: a list of custom evaluation
- """
-
- # Get user and organization id
- user_org_data: dict = await get_user_and_org_id(request.state.user_id)
-
- # Fetch custom evaluations from database
- evaluations = await fetch_custom_evaluations(app_id, **user_org_data)
- return evaluations
-
-
-@router.get(
- "/custom_evaluation/{id}/",
- response_model=CustomEvaluationDetail,
-)
-async def get_custom_evaluation(
- id: str,
- request: Request,
-):
- """Get the custom code evaluation detail.
-
- Args:
- id (str): the id of the custom evaluation
-
- Returns:
- CustomEvaluationDetail: Detail of the custom evaluation
- """
-
- # Get user and organization id
- user_org_data: dict = await get_user_and_org_id(request.state.user_id)
-
- # Fetch custom evaluations from database
- evaluation = await fetch_custom_evaluation_detail(id, **user_org_data)
- return evaluation
-
-
-@router.get(
- "/custom_evaluation/{app_name}/names/",
- response_model=List[CustomEvaluationNames],
-)
-async def get_custom_evaluation_names(app_name: str, request: Request):
- """Get the names of custom evaluation for a given app.
-
- Args:
- app_name (str): the name of the app the evaluation belongs to
-
- Returns:
- List[CustomEvaluationNames]: the list of name of custom evaluations
- """
- # Get user and organization id
- user_org_data: dict = await get_user_and_org_id(request.state.user_id)
-
- custom_eval_names = await fetch_custom_evaluation_names(app_name, **user_org_data)
- return custom_eval_names
-
-
@router.post(
- "/custom_evaluation/execute/{evaluation_id}/",
+ "/webhook_example_fake/",
+ response_model=EvaluationWebhook,
+ operation_id="webhook_example_fake",
)
-async def execute_custom_evaluation(
- evaluation_id: str,
- payload: ExecuteCustomEvaluationCode,
- request: Request,
-):
- """Execute a custom evaluation code.
-
- Args:
- evaluation_id (str): the custom evaluation id
- payload (ExecuteCustomEvaluationCode): the required payload
-
- Returns:
- float: the result of the evaluation custom code
- """
-
- # Get user and organization id
- user_org_data: dict = await get_user_and_org_id(request.state.user_id)
-
- # Execute custom code evaluation
- formatted_inputs = format_inputs(payload.inputs)
- formatted_outputs = format_outputs(payload.outputs)
- output = list(formatted_outputs.values())[
- 0
- ] # for now we expect one output as a string
- result = await execute_custom_code_evaluation(
- evaluation_id,
- payload.app_id,
- output,
- payload.correct_answer,
- payload.variant_id,
- formatted_inputs,
- **user_org_data,
- )
- return result
-
-
-@router.post("/webhook_example_fake/", response_model=EvaluationWebhook)
async def webhook_example_fake():
"""Returns a fake score response for example webhook evaluation
diff --git a/agenta-backend/agenta_backend/routers/health_router.py b/agenta-backend/agenta_backend/routers/health_router.py
index e59dc08e0f..0998db3bac 100644
--- a/agenta-backend/agenta_backend/routers/health_router.py
+++ b/agenta-backend/agenta_backend/routers/health_router.py
@@ -1,8 +1,9 @@
-from fastapi import APIRouter, status
+from fastapi import status
+from agenta_backend.utils.common import APIRouter
router = APIRouter()
-@router.get("/", status_code=status.HTTP_200_OK)
+@router.get("/", status_code=status.HTTP_200_OK, operation_id="health_check")
def health_check():
return {"status": "ok"}
diff --git a/agenta-backend/agenta_backend/routers/observability_router.py b/agenta-backend/agenta_backend/routers/observability_router.py
index 131c579f44..f385a99c33 100644
--- a/agenta-backend/agenta_backend/routers/observability_router.py
+++ b/agenta-backend/agenta_backend/routers/observability_router.py
@@ -1,13 +1,14 @@
import os
from typing import List
-from fastapi import APIRouter, Request
+from fastapi import Request
+from agenta_backend.utils.common import APIRouter
from agenta_backend.services.event_db_manager import (
get_variant_traces,
create_app_trace,
create_trace_span,
- get_single_trace,
+ get_trace_single,
trace_status_update,
get_trace_spans,
add_feedback_to_trace,
@@ -37,7 +38,7 @@
router = APIRouter()
-@router.post("/traces/", response_model=str)
+@router.post("/traces/", response_model=str, operation_id="create_trace")
async def create_trace(
payload: CreateTrace,
request: Request,
@@ -48,7 +49,11 @@ async def create_trace(
return trace
-@router.get("/traces/{app_id}/{variant_id}/", response_model=List[Trace])
+@router.get(
+ "/traces/{app_id}/{variant_id}/",
+ response_model=List[Trace],
+ operation_id="get_traces",
+)
async def get_traces(
app_id: str,
variant_id: str,
@@ -60,18 +65,20 @@ async def get_traces(
return traces
-@router.get("/traces/{trace_id}/", response_model=Trace)
-async def get_trace(
+@router.get(
+ "/traces/{trace_id}/", response_model=Trace, operation_id="get_single_trace"
+)
+async def get_single_trace(
trace_id: str,
request: Request,
):
# Get user and org id
kwargs: dict = await get_user_and_org_id(request.state.user_id)
- trace = await get_single_trace(trace_id, **kwargs)
+ trace = await get_trace_single(trace_id, **kwargs)
return trace
-@router.post("/spans/", response_model=str)
+@router.post("/spans/", response_model=str, operation_id="create_span")
async def create_span(
payload: CreateSpan,
request: Request,
@@ -82,7 +89,9 @@ async def create_span(
return spans_id
-@router.get("/spans/{trace_id}/", response_model=List[Span])
+@router.get(
+ "/spans/{trace_id}/", response_model=List[Span], operation_id="get_spans_of_trace"
+)
async def get_spans_of_trace(
trace_id: str,
request: Request,
@@ -93,7 +102,9 @@ async def get_spans_of_trace(
return spans
-@router.put("/traces/{trace_id}/", response_model=bool)
+@router.put(
+ "/traces/{trace_id}/", response_model=bool, operation_id="update_trace_status"
+)
async def update_trace_status(
trace_id: str,
payload: UpdateTrace,
@@ -105,7 +116,9 @@ async def update_trace_status(
return trace
-@router.post("/feedbacks/{trace_id}/", response_model=str)
+@router.post(
+ "/feedbacks/{trace_id}/", response_model=str, operation_id="create_feedback"
+)
async def create_feedback(
trace_id: str,
payload: CreateFeedback,
@@ -117,7 +130,11 @@ async def create_feedback(
return feedback
-@router.get("/feedbacks/{trace_id}/", response_model=List[Feedback])
+@router.get(
+ "/feedbacks/{trace_id}/",
+ response_model=List[Feedback],
+ operation_id="get_feedbacks",
+)
async def get_feedbacks(trace_id: str, request: Request):
# Get user and org id
kwargs: dict = await get_user_and_org_id(request.state.user_id)
@@ -125,7 +142,11 @@ async def get_feedbacks(trace_id: str, request: Request):
return feedbacks
-@router.get("/feedbacks/{trace_id}/{feedback_id}/", response_model=Feedback)
+@router.get(
+ "/feedbacks/{trace_id}/{feedback_id}/",
+ response_model=Feedback,
+ operation_id="get_feedback",
+)
async def get_feedback(
trace_id: str,
feedback_id: str,
@@ -137,7 +158,11 @@ async def get_feedback(
return feedback
-@router.put("/feedbacks/{trace_id}/{feedback_id}/", response_model=Feedback)
+@router.put(
+ "/feedbacks/{trace_id}/{feedback_id}/",
+ response_model=Feedback,
+ operation_id="update_feedback",
+)
async def update_feedback(
trace_id: str,
feedback_id: str,
diff --git a/agenta-backend/agenta_backend/routers/organization_router.py b/agenta-backend/agenta_backend/routers/organization_router.py
index 077fed1fb7..c8f4edf129 100644
--- a/agenta-backend/agenta_backend/routers/organization_router.py
+++ b/agenta-backend/agenta_backend/routers/organization_router.py
@@ -4,7 +4,8 @@
import os
import logging
-from fastapi import APIRouter, HTTPException, Request
+from fastapi import HTTPException, Request
+from agenta_backend.utils.common import APIRouter
from agenta_backend.services.selectors import get_user_own_org
from agenta_backend.models.api.organization_models import (
OrganizationOutput,
@@ -25,7 +26,7 @@
logger.setLevel(logging.DEBUG)
-@router.get("/", response_model=list[Organization])
+@router.get("/", response_model=list[Organization], operation_id="list_organizations")
async def list_organizations(
request: Request,
):
@@ -66,7 +67,7 @@ async def list_organizations(
)
-@router.get("/own/")
+@router.get("/own/", response_model=OrganizationOutput, operation_id="get_own_org")
async def get_user_organization(
request: Request,
):
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 3cec86b8a8..ec69df388a 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -7,7 +7,8 @@
from datetime import datetime
from typing import Optional, List
-from fastapi import HTTPException, APIRouter, UploadFile, File, Form, Request
+from fastapi import HTTPException, UploadFile, File, Form, Request
+from agenta_backend.utils.common import APIRouter
from fastapi.responses import JSONResponse
from pydantic import ValidationError
@@ -36,7 +37,9 @@
from agenta_backend.services.selectors import get_user_and_org_id
-@router.post("/upload/", response_model=TestSetSimpleResponse)
+@router.post(
+ "/upload/", response_model=TestSetSimpleResponse, operation_id="upload_file"
+)
async def upload_file(
request: Request,
upload_type: str = Form(None),
@@ -114,7 +117,9 @@ async def upload_file(
)
-@router.post("/endpoint/", response_model=TestSetSimpleResponse)
+@router.post(
+ "/endpoint/", response_model=TestSetSimpleResponse, operation_id="import_testset"
+)
async def import_testset(
request: Request,
endpoint: str = Form(None),
@@ -191,7 +196,9 @@ async def import_testset(
) from error
-@router.post("/{app_id}/")
+@router.post(
+ "/{app_id}/", response_model=TestSetSimpleResponse, operation_id="create_testset"
+)
async def create_testset(
app_id: str,
csvdata: NewTestset,
@@ -245,7 +252,7 @@ async def create_testset(
raise HTTPException(status_code=500, detail=str(e))
-@router.put("/{testset_id}/")
+@router.put("/{testset_id}/", operation_id="update_testset")
async def update_testset(
testset_id: str,
csvdata: NewTestset,
@@ -297,7 +304,7 @@ async def update_testset(
raise HTTPException(status_code=500, detail=str(e))
-@router.get("/", tags=["testsets"])
+@router.get("/", operation_id="get_testsets")
async def get_testsets(
app_id: str,
request: Request,
@@ -337,8 +344,8 @@ async def get_testsets(
]
-@router.get("/{testset_id}/", tags=["testsets"])
-async def get_testset(
+@router.get("/{testset_id}/", operation_id="get_single_testset")
+async def get_single_testset(
testset_id: str,
request: Request,
):
@@ -367,7 +374,7 @@ async def get_testset(
return testset_db_to_pydantic(test_set)
-@router.delete("/", response_model=List[str])
+@router.delete("/", response_model=List[str], operation_id="delete_testsets")
async def delete_testsets(
delete_testsets: DeleteTestsets,
request: Request,
diff --git a/agenta-backend/agenta_backend/routers/user_profile.py b/agenta-backend/agenta_backend/routers/user_profile.py
index 5f4cf57fea..a751ae72d7 100644
--- a/agenta-backend/agenta_backend/routers/user_profile.py
+++ b/agenta-backend/agenta_backend/routers/user_profile.py
@@ -1,8 +1,9 @@
import os
from agenta_backend.models.db_models import UserDB
-from fastapi import APIRouter, HTTPException, Request
+from fastapi import HTTPException, Request
from agenta_backend.models.api.user_models import User
from agenta_backend.services import db_manager
+from agenta_backend.utils.common import APIRouter
router = APIRouter()
@@ -14,7 +15,7 @@
from agenta_backend.services.selectors import get_user_and_org_id
-@router.get("/")
+@router.get("/", operation_id="user_profile")
async def user_profile(
request: Request,
):
diff --git a/agenta-backend/agenta_backend/routers/variants_router.py b/agenta-backend/agenta_backend/routers/variants_router.py
index 776ad7ee33..e20668c59f 100644
--- a/agenta-backend/agenta_backend/routers/variants_router.py
+++ b/agenta-backend/agenta_backend/routers/variants_router.py
@@ -3,7 +3,8 @@
from docker.errors import DockerException
from fastapi.responses import JSONResponse
from typing import Any, Optional, Union
-from fastapi import APIRouter, HTTPException, Request, Body
+from fastapi import HTTPException, Request, Body
+from agenta_backend.utils.common import APIRouter
from agenta_backend.services import (
app_manager,
db_manager,
@@ -36,7 +37,7 @@
logger.setLevel(logging.DEBUG)
-@router.post("/from-base/")
+@router.post("/from-base/", operation_id="add_variant_from_base_and_config")
async def add_variant_from_base_and_config(
payload: AddVariantFromBasePayload,
request: Request,
@@ -78,7 +79,7 @@ async def add_variant_from_base_and_config(
raise HTTPException(status_code=500, detail=str(e))
-@router.delete("/{variant_id}/")
+@router.delete("/{variant_id}/", operation_id="remove_variant")
async def remove_variant(
variant_id: str,
request: Request,
@@ -122,7 +123,7 @@ async def remove_variant(
raise HTTPException(status_code=500, detail=detail)
-@router.put("/{variant_id}/parameters/")
+@router.put("/{variant_id}/parameters/", operation_id="update_variant_parameters")
async def update_variant_parameters(
request: Request,
variant_id: str,
@@ -171,7 +172,7 @@ async def update_variant_parameters(
raise HTTPException(status_code=500, detail=detail)
-@router.put("/{variant_id}/image/")
+@router.put("/{variant_id}/image/", operation_id="update_variant_image")
async def update_variant_image(
variant_id: str,
image: Image,
@@ -220,7 +221,7 @@ async def update_variant_image(
raise HTTPException(status_code=500, detail=detail)
-@router.put("/{variant_id}/")
+@router.put("/{variant_id}/", operation_id="start_variant")
async def start_variant(
request: Request,
variant_id: str,
diff --git a/agenta-backend/agenta_backend/services/docker_utils.py b/agenta-backend/agenta_backend/services/docker_utils.py
index 73c0b88a3e..df5623f14b 100644
--- a/agenta-backend/agenta_backend/services/docker_utils.py
+++ b/agenta-backend/agenta_backend/services/docker_utils.py
@@ -114,6 +114,7 @@ def start_container(
name=container_name,
environment=env_vars,
extra_hosts=extra_hosts,
+ restart_policy={"Name": "always"},
)
# Check the container's status
sleep(0.5)
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index 93250002a7..91e69f42e1 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -1,8 +1,7 @@
import logging
-from agenta_backend.services.security.sandbox import execute_code_safely
from bson import ObjectId
from datetime import datetime
-from typing import Dict, List, Any, Optional
+from typing import Dict, List, Any
from fastapi import HTTPException
@@ -25,9 +24,10 @@
NewHumanEvaluation,
)
from agenta_backend.models import converters
-from agenta_backend.utils.common import engine, check_access_to_app
-from agenta_backend.services.db_manager import query, get_user
from agenta_backend.services import db_manager
+from agenta_backend.services.db_manager import query, get_user
+from agenta_backend.utils.common import engine, check_access_to_app
+from agenta_backend.services.security.sandbox import execute_code_safely
from agenta_backend.models.db_models import (
AppVariantDB,
EvaluationDB,
@@ -438,7 +438,7 @@ async def update_human_evaluation_scenario(
await engine.save(eval_scenario)
-async def update_evaluation_scenario_score(
+async def update_evaluation_scenario_score_service(
evaluation_scenario_id: str, score: float, **user_org_data: dict
) -> None:
"""
@@ -461,7 +461,7 @@ async def update_evaluation_scenario_score(
await engine.save(eval_scenario)
-async def get_evaluation_scenario_score(
+async def get_evaluation_scenario_score_service(
evaluation_scenario_id: str, **user_org_data: dict
) -> Dict[str, str]:
"""
diff --git a/agenta-backend/agenta_backend/services/event_db_manager.py b/agenta-backend/agenta_backend/services/event_db_manager.py
index 34ead44c51..cbc2971644 100644
--- a/agenta-backend/agenta_backend/services/event_db_manager.py
+++ b/agenta-backend/agenta_backend/services/event_db_manager.py
@@ -85,7 +85,7 @@ async def create_app_trace(payload: CreateTrace, **kwargs: dict) -> str:
return trace_db_to_pydantic(trace)["trace_id"]
-async def get_single_trace(trace_id: str, **kwargs: dict) -> Trace:
+async def get_trace_single(trace_id: str, **kwargs: dict) -> Trace:
"""Get a single trace.
Args:
diff --git a/agenta-backend/agenta_backend/utils/common.py b/agenta-backend/agenta_backend/utils/common.py
index 57aad1ec95..25c382d952 100644
--- a/agenta-backend/agenta_backend/utils/common.py
+++ b/agenta-backend/agenta_backend/utils/common.py
@@ -1,8 +1,10 @@
+import logging
from bson import ObjectId
from odmantic import query
-from fastapi.responses import JSONResponse
-from typing import Dict, List, Union, Optional
+from fastapi.types import DecoratedCallable
+from fastapi import APIRouter as FastAPIRouter
from agenta_backend.models.db_engine import DBEngine
+from typing import Dict, List, Union, Optional, Any, Callable
from agenta_backend.models.db_models import (
UserDB,
AppVariantDB,
@@ -10,13 +12,53 @@
AppDB,
VariantBaseDB,
)
-import logging
engine = DBEngine().engine()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
+class APIRouter(FastAPIRouter):
+ """
+ Extends the FastAPIRouter class to provide support for alternate paths ending with a forward slash.
+
+ Methods:
+ - api_route: Adds a route to the router with both the original path and an alternate path ending with a forward slash.
+ """
+
+ def api_route(
+ self, path: str, *, include_in_schema: bool = True, **kwargs: Any
+ ) -> Callable[[DecoratedCallable], DecoratedCallable]:
+ """
+ Decorator method that adds a route to the router with both the original path and an alternate path ending with a forward slash.
+
+ Parameters:
+ - path (str): The original path for the route.
+ - include_in_schema (bool): Whether to include the route in the generated OpenAPI schema. Default is True.
+ - **kwargs (Any): Additional keyword arguments to pass to the underlying api_route method.
+
+ Returns:
+ - decorator (Callable[[DecoratedCallable], DecoratedCallable]): A decorator function that can be used to decorate a route function.
+ """
+ if path.endswith("/"):
+ path = path[:-1]
+
+ add_path = super().api_route(
+ path, include_in_schema=include_in_schema, **kwargs
+ )
+
+ alternate_path = path + "/"
+ add_alternate_path = super().api_route(
+ alternate_path, include_in_schema=False, **kwargs
+ )
+
+ def decorator(func: DecoratedCallable) -> DecoratedCallable:
+ add_alternate_path(func)
+ return add_path(func)
+
+ return decorator
+
+
async def get_organization(org_id: str) -> OrganizationDB:
org = await engine.find_one(OrganizationDB, OrganizationDB.id == ObjectId(org_id))
if org is not None:
diff --git a/agenta-cli/agenta/__init__.py b/agenta-cli/agenta/__init__.py
index b73eb24d60..8a683f19e1 100644
--- a/agenta-cli/agenta/__init__.py
+++ b/agenta-cli/agenta/__init__.py
@@ -10,6 +10,7 @@
MessagesInput,
TextParam,
FileInputURL,
+ BinaryParam,
)
from .sdk.utils.preinit import PreInitObject
from .sdk.agenta_init import Config, init
diff --git a/agenta-cli/agenta/cli/helper.py b/agenta-cli/agenta/cli/helper.py
index 96fd406bae..8979c7e39c 100644
--- a/agenta-cli/agenta/cli/helper.py
+++ b/agenta-cli/agenta/cli/helper.py
@@ -1,9 +1,9 @@
+import os
import sys
import toml
import click
import questionary
from pathlib import Path
-from agenta.client import client
from typing import Any, List, MutableMapping
from agenta.client.api_models import AppVariant
@@ -12,6 +12,10 @@
from pathlib import Path
import toml
+from agenta.client.backend.client import AgentaApi
+
+BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api")
+
def get_global_config(var_name: str) -> Optional[Any]:
"""
@@ -127,8 +131,13 @@ def update_variants_from_backend(
Returns:
a new config object later to be saved using toml.dump(config, config_file.open('w'))
"""
+ client = AgentaApi(
+ base_url=f"{host}/{BACKEND_URL_SUFFIX}",
+ api_key=api_key,
+ )
+
try:
- variants: List[AppVariant] = client.list_variants(app_id, host, api_key)
+ variants: List[AppVariant] = client.list_app_variants(app_id=app_id)
except Exception as ex:
raise ex
@@ -146,7 +155,7 @@ def update_config_from_backend(config_file: Path, host: str):
assert config_file.exists(), "Config file does not exist!"
config = toml.load(config_file)
app_id = config["app_id"]
- api_key = config.get("api_key", None)
+ api_key = config.get("api_key", "")
if "variants" not in config:
config["variants"] = []
if "variant_ids" not in config:
diff --git a/agenta-cli/agenta/cli/main.py b/agenta-cli/agenta/cli/main.py
index 0844833e14..64d95d1026 100644
--- a/agenta-cli/agenta/cli/main.py
+++ b/agenta-cli/agenta/cli/main.py
@@ -1,3 +1,4 @@
+import os
import re
import shutil
import sys
@@ -9,11 +10,14 @@
import toml
from agenta.cli import helper
-from agenta.client import client
from agenta.cli import variant_configs
from agenta.cli import variant_commands
from agenta.cli import evaluation_commands
+from agenta.client.backend.client import AgentaApi
+
+BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api")
+
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
@@ -110,7 +114,6 @@ def init(app_name: str):
backend_host = "https://cloud.agenta.ai"
api_key = helper.get_api_key(backend_host)
- client.validate_api_key(api_key, backend_host)
elif where_question is None: # User pressed Ctrl+C
sys.exit(0)
@@ -120,13 +123,32 @@ def init(app_name: str):
else "http://" + backend_host
)
- # Get app_id after creating new app in the backend server
- app_id = client.create_new_app(
- app_name,
- backend_host,
- api_key if where_question == "On agenta cloud" else None,
+ # initialize the client with the backend url and api key
+ client = AgentaApi(
+ base_url=f"{backend_host}/{BACKEND_URL_SUFFIX}",
+ api_key=api_key if where_question == "On agenta cloud" else "",
)
+ # validate the api key if it is provided
+ if where_question == "On agenta cloud":
+ try:
+ key_prefix = api_key.split(".")[0]
+ client.validate_api_key(key_prefix=key_prefix)
+ except Exception as ex:
+ if ex.status_code == 401:
+ click.echo(click.style("Error: Invalid API key", fg="red"))
+ sys.exit(1)
+ else:
+ click.echo(click.style(f"Error: {ex}", fg="red"))
+ sys.exit(1)
+
+ # Get app_id after creating new app in the backend server
+ try:
+ app_id = client.create_app(app_name=app_name).app_id
+ except Exception as ex:
+ click.echo(click.style(f"Error: {ex}", fg="red"))
+ sys.exit(1)
+
# Set app toml configuration
config = {
"app_name": app_name,
diff --git a/agenta-cli/agenta/cli/variant_commands.py b/agenta-cli/agenta/cli/variant_commands.py
index 95104a80a6..20f84f3603 100644
--- a/agenta-cli/agenta/cli/variant_commands.py
+++ b/agenta-cli/agenta/cli/variant_commands.py
@@ -1,3 +1,4 @@
+import os
import re
import sys
from typing import List
@@ -9,11 +10,15 @@
import questionary
import toml
from agenta.cli import helper
-from agenta.client import client
from agenta.cli.telemetry import event_track
from agenta.client.api_models import AppVariant, Image
from agenta.docker.docker_utils import build_tar_docker_container
+from agenta.client.api import add_variant_to_server
+from agenta.client.backend.client import AgentaApi
+
+BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api")
+
@click.group()
def variant():
@@ -32,7 +37,6 @@ def add_variant(
variant_name: the name of the variant
app_folder: the folder of the app
file_name: the name of the file to run.
- host: the host to use for the variant
config_name: the name of the config to use for now it is always default
Returns:
the name of the code base and variant(useful for serve)
@@ -44,7 +48,7 @@ def add_variant(
app_name = config["app_name"]
app_id = config["app_id"]
- api_key = config.get("api_key", None)
+ api_key = config.get("api_key", "")
config_name = "default"
base_name = file_name.removesuffix(".py")
@@ -93,6 +97,11 @@ def add_variant(
variant_name = f"{base_name}.{config_name}"
overwrite = False
+ client = AgentaApi(
+ base_url=f"{host}/{BACKEND_URL_SUFFIX}",
+ api_key=api_key,
+ )
+
if variant_name in config["variants"]:
overwrite = questionary.confirm(
"This variant already exists. Do you want to overwrite it?"
@@ -116,9 +125,13 @@ def add_variant(
fg="bright_black",
)
)
- image: Image = client.send_docker_tar(
- app_id, base_name, tar_path, host, api_key
- )
+ with tar_path.open("rb") as tar_file:
+ built_image: Image = client.build_image(
+ app_id=app_id,
+ base_name=base_name,
+ tar_file=tar_file,
+ )
+ image = Image(**built_image.dict())
if tar_path.exists():
tar_path.unlink()
@@ -137,12 +150,13 @@ def add_variant(
)
variant_id = config["variant_ids"][config["variants"].index(variant_name)]
client.update_variant_image(
- variant_id, image, host, api_key
+ variant_id=variant_id,
+ request=image, # because Fern code uses "request: Image" instead of "image: Image"
) # this automatically restarts
else:
click.echo(click.style(f"Adding {variant_name} to server...", fg="yellow"))
- response = client.add_variant_to_server(
- app_id, base_name, image, host, api_key
+ response = add_variant_to_server(
+ app_id, base_name, image, f"{host}/{BACKEND_URL_SUFFIX}", api_key
)
variant_id = response["variant_id"]
config["variants"].append(variant_name)
@@ -160,7 +174,8 @@ def add_variant(
if overwrite:
# Track a deployment event
if tracking_enabled:
- user_id = client.retrieve_user_id(host, api_key)
+ get_user_id = client.user_profile()
+ user_id = get_user_id["id"]
event_track.capture_event(
user_id,
"app_deployment",
@@ -182,7 +197,8 @@ def add_variant(
else:
# Track a deployment event
if tracking_enabled:
- user_id = client.retrieve_user_id(host, api_key)
+ get_user_id = client.user_profile()
+ user_id = get_user_id["id"]
event_track.capture_event(
user_id,
"app_deployment",
@@ -220,8 +236,8 @@ def start_variant(variant_id: str, app_folder: str, host: str):
app_folder = Path(app_folder)
config_file = app_folder / "config.toml"
config = toml.load(config_file)
- api_key = config.get("api_key", None)
app_id = config["app_id"]
+ api_key = config.get("api_key", "")
if len(config["variants"]) == 0:
click.echo("No variants found. Please add a variant first.")
@@ -242,7 +258,12 @@ def start_variant(variant_id: str, app_folder: str, host: str):
).ask()
variant_id = config["variant_ids"][config["variants"].index(variant_name)]
- endpoint = client.start_variant(variant_id=variant_id, host=host, api_key=api_key)
+ client = AgentaApi(
+ base_url=f"{host}/{BACKEND_URL_SUFFIX}",
+ api_key=api_key,
+ )
+
+ endpoint = client.start_variant(variant_id=variant_id, action={"action": "START"})
click.echo("\n" + click.style("Congratulations! ๐", bold=True, fg="green"))
click.echo(
click.style("Your app has been deployed locally as an API. ๐", fg="cyan")
@@ -278,7 +299,7 @@ def remove_variant(variant_name: str, app_folder: str, host: str):
config_file = Path(app_folder) / "config.toml"
config = toml.load(config_file)
app_name = config["app_name"]
- api_key = config.get("api_key", None)
+ api_key = config.get("api_key", "")
if not config["variants"]:
click.echo(
@@ -303,8 +324,14 @@ def remove_variant(variant_name: str, app_folder: str, host: str):
"Please choose a variant", choices=config["variants"]
).ask()
variant_id = config["variant_ids"][config["variants"].index(variant_name)]
+
+ client = AgentaApi(
+ base_url=f"{host}/{BACKEND_URL_SUFFIX}",
+ api_key=api_key,
+ )
+
try:
- client.remove_variant(variant_id, host, api_key)
+ client.remove_variant(variant_id=variant_id)
except Exception as ex:
click.echo(
click.style(
@@ -331,13 +358,18 @@ def list_variants(app_folder: str, host: str):
"""
config_file = Path(app_folder) / "config.toml"
config = toml.load(config_file)
- app_id = config["app_id"]
app_name = config["app_name"]
- api_key = config.get("api_key", None)
+ app_id = config["app_id"]
+ api_key = config.get("api_key", "")
variants = []
+ client = AgentaApi(
+ base_url=f"{host}/{BACKEND_URL_SUFFIX}",
+ api_key=api_key,
+ )
+
try:
- variants: List[AppVariant] = client.list_variants(app_id, host, api_key)
+ variants: List[AppVariant] = client.list_app_variants(app_id=app_id)
except Exception as ex:
raise ex
@@ -437,6 +469,13 @@ def serve_cli(ctx, app_folder: str, file_name: str):
click.echo(click.style(f"Error message: {str(e)}", fg="red"))
return
+ try:
+ api_key = helper.get_global_config("api_key")
+ except Exception as e:
+ click.echo(click.style("Failed to retrieve the api key.", fg="red"))
+ click.echo(click.style(f"Error message: {str(e)}", fg="red"))
+ return
+
try:
variant_id = add_variant(app_folder=app_folder, file_name=file_name, host=host)
except Exception as e:
diff --git a/agenta-cli/agenta/client/Readme.md b/agenta-cli/agenta/client/Readme.md
index 32e05bce73..0850350b76 100644
--- a/agenta-cli/agenta/client/Readme.md
+++ b/agenta-cli/agenta/client/Readme.md
@@ -1,3 +1,96 @@
Client code to communicate with the backend.
-Currently the models are manually copied from the backend code. This needs to change.
\ No newline at end of file
+Currently the models are manually copied from the backend code. This needs to change.
+
+# Generate Backend
+
+To generate the client code using Fern, follow the steps below.
+
+1. Open a Terminal and navigate to the folder where this Readme.md file is. For example;
+ ```
+ $ cd agenta/agenta-cli/agenta/client
+ ```
+
+2. Next ensure you have installed Fern by executing the command;
+ ```
+ $ npm install -g fern-api
+ ```
+3. Execute this command to initialize Fern to import and use the OpenAPI spec;
+
+> To use an OpenAPI spec, you can pass in the filepath or URL.
+> We'll be using a url to the openapi.json for [Agenta Cloud](https://cloud.agenta.ai)
+
+```
+fern init --openapi https://cloud.agenta.ai/api/openapi.json
+```
+
+4. Add the Fern Python SDK;
+ ```bash
+ fern add fern-python-sdk
+ ```
+
+5. Go to the generators.yml, which would look like this;
+
+ ```yaml
+ default-group: local
+ groups:
+ local:
+ generators:
+ - name: fernapi/fern-typescript-node-sdk
+ version: 0.7.2
+ output:
+ location: local-file-system
+ path: ../generated/typescript
+ - name: fernapi/fern-python-sdk
+ version: 0.6.0
+ ```
+
+ Replace the following;
+
+ ```yaml
+ - name: fernapi/fern-typescript-node-sdk
+ version: 0.7.2
+ ```
+
+ with this and delete it from the bottom of the file after;
+
+ ```yaml
+ - name: fernapi/fern-python-sdk
+ version: 0.6.0
+ ```
+
+6. Change the output path from `path: ../generated/typescript` to `path: ../backend`
+
+ Now your generators.yml should look like this;
+ ```yaml
+ default-group: local
+ groups:
+ local:
+ generators:
+ - name: fernapi/fern-python-sdk
+ version: 0.6.0
+ output:
+ location: local-file-system
+ path: ../backend
+ ```
+
+
+
+7. Go to the fern.config.json file and change the value of "organization" to `agenta`
+
+
+
+8. Generate the client code
+
+   ```bash
+   fern generate
+   ```
+
+9. Change the timeout for the build_image function endpoint
+    Go to the client.py in the generated code folder, search for the `build_image` function in the AgentaApi class, and change the timeout to 600.
+    When done, it should look like this;
+
+
+
+
+10. Delete the fern folder.
diff --git a/agenta-cli/agenta/client/api.py b/agenta-cli/agenta/client/api.py
new file mode 100644
index 0000000000..6da7869151
--- /dev/null
+++ b/agenta-cli/agenta/client/api.py
@@ -0,0 +1,74 @@
+import os
+import toml
+import time
+import click
+from typing import Dict
+from pathlib import Path
+from agenta.client.backend import client
+from agenta.client.api_models import Image
+from requests.exceptions import RequestException
+from agenta.client.backend.client import AgentaApi
+from agenta.client.exceptions import APIRequestError
+
+
+def add_variant_to_server(
+ app_id: str,
+ base_name: str,
+ image: Image,
+ backend_url: str,
+ api_key: str,
+ retries=10,
+ backoff_factor=1,
+) -> Dict:
+ """
+ Adds a variant to the server with a retry mechanism and a single-line loading state.
+
+ Args:
+ app_id (str): The ID of the app to add the variant to.
+ base_name (str): The base name for the variant.
+ image (Image): The image to use for the variant.
+ retries (int): Number of times to retry the request.
+ backoff_factor (float): Factor to determine the delay between retries (exponential backoff).
+
+ Returns:
+ dict: The JSON response from the server.
+
+ Raises:
+ APIRequestError: If the request to the server fails after retrying.
+ """
+
+ click.echo(
+ click.style("Waiting for the variant to be ready", fg="yellow"), nl=False
+ )
+
+ client = AgentaApi(
+ base_url=backend_url,
+ api_key=api_key,
+ )
+ for attempt in range(retries):
+ try:
+ response = client.add_variant_from_image(
+ app_id=app_id,
+ variant_name=f"{base_name.lower()}.default",
+ base_name=base_name,
+ config_name="default",
+ docker_id=image.docker_id,
+ tags=image.tags,
+ )
+ click.echo(click.style("\nVariant added successfully.", fg="green"))
+ return response
+ except RequestException as e:
+ if attempt < retries - 1:
+ click.echo(click.style(".", fg="yellow"), nl=False)
+ time.sleep(backoff_factor * (2**attempt))
+ else:
+ raise APIRequestError(
+ click.style(
+ f"\nRequest to app_variant endpoint failed with status code {response.status_code} and error message: {e}.",
+ fg="red",
+ )
+ )
+ except Exception as e:
+ raise APIRequestError(
+ click.style(f"\nAn unexpected error occurred: {e}", fg="red")
+ )
diff --git a/agenta-cli/agenta/client/backend/__init__.py b/agenta-cli/agenta/client/backend/__init__.py
new file mode 100644
index 0000000000..eb6978b547
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/__init__.py
@@ -0,0 +1,97 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ AddVariantFromBaseAndConfigResponse,
+ App,
+ AppVariantOutput,
+ BaseOutput,
+ BodyImportTestset,
+ ContainerTemplatesResponse,
+ CreateAppOutput,
+ CreateCustomEvaluation,
+ CustomEvaluationDetail,
+ CustomEvaluationNames,
+ CustomEvaluationOutput,
+ DockerEnvVars,
+ EnvironmentOutput,
+ Evaluation,
+ EvaluationScenario,
+ EvaluationScenarioInput,
+ EvaluationScenarioOutput,
+ EvaluationScenarioScore,
+ EvaluationScenarioUpdateScore,
+ EvaluationStatusEnum,
+ EvaluationType,
+ EvaluationTypeSettings,
+ EvaluationWebhook,
+ Feedback,
+ GetConfigReponse,
+ HttpValidationError,
+ Image,
+ InviteRequest,
+ ListApiKeysOutput,
+ NewTestset,
+ Organization,
+ OrganizationOutput,
+ SimpleEvaluationOutput,
+ Span,
+ Template,
+ TemplateImageInfo,
+ TestSetOutputResponse,
+ TestSetSimpleResponse,
+ Trace,
+ Uri,
+ ValidationError,
+ ValidationErrorLocItem,
+ VariantAction,
+ VariantActionEnum,
+)
+from .errors import UnprocessableEntityError
+
+__all__ = [
+ "AddVariantFromBaseAndConfigResponse",
+ "App",
+ "AppVariantOutput",
+ "BaseOutput",
+ "BodyImportTestset",
+ "ContainerTemplatesResponse",
+ "CreateAppOutput",
+ "CreateCustomEvaluation",
+ "CustomEvaluationDetail",
+ "CustomEvaluationNames",
+ "CustomEvaluationOutput",
+ "DockerEnvVars",
+ "EnvironmentOutput",
+ "Evaluation",
+ "EvaluationScenario",
+ "EvaluationScenarioInput",
+ "EvaluationScenarioOutput",
+ "EvaluationScenarioScore",
+ "EvaluationScenarioUpdateScore",
+ "EvaluationStatusEnum",
+ "EvaluationType",
+ "EvaluationTypeSettings",
+ "EvaluationWebhook",
+ "Feedback",
+ "GetConfigReponse",
+ "HttpValidationError",
+ "Image",
+ "InviteRequest",
+ "ListApiKeysOutput",
+ "NewTestset",
+ "Organization",
+ "OrganizationOutput",
+ "SimpleEvaluationOutput",
+ "Span",
+ "Template",
+ "TemplateImageInfo",
+ "TestSetOutputResponse",
+ "TestSetSimpleResponse",
+ "Trace",
+ "UnprocessableEntityError",
+ "Uri",
+ "ValidationError",
+ "ValidationErrorLocItem",
+ "VariantAction",
+ "VariantActionEnum",
+]
diff --git a/agenta-cli/agenta/client/backend/client.py b/agenta-cli/agenta/client/backend/client.py
new file mode 100644
index 0000000000..5cd775daa1
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/client.py
@@ -0,0 +1,5695 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+import httpx
+
+from .core.api_error import ApiError
+from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from .core.jsonable_encoder import jsonable_encoder
+from .core.remove_none_from_dict import remove_none_from_dict
+from .errors.unprocessable_entity_error import UnprocessableEntityError
+from .types.add_variant_from_base_and_config_response import (
+ AddVariantFromBaseAndConfigResponse,
+)
+from .types.app import App
+from .types.app_variant_output import AppVariantOutput
+from .types.base_output import BaseOutput
+from .types.container_templates_response import ContainerTemplatesResponse
+from .types.create_app_output import CreateAppOutput
+from .types.create_custom_evaluation import CreateCustomEvaluation
+from .types.custom_evaluation_detail import CustomEvaluationDetail
+from .types.custom_evaluation_names import CustomEvaluationNames
+from .types.custom_evaluation_output import CustomEvaluationOutput
+from .types.docker_env_vars import DockerEnvVars
+from .types.environment_output import EnvironmentOutput
+from .types.evaluation import Evaluation
+from .types.evaluation_scenario import EvaluationScenario
+from .types.evaluation_scenario_input import EvaluationScenarioInput
+from .types.evaluation_scenario_output import EvaluationScenarioOutput
+from .types.evaluation_scenario_update_score import EvaluationScenarioUpdateScore
+from .types.evaluation_status_enum import EvaluationStatusEnum
+from .types.evaluation_type import EvaluationType
+from .types.evaluation_type_settings import EvaluationTypeSettings
+from .types.evaluation_webhook import EvaluationWebhook
+from .types.feedback import Feedback
+from .types.get_config_reponse import GetConfigReponse
+from .types.http_validation_error import HttpValidationError
+from .types.image import Image
+from .types.invite_request import InviteRequest
+from .types.list_api_keys_output import ListApiKeysOutput
+from .types.new_testset import NewTestset
+from .types.organization import Organization
+from .types.organization_output import OrganizationOutput
+from .types.simple_evaluation_output import SimpleEvaluationOutput
+from .types.span import Span
+from .types.test_set_output_response import TestSetOutputResponse
+from .types.test_set_simple_response import TestSetSimpleResponse
+from .types.trace import Trace
+from .types.uri import Uri
+from .types.variant_action import VariantAction
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AgentaApi:
+ def __init__(
+ self, *, base_url: str, api_key: str, timeout: typing.Optional[float] = 60
+ ):
+ self._client_wrapper = SyncClientWrapper(
+ base_url=base_url,
+ api_key=api_key,
+ httpx_client=httpx.Client(timeout=timeout),
+ )
+
+ def list_api_keys(self) -> typing.List[ListApiKeysOutput]:
+ """
+ List all API keys associated with the authenticated user.
+
+ Args:
+ request (Request): The incoming request object.
+
+ Returns:
+ List[ListAPIKeysOutput]: A list of API Keys associated with the user.
+
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.list_api_keys()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "keys"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[ListApiKeysOutput], _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_api_key(self) -> str:
+ """
+ Creates an API key for a user.
+
+ Args:
+ request (Request): The request object containing the user ID in the request state.
+
+ Returns:
+ str: The created API key.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "keys"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_api_key(self, key_prefix: str) -> typing.Dict[str, typing.Any]:
+ """
+ Delete an API key with the given key prefix for the authenticated user.
+
+ Args:
+ key_prefix (str): The prefix of the API key to be deleted.
+ request (Request): The incoming request object.
+
+ Returns:
+ dict: A dictionary containing a success message upon successful deletion.
+
+ Raises:
+ HTTPException: If the API key is not found or does not belong to the user.
+
+ Parameters:
+ - key_prefix: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.delete_api_key(key_prefix="key-prefix")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"keys/{key_prefix}"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Dict[str, typing.Any], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def validate_api_key(self, key_prefix: str) -> bool:
+ """
+ This Function is called by the CLI and is used to validate an API key provided by a user in agenta init setup.
+ Returns:
+ bool: True. If the request reaches this point, the API key is valid.
+
+ Parameters:
+ - key_prefix: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"keys/{key_prefix}/validate"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(bool, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def fetch_organization_details(self, org_id: str) -> typing.Any:
        """
        Get an organization's details.

        Raises:
            HTTPException: _description_
            Permission Denied

        Returns:
            OrganizationDB Instance

        Parameters:
            - org_id: str.
        """
        # GET /organizations_ee/{org_id}
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"organizations_ee/{org_id}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            # 422 carries a structured validation-error payload.
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def invite_to_org(self, org_id: str, *, request: InviteRequest) -> typing.Any:
        """
        Invite a user to an Organization.

        Raises:
            HTTPException: _description_; status_code: 500
            HTTPException: This Organization doesn't exist; status_code: 400
            HTTPException: Failed to invite user to organization; status_code: 403
            HTTPException: You cannot invite yourself to your own organization; status_code: 400
            HTTPException: You do not have permission to access this organization; status_code: 500
            UnprocessableEntityError: on a 422 validation failure.
            ApiError: on any other non-2xx response.

        Returns:
            JSONResponse: Invited user to organization; status_code: 200

        Parameters:
            - org_id: str.

            - request: InviteRequest.
        """
        # POST /organizations_ee/{org_id}/invite with the invite payload as JSON body.
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"organizations_ee/{org_id}/invite",
            ),
            json=jsonable_encoder(request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def resend_invitation(self, org_id: str, *, request: InviteRequest) -> typing.Any:
        """
        Resend an invitation to a user to an Organization.

        Raises:
            HTTPException: _description_; status_code: 500
            HTTPException: Invitation not found or has expired; status_code: 400
            HTTPException: You already belong to this organization; status_code: 400
            UnprocessableEntityError: on a 422 validation failure.
            ApiError: on any other non-2xx response.

        Returns:
            JSONResponse: Resent invitation to user; status_code: 200

        Parameters:
            - org_id: str.

            - request: InviteRequest.
        """
        # POST /organizations_ee/{org_id}/invite/resend with the invite payload as JSON body.
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"organizations_ee/{org_id}/invite/resend",
            ),
            json=jsonable_encoder(request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def add_user_to_org(self, org_id: str, *, token: str) -> typing.Any:
        """
        Accept an invitation and add the user to an Organization.

        NOTE(review): the original generated docstring said "Invite a user", but the
        endpoint called here is POST /organizations_ee/{org_id}/accept with an
        invitation token — i.e. it accepts a pending invitation.

        Raises:
            HTTPException: _description_; status_code: 500
            HTTPException: Invitation not found or has expired; status_code: 400
            HTTPException: You already belong to this organization; status_code: 400
            UnprocessableEntityError: on a 422 validation failure.
            ApiError: on any other non-2xx response.

        Returns:
            JSONResponse: Added user to organization; status_code: 200

        Parameters:
            - org_id: str.

            - token: str. The invitation token to redeem.
        """
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"organizations_ee/{org_id}/accept",
            ),
            json=jsonable_encoder({"token": token}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def create_organization(self, *, request: Organization) -> typing.Any:
        """
        Create a new organization (POST /organizations_ee/create).

        Raises:
            UnprocessableEntityError: on a 422 validation failure.
            ApiError: on any other non-2xx response.

        Parameters:
            - request: Organization.
        """
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", "organizations_ee/create"
            ),
            json=jsonable_encoder(request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def update_organization(
        self,
        org_id: str,
        *,
        name: typing.Optional[str] = OMIT,
        description: typing.Optional[str] = OMIT,
    ) -> typing.Any:
        """
        Update an organization's name and/or description
        (POST /organizations_ee/{org_id}/update).

        Fields left at the OMIT sentinel are excluded from the request body
        entirely, so the server can distinguish "not provided" from an
        explicit None.

        Raises:
            UnprocessableEntityError: on a 422 validation failure.
            ApiError: on any other non-2xx response.

        Parameters:
            - org_id: str.

            - name: typing.Optional[str].

            - description: typing.Optional[str].
        """
        _request: typing.Dict[str, typing.Any] = {}
        if name is not OMIT:
            _request["name"] = name
        if description is not OMIT:
            _request["description"] = description
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"organizations_ee/{org_id}/update",
            ),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def health_check(self) -> typing.Any:
        """
        Query the API health endpoint (GET /health).

        Returns the parsed JSON body on any 2xx response.

        Raises:
            ApiError: on any non-2xx response.
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "health"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def user_profile(self) -> typing.Any:
        """
        Fetch the authenticated user's profile (GET /profile).

        Returns the parsed JSON body on any 2xx response.

        Raises:
            ApiError: on any non-2xx response.
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "profile"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def list_app_variants(self, app_id: str) -> typing.List[AppVariantOutput]:
        """
        Retrieve a list of app variants for a given app ID.

        Args:
            app_id (str): The ID of the app to retrieve variants for.
            stoken_session (SessionContainer, optional): The session container to verify the user's session. Defaults to Depends(verify_session()).

        Returns:
            List[AppVariantOutput]: A list of app variants for the given app ID.

        Parameters:
            - app_id: str.
        ---
        from agenta.client import AgentaApi

        client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
        client.list_app_variants(app_id="app-id")
        """
        # GET /apps/{app_id}/variants
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"apps/{app_id}/variants"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[AppVariantOutput], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def get_variant_by_env(self, *, app_id: str, environment: str) -> AppVariantOutput:
        """
        Retrieve the app variant based on the provided app_id and environment.

        Args:
            app_id (str): The ID of the app to retrieve the variant for.
            environment (str): The environment of the app variant to retrieve.
            stoken_session (SessionContainer, optional): The session token container. Defaults to Depends(verify_session()).

        Raises:
            HTTPException: If the app variant is not found (status_code=500), or if a ValueError is raised (status_code=400), or if any other exception is raised (status_code=500).

        Returns:
            AppVariantOutput: The retrieved app variant.

        Parameters:
            - app_id: str.

            - environment: str.
        """
        # GET /apps/get_variant_by_env; both filters are sent as query parameters.
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", "apps/get_variant_by_env"
            ),
            # remove_none_from_dict drops None-valued params from the query string.
            params=remove_none_from_dict(
                {"app_id": app_id, "environment": environment}
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(AppVariantOutput, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def list_apps(
        self,
        *,
        app_name: typing.Optional[str] = None,
        org_id: typing.Optional[str] = None,
    ) -> typing.List[App]:
        """
        Retrieve a list of apps filtered by app_name and org_id.

        Args:
            app_name (Optional[str]): The name of the app to filter by.
            org_id (Optional[str]): The ID of the organization to filter by.
            stoken_session (SessionContainer): The session container.

        Returns:
            List[App]: A list of apps filtered by app_name and org_id.

        Raises:
            HTTPException: If there was an error retrieving the list of apps.

        Parameters:
            - app_name: typing.Optional[str].

            - org_id: typing.Optional[str].
        ---
        from agenta.client import AgentaApi

        client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
        client.list_apps()
        """
        # GET /apps; unset (None) filters are stripped from the query string.
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "apps"),
            params=remove_none_from_dict({"app_name": app_name, "org_id": org_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[App], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def create_app(
        self, *, app_name: str, organization_id: typing.Optional[str] = OMIT
    ) -> CreateAppOutput:
        """
        Create a new app for a user or organization.

        Args:
            payload (CreateApp): The payload containing the app name and organization ID (optional).
            stoken_session (SessionContainer): The session container containing the user's session token.

        Returns:
            CreateAppOutput: The output containing the newly created app's ID and name.

        Raises:
            HTTPException: If there is an error creating the app or the user does not have permission to access the app.

        Parameters:
            - app_name: str.

            - organization_id: typing.Optional[str]. Omitted from the request
              body entirely when left at the OMIT sentinel.
        """
        _request: typing.Dict[str, typing.Any] = {"app_name": app_name}
        if organization_id is not OMIT:
            _request["organization_id"] = organization_id
        # POST /apps
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "apps"),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(CreateAppOutput, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def add_variant_from_image(
        self,
        app_id: str,
        *,
        variant_name: str,
        docker_id: str,
        tags: str,
        base_name: typing.Optional[str] = OMIT,
        config_name: typing.Optional[str] = OMIT,
    ) -> typing.Any:
        """
        Add a new variant to an app based on a Docker image.

        Args:
            app_id (str): The ID of the app to add the variant to.
            payload (AddVariantFromImagePayload): The payload containing information about the variant to add.
            stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).

        Raises:
            HTTPException: If the feature flag is set to "demo" or if the image does not have a tag starting with the registry name (agenta-server) or if the image is not found or if the user does not have access to the app.

        Returns:
            dict: The newly added variant.

        Parameters:
            - app_id: str.

            - variant_name: str.

            - docker_id: str.

            - tags: str.

            - base_name: typing.Optional[str]. Excluded from the body when OMIT.

            - config_name: typing.Optional[str]. Excluded from the body when OMIT.
        """
        _request: typing.Dict[str, typing.Any] = {
            "variant_name": variant_name,
            "docker_id": docker_id,
            "tags": tags,
        }
        if base_name is not OMIT:
            _request["base_name"] = base_name
        if config_name is not OMIT:
            _request["config_name"] = config_name
        # POST /apps/{app_id}/variant/from-image
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"apps/{app_id}/variant/from-image",
            ),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def remove_app(self, app_id: str) -> typing.Any:
        """
        Remove app, all its variant, containers and images

        Arguments:
            app -- App to remove

        Raises:
            UnprocessableEntityError: on a 422 validation failure.
            ApiError: on any other non-2xx response.

        Parameters:
            - app_id: str.
        """
        # DELETE /apps/{app_id}
        _response = self._client_wrapper.httpx_client.request(
            "DELETE",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"apps/{app_id}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def create_app_and_variant_from_template(
        self,
        *,
        app_name: str,
        template_id: str,
        env_vars: typing.Dict[str, str],
        organization_id: typing.Optional[str] = OMIT,
    ) -> AppVariantOutput:
        """
        Create an app and variant from a template.

        Args:
            payload (CreateAppVariant): The payload containing the app and variant information.
            stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).

        Raises:
            HTTPException: If the user has reached the app limit or if an app with the same name already exists.

        Returns:
            AppVariantOutput: The output of the created app variant.

        Parameters:
            - app_name: str.

            - template_id: str.

            - env_vars: typing.Dict[str, str].

            - organization_id: typing.Optional[str]. Excluded from the body when OMIT.
        """
        _request: typing.Dict[str, typing.Any] = {
            "app_name": app_name,
            "template_id": template_id,
            "env_vars": env_vars,
        }
        if organization_id is not OMIT:
            _request["organization_id"] = organization_id
        # POST /apps/app_and_variant_from_template
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                "apps/app_and_variant_from_template",
            ),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(AppVariantOutput, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def list_environments(self, app_id: str) -> typing.List[EnvironmentOutput]:
        """
        Retrieve a list of environments for a given app ID.

        Args:
            app_id (str): The ID of the app to retrieve environments for.
            stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).

        Returns:
            List[EnvironmentOutput]: A list of environment objects.

        Parameters:
            - app_id: str.
        ---
        from agenta.client import AgentaApi

        client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
        client.list_environments(app_id="app-id")
        """
        # GET /apps/{app_id}/environments
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"apps/{app_id}/environments"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[EnvironmentOutput], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def add_variant_from_base_and_config(
        self,
        *,
        base_id: str,
        new_variant_name: str,
        new_config_name: str,
        parameters: typing.Dict[str, typing.Any],
    ) -> AddVariantFromBaseAndConfigResponse:
        """
        Add a new variant based on an existing one.
        Same as POST /config

        Args:
            payload (AddVariantFromBasePayload): Payload containing base variant ID, new variant name, and parameters.
            stoken_session (SessionContainer, optional): Session container. Defaults to result of verify_session().

        Raises:
            HTTPException: Raised if the variant could not be added or accessed.

        Returns:
            Union[AppVariantOutput, Any]: New variant details or exception.

        Parameters:
            - base_id: str.

            - new_variant_name: str.

            - new_config_name: str.

            - parameters: typing.Dict[str, typing.Any].
        """
        # POST /variants/from-base; all four fields are always sent.
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", "variants/from-base"
            ),
            json=jsonable_encoder(
                {
                    "base_id": base_id,
                    "new_variant_name": new_variant_name,
                    "new_config_name": new_config_name,
                    "parameters": parameters,
                }
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(AddVariantFromBaseAndConfigResponse, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def start_variant(
        self,
        variant_id: str,
        *,
        action: VariantAction,
        env_vars: typing.Optional[DockerEnvVars] = OMIT,
    ) -> Uri:
        """
        Start a variant of an app.

        Args:
            variant_id (str): The ID of the variant to start.
            action (VariantAction): The action to perform on the variant (start).
            env_vars (Optional[DockerEnvVars], optional): The environment variables to inject to the Docker container. Defaults to None.
            stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).

        Returns:
            URI: The URL of the started variant.

        Raises:
            HTTPException: If the app container cannot be started.

        Parameters:
            - variant_id: str.

            - action: VariantAction.

            - env_vars: typing.Optional[DockerEnvVars]. Excluded from the body when OMIT.
        """
        _request: typing.Dict[str, typing.Any] = {"action": action}
        if env_vars is not OMIT:
            _request["env_vars"] = env_vars
        # PUT /variants/{variant_id}
        _response = self._client_wrapper.httpx_client.request(
            "PUT",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"variants/{variant_id}"
            ),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(Uri, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def remove_variant(self, variant_id: str) -> typing.Any:
        """
        Remove a variant from the server.
        In the case it's the last variant using the image, stop the container and remove the image.

        Arguments:
            app_variant -- AppVariant to remove

        Raises:
            HTTPException: If there is a problem removing the app variant

        Parameters:
            - variant_id: str.
        """
        # DELETE /variants/{variant_id}
        _response = self._client_wrapper.httpx_client.request(
            "DELETE",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"variants/{variant_id}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def update_variant_parameters(
        self, variant_id: str, *, parameters: typing.Dict[str, typing.Any]
    ) -> typing.Any:
        """
        Updates the parameters for an app variant.

        Args:
            variant_id (str): The ID of the app variant to update.
            payload (UpdateVariantParameterPayload): The payload containing the updated parameters.
            stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).

        Raises:
            HTTPException: If there is an error while trying to update the app variant.

        Returns:
            JSONResponse: A JSON response containing the updated app variant parameters.

        Parameters:
            - variant_id: str.

            - parameters: typing.Dict[str, typing.Any].
        """
        # PUT /variants/{variant_id}/parameters
        _response = self._client_wrapper.httpx_client.request(
            "PUT",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"variants/{variant_id}/parameters",
            ),
            json=jsonable_encoder({"parameters": parameters}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def update_variant_image(self, variant_id: str, *, request: Image) -> typing.Any:
        """
        Updates the image used in an app variant.

        Args:
            variant_id (str): The ID of the app variant to update.
            image (Image): The image information to update.

        Raises:
            HTTPException: If an error occurs while trying to update the app variant.

        Returns:
            JSONResponse: A JSON response indicating whether the update was successful or not.

        Parameters:
            - variant_id: str.

            - request: Image.
        """
        # PUT /variants/{variant_id}/image with the image payload as JSON body.
        _response = self._client_wrapper.httpx_client.request(
            "PUT",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"variants/{variant_id}/image",
            ),
            json=jsonable_encoder(request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def fetch_list_evaluations(self, *, app_id: str) -> typing.List[Evaluation]:
        """
        Fetches a list of evaluations filtered by an app ID.

        NOTE(review): the generated server docstring described app_id as optional,
        but in this client signature it is a required keyword argument.

        Args:
            app_id (str): The app ID to filter the evaluations.

        Returns:
            List[Evaluation]: A list of evaluations.

        Parameters:
            - app_id: str.
        ---
        from agenta.client import AgentaApi

        client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
        client.fetch_list_evaluations(app_id="app-id")
        """
        # GET /evaluations?app_id=...
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", "evaluations"
            ),
            params=remove_none_from_dict({"app_id": app_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[Evaluation], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def create_evaluation(
        self,
        *,
        app_id: str,
        variant_ids: typing.List[str],
        evaluation_type: EvaluationType,
        evaluation_type_settings: typing.Optional[EvaluationTypeSettings] = OMIT,
        inputs: typing.List[str],
        testset_id: str,
        status: str,
    ) -> SimpleEvaluationOutput:
        """
        Creates a new comparison table document
        Raises:
            HTTPException: _description_
        Returns:
            _description_

        Parameters:
            - app_id: str.

            - variant_ids: typing.List[str].

            - evaluation_type: EvaluationType.

            - evaluation_type_settings: typing.Optional[EvaluationTypeSettings].
              Excluded from the request body when left at the OMIT sentinel.

            - inputs: typing.List[str].

            - testset_id: str.

            - status: str.
        """
        _request: typing.Dict[str, typing.Any] = {
            "app_id": app_id,
            "variant_ids": variant_ids,
            "evaluation_type": evaluation_type,
            "inputs": inputs,
            "testset_id": testset_id,
            "status": status,
        }
        if evaluation_type_settings is not OMIT:
            _request["evaluation_type_settings"] = evaluation_type_settings
        # POST /evaluations
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", "evaluations"
            ),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(SimpleEvaluationOutput, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def delete_evaluations(
        self, *, evaluations_ids: typing.List[str]
    ) -> typing.List[str]:
        """
        Delete specific comparison tables based on their unique IDs.

        Args:
            delete_evaluations (List[str]): The unique identifiers of the comparison tables to delete.

        Returns:
            A list of the deleted comparison tables' IDs.

        Parameters:
            - evaluations_ids: typing.List[str].
        ---
        from agenta.client import AgentaApi

        client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
        client.delete_evaluations(evaluations_ids=[])
        """
        # DELETE /evaluations with the IDs in the JSON body (not the query string).
        _response = self._client_wrapper.httpx_client.request(
            "DELETE",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", "evaluations"
            ),
            json=jsonable_encoder({"evaluations_ids": evaluations_ids}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[str], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def fetch_evaluation(self, evaluation_id: str) -> Evaluation:
        """
        Fetches a single evaluation based on its ID.

        Args:
            evaluation_id (str): The ID of the evaluation to fetch.

        Returns:
            Evaluation: The fetched evaluation.

        Parameters:
            - evaluation_id: str.
        """
        # GET /evaluations/{evaluation_id}
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"evaluations/{evaluation_id}",
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(Evaluation, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
    def update_evaluation(
        self,
        evaluation_id: str,
        *,
        status: typing.Optional[EvaluationStatusEnum] = OMIT,
        evaluation_type_settings: typing.Optional[EvaluationTypeSettings] = OMIT,
    ) -> typing.Any:
        """
        Updates an evaluation's status.

        Raises:
            HTTPException: If the columns in the test set do not match with the inputs in the variant.

        Returns:
            None: A 204 No Content status code, indicating that the update was successful.

        Parameters:
            - evaluation_id: str.

            - status: typing.Optional[EvaluationStatusEnum]. Excluded from the
              request body when left at the OMIT sentinel.

            - evaluation_type_settings: typing.Optional[EvaluationTypeSettings].
              Excluded from the request body when left at the OMIT sentinel.
        """
        _request: typing.Dict[str, typing.Any] = {}
        if status is not OMIT:
            _request["status"] = status
        if evaluation_type_settings is not OMIT:
            _request["evaluation_type_settings"] = evaluation_type_settings
        # PUT /evaluations/{evaluation_id}
        _response = self._client_wrapper.httpx_client.request(
            "PUT",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/",
                f"evaluations/{evaluation_id}",
            ),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body is not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def fetch_evaluation_scenarios(
+ self, evaluation_id: str
+ ) -> typing.List[EvaluationScenario]:
+ """
+ Fetches evaluation scenarios for a given evaluation ID.
+
+ Arguments:
+ evaluation_id (str): The ID of the evaluation for which to fetch scenarios.
+
+ Raises:
+ HTTPException: If the evaluation is not found or access is denied.
+
+ Returns:
+ List[EvaluationScenario]: A list of evaluation scenarios.
+
+ Parameters:
+ - evaluation_id: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.fetch_evaluation_scenarios(evaluation_id="evaluation-id")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}/evaluation_scenarios",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[EvaluationScenario], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_evaluation_scenario(
+ self, evaluation_id: str, *, request: EvaluationScenario
+ ) -> typing.Any:
+ """
+ Create a new evaluation scenario for a given evaluation ID.
+
+ Raises:
+ HTTPException: If evaluation not found or access denied.
+
+ Returns:
+ None: 204 No Content status code upon success.
+
+ Parameters:
+ - evaluation_id: str.
+
+ - request: EvaluationScenario.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}/evaluation_scenario",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_evaluation_scenario(
+ self,
+ evaluation_id: str,
+ evaluation_scenario_id: str,
+ evaluation_type: EvaluationType,
+ *,
+ vote: typing.Optional[str] = OMIT,
+ score: typing.Optional[EvaluationScenarioUpdateScore] = OMIT,
+ correct_answer: typing.Optional[str] = OMIT,
+ outputs: typing.Optional[typing.List[EvaluationScenarioOutput]] = OMIT,
+ inputs: typing.Optional[typing.List[EvaluationScenarioInput]] = OMIT,
+ is_pinned: typing.Optional[bool] = OMIT,
+ note: typing.Optional[str] = OMIT,
+ ) -> typing.Any:
+ """
+ Updates an evaluation scenario's vote or score based on its type.
+
+ Raises:
+ HTTPException: If update fails or unauthorized.
+
+ Returns:
+ None: 204 No Content status code upon successful update.
+
+ Parameters:
+ - evaluation_id: str.
+
+ - evaluation_scenario_id: str.
+
+ - evaluation_type: EvaluationType.
+
+ - vote: typing.Optional[str].
+
+ - score: typing.Optional[EvaluationScenarioUpdateScore].
+
+ - correct_answer: typing.Optional[str].
+
+ - outputs: typing.Optional[typing.List[EvaluationScenarioOutput]].
+
+ - inputs: typing.Optional[typing.List[EvaluationScenarioInput]].
+
+ - is_pinned: typing.Optional[bool].
+
+ - note: typing.Optional[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if vote is not OMIT:
+ _request["vote"] = vote
+ if score is not OMIT:
+ _request["score"] = score
+ if correct_answer is not OMIT:
+ _request["correct_answer"] = correct_answer
+ if outputs is not OMIT:
+ _request["outputs"] = outputs
+ if inputs is not OMIT:
+ _request["inputs"] = inputs
+ if is_pinned is not OMIT:
+ _request["is_pinned"] = is_pinned
+ if note is not OMIT:
+ _request["note"] = note
+ _response = self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}/evaluation_scenario/{evaluation_scenario_id}/{evaluation_type}",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def evaluate_ai_critique(
+ self,
+ *,
+ correct_answer: str,
+ llm_app_prompt_template: typing.Optional[str] = OMIT,
+ inputs: typing.List[EvaluationScenarioInput],
+ outputs: typing.List[EvaluationScenarioOutput],
+ evaluation_prompt_template: typing.Optional[str] = OMIT,
+ open_ai_key: typing.Optional[str] = OMIT,
+ ) -> str:
+ """
+ Evaluate AI critique based on the given payload.
+
+ Args:
+ payload (AICritiqueCreate): The payload containing data for AI critique evaluation.
+ stoken_session (SessionContainer): The session container verified by `verify_session`.
+
+ Returns:
+ str: The output of the AI critique evaluation.
+
+ Raises:
+ HTTPException: If any exception occurs during the evaluation.
+
+ Parameters:
+ - correct_answer: str.
+
+ - llm_app_prompt_template: typing.Optional[str].
+
+ - inputs: typing.List[EvaluationScenarioInput].
+
+ - outputs: typing.List[EvaluationScenarioOutput].
+
+ - evaluation_prompt_template: typing.Optional[str].
+
+ - open_ai_key: typing.Optional[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "correct_answer": correct_answer,
+ "inputs": inputs,
+ "outputs": outputs,
+ }
+ if llm_app_prompt_template is not OMIT:
+ _request["llm_app_prompt_template"] = llm_app_prompt_template
+ if evaluation_prompt_template is not OMIT:
+ _request["evaluation_prompt_template"] = evaluation_prompt_template
+ if open_ai_key is not OMIT:
+ _request["open_ai_key"] = open_ai_key
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "evaluations/evaluation_scenario/ai_critique",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_evaluation_scenario_score(
+ self, evaluation_scenario_id: str
+ ) -> typing.Dict[str, str]:
+ """
+ Fetch the score of a specific evaluation scenario.
+
+ Args:
+ evaluation_scenario_id: The ID of the evaluation scenario to fetch.
+ stoken_session: Session data, verified by `verify_session`.
+
+ Returns:
+ Dictionary containing the scenario ID and its score.
+
+ Parameters:
+ - evaluation_scenario_id: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.get_evaluation_scenario_score(evaluation_scenario_id="evaluation-scenario-id")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/evaluation_scenario/{evaluation_scenario_id}/score",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Dict[str, str], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_evaluation_scenario_score(
+ self, evaluation_scenario_id: str, *, score: float
+ ) -> typing.Any:
+ """
+ Updates the score of an evaluation scenario.
+
+ Raises:
+ HTTPException: Server error if the evaluation update fails.
+
+ Returns:
+ None: 204 No Content status code upon successful update.
+
+ Parameters:
+ - evaluation_scenario_id: str.
+
+ - score: float.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/evaluation_scenario/{evaluation_scenario_id}/score",
+ ),
+ json=jsonable_encoder({"score": score}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def fetch_results(self, evaluation_id: str) -> typing.Any:
+ """
+ Fetch all the results for one the comparison table
+
+ Arguments:
+ evaluation*id -- \_description*
+
+ Returns:
+ _description_
+
+ Parameters:
+ - evaluation_id: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}/results",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_custom_evaluation(
+ self, *, request: CreateCustomEvaluation
+ ) -> typing.Any:
+ """
+ Create evaluation with custom python code.
+
+ Args:
+
+ custom_evaluation_payload (CreateCustomEvaluation): the required payload
+
+ Parameters:
+ - request: CreateCustomEvaluation.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "evaluations/custom_evaluation",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_custom_evaluation(self, id: str) -> CustomEvaluationDetail:
+ """
+ Get the custom code evaluation detail.
+
+ Args:
+ id (str): the id of the custom evaluation
+
+ Returns:
+ CustomEvaluationDetail: Detail of the custom evaluation
+
+ Parameters:
+ - id: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/{id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(CustomEvaluationDetail, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_custom_evaluation(
+ self, id: str, *, request: CreateCustomEvaluation
+ ) -> typing.Any:
+ """
+ Update a custom code evaluation.
+ Args:
+ id (str): the ID of the custom evaluation to update
+ updated_data (CreateCustomEvaluation): the payload with updated data
+ stoken_session (SessionContainer): session container for authentication
+
+ Parameters:
+ - id: str.
+
+ - request: CreateCustomEvaluation.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/{id}",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_custom_evaluations(
+ self, app_id: str
+ ) -> typing.List[CustomEvaluationOutput]:
+ """
+ List the custom code evaluations for a given app.
+
+ Args:
+ app_id (str): the id of the app
+
+ Returns:
+ List[CustomEvaluationOutput]: a list of custom evaluation
+
+ Parameters:
+ - app_id: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.list_custom_evaluations(app_id="app-id")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/list/{app_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[CustomEvaluationOutput], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_custom_evaluation_names(
+ self, app_name: str
+ ) -> typing.List[CustomEvaluationNames]:
+ """
+ Get the names of custom evaluation for a given app.
+
+ Args:
+ app_name (str): the name of the app the evaluation belongs to
+
+ Returns:
+ List[CustomEvaluationNames]: the list of name of custom evaluations
+
+ Parameters:
+ - app_name: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.get_custom_evaluation_names(app_name="app-name")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/{app_name}/names",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[CustomEvaluationNames], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def execute_custom_evaluation(
+ self,
+ evaluation_id: str,
+ *,
+ inputs: typing.List[typing.Dict[str, typing.Any]],
+ app_id: str,
+ variant_id: str,
+ correct_answer: str,
+ outputs: typing.List[typing.Dict[str, typing.Any]],
+ ) -> typing.Any:
+ """
+ Execute a custom evaluation code.
+
+ Args:
+ evaluation_id (str): the custom evaluation id
+ payload (ExecuteCustomEvaluationCode): the required payload
+
+ Returns:
+ float: the result of the evaluation custom code
+
+ Parameters:
+ - evaluation_id: str.
+
+ - inputs: typing.List[typing.Dict[str, typing.Any]].
+
+ - app_id: str.
+
+ - variant_id: str.
+
+ - correct_answer: str.
+
+ - outputs: typing.List[typing.Dict[str, typing.Any]].
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/execute/{evaluation_id}",
+ ),
+ json=jsonable_encoder(
+ {
+ "inputs": inputs,
+ "app_id": app_id,
+ "variant_id": variant_id,
+ "correct_answer": correct_answer,
+ "outputs": outputs,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def webhook_example_fake(self) -> EvaluationWebhook:
+ """
+ Returns a fake score response for example webhook evaluation
+
+ Returns:
+ _description_
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "evaluations/webhook_example_fake",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(EvaluationWebhook, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def upload_file(
+ self, *, upload_type: str, file: typing.IO, testset_name: str, app_id: str
+ ) -> TestSetSimpleResponse:
+ """
+ Uploads a CSV or JSON file and saves its data to MongoDB.
+
+ Args:
+ upload_type : Either a json or csv file.
+ file (UploadFile): The CSV or JSON file to upload.
+ testset_name (Optional): the name of the testset if provided.
+
+ Returns:
+ dict: The result of the upload process.
+
+ Parameters:
+ - upload_type: str.
+
+ - file: typing.IO.
+
+ - testset_name: str.
+
+ - app_id: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "testsets/upload"
+ ),
+ data=jsonable_encoder(
+ {
+ "upload_type": upload_type,
+ "testset_name": testset_name,
+ "app_id": app_id,
+ }
+ ),
+ files={"file": file},
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(TestSetSimpleResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def import_testset(self) -> TestSetSimpleResponse:
+ """
+ Import JSON testset data from an endpoint and save it to MongoDB.
+
+ Args:
+ endpoint (str): An endpoint URL to import data from.
+ testset_name (str): the name of the testset if provided.
+
+ Returns:
+ dict: The result of the import process.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "testsets/endpoint"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(TestSetSimpleResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_testset(
+ self, app_id: str, *, request: NewTestset
+ ) -> TestSetSimpleResponse:
+ """
+ Create a testset with given name and app_name, save the testset to MongoDB.
+
+ Args:
+ name (str): name of the test set.
+ app_name (str): name of the application.
+ testset (Dict[str, str]): test set data.
+
+ Returns:
+ str: The id of the test set created.
+
+ Parameters:
+ - app_id: str.
+
+ - request: NewTestset.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"testsets/{app_id}"
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(TestSetSimpleResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_single_testset(self, testset_id: str) -> typing.Any:
+ """
+ Fetch a specific testset in a MongoDB collection using its \_id.
+
+ Args:
+ testset_id (str): The \_id of the testset to fetch.
+
+ Returns:
+ The requested testset if found, else an HTTPException.
+
+ Parameters:
+ - testset_id: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"testsets/{testset_id}"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_testset(self, testset_id: str, *, request: NewTestset) -> typing.Any:
+ """
+ Update a testset with given id, update the testset in MongoDB.
+
+ Args:
+ testset_id (str): id of the test set to be updated.
+ csvdata (NewTestset): New data to replace the old testset.
+
+ Returns:
+ str: The id of the test set updated.
+
+ Parameters:
+ - testset_id: str.
+
+ - request: NewTestset.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"testsets/{testset_id}"
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_testsets(self, *, app_id: str) -> typing.List[TestSetOutputResponse]:
+ """
+ Get all testsets.
+
+ Returns:
+
+ - A list of testset objects.
+
+ Raises:
+
+ - `HTTPException` with status code 404 if no testsets are found.
+
+ Parameters:
+ - app_id: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.get_testsets(app_id="app-id")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "testsets"),
+ params=remove_none_from_dict({"app_id": app_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[TestSetOutputResponse], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_testsets(self, *, testset_ids: typing.List[str]) -> typing.List[str]:
+ """
+ Delete specific testsets based on their unique IDs.
+
+ Args:
+ testset_ids (List[str]): The unique identifiers of the testsets to delete.
+
+ Returns:
+ A list of the deleted testsets' IDs.
+
+ Parameters:
+ - testset_ids: typing.List[str].
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.delete_testsets(testset_ids=[])
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "testsets"),
+ json=jsonable_encoder({"testset_ids": testset_ids}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[str], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def build_image(self, *, app_id: str, base_name: str, tar_file: typing.IO) -> Image:
+ """
+ Builds a Docker image from a tar file containing the application code.
+
+ Args:
+ app_id (str): The ID of the application to build the image for.
+ base_name (str): The base name of the image to build.
+ tar_file (UploadFile): The tar file containing the application code.
+ stoken_session (SessionContainer): The session container for the user making the request.
+
+ Returns:
+ Image: The Docker image that was built.
+
+ Parameters:
+ - app_id: str.
+
+ - base_name: str.
+
+ - tar_file: typing.IO.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "containers/build_image"
+ ),
+ params=remove_none_from_dict({"app_id": app_id, "base_name": base_name}),
+ data=jsonable_encoder({}),
+ files={"tar_file": tar_file},
+ headers=self._client_wrapper.get_headers(),
+ timeout=600,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Image, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def restart_container(self, *, variant_id: str) -> typing.Dict[str, typing.Any]:
+ """
+ Restart docker container.
+
+ Args:
+ payload (RestartAppContainer) -- the required data (app_name and variant_name)
+
+ Parameters:
+ - variant_id: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "containers/restart_container",
+ ),
+ json=jsonable_encoder({"variant_id": variant_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Dict[str, typing.Any], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def container_templates(self) -> ContainerTemplatesResponse:
+ """
+ Returns a list of templates available for creating new containers.
+
+ Parameters:
+ stoken_session (SessionContainer): The session container for the user.
+
+ Returns:
+
+ Union[List[Template], str]: A list of templates or an error message.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "containers/templates"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(ContainerTemplatesResponse, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def construct_app_container_url(
+ self,
+ *,
+ base_id: typing.Optional[str] = None,
+ variant_id: typing.Optional[str] = None,
+ ) -> Uri:
+ """
+ Constructs the URL for an app container based on the provided base_id or variant_id.
+
+ Args:
+ base_id (Optional[str]): The ID of the base to use for the app container.
+ variant_id (Optional[str]): The ID of the variant to use for the app container.
+ stoken_session (SessionContainer): The session container for the user.
+
+ Returns:
+ URI: The URI for the app container.
+
+ Raises:
+ HTTPException: If the base or variant cannot be found or the user does not have access.
+
+ Parameters:
+ - base_id: typing.Optional[str].
+
+ - variant_id: typing.Optional[str].
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "containers/container_url"
+ ),
+ params=remove_none_from_dict(
+ {"base_id": base_id, "variant_id": variant_id}
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Uri, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deploy_to_environment(
+ self, *, environment_name: str, variant_id: str
+ ) -> typing.Any:
+ """
+ Deploys a given variant to an environment
+
+ Args:
+ environment_name: Name of the environment to deploy to.
+ variant_id: variant id to deploy.
+ stoken_session: . Defaults to Depends(verify_session()).
+
+ Raises:
+ HTTPException: If the deployment fails.
+
+ Parameters:
+ - environment_name: str.
+
+ - variant_id: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "environments/deploy"
+ ),
+ json=jsonable_encoder(
+ {"environment_name": environment_name, "variant_id": variant_id}
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_trace(
+ self,
+ *,
+ app_id: typing.Optional[str] = OMIT,
+ variant_id: typing.Optional[str] = OMIT,
+ cost: typing.Optional[float] = OMIT,
+ latency: float,
+ status: str,
+ token_consumption: typing.Optional[int] = OMIT,
+ tags: typing.Optional[typing.List[str]] = OMIT,
+ start_time: dt.datetime,
+ end_time: dt.datetime,
+ spans: typing.List[str],
+ ) -> str:
+ """
+ Parameters:
+ - app_id: typing.Optional[str].
+
+ - variant_id: typing.Optional[str].
+
+ - cost: typing.Optional[float].
+
+ - latency: float.
+
+ - status: str.
+
+ - token_consumption: typing.Optional[int].
+
+ - tags: typing.Optional[typing.List[str]].
+
+ - start_time: dt.datetime.
+
+ - end_time: dt.datetime.
+
+ - spans: typing.List[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "latency": latency,
+ "status": status,
+ "start_time": start_time,
+ "end_time": end_time,
+ "spans": spans,
+ }
+ if app_id is not OMIT:
+ _request["app_id"] = app_id
+ if variant_id is not OMIT:
+ _request["variant_id"] = variant_id
+ if cost is not OMIT:
+ _request["cost"] = cost
+ if token_consumption is not OMIT:
+ _request["token_consumption"] = token_consumption
+ if tags is not OMIT:
+ _request["tags"] = tags
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "observability/traces"
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_traces(self, app_id: str, variant_id: str) -> typing.List[Trace]:
+ """
+ Parameters:
+ - app_id: str.
+
+ - variant_id: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.get_traces(app_id="app-id", variant_id="variant-id")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/traces/{app_id}/{variant_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Trace], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_single_trace(self, trace_id: str) -> Trace:
+ """
+ Parameters:
+ - trace_id: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/traces/{trace_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Trace, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_trace_status(self, trace_id: str, *, status: str) -> bool:
+ """
+ Parameters:
+ - trace_id: str.
+
+ - status: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/traces/{trace_id}",
+ ),
+ json=jsonable_encoder({"status": status}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(bool, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_span(
+ self,
+ *,
+ parent_span_id: typing.Optional[str] = OMIT,
+ meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ event_name: str,
+ event_type: typing.Optional[str] = OMIT,
+ start_time: dt.datetime,
+ duration: typing.Optional[int] = OMIT,
+ status: str,
+ end_time: dt.datetime,
+ inputs: typing.Optional[typing.List[str]] = OMIT,
+ outputs: typing.Optional[typing.List[str]] = OMIT,
+ prompt_template: typing.Optional[str] = OMIT,
+ tokens_input: typing.Optional[int] = OMIT,
+ tokens_output: typing.Optional[int] = OMIT,
+ token_total: typing.Optional[int] = OMIT,
+ cost: typing.Optional[float] = OMIT,
+ tags: typing.Optional[typing.List[str]] = OMIT,
+ ) -> str:
+ """
+ Parameters:
+ - parent_span_id: typing.Optional[str].
+
+ - meta: typing.Optional[typing.Dict[str, typing.Any]].
+
+ - event_name: str.
+
+ - event_type: typing.Optional[str].
+
+ - start_time: dt.datetime.
+
+ - duration: typing.Optional[int].
+
+ - status: str.
+
+ - end_time: dt.datetime.
+
+ - inputs: typing.Optional[typing.List[str]].
+
+ - outputs: typing.Optional[typing.List[str]].
+
+ - prompt_template: typing.Optional[str].
+
+ - tokens_input: typing.Optional[int].
+
+ - tokens_output: typing.Optional[int].
+
+ - token_total: typing.Optional[int].
+
+ - cost: typing.Optional[float].
+
+ - tags: typing.Optional[typing.List[str]].
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "event_name": event_name,
+ "start_time": start_time,
+ "status": status,
+ "end_time": end_time,
+ }
+ if parent_span_id is not OMIT:
+ _request["parent_span_id"] = parent_span_id
+ if meta is not OMIT:
+ _request["meta"] = meta
+ if event_type is not OMIT:
+ _request["event_type"] = event_type
+ if duration is not OMIT:
+ _request["duration"] = duration
+ if inputs is not OMIT:
+ _request["inputs"] = inputs
+ if outputs is not OMIT:
+ _request["outputs"] = outputs
+ if prompt_template is not OMIT:
+ _request["prompt_template"] = prompt_template
+ if tokens_input is not OMIT:
+ _request["tokens_input"] = tokens_input
+ if tokens_output is not OMIT:
+ _request["tokens_output"] = tokens_output
+ if token_total is not OMIT:
+ _request["token_total"] = token_total
+ if cost is not OMIT:
+ _request["cost"] = cost
+ if tags is not OMIT:
+ _request["tags"] = tags
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "observability/spans"
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_spans_of_trace(self, trace_id: str) -> typing.List[Span]:
+ """
+ Parameters:
+ - trace_id: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.get_spans_of_trace(trace_id="trace-id")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/spans/{trace_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Span], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_feedbacks(self, trace_id: str) -> typing.List[Feedback]:
+ """
+ Parameters:
+ - trace_id: str.
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.get_feedbacks(trace_id="trace-id")
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/feedbacks/{trace_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Feedback], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_feedback(
+ self,
+ trace_id: str,
+ *,
+ feedback: typing.Optional[str] = OMIT,
+ score: typing.Optional[float] = OMIT,
+ meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ ) -> str:
+ """
+ Parameters:
+ - trace_id: str.
+
+ - feedback: typing.Optional[str].
+
+ - score: typing.Optional[float].
+
+ - meta: typing.Optional[typing.Dict[str, typing.Any]].
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if feedback is not OMIT:
+ _request["feedback"] = feedback
+ if score is not OMIT:
+ _request["score"] = score
+ if meta is not OMIT:
+ _request["meta"] = meta
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/feedbacks/{trace_id}",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_feedback(self, trace_id: str, feedback_id: str) -> Feedback:
+ """
+ Parameters:
+ - trace_id: str.
+
+ - feedback_id: str.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/feedbacks/{trace_id}/{feedback_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Feedback, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_feedback(
+ self,
+ trace_id: str,
+ feedback_id: str,
+ *,
+ feedback: str,
+ score: typing.Optional[float] = OMIT,
+ meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ ) -> Feedback:
+ """
+ Parameters:
+ - trace_id: str.
+
+ - feedback_id: str.
+
+ - feedback: str.
+
+ - score: typing.Optional[float].
+
+ - meta: typing.Optional[typing.Dict[str, typing.Any]].
+ """
+ _request: typing.Dict[str, typing.Any] = {"feedback": feedback}
+ if score is not OMIT:
+ _request["score"] = score
+ if meta is not OMIT:
+ _request["meta"] = meta
+ _response = self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/feedbacks/{trace_id}/{feedback_id}",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Feedback, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_organizations(self) -> typing.List[Organization]:
+ """
+ Returns a list of organizations associated with the user's session.
+
+ Args:
+ stoken_session (SessionContainer): The user's session token.
+
+ Returns:
+ list[Organization]: A list of organizations associated with the user's session.
+
+ Raises:
+ HTTPException: If there is an error retrieving the organizations from the database.
+
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.list_organizations()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "organizations"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Organization], _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_own_org(self) -> OrganizationOutput:
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "organizations/own"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(OrganizationOutput, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_bases(
+ self,
+ *,
+ app_id: typing.Optional[str] = None,
+ base_name: typing.Optional[str] = None,
+ ) -> typing.List[BaseOutput]:
+ """
+ Retrieve a list of bases filtered by app_id and base_name.
+
+ Args:
+ request (Request): The incoming request.
+ app_id (Optional[str], optional): The ID of the app to filter by. Defaults to None.
+ base_name (Optional[str], optional): The name of the base to filter by. Defaults to None.
+
+ Returns:
+ List[BaseOutput]: A list of BaseOutput objects representing the filtered bases.
+
+ Raises:
+ HTTPException: If there was an error retrieving the bases.
+
+ Parameters:
+ - app_id: typing.Optional[str].
+
+ - base_name: typing.Optional[str].
+ ---
+ from agenta.client import AgentaApi
+
+ client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ client.list_bases()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "bases"),
+ params=remove_none_from_dict({"app_id": app_id, "base_name": base_name}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[BaseOutput], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_config(
+ self,
+ *,
+ base_id: str,
+ config_name: typing.Optional[str] = None,
+ environment_name: typing.Optional[str] = None,
+ ) -> GetConfigReponse:
+ """
+ Parameters:
+ - base_id: str.
+
+ - config_name: typing.Optional[str].
+
+ - environment_name: typing.Optional[str].
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "configs"),
+ params=remove_none_from_dict(
+ {
+ "base_id": base_id,
+ "config_name": config_name,
+ "environment_name": environment_name,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(GetConfigReponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def save_config(
+ self,
+ *,
+ base_id: str,
+ config_name: str,
+ parameters: typing.Dict[str, typing.Any],
+ overwrite: bool,
+ ) -> typing.Any:
+ """
+ Parameters:
+ - base_id: str.
+
+ - config_name: str.
+
+ - parameters: typing.Dict[str, typing.Any].
+
+ - overwrite: bool.
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "configs"),
+ json=jsonable_encoder(
+ {
+ "base_id": base_id,
+ "config_name": config_name,
+ "parameters": parameters,
+ "overwrite": overwrite,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAgentaApi:
+ def __init__(
+ self, *, base_url: str, api_key: str, timeout: typing.Optional[float] = 60
+ ):
+ self._client_wrapper = AsyncClientWrapper(
+ base_url=base_url,
+ api_key=api_key,
+ httpx_client=httpx.AsyncClient(timeout=timeout),
+ )
+
+ async def list_api_keys(self) -> typing.List[ListApiKeysOutput]:
+ """
+ List all API keys associated with the authenticated user.
+
+ Args:
+ request (Request): The incoming request object.
+
+ Returns:
+ List[ListAPIKeysOutput]: A list of API Keys associated with the user.
+
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.list_api_keys()
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "keys"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[ListApiKeysOutput], _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_api_key(self) -> str:
+ """
+ Creates an API key for a user.
+
+ Args:
+ request (Request): The request object containing the user ID in the request state.
+
+ Returns:
+ str: The created API key.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "keys"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_api_key(self, key_prefix: str) -> typing.Dict[str, typing.Any]:
+ """
+ Delete an API key with the given key prefix for the authenticated user.
+
+ Args:
+ key_prefix (str): The prefix of the API key to be deleted.
+ request (Request): The incoming request object.
+
+ Returns:
+ dict: A dictionary containing a success message upon successful deletion.
+
+ Raises:
+ HTTPException: If the API key is not found or does not belong to the user.
+
+ Parameters:
+ - key_prefix: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.delete_api_key(key_prefix="key-prefix")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"keys/{key_prefix}"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Dict[str, typing.Any], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def validate_api_key(self, key_prefix: str) -> bool:
+ """
+ This Function is called by the CLI and is used to validate an API key provided by a user in agenta init setup.
+ Returns:
+ bool: True. If the request reaches this point, the API key is valid.
+
+ Parameters:
+ - key_prefix: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"keys/{key_prefix}/validate"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(bool, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def fetch_organization_details(self, org_id: str) -> typing.Any:
+ """
+ Get an organization's details.
+
+ Raises:
+ HTTPException: _description_
+ Permission Denied
+
+ Returns:
+ OrganizationDB Instance
+
+ Parameters:
+ - org_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"organizations_ee/{org_id}"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def invite_to_org(self, org_id: str, *, request: InviteRequest) -> typing.Any:
+ """
+ Invite a user to an Organization.
+
+ Raises:
+ HTTPException: _description_; status_code: 500
+ HTTPException: This Organization doesn't exist; status_code: 400
+ HTTPException: Failed to invite user to organization; status_code: 403
+ HTTPException: You cannot invite yourself to your own organization; status_code: 400
+ HTTPException: You do not have permission to access this organization; status_code: 500
+
+ Returns:
+ JSONResponse: Invited user to organization; status_code: 200
+
+ Parameters:
+ - org_id: str.
+
+ - request: InviteRequest.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"organizations_ee/{org_id}/invite",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def resend_invitation(
+ self, org_id: str, *, request: InviteRequest
+ ) -> typing.Any:
+ """
+ Resend an invitation to a user to an Organization.
+
+ Raises:
+ HTTPException: _description_; status_code: 500
+ HTTPException: Invitation not found or has expired; status_code: 400
+ HTTPException: You already belong to this organization; status_code: 400
+
+ Returns:
+ JSONResponse: Resent invitation to user; status_code: 200
+
+ Parameters:
+ - org_id: str.
+
+ - request: InviteRequest.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"organizations_ee/{org_id}/invite/resend",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_user_to_org(self, org_id: str, *, token: str) -> typing.Any:
+ """
+ Invite a user to an Organization.
+
+ Raises:
+ HTTPException: _description_; status_code: 500
+ HTTPException: Invitation not found or has expired; status_code: 400
+ HTTPException: You already belong to this organization; status_code: 400
+
+ Returns:
+ JSONResponse: Added user to organization; status_code: 200
+
+ Parameters:
+ - org_id: str.
+
+ - token: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"organizations_ee/{org_id}/accept",
+ ),
+ json=jsonable_encoder({"token": token}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_organization(self, *, request: Organization) -> typing.Any:
+ """
+ Parameters:
+ - request: Organization.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "organizations_ee/create"
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_organization(
+ self,
+ org_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ ) -> typing.Any:
+ """
+ Parameters:
+ - org_id: str.
+
+ - name: typing.Optional[str].
+
+ - description: typing.Optional[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if name is not OMIT:
+ _request["name"] = name
+ if description is not OMIT:
+ _request["description"] = description
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"organizations_ee/{org_id}/update",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def health_check(self) -> typing.Any:
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "health"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def user_profile(self) -> typing.Any:
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "profile"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_app_variants(self, app_id: str) -> typing.List[AppVariantOutput]:
+ """
+ Retrieve a list of app variants for a given app ID.
+
+ Args:
+ app_id (str): The ID of the app to retrieve variants for.
+ stoken_session (SessionContainer, optional): The session container to verify the user's session. Defaults to Depends(verify_session()).
+
+ Returns:
+ List[AppVariantOutput]: A list of app variants for the given app ID.
+
+ Parameters:
+ - app_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.list_app_variants(app_id="app-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"apps/{app_id}/variants"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[AppVariantOutput], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_variant_by_env(
+ self, *, app_id: str, environment: str
+ ) -> AppVariantOutput:
+ """
+ Retrieve the app variant based on the provided app_id and environment.
+
+ Args:
+ app_id (str): The ID of the app to retrieve the variant for.
+ environment (str): The environment of the app variant to retrieve.
+ stoken_session (SessionContainer, optional): The session token container. Defaults to Depends(verify_session()).
+
+ Raises:
+ HTTPException: If the app variant is not found (status_code=500), or if a ValueError is raised (status_code=400), or if any other exception is raised (status_code=500).
+
+ Returns:
+ AppVariantOutput: The retrieved app variant.
+
+ Parameters:
+ - app_id: str.
+
+ - environment: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "apps/get_variant_by_env"
+ ),
+ params=remove_none_from_dict(
+ {"app_id": app_id, "environment": environment}
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(AppVariantOutput, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_apps(
+ self,
+ *,
+ app_name: typing.Optional[str] = None,
+ org_id: typing.Optional[str] = None,
+ ) -> typing.List[App]:
+ """
+ Retrieve a list of apps filtered by app_name and org_id.
+
+ Args:
+ app_name (Optional[str]): The name of the app to filter by.
+ org_id (Optional[str]): The ID of the organization to filter by.
+ stoken_session (SessionContainer): The session container.
+
+ Returns:
+ List[App]: A list of apps filtered by app_name and org_id.
+
+ Raises:
+ HTTPException: If there was an error retrieving the list of apps.
+
+ Parameters:
+ - app_name: typing.Optional[str].
+
+ - org_id: typing.Optional[str].
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.list_apps()
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "apps"),
+ params=remove_none_from_dict({"app_name": app_name, "org_id": org_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[App], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_app(
+ self, *, app_name: str, organization_id: typing.Optional[str] = OMIT
+ ) -> CreateAppOutput:
+ """
+ Create a new app for a user or organization.
+
+ Args:
+ payload (CreateApp): The payload containing the app name and organization ID (optional).
+ stoken_session (SessionContainer): The session container containing the user's session token.
+
+ Returns:
+ CreateAppOutput: The output containing the newly created app's ID and name.
+
+ Raises:
+ HTTPException: If there is an error creating the app or the user does not have permission to access the app.
+
+ Parameters:
+ - app_name: str.
+
+ - organization_id: typing.Optional[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {"app_name": app_name}
+ if organization_id is not OMIT:
+ _request["organization_id"] = organization_id
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "apps"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(CreateAppOutput, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_variant_from_image(
+ self,
+ app_id: str,
+ *,
+ variant_name: str,
+ docker_id: str,
+ tags: str,
+ base_name: typing.Optional[str] = OMIT,
+ config_name: typing.Optional[str] = OMIT,
+ ) -> typing.Any:
+ """
+ Add a new variant to an app based on a Docker image.
+
+ Args:
+ app_id (str): The ID of the app to add the variant to.
+ payload (AddVariantFromImagePayload): The payload containing information about the variant to add.
+ stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).
+
+ Raises:
+ HTTPException: If the feature flag is set to "demo" or if the image does not have a tag starting with the registry name (agenta-server) or if the image is not found or if the user does not have access to the app.
+
+ Returns:
+ dict: The newly added variant.
+
+ Parameters:
+ - app_id: str.
+
+ - variant_name: str.
+
+ - docker_id: str.
+
+ - tags: str.
+
+ - base_name: typing.Optional[str].
+
+ - config_name: typing.Optional[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "variant_name": variant_name,
+ "docker_id": docker_id,
+ "tags": tags,
+ }
+ if base_name is not OMIT:
+ _request["base_name"] = base_name
+ if config_name is not OMIT:
+ _request["config_name"] = config_name
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"apps/{app_id}/variant/from-image",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def remove_app(self, app_id: str) -> typing.Any:
+ """
+ Remove app, all its variant, containers and images
+
+ Arguments:
+ app -- App to remove
+
+ Parameters:
+ - app_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"apps/{app_id}"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_app_and_variant_from_template(
+ self,
+ *,
+ app_name: str,
+ template_id: str,
+ env_vars: typing.Dict[str, str],
+ organization_id: typing.Optional[str] = OMIT,
+ ) -> AppVariantOutput:
+ """
+ Create an app and variant from a template.
+
+ Args:
+ payload (CreateAppVariant): The payload containing the app and variant information.
+ stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).
+
+ Raises:
+ HTTPException: If the user has reached the app limit or if an app with the same name already exists.
+
+ Returns:
+ AppVariantOutput: The output of the created app variant.
+
+ Parameters:
+ - app_name: str.
+
+ - template_id: str.
+
+ - env_vars: typing.Dict[str, str].
+
+ - organization_id: typing.Optional[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "app_name": app_name,
+ "template_id": template_id,
+ "env_vars": env_vars,
+ }
+ if organization_id is not OMIT:
+ _request["organization_id"] = organization_id
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "apps/app_and_variant_from_template",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(AppVariantOutput, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_environments(self, app_id: str) -> typing.List[EnvironmentOutput]:
+ """
+ Retrieve a list of environments for a given app ID.
+
+ Args:
+ app_id (str): The ID of the app to retrieve environments for.
+ stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).
+
+ Returns:
+ List[EnvironmentOutput]: A list of environment objects.
+
+ Parameters:
+ - app_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.list_environments(app_id="app-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"apps/{app_id}/environments"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[EnvironmentOutput], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_variant_from_base_and_config(
+ self,
+ *,
+ base_id: str,
+ new_variant_name: str,
+ new_config_name: str,
+ parameters: typing.Dict[str, typing.Any],
+ ) -> AddVariantFromBaseAndConfigResponse:
+ """
+ Add a new variant based on an existing one.
+ Same as POST /config
+
+ Args:
+ payload (AddVariantFromBasePayload): Payload containing base variant ID, new variant name, and parameters.
+ stoken_session (SessionContainer, optional): Session container. Defaults to result of verify_session().
+
+ Raises:
+ HTTPException: Raised if the variant could not be added or accessed.
+
+ Returns:
+ Union[AppVariantOutput, Any]: New variant details or exception.
+
+ Parameters:
+ - base_id: str.
+
+ - new_variant_name: str.
+
+ - new_config_name: str.
+
+ - parameters: typing.Dict[str, typing.Any].
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "variants/from-base"
+ ),
+ json=jsonable_encoder(
+ {
+ "base_id": base_id,
+ "new_variant_name": new_variant_name,
+ "new_config_name": new_config_name,
+ "parameters": parameters,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(AddVariantFromBaseAndConfigResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def start_variant(
+ self,
+ variant_id: str,
+ *,
+ action: VariantAction,
+ env_vars: typing.Optional[DockerEnvVars] = OMIT,
+ ) -> Uri:
+ """
+ Start a variant of an app.
+
+ Args:
+ variant_id (str): The ID of the variant to start.
+ action (VariantAction): The action to perform on the variant (start).
+ env_vars (Optional[DockerEnvVars], optional): The environment variables to inject to the Docker container. Defaults to None.
+ stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).
+
+ Returns:
+ URI: The URL of the started variant.
+
+ Raises:
+ HTTPException: If the app container cannot be started.
+
+ Parameters:
+ - variant_id: str.
+
+ - action: VariantAction.
+
+ - env_vars: typing.Optional[DockerEnvVars].
+ """
+ _request: typing.Dict[str, typing.Any] = {"action": action}
+ if env_vars is not OMIT:
+ _request["env_vars"] = env_vars
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"variants/{variant_id}"
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Uri, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def remove_variant(self, variant_id: str) -> typing.Any:
+ """
+ Remove a variant from the server.
+ In the case it's the last variant using the image, stop the container and remove the image.
+
+ Arguments:
+ app_variant -- AppVariant to remove
+
+ Raises:
+ HTTPException: If there is a problem removing the app variant
+
+ Parameters:
+ - variant_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"variants/{variant_id}"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_variant_parameters(
+ self, variant_id: str, *, parameters: typing.Dict[str, typing.Any]
+ ) -> typing.Any:
+ """
+ Updates the parameters for an app variant.
+
+ Args:
+ variant_id (str): The ID of the app variant to update.
+ payload (UpdateVariantParameterPayload): The payload containing the updated parameters.
+ stoken_session (SessionContainer, optional): The session container. Defaults to Depends(verify_session()).
+
+ Raises:
+ HTTPException: If there is an error while trying to update the app variant.
+
+ Returns:
+ JSONResponse: A JSON response containing the updated app variant parameters.
+
+ Parameters:
+ - variant_id: str.
+
+ - parameters: typing.Dict[str, typing.Any].
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"variants/{variant_id}/parameters",
+ ),
+ json=jsonable_encoder({"parameters": parameters}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_variant_image(
+ self, variant_id: str, *, request: Image
+ ) -> typing.Any:
+ """
+ Updates the image used in an app variant.
+
+ Args:
+ variant_id (str): The ID of the app variant to update.
+ image (Image): The image information to update.
+
+ Raises:
+ HTTPException: If an error occurs while trying to update the app variant.
+
+ Returns:
+ JSONResponse: A JSON response indicating whether the update was successful or not.
+
+ Parameters:
+ - variant_id: str.
+
+ - request: Image.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"variants/{variant_id}/image",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def fetch_list_evaluations(self, *, app_id: str) -> typing.List[Evaluation]:
+ """
+ Fetches a list of evaluations, optionally filtered by an app ID.
+
+ Args:
+ app_id (Optional[str]): An optional app ID to filter the evaluations.
+
+ Returns:
+ List[Evaluation]: A list of evaluations.
+
+ Parameters:
+ - app_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.fetch_list_evaluations(app_id="app-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "evaluations"
+ ),
+ params=remove_none_from_dict({"app_id": app_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Evaluation], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_evaluation(
+ self,
+ *,
+ app_id: str,
+ variant_ids: typing.List[str],
+ evaluation_type: EvaluationType,
+ evaluation_type_settings: typing.Optional[EvaluationTypeSettings] = OMIT,
+ inputs: typing.List[str],
+ testset_id: str,
+ status: str,
+ ) -> SimpleEvaluationOutput:
+ """
+ Creates a new comparison table document
+ Raises:
+ HTTPException: _description_
+ Returns:
+ _description_
+
+ Parameters:
+ - app_id: str.
+
+ - variant_ids: typing.List[str].
+
+ - evaluation_type: EvaluationType.
+
+ - evaluation_type_settings: typing.Optional[EvaluationTypeSettings].
+
+ - inputs: typing.List[str].
+
+ - testset_id: str.
+
+ - status: str.
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "app_id": app_id,
+ "variant_ids": variant_ids,
+ "evaluation_type": evaluation_type,
+ "inputs": inputs,
+ "testset_id": testset_id,
+ "status": status,
+ }
+ if evaluation_type_settings is not OMIT:
+ _request["evaluation_type_settings"] = evaluation_type_settings
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "evaluations"
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(SimpleEvaluationOutput, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_evaluations(
+ self, *, evaluations_ids: typing.List[str]
+ ) -> typing.List[str]:
+ """
+ Delete specific comparison tables based on their unique IDs.
+
+ Args:
+ delete_evaluations (List[str]): The unique identifiers of the comparison tables to delete.
+
+ Returns:
+ A list of the deleted comparison tables' IDs.
+
+ Parameters:
+ - evaluations_ids: typing.List[str].
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.delete_evaluations(evaluations_ids=[])
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "evaluations"
+ ),
+ json=jsonable_encoder({"evaluations_ids": evaluations_ids}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[str], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def fetch_evaluation(self, evaluation_id: str) -> Evaluation:
+ """
+ Fetches a single evaluation based on its ID.
+
+ Args:
+ evaluation_id (str): The ID of the evaluation to fetch.
+
+ Returns:
+ Evaluation: The fetched evaluation.
+
+ Parameters:
+ - evaluation_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Evaluation, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_evaluation(
+ self,
+ evaluation_id: str,
+ *,
+ status: typing.Optional[EvaluationStatusEnum] = OMIT,
+ evaluation_type_settings: typing.Optional[EvaluationTypeSettings] = OMIT,
+ ) -> typing.Any:
+ """
+ Updates an evaluation's status.
+
+ Raises:
+ HTTPException: If the columns in the test set do not match with the inputs in the variant.
+
+ Returns:
+ None: A 204 No Content status code, indicating that the update was successful.
+
+ Parameters:
+ - evaluation_id: str.
+
+ - status: typing.Optional[EvaluationStatusEnum].
+
+ - evaluation_type_settings: typing.Optional[EvaluationTypeSettings].
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if status is not OMIT:
+ _request["status"] = status
+ if evaluation_type_settings is not OMIT:
+ _request["evaluation_type_settings"] = evaluation_type_settings
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def fetch_evaluation_scenarios(
+ self, evaluation_id: str
+ ) -> typing.List[EvaluationScenario]:
+ """
+ Fetches evaluation scenarios for a given evaluation ID.
+
+ Arguments:
+ evaluation_id (str): The ID of the evaluation for which to fetch scenarios.
+
+ Raises:
+ HTTPException: If the evaluation is not found or access is denied.
+
+ Returns:
+ List[EvaluationScenario]: A list of evaluation scenarios.
+
+ Parameters:
+ - evaluation_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.fetch_evaluation_scenarios(evaluation_id="evaluation-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}/evaluation_scenarios",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[EvaluationScenario], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_evaluation_scenario(
+ self, evaluation_id: str, *, request: EvaluationScenario
+ ) -> typing.Any:
+ """
+ Create a new evaluation scenario for a given evaluation ID.
+
+ Raises:
+ HTTPException: If evaluation not found or access denied.
+
+ Returns:
+ None: 204 No Content status code upon success.
+
+ Parameters:
+ - evaluation_id: str.
+
+ - request: EvaluationScenario.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}/evaluation_scenario",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_evaluation_scenario(
+ self,
+ evaluation_id: str,
+ evaluation_scenario_id: str,
+ evaluation_type: EvaluationType,
+ *,
+ vote: typing.Optional[str] = OMIT,
+ score: typing.Optional[EvaluationScenarioUpdateScore] = OMIT,
+ correct_answer: typing.Optional[str] = OMIT,
+ outputs: typing.Optional[typing.List[EvaluationScenarioOutput]] = OMIT,
+ inputs: typing.Optional[typing.List[EvaluationScenarioInput]] = OMIT,
+ is_pinned: typing.Optional[bool] = OMIT,
+ note: typing.Optional[str] = OMIT,
+ ) -> typing.Any:
+ """
+ Updates an evaluation scenario's vote or score based on its type.
+
+ Raises:
+ HTTPException: If update fails or unauthorized.
+
+ Returns:
+ None: 204 No Content status code upon successful update.
+
+ Parameters:
+ - evaluation_id: str.
+
+ - evaluation_scenario_id: str.
+
+ - evaluation_type: EvaluationType.
+
+ - vote: typing.Optional[str].
+
+ - score: typing.Optional[EvaluationScenarioUpdateScore].
+
+ - correct_answer: typing.Optional[str].
+
+ - outputs: typing.Optional[typing.List[EvaluationScenarioOutput]].
+
+ - inputs: typing.Optional[typing.List[EvaluationScenarioInput]].
+
+ - is_pinned: typing.Optional[bool].
+
+ - note: typing.Optional[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if vote is not OMIT:
+ _request["vote"] = vote
+ if score is not OMIT:
+ _request["score"] = score
+ if correct_answer is not OMIT:
+ _request["correct_answer"] = correct_answer
+ if outputs is not OMIT:
+ _request["outputs"] = outputs
+ if inputs is not OMIT:
+ _request["inputs"] = inputs
+ if is_pinned is not OMIT:
+ _request["is_pinned"] = is_pinned
+ if note is not OMIT:
+ _request["note"] = note
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}/evaluation_scenario/{evaluation_scenario_id}/{evaluation_type}",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def evaluate_ai_critique(
+ self,
+ *,
+ correct_answer: str,
+ llm_app_prompt_template: typing.Optional[str] = OMIT,
+ inputs: typing.List[EvaluationScenarioInput],
+ outputs: typing.List[EvaluationScenarioOutput],
+ evaluation_prompt_template: typing.Optional[str] = OMIT,
+ open_ai_key: typing.Optional[str] = OMIT,
+ ) -> str:
+ """
+ Evaluate AI critique based on the given payload.
+
+ Args:
+ payload (AICritiqueCreate): The payload containing data for AI critique evaluation.
+ stoken_session (SessionContainer): The session container verified by `verify_session`.
+
+ Returns:
+ str: The output of the AI critique evaluation.
+
+ Raises:
+ HTTPException: If any exception occurs during the evaluation.
+
+ Parameters:
+ - correct_answer: str.
+
+ - llm_app_prompt_template: typing.Optional[str].
+
+ - inputs: typing.List[EvaluationScenarioInput].
+
+ - outputs: typing.List[EvaluationScenarioOutput].
+
+ - evaluation_prompt_template: typing.Optional[str].
+
+ - open_ai_key: typing.Optional[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "correct_answer": correct_answer,
+ "inputs": inputs,
+ "outputs": outputs,
+ }
+ if llm_app_prompt_template is not OMIT:
+ _request["llm_app_prompt_template"] = llm_app_prompt_template
+ if evaluation_prompt_template is not OMIT:
+ _request["evaluation_prompt_template"] = evaluation_prompt_template
+ if open_ai_key is not OMIT:
+ _request["open_ai_key"] = open_ai_key
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "evaluations/evaluation_scenario/ai_critique",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_evaluation_scenario_score(
+ self, evaluation_scenario_id: str
+ ) -> typing.Dict[str, str]:
+ """
+ Fetch the score of a specific evaluation scenario.
+
+ Args:
+ evaluation_scenario_id: The ID of the evaluation scenario to fetch.
+ stoken_session: Session data, verified by `verify_session`.
+
+ Returns:
+ Dictionary containing the scenario ID and its score.
+
+ Parameters:
+ - evaluation_scenario_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.get_evaluation_scenario_score(evaluation_scenario_id="evaluation-scenario-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/evaluation_scenario/{evaluation_scenario_id}/score",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Dict[str, str], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_evaluation_scenario_score(
+ self, evaluation_scenario_id: str, *, score: float
+ ) -> typing.Any:
+ """
+ Updates the score of an evaluation scenario.
+
+ Raises:
+ HTTPException: Server error if the evaluation update fails.
+
+ Returns:
+ None: 204 No Content status code upon successful update.
+
+ Parameters:
+ - evaluation_scenario_id: str.
+
+ - score: float.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/evaluation_scenario/{evaluation_scenario_id}/score",
+ ),
+ json=jsonable_encoder({"score": score}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def fetch_results(self, evaluation_id: str) -> typing.Any:
+ """
+        Fetch all the results for one comparison table.
+
+        Arguments:
+            evaluation_id -- the ID of the evaluation whose results to fetch.
+
+        Returns:
+            The results of the evaluation for the comparison table.
+
+ Parameters:
+ - evaluation_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/{evaluation_id}/results",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_custom_evaluation(
+ self, *, request: CreateCustomEvaluation
+ ) -> typing.Any:
+ """
+ Create evaluation with custom python code.
+
+ Args:
+
+ custom_evaluation_payload (CreateCustomEvaluation): the required payload
+
+ Parameters:
+ - request: CreateCustomEvaluation.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "evaluations/custom_evaluation",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_custom_evaluation(self, id: str) -> CustomEvaluationDetail:
+ """
+ Get the custom code evaluation detail.
+
+ Args:
+ id (str): the id of the custom evaluation
+
+ Returns:
+ CustomEvaluationDetail: Detail of the custom evaluation
+
+ Parameters:
+ - id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/{id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(CustomEvaluationDetail, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_custom_evaluation(
+ self, id: str, *, request: CreateCustomEvaluation
+ ) -> typing.Any:
+ """
+ Update a custom code evaluation.
+ Args:
+ id (str): the ID of the custom evaluation to update
+ updated_data (CreateCustomEvaluation): the payload with updated data
+ stoken_session (SessionContainer): session container for authentication
+
+ Parameters:
+ - id: str.
+
+ - request: CreateCustomEvaluation.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/{id}",
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_custom_evaluations(
+ self, app_id: str
+ ) -> typing.List[CustomEvaluationOutput]:
+ """
+ List the custom code evaluations for a given app.
+
+ Args:
+ app_id (str): the id of the app
+
+ Returns:
+ List[CustomEvaluationOutput]: a list of custom evaluation
+
+ Parameters:
+ - app_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.list_custom_evaluations(app_id="app-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/list/{app_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[CustomEvaluationOutput], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_custom_evaluation_names(
+ self, app_name: str
+ ) -> typing.List[CustomEvaluationNames]:
+ """
+ Get the names of custom evaluation for a given app.
+
+ Args:
+ app_name (str): the name of the app the evaluation belongs to
+
+ Returns:
+ List[CustomEvaluationNames]: the list of name of custom evaluations
+
+ Parameters:
+ - app_name: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.get_custom_evaluation_names(app_name="app-name")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/{app_name}/names",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[CustomEvaluationNames], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def execute_custom_evaluation(
+ self,
+ evaluation_id: str,
+ *,
+ inputs: typing.List[typing.Dict[str, typing.Any]],
+ app_id: str,
+ variant_id: str,
+ correct_answer: str,
+ outputs: typing.List[typing.Dict[str, typing.Any]],
+ ) -> typing.Any:
+ """
+ Execute a custom evaluation code.
+
+ Args:
+ evaluation_id (str): the custom evaluation id
+ payload (ExecuteCustomEvaluationCode): the required payload
+
+ Returns:
+ float: the result of the evaluation custom code
+
+ Parameters:
+ - evaluation_id: str.
+
+ - inputs: typing.List[typing.Dict[str, typing.Any]].
+
+ - app_id: str.
+
+ - variant_id: str.
+
+ - correct_answer: str.
+
+ - outputs: typing.List[typing.Dict[str, typing.Any]].
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"evaluations/custom_evaluation/execute/{evaluation_id}",
+ ),
+ json=jsonable_encoder(
+ {
+ "inputs": inputs,
+ "app_id": app_id,
+ "variant_id": variant_id,
+ "correct_answer": correct_answer,
+ "outputs": outputs,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def webhook_example_fake(self) -> EvaluationWebhook:
+ """
+ Returns a fake score response for example webhook evaluation
+
+ Returns:
+            EvaluationWebhook: a response containing a fake score.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "evaluations/webhook_example_fake",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(EvaluationWebhook, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def upload_file(
+ self, *, upload_type: str, file: typing.IO, testset_name: str, app_id: str
+ ) -> TestSetSimpleResponse:
+ """
+ Uploads a CSV or JSON file and saves its data to MongoDB.
+
+ Args:
+ upload_type : Either a json or csv file.
+ file (UploadFile): The CSV or JSON file to upload.
+ testset_name (Optional): the name of the testset if provided.
+
+ Returns:
+ dict: The result of the upload process.
+
+ Parameters:
+ - upload_type: str.
+
+ - file: typing.IO.
+
+ - testset_name: str.
+
+ - app_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "testsets/upload"
+ ),
+ data=jsonable_encoder(
+ {
+ "upload_type": upload_type,
+ "testset_name": testset_name,
+ "app_id": app_id,
+ }
+ ),
+ files={"file": file},
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(TestSetSimpleResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def import_testset(self) -> TestSetSimpleResponse:
+ """
+ Import JSON testset data from an endpoint and save it to MongoDB.
+
+ Args:
+ endpoint (str): An endpoint URL to import data from.
+ testset_name (str): the name of the testset if provided.
+
+ Returns:
+ dict: The result of the import process.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "testsets/endpoint"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(TestSetSimpleResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_testset(
+ self, app_id: str, *, request: NewTestset
+ ) -> TestSetSimpleResponse:
+ """
+ Create a testset with given name and app_name, save the testset to MongoDB.
+
+ Args:
+ name (str): name of the test set.
+ app_name (str): name of the application.
+ testset (Dict[str, str]): test set data.
+
+ Returns:
+ str: The id of the test set created.
+
+ Parameters:
+ - app_id: str.
+
+ - request: NewTestset.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"testsets/{app_id}"
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(TestSetSimpleResponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_single_testset(self, testset_id: str) -> typing.Any:
+ """
+        Fetch a specific testset in a MongoDB collection using its _id.
+
+        Args:
+            testset_id (str): The _id of the testset to fetch.
+
+ Returns:
+ The requested testset if found, else an HTTPException.
+
+ Parameters:
+ - testset_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"testsets/{testset_id}"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_testset(
+ self, testset_id: str, *, request: NewTestset
+ ) -> typing.Any:
+ """
+ Update a testset with given id, update the testset in MongoDB.
+
+ Args:
+ testset_id (str): id of the test set to be updated.
+ csvdata (NewTestset): New data to replace the old testset.
+
+ Returns:
+ str: The id of the test set updated.
+
+ Parameters:
+ - testset_id: str.
+
+ - request: NewTestset.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"testsets/{testset_id}"
+ ),
+ json=jsonable_encoder(request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_testsets(self, *, app_id: str) -> typing.List[TestSetOutputResponse]:
+ """
+ Get all testsets.
+
+ Returns:
+
+ - A list of testset objects.
+
+ Raises:
+
+ - `HTTPException` with status code 404 if no testsets are found.
+
+ Parameters:
+ - app_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.get_testsets(app_id="app-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "testsets"),
+ params=remove_none_from_dict({"app_id": app_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[TestSetOutputResponse], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_testsets(
+ self, *, testset_ids: typing.List[str]
+ ) -> typing.List[str]:
+ """
+ Delete specific testsets based on their unique IDs.
+
+ Args:
+ testset_ids (List[str]): The unique identifiers of the testsets to delete.
+
+ Returns:
+ A list of the deleted testsets' IDs.
+
+ Parameters:
+ - testset_ids: typing.List[str].
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.delete_testsets(testset_ids=[])
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "testsets"),
+ json=jsonable_encoder({"testset_ids": testset_ids}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[str], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def build_image(
+ self, *, app_id: str, base_name: str, tar_file: typing.IO
+ ) -> Image:
+ """
+ Builds a Docker image from a tar file containing the application code.
+
+ Args:
+ app_id (str): The ID of the application to build the image for.
+ base_name (str): The base name of the image to build.
+ tar_file (UploadFile): The tar file containing the application code.
+ stoken_session (SessionContainer): The session container for the user making the request.
+
+ Returns:
+ Image: The Docker image that was built.
+
+ Parameters:
+ - app_id: str.
+
+ - base_name: str.
+
+ - tar_file: typing.IO.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "containers/build_image"
+ ),
+ params=remove_none_from_dict({"app_id": app_id, "base_name": base_name}),
+ data=jsonable_encoder({}),
+ files={"tar_file": tar_file},
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Image, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def restart_container(
+ self, *, variant_id: str
+ ) -> typing.Dict[str, typing.Any]:
+ """
+ Restart docker container.
+
+ Args:
+ payload (RestartAppContainer) -- the required data (app_name and variant_name)
+
+ Parameters:
+ - variant_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ "containers/restart_container",
+ ),
+ json=jsonable_encoder({"variant_id": variant_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Dict[str, typing.Any], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def container_templates(self) -> ContainerTemplatesResponse:
+ """
+ Returns a list of templates available for creating new containers.
+
+ Parameters:
+ stoken_session (SessionContainer): The session container for the user.
+
+ Returns:
+
+ Union[List[Template], str]: A list of templates or an error message.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "containers/templates"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(ContainerTemplatesResponse, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def construct_app_container_url(
+ self,
+ *,
+ base_id: typing.Optional[str] = None,
+ variant_id: typing.Optional[str] = None,
+ ) -> Uri:
+ """
+ Constructs the URL for an app container based on the provided base_id or variant_id.
+
+ Args:
+ base_id (Optional[str]): The ID of the base to use for the app container.
+ variant_id (Optional[str]): The ID of the variant to use for the app container.
+ stoken_session (SessionContainer): The session container for the user.
+
+ Returns:
+ URI: The URI for the app container.
+
+ Raises:
+ HTTPException: If the base or variant cannot be found or the user does not have access.
+
+ Parameters:
+ - base_id: typing.Optional[str].
+
+ - variant_id: typing.Optional[str].
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "containers/container_url"
+ ),
+ params=remove_none_from_dict(
+ {"base_id": base_id, "variant_id": variant_id}
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Uri, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deploy_to_environment(
+ self, *, environment_name: str, variant_id: str
+ ) -> typing.Any:
+ """
+ Deploys a given variant to an environment
+
+ Args:
+ environment_name: Name of the environment to deploy to.
+ variant_id: variant id to deploy.
+ stoken_session: . Defaults to Depends(verify_session()).
+
+ Raises:
+ HTTPException: If the deployment fails.
+
+ Parameters:
+ - environment_name: str.
+
+ - variant_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "environments/deploy"
+ ),
+ json=jsonable_encoder(
+ {"environment_name": environment_name, "variant_id": variant_id}
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_trace(
+ self,
+ *,
+ app_id: typing.Optional[str] = OMIT,
+ variant_id: typing.Optional[str] = OMIT,
+ cost: typing.Optional[float] = OMIT,
+ latency: float,
+ status: str,
+ token_consumption: typing.Optional[int] = OMIT,
+ tags: typing.Optional[typing.List[str]] = OMIT,
+ start_time: dt.datetime,
+ end_time: dt.datetime,
+ spans: typing.List[str],
+ ) -> str:
+ """
+ Parameters:
+ - app_id: typing.Optional[str].
+
+ - variant_id: typing.Optional[str].
+
+ - cost: typing.Optional[float].
+
+ - latency: float.
+
+ - status: str.
+
+ - token_consumption: typing.Optional[int].
+
+ - tags: typing.Optional[typing.List[str]].
+
+ - start_time: dt.datetime.
+
+ - end_time: dt.datetime.
+
+ - spans: typing.List[str].
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "latency": latency,
+ "status": status,
+ "start_time": start_time,
+ "end_time": end_time,
+ "spans": spans,
+ }
+ if app_id is not OMIT:
+ _request["app_id"] = app_id
+ if variant_id is not OMIT:
+ _request["variant_id"] = variant_id
+ if cost is not OMIT:
+ _request["cost"] = cost
+ if token_consumption is not OMIT:
+ _request["token_consumption"] = token_consumption
+ if tags is not OMIT:
+ _request["tags"] = tags
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "observability/traces"
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_traces(self, app_id: str, variant_id: str) -> typing.List[Trace]:
+ """
+ Parameters:
+ - app_id: str.
+
+ - variant_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.get_traces(app_id="app-id", variant_id="variant-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/traces/{app_id}/{variant_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Trace], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_single_trace(self, trace_id: str) -> Trace:
+ """
+ Parameters:
+ - trace_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/traces/{trace_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Trace, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_trace_status(self, trace_id: str, *, status: str) -> bool:
+ """
+ Parameters:
+ - trace_id: str.
+
+ - status: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/traces/{trace_id}",
+ ),
+ json=jsonable_encoder({"status": status}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(bool, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_span(
+ self,
+ *,
+ parent_span_id: typing.Optional[str] = OMIT,
+ meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ event_name: str,
+ event_type: typing.Optional[str] = OMIT,
+ start_time: dt.datetime,
+ duration: typing.Optional[int] = OMIT,
+ status: str,
+ end_time: dt.datetime,
+ inputs: typing.Optional[typing.List[str]] = OMIT,
+ outputs: typing.Optional[typing.List[str]] = OMIT,
+ prompt_template: typing.Optional[str] = OMIT,
+ tokens_input: typing.Optional[int] = OMIT,
+ tokens_output: typing.Optional[int] = OMIT,
+ token_total: typing.Optional[int] = OMIT,
+ cost: typing.Optional[float] = OMIT,
+ tags: typing.Optional[typing.List[str]] = OMIT,
+ ) -> str:
+ """
+ Parameters:
+ - parent_span_id: typing.Optional[str].
+
+ - meta: typing.Optional[typing.Dict[str, typing.Any]].
+
+ - event_name: str.
+
+ - event_type: typing.Optional[str].
+
+ - start_time: dt.datetime.
+
+ - duration: typing.Optional[int].
+
+ - status: str.
+
+ - end_time: dt.datetime.
+
+ - inputs: typing.Optional[typing.List[str]].
+
+ - outputs: typing.Optional[typing.List[str]].
+
+ - prompt_template: typing.Optional[str].
+
+ - tokens_input: typing.Optional[int].
+
+ - tokens_output: typing.Optional[int].
+
+ - token_total: typing.Optional[int].
+
+ - cost: typing.Optional[float].
+
+ - tags: typing.Optional[typing.List[str]].
+ """
+ _request: typing.Dict[str, typing.Any] = {
+ "event_name": event_name,
+ "start_time": start_time,
+ "status": status,
+ "end_time": end_time,
+ }
+ if parent_span_id is not OMIT:
+ _request["parent_span_id"] = parent_span_id
+ if meta is not OMIT:
+ _request["meta"] = meta
+ if event_type is not OMIT:
+ _request["event_type"] = event_type
+ if duration is not OMIT:
+ _request["duration"] = duration
+ if inputs is not OMIT:
+ _request["inputs"] = inputs
+ if outputs is not OMIT:
+ _request["outputs"] = outputs
+ if prompt_template is not OMIT:
+ _request["prompt_template"] = prompt_template
+ if tokens_input is not OMIT:
+ _request["tokens_input"] = tokens_input
+ if tokens_output is not OMIT:
+ _request["tokens_output"] = tokens_output
+ if token_total is not OMIT:
+ _request["token_total"] = token_total
+ if cost is not OMIT:
+ _request["cost"] = cost
+ if tags is not OMIT:
+ _request["tags"] = tags
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "observability/spans"
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_spans_of_trace(self, trace_id: str) -> typing.List[Span]:
+ """
+ Parameters:
+ - trace_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.get_spans_of_trace(trace_id="trace-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/spans/{trace_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Span], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_feedbacks(self, trace_id: str) -> typing.List[Feedback]:
+ """
+ Parameters:
+ - trace_id: str.
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.get_feedbacks(trace_id="trace-id")
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/feedbacks/{trace_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Feedback], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_feedback(
+ self,
+ trace_id: str,
+ *,
+ feedback: typing.Optional[str] = OMIT,
+ score: typing.Optional[float] = OMIT,
+ meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ ) -> str:
+ """
+ Parameters:
+ - trace_id: str.
+
+ - feedback: typing.Optional[str].
+
+ - score: typing.Optional[float].
+
+ - meta: typing.Optional[typing.Dict[str, typing.Any]].
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if feedback is not OMIT:
+ _request["feedback"] = feedback
+ if score is not OMIT:
+ _request["score"] = score
+ if meta is not OMIT:
+ _request["meta"] = meta
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/feedbacks/{trace_id}",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(str, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_feedback(self, trace_id: str, feedback_id: str) -> Feedback:
+ """
+ Parameters:
+ - trace_id: str.
+
+ - feedback_id: str.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/feedbacks/{trace_id}/{feedback_id}",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Feedback, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_feedback(
+ self,
+ trace_id: str,
+ feedback_id: str,
+ *,
+ feedback: str,
+ score: typing.Optional[float] = OMIT,
+ meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ ) -> Feedback:
+ """
+ Parameters:
+ - trace_id: str.
+
+ - feedback_id: str.
+
+ - feedback: str.
+
+ - score: typing.Optional[float].
+
+ - meta: typing.Optional[typing.Dict[str, typing.Any]].
+ """
+ _request: typing.Dict[str, typing.Any] = {"feedback": feedback}
+ if score is not OMIT:
+ _request["score"] = score
+ if meta is not OMIT:
+ _request["meta"] = meta
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"observability/feedbacks/{trace_id}/{feedback_id}",
+ ),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(Feedback, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_organizations(self) -> typing.List[Organization]:
+ """
+ Returns a list of organizations associated with the user's session.
+
+ Args:
+ stoken_session (SessionContainer): The user's session token.
+
+ Returns:
+ list[Organization]: A list of organizations associated with the user's session.
+
+ Raises:
+ HTTPException: If there is an error retrieving the organizations from the database.
+
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.list_organizations()
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "organizations"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[Organization], _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_own_org(self) -> OrganizationOutput:
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", "organizations/own"
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(OrganizationOutput, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_bases(
+ self,
+ *,
+ app_id: typing.Optional[str] = None,
+ base_name: typing.Optional[str] = None,
+ ) -> typing.List[BaseOutput]:
+ """
+ Retrieve a list of bases filtered by app_id and base_name.
+
+ Args:
+ request (Request): The incoming request.
+ app_id (Optional[str], optional): The ID of the app to filter by. Defaults to None.
+ base_name (Optional[str], optional): The name of the base to filter by. Defaults to None.
+
+ Returns:
+ List[BaseOutput]: A list of BaseOutput objects representing the filtered bases.
+
+ Raises:
+ HTTPException: If there was an error retrieving the bases.
+
+ Parameters:
+ - app_id: typing.Optional[str].
+
+ - base_name: typing.Optional[str].
+ ---
+ from agenta.client import AsyncAgentaApi
+
+ client = AsyncAgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")
+ await client.list_bases()
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "bases"),
+ params=remove_none_from_dict({"app_id": app_id, "base_name": base_name}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[BaseOutput], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_config(
+ self,
+ *,
+ base_id: str,
+ config_name: typing.Optional[str] = None,
+ environment_name: typing.Optional[str] = None,
+ ) -> GetConfigReponse:
+ """
+ Parameters:
+ - base_id: str.
+
+ - config_name: typing.Optional[str].
+
+ - environment_name: typing.Optional[str].
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "configs"),
+ params=remove_none_from_dict(
+ {
+ "base_id": base_id,
+ "config_name": config_name,
+ "environment_name": environment_name,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(GetConfigReponse, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def save_config(
+ self,
+ *,
+ base_id: str,
+ config_name: str,
+ parameters: typing.Dict[str, typing.Any],
+ overwrite: bool,
+ ) -> typing.Any:
+ """
+ Parameters:
+ - base_id: str.
+
+ - config_name: str.
+
+ - parameters: typing.Dict[str, typing.Any].
+
+ - overwrite: bool.
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "configs"),
+ json=jsonable_encoder(
+ {
+ "base_id": base_id,
+ "config_name": config_name,
+ "parameters": parameters,
+ "overwrite": overwrite,
+ }
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/agenta-cli/agenta/client/backend/core/__init__.py b/agenta-cli/agenta/client/backend/core/__init__.py
new file mode 100644
index 0000000000..24149550b4
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/core/__init__.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .api_error import ApiError
+from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper
+from .datetime_utils import serialize_datetime
+from .jsonable_encoder import jsonable_encoder
+from .remove_none_from_dict import remove_none_from_dict
+
+__all__ = [
+ "ApiError",
+ "AsyncClientWrapper",
+ "BaseClientWrapper",
+ "SyncClientWrapper",
+ "jsonable_encoder",
+ "remove_none_from_dict",
+ "serialize_datetime",
+]
diff --git a/agenta-cli/agenta/client/backend/core/api_error.py b/agenta-cli/agenta/client/backend/core/api_error.py
new file mode 100644
index 0000000000..da734b5806
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/core/api_error.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+
+class ApiError(Exception):
+ status_code: typing.Optional[int]
+ body: typing.Any
+
+ def __init__(
+ self, *, status_code: typing.Optional[int] = None, body: typing.Any = None
+ ):
+ self.status_code = status_code
+ self.body = body
+
+ def __str__(self) -> str:
+ return f"status_code: {self.status_code}, body: {self.body}"
diff --git a/agenta-cli/agenta/client/backend/core/client_wrapper.py b/agenta-cli/agenta/client/backend/core/client_wrapper.py
new file mode 100644
index 0000000000..56c1d13c76
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/core/client_wrapper.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import httpx
+
+
+class BaseClientWrapper:
+ def __init__(self, *, api_key: str, base_url: str):
+ self.api_key = api_key
+ self._base_url = base_url
+
+ def get_headers(self) -> typing.Dict[str, str]:
+ headers: typing.Dict[str, str] = {"X-Fern-Language": "Python"}
+ headers["Authorization"] = self.api_key
+ return headers
+
+ def get_base_url(self) -> str:
+ return self._base_url
+
+
+class SyncClientWrapper(BaseClientWrapper):
+ def __init__(self, *, api_key: str, base_url: str, httpx_client: httpx.Client):
+ super().__init__(api_key=api_key, base_url=base_url)
+ self.httpx_client = httpx_client
+
+
+class AsyncClientWrapper(BaseClientWrapper):
+ def __init__(self, *, api_key: str, base_url: str, httpx_client: httpx.AsyncClient):
+ super().__init__(api_key=api_key, base_url=base_url)
+ self.httpx_client = httpx_client
diff --git a/agenta-cli/agenta/client/backend/core/datetime_utils.py b/agenta-cli/agenta/client/backend/core/datetime_utils.py
new file mode 100644
index 0000000000..47344e9d9c
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/core/datetime_utils.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+
+
+def serialize_datetime(v: dt.datetime) -> str:
+ """
+ Serialize a datetime including timezone info.
+
+ Uses the timezone info provided if present, otherwise uses the current runtime's timezone info.
+
+ UTC datetimes end in "Z" while all other timezones are represented as offset from UTC, e.g. +05:00.
+ """
+
+ def _serialize_zoned_datetime(v: dt.datetime) -> str:
+ if v.tzinfo is not None and v.tzinfo.tzname(None) == dt.timezone.utc.tzname(
+ None
+ ):
+ # UTC is a special case where we use "Z" at the end instead of "+00:00"
+ return v.isoformat().replace("+00:00", "Z")
+ else:
+ # Delegate to the typical +/- offset format
+ return v.isoformat()
+
+ if v.tzinfo is not None:
+ return _serialize_zoned_datetime(v)
+ else:
+ local_tz = dt.datetime.now().astimezone().tzinfo
+ localized_dt = v.replace(tzinfo=local_tz)
+ return _serialize_zoned_datetime(localized_dt)
diff --git a/agenta-cli/agenta/client/backend/core/jsonable_encoder.py b/agenta-cli/agenta/client/backend/core/jsonable_encoder.py
new file mode 100644
index 0000000000..0e297e18f0
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/core/jsonable_encoder.py
@@ -0,0 +1,109 @@
+# This file was auto-generated by Fern from our API Definition.
+
+"""
+jsonable_encoder converts a Python object to a JSON-friendly dict
+(e.g. datetimes to strings, Pydantic models to dicts).
+
+Taken from FastAPI, and made a bit simpler
+https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py
+"""
+
+import dataclasses
+import datetime as dt
+from collections import defaultdict
+from enum import Enum
+from pathlib import PurePath
+from types import GeneratorType
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+from .datetime_utils import serialize_datetime
+
+SetIntStr = Set[Union[int, str]]
+DictIntStrAny = Dict[Union[int, str], Any]
+
+
+def generate_encoders_by_class_tuples(
+ type_encoder_map: Dict[Any, Callable[[Any], Any]]
+) -> Dict[Callable[[Any], Any], Tuple[Any, ...]]:
+ encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict(
+ tuple
+ )
+ for type_, encoder in type_encoder_map.items():
+ encoders_by_class_tuples[encoder] += (type_,)
+ return encoders_by_class_tuples
+
+
+encoders_by_class_tuples = generate_encoders_by_class_tuples(
+ pydantic.json.ENCODERS_BY_TYPE
+)
+
+
+def jsonable_encoder(
+ obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None
+) -> Any:
+ custom_encoder = custom_encoder or {}
+ if custom_encoder:
+ if type(obj) in custom_encoder:
+ return custom_encoder[type(obj)](obj)
+ else:
+ for encoder_type, encoder_instance in custom_encoder.items():
+ if isinstance(obj, encoder_type):
+ return encoder_instance(obj)
+ if isinstance(obj, pydantic.BaseModel):
+ encoder = getattr(obj.__config__, "json_encoders", {})
+ if custom_encoder:
+ encoder.update(custom_encoder)
+ obj_dict = obj.dict(by_alias=True)
+ if "__root__" in obj_dict:
+ obj_dict = obj_dict["__root__"]
+ return jsonable_encoder(obj_dict, custom_encoder=encoder)
+ if dataclasses.is_dataclass(obj):
+ obj_dict = dataclasses.asdict(obj)
+ return jsonable_encoder(obj_dict, custom_encoder=custom_encoder)
+ if isinstance(obj, Enum):
+ return obj.value
+ if isinstance(obj, PurePath):
+ return str(obj)
+ if isinstance(obj, (str, int, float, type(None))):
+ return obj
+ if isinstance(obj, dt.date):
+ return str(obj)
+ if isinstance(obj, dt.datetime):
+ return serialize_datetime(obj)
+ if isinstance(obj, dict):
+ encoded_dict = {}
+ allowed_keys = set(obj.keys())
+ for key, value in obj.items():
+ if key in allowed_keys:
+ encoded_key = jsonable_encoder(key, custom_encoder=custom_encoder)
+ encoded_value = jsonable_encoder(value, custom_encoder=custom_encoder)
+ encoded_dict[encoded_key] = encoded_value
+ return encoded_dict
+ if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
+ encoded_list = []
+ for item in obj:
+ encoded_list.append(jsonable_encoder(item, custom_encoder=custom_encoder))
+ return encoded_list
+
+ if type(obj) in pydantic.json.ENCODERS_BY_TYPE:
+ return pydantic.json.ENCODERS_BY_TYPE[type(obj)](obj)
+ for encoder, classes_tuple in encoders_by_class_tuples.items():
+ if isinstance(obj, classes_tuple):
+ return encoder(obj)
+
+ try:
+ data = dict(obj)
+ except Exception as e:
+ errors: List[Exception] = []
+ errors.append(e)
+ try:
+ data = vars(obj)
+ except Exception as e:
+ errors.append(e)
+ raise ValueError(errors) from e
+ return jsonable_encoder(data, custom_encoder=custom_encoder)
diff --git a/agenta-cli/agenta/client/backend/core/remove_none_from_dict.py b/agenta-cli/agenta/client/backend/core/remove_none_from_dict.py
new file mode 100644
index 0000000000..2da30f7133
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/core/remove_none_from_dict.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from typing import Any, Dict, Optional
+
+
+def remove_none_from_dict(original: Dict[str, Optional[Any]]) -> Dict[str, Any]:
+ new: Dict[str, Any] = {}
+ for key, value in original.items():
+ if value is not None:
+ new[key] = value
+ return new
diff --git a/agenta-cli/agenta/client/backend/errors/__init__.py b/agenta-cli/agenta/client/backend/errors/__init__.py
new file mode 100644
index 0000000000..cb64e066bf
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/errors/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .unprocessable_entity_error import UnprocessableEntityError
+
+__all__ = ["UnprocessableEntityError"]
diff --git a/agenta-cli/agenta/client/backend/errors/unprocessable_entity_error.py b/agenta-cli/agenta/client/backend/errors/unprocessable_entity_error.py
new file mode 100644
index 0000000000..47470a70e7
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/errors/unprocessable_entity_error.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.api_error import ApiError
+from ..types.http_validation_error import HttpValidationError
+
+
+class UnprocessableEntityError(ApiError):
+ def __init__(self, body: HttpValidationError):
+ super().__init__(status_code=422, body=body)
diff --git a/agenta-cli/agenta/client/backend/types/__init__.py b/agenta-cli/agenta/client/backend/types/__init__.py
new file mode 100644
index 0000000000..4be042f7a1
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/__init__.py
@@ -0,0 +1,95 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .add_variant_from_base_and_config_response import (
+ AddVariantFromBaseAndConfigResponse,
+)
+from .app import App
+from .app_variant_output import AppVariantOutput
+from .base_output import BaseOutput
+from .body_import_testset import BodyImportTestset
+from .container_templates_response import ContainerTemplatesResponse
+from .create_app_output import CreateAppOutput
+from .create_custom_evaluation import CreateCustomEvaluation
+from .custom_evaluation_detail import CustomEvaluationDetail
+from .custom_evaluation_names import CustomEvaluationNames
+from .custom_evaluation_output import CustomEvaluationOutput
+from .docker_env_vars import DockerEnvVars
+from .environment_output import EnvironmentOutput
+from .evaluation import Evaluation
+from .evaluation_scenario import EvaluationScenario
+from .evaluation_scenario_input import EvaluationScenarioInput
+from .evaluation_scenario_output import EvaluationScenarioOutput
+from .evaluation_scenario_score import EvaluationScenarioScore
+from .evaluation_scenario_update_score import EvaluationScenarioUpdateScore
+from .evaluation_status_enum import EvaluationStatusEnum
+from .evaluation_type import EvaluationType
+from .evaluation_type_settings import EvaluationTypeSettings
+from .evaluation_webhook import EvaluationWebhook
+from .feedback import Feedback
+from .get_config_reponse import GetConfigReponse
+from .http_validation_error import HttpValidationError
+from .image import Image
+from .invite_request import InviteRequest
+from .list_api_keys_output import ListApiKeysOutput
+from .new_testset import NewTestset
+from .organization import Organization
+from .organization_output import OrganizationOutput
+from .simple_evaluation_output import SimpleEvaluationOutput
+from .span import Span
+from .template import Template
+from .template_image_info import TemplateImageInfo
+from .test_set_output_response import TestSetOutputResponse
+from .test_set_simple_response import TestSetSimpleResponse
+from .trace import Trace
+from .uri import Uri
+from .validation_error import ValidationError
+from .validation_error_loc_item import ValidationErrorLocItem
+from .variant_action import VariantAction
+from .variant_action_enum import VariantActionEnum
+
+__all__ = [
+ "AddVariantFromBaseAndConfigResponse",
+ "App",
+ "AppVariantOutput",
+ "BaseOutput",
+ "BodyImportTestset",
+ "ContainerTemplatesResponse",
+ "CreateAppOutput",
+ "CreateCustomEvaluation",
+ "CustomEvaluationDetail",
+ "CustomEvaluationNames",
+ "CustomEvaluationOutput",
+ "DockerEnvVars",
+ "EnvironmentOutput",
+ "Evaluation",
+ "EvaluationScenario",
+ "EvaluationScenarioInput",
+ "EvaluationScenarioOutput",
+ "EvaluationScenarioScore",
+ "EvaluationScenarioUpdateScore",
+ "EvaluationStatusEnum",
+ "EvaluationType",
+ "EvaluationTypeSettings",
+ "EvaluationWebhook",
+ "Feedback",
+ "GetConfigReponse",
+ "HttpValidationError",
+ "Image",
+ "InviteRequest",
+ "ListApiKeysOutput",
+ "NewTestset",
+ "Organization",
+ "OrganizationOutput",
+ "SimpleEvaluationOutput",
+ "Span",
+ "Template",
+ "TemplateImageInfo",
+ "TestSetOutputResponse",
+ "TestSetSimpleResponse",
+ "Trace",
+ "Uri",
+ "ValidationError",
+ "ValidationErrorLocItem",
+ "VariantAction",
+ "VariantActionEnum",
+]
diff --git a/agenta-cli/agenta/client/backend/types/add_variant_from_base_and_config_response.py b/agenta-cli/agenta/client/backend/types/add_variant_from_base_and_config_response.py
new file mode 100644
index 0000000000..f9d71ca1b3
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/add_variant_from_base_and_config_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .app_variant_output import AppVariantOutput
+
+AddVariantFromBaseAndConfigResponse = typing.Union[AppVariantOutput, typing.Any]
diff --git a/agenta-cli/agenta/client/backend/types/app.py b/agenta-cli/agenta/client/backend/types/app.py
new file mode 100644
index 0000000000..7611dc75e9
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/app.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class App(pydantic.BaseModel):
+ app_id: str
+ app_name: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/app_variant_output.py b/agenta-cli/agenta/client/backend/types/app_variant_output.py
new file mode 100644
index 0000000000..f5f3328493
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/app_variant_output.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class AppVariantOutput(pydantic.BaseModel):
+ app_id: str
+ app_name: str
+ variant_id: str
+ variant_name: str
+ parameters: typing.Optional[typing.Dict[str, typing.Any]]
+ previous_variant_name: typing.Optional[str]
+ organization_id: str
+ user_id: str
+ base_name: str
+ base_id: str
+ config_name: str
+ config_id: str
+ uri: typing.Optional[str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/base_output.py b/agenta-cli/agenta/client/backend/types/base_output.py
new file mode 100644
index 0000000000..43a0e31eae
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/base_output.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class BaseOutput(pydantic.BaseModel):
+ base_id: str
+ base_name: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/body_import_testset.py b/agenta-cli/agenta/client/backend/types/body_import_testset.py
new file mode 100644
index 0000000000..f06788d80d
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/body_import_testset.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class BodyImportTestset(pydantic.BaseModel):
+ endpoint: typing.Optional[str]
+ testset_name: typing.Optional[str]
+ app_id: typing.Optional[str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/container_templates_response.py b/agenta-cli/agenta/client/backend/types/container_templates_response.py
new file mode 100644
index 0000000000..41b0455c43
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/container_templates_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .template import Template
+
+ContainerTemplatesResponse = typing.Union[typing.List[Template], str]
diff --git a/agenta-cli/agenta/client/backend/types/create_app_output.py b/agenta-cli/agenta/client/backend/types/create_app_output.py
new file mode 100644
index 0000000000..a736531605
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/create_app_output.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class CreateAppOutput(pydantic.BaseModel):
+ app_id: str
+ app_name: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/create_custom_evaluation.py b/agenta-cli/agenta/client/backend/types/create_custom_evaluation.py
new file mode 100644
index 0000000000..452f784611
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/create_custom_evaluation.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class CreateCustomEvaluation(pydantic.BaseModel):
+ evaluation_name: str
+ python_code: str
+ app_id: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/custom_evaluation_detail.py b/agenta-cli/agenta/client/backend/types/custom_evaluation_detail.py
new file mode 100644
index 0000000000..2ae2a3f96f
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/custom_evaluation_detail.py
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class CustomEvaluationDetail(pydantic.BaseModel):
+ id: str
+ app_id: str
+ evaluation_name: str
+ python_code: str
+ created_at: dt.datetime
+ updated_at: dt.datetime
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/custom_evaluation_names.py b/agenta-cli/agenta/client/backend/types/custom_evaluation_names.py
new file mode 100644
index 0000000000..860cf6b3db
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/custom_evaluation_names.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class CustomEvaluationNames(pydantic.BaseModel):
+ id: str
+ evaluation_name: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/custom_evaluation_output.py b/agenta-cli/agenta/client/backend/types/custom_evaluation_output.py
new file mode 100644
index 0000000000..3a9d2a8be0
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/custom_evaluation_output.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class CustomEvaluationOutput(pydantic.BaseModel):
+ id: str
+ app_id: str
+ evaluation_name: str
+ created_at: dt.datetime
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/docker_env_vars.py b/agenta-cli/agenta/client/backend/types/docker_env_vars.py
new file mode 100644
index 0000000000..ea0c144502
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/docker_env_vars.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class DockerEnvVars(pydantic.BaseModel):
+ env_vars: typing.Dict[str, str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/environment_output.py b/agenta-cli/agenta/client/backend/types/environment_output.py
new file mode 100644
index 0000000000..61b22b45a3
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/environment_output.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class EnvironmentOutput(pydantic.BaseModel):
+ name: str
+ app_id: str
+ deployed_app_variant_id: typing.Optional[str]
+ deployed_variant_name: typing.Optional[str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/evaluation.py b/agenta-cli/agenta/client/backend/types/evaluation.py
new file mode 100644
index 0000000000..b0211abd38
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation.py
@@ -0,0 +1,50 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .evaluation_type import EvaluationType
+from .evaluation_type_settings import EvaluationTypeSettings
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class Evaluation(pydantic.BaseModel):
+ id: str
+ app_id: str
+ user_id: str
+ user_username: str
+ evaluation_type: EvaluationType
+ evaluation_type_settings: typing.Optional[EvaluationTypeSettings]
+ variant_ids: typing.List[str]
+ variant_names: typing.List[str]
+ testset_id: str
+ testset_name: str
+ status: str
+ created_at: dt.datetime
+ updated_at: dt.datetime
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_scenario.py b/agenta-cli/agenta/client/backend/types/evaluation_scenario.py
new file mode 100644
index 0000000000..6e49169ba7
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_scenario.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .evaluation_scenario_input import EvaluationScenarioInput
+from .evaluation_scenario_output import EvaluationScenarioOutput
+from .evaluation_scenario_score import EvaluationScenarioScore
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class EvaluationScenario(pydantic.BaseModel):
+ id: typing.Optional[str]
+ evaluation_id: str
+ inputs: typing.List[EvaluationScenarioInput]
+ outputs: typing.List[EvaluationScenarioOutput]
+ vote: typing.Optional[str]
+ score: typing.Optional[EvaluationScenarioScore]
+ evaluation: typing.Optional[str]
+ correct_answer: typing.Optional[str]
+ is_pinned: typing.Optional[bool]
+ note: typing.Optional[str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_scenario_input.py b/agenta-cli/agenta/client/backend/types/evaluation_scenario_input.py
new file mode 100644
index 0000000000..ff78ae974f
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_scenario_input.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class EvaluationScenarioInput(pydantic.BaseModel):
+ input_name: str
+ input_value: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_scenario_output.py b/agenta-cli/agenta/client/backend/types/evaluation_scenario_output.py
new file mode 100644
index 0000000000..17ecf2cc53
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_scenario_output.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class EvaluationScenarioOutput(pydantic.BaseModel):
+ variant_id: str
+ variant_output: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_scenario_score.py b/agenta-cli/agenta/client/backend/types/evaluation_scenario_score.py
new file mode 100644
index 0000000000..0dc572cd6d
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_scenario_score.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EvaluationScenarioScore = typing.Union[int, str]
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_scenario_update_score.py b/agenta-cli/agenta/client/backend/types/evaluation_scenario_update_score.py
new file mode 100644
index 0000000000..5c87996489
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_scenario_update_score.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EvaluationScenarioUpdateScore = typing.Union[int, str]
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_status_enum.py b/agenta-cli/agenta/client/backend/types/evaluation_status_enum.py
new file mode 100644
index 0000000000..159716b2e6
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_status_enum.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class EvaluationStatusEnum(str, enum.Enum):
+ """
+ An enumeration.
+ """
+
+ EVALUATION_INITIALIZED = "EVALUATION_INITIALIZED"
+ EVALUATION_STARTED = "EVALUATION_STARTED"
+ COMPARISON_RUN_STARTED = "COMPARISON_RUN_STARTED"
+ EVALUATION_FINISHED = "EVALUATION_FINISHED"
+
+ def visit(
+ self,
+ evaluation_initialized: typing.Callable[[], T_Result],
+ evaluation_started: typing.Callable[[], T_Result],
+ comparison_run_started: typing.Callable[[], T_Result],
+ evaluation_finished: typing.Callable[[], T_Result],
+ ) -> T_Result:
+ if self is EvaluationStatusEnum.EVALUATION_INITIALIZED:
+ return evaluation_initialized()
+ if self is EvaluationStatusEnum.EVALUATION_STARTED:
+ return evaluation_started()
+ if self is EvaluationStatusEnum.COMPARISON_RUN_STARTED:
+ return comparison_run_started()
+ if self is EvaluationStatusEnum.EVALUATION_FINISHED:
+ return evaluation_finished()
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_type.py b/agenta-cli/agenta/client/backend/types/evaluation_type.py
new file mode 100644
index 0000000000..29990df5e9
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_type.py
@@ -0,0 +1,53 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class EvaluationType(str, enum.Enum):
+ """
+ An enumeration.
+ """
+
+ AUTO_EXACT_MATCH = "auto_exact_match"
+ AUTO_SIMILARITY_MATCH = "auto_similarity_match"
+ AUTO_REGEX_TEST = "auto_regex_test"
+ AUTO_WEBHOOK_TEST = "auto_webhook_test"
+ AUTO_AI_CRITIQUE = "auto_ai_critique"
+ HUMAN_A_B_TESTING = "human_a_b_testing"
+ HUMAN_SCORING = "human_scoring"
+ CUSTOM_CODE_RUN = "custom_code_run"
+ SINGLE_MODEL_TEST = "single_model_test"
+
+ def visit(
+ self,
+ auto_exact_match: typing.Callable[[], T_Result],
+ auto_similarity_match: typing.Callable[[], T_Result],
+ auto_regex_test: typing.Callable[[], T_Result],
+ auto_webhook_test: typing.Callable[[], T_Result],
+ auto_ai_critique: typing.Callable[[], T_Result],
+ human_a_b_testing: typing.Callable[[], T_Result],
+ human_scoring: typing.Callable[[], T_Result],
+ custom_code_run: typing.Callable[[], T_Result],
+ single_model_test: typing.Callable[[], T_Result],
+ ) -> T_Result:
+ if self is EvaluationType.AUTO_EXACT_MATCH:
+ return auto_exact_match()
+ if self is EvaluationType.AUTO_SIMILARITY_MATCH:
+ return auto_similarity_match()
+ if self is EvaluationType.AUTO_REGEX_TEST:
+ return auto_regex_test()
+ if self is EvaluationType.AUTO_WEBHOOK_TEST:
+ return auto_webhook_test()
+ if self is EvaluationType.AUTO_AI_CRITIQUE:
+ return auto_ai_critique()
+ if self is EvaluationType.HUMAN_A_B_TESTING:
+ return human_a_b_testing()
+ if self is EvaluationType.HUMAN_SCORING:
+ return human_scoring()
+ if self is EvaluationType.CUSTOM_CODE_RUN:
+ return custom_code_run()
+ if self is EvaluationType.SINGLE_MODEL_TEST:
+ return single_model_test()
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_type_settings.py b/agenta-cli/agenta/client/backend/types/evaluation_type_settings.py
new file mode 100644
index 0000000000..3b6c1d0691
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_type_settings.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class EvaluationTypeSettings(pydantic.BaseModel):
+ similarity_threshold: typing.Optional[float]
+ regex_pattern: typing.Optional[str]
+ regex_should_match: typing.Optional[bool]
+ webhook_url: typing.Optional[str]
+ custom_code_evaluation_id: typing.Optional[str]
+ llm_app_prompt_template: typing.Optional[str]
+ evaluation_prompt_template: typing.Optional[str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/evaluation_webhook.py b/agenta-cli/agenta/client/backend/types/evaluation_webhook.py
new file mode 100644
index 0000000000..c8d71dc626
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/evaluation_webhook.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class EvaluationWebhook(pydantic.BaseModel):
+ score: float
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/feedback.py b/agenta-cli/agenta/client/backend/types/feedback.py
new file mode 100644
index 0000000000..e4fa0db6e1
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/feedback.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class Feedback(pydantic.BaseModel):
+ feedback: typing.Optional[str]
+ score: typing.Optional[float]
+ meta: typing.Optional[typing.Dict[str, typing.Any]]
+ feedback_id: str
+ created_at: typing.Optional[dt.datetime]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/get_config_reponse.py b/agenta-cli/agenta/client/backend/types/get_config_reponse.py
new file mode 100644
index 0000000000..e3a62dc9fb
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/get_config_reponse.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class GetConfigReponse(pydantic.BaseModel):
+ config_id: str
+ config_name: str
+ current_version: int
+ parameters: typing.Dict[str, typing.Any]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/http_validation_error.py b/agenta-cli/agenta/client/backend/types/http_validation_error.py
new file mode 100644
index 0000000000..02fc88d818
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/http_validation_error.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .validation_error import ValidationError
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class HttpValidationError(pydantic.BaseModel):
+ detail: typing.Optional[typing.List[ValidationError]]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/image.py b/agenta-cli/agenta/client/backend/types/image.py
new file mode 100644
index 0000000000..714bf93e9c
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/image.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class Image(pydantic.BaseModel):
+ type: typing.Optional[str]
+ docker_id: str
+ tags: str
+ organization_id: typing.Optional[str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/invite_request.py b/agenta-cli/agenta/client/backend/types/invite_request.py
new file mode 100644
index 0000000000..38a759ad10
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/invite_request.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class InviteRequest(pydantic.BaseModel):
+ email: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/list_api_keys_output.py b/agenta-cli/agenta/client/backend/types/list_api_keys_output.py
new file mode 100644
index 0000000000..0a925376d3
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/list_api_keys_output.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class ListApiKeysOutput(pydantic.BaseModel):
+ prefix: str
+ created_at: dt.datetime
+ last_used_at: typing.Optional[dt.datetime]
+ expiration_date: typing.Optional[dt.datetime]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/new_testset.py b/agenta-cli/agenta/client/backend/types/new_testset.py
new file mode 100644
index 0000000000..7f931de42e
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/new_testset.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class NewTestset(pydantic.BaseModel):
+ name: str
+ csvdata: typing.List[typing.Dict[str, str]]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/organization.py b/agenta-cli/agenta/client/backend/types/organization.py
new file mode 100644
index 0000000000..df4d90e2ec
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/organization.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class Organization(pydantic.BaseModel):
+ id: typing.Optional[str]
+ name: str
+ description: typing.Optional[str]
+ type: typing.Optional[str]
+ owner: str
+ members: typing.Optional[typing.List[str]]
+ invitations: typing.Optional[typing.List[typing.Any]]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/organization_output.py b/agenta-cli/agenta/client/backend/types/organization_output.py
new file mode 100644
index 0000000000..93b88e4cf4
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/organization_output.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class OrganizationOutput(pydantic.BaseModel):
+ id: str
+ name: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/simple_evaluation_output.py b/agenta-cli/agenta/client/backend/types/simple_evaluation_output.py
new file mode 100644
index 0000000000..ed86be2b2e
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/simple_evaluation_output.py
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .evaluation_type import EvaluationType
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class SimpleEvaluationOutput(pydantic.BaseModel):
+ id: str
+ variant_ids: typing.List[str]
+ app_id: str
+ status: str
+ evaluation_type: EvaluationType
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/span.py b/agenta-cli/agenta/client/backend/types/span.py
new file mode 100644
index 0000000000..562d5783e0
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/span.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class Span(pydantic.BaseModel):
+ parent_span_id: typing.Optional[str]
+ meta: typing.Optional[typing.Dict[str, typing.Any]]
+ event_name: str
+ event_type: typing.Optional[str]
+ start_time: dt.datetime
+ duration: typing.Optional[int]
+ status: str
+ end_time: dt.datetime
+ inputs: typing.Optional[typing.List[str]]
+ outputs: typing.Optional[typing.List[str]]
+ prompt_template: typing.Optional[str]
+ tokens_input: typing.Optional[int]
+ tokens_output: typing.Optional[int]
+ token_total: typing.Optional[int]
+ cost: typing.Optional[float]
+ tags: typing.Optional[typing.List[str]]
+ span_id: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/template.py b/agenta-cli/agenta/client/backend/types/template.py
new file mode 100644
index 0000000000..b509e795d4
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/template.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .template_image_info import TemplateImageInfo
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class Template(pydantic.BaseModel):
+ id: str
+ image: TemplateImageInfo
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/template_image_info.py b/agenta-cli/agenta/client/backend/types/template_image_info.py
new file mode 100644
index 0000000000..b0c01e7504
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/template_image_info.py
@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class TemplateImageInfo(pydantic.BaseModel):
+ name: str
+ size: typing.Optional[int]
+ digest: typing.Optional[str]
+ title: str
+ description: str
+ last_pushed: typing.Optional[dt.datetime]
+ repo_name: typing.Optional[str]
+ template_uri: typing.Optional[str]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/test_set_output_response.py b/agenta-cli/agenta/client/backend/types/test_set_output_response.py
new file mode 100644
index 0000000000..715ebcdcd5
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/test_set_output_response.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class TestSetOutputResponse(pydantic.BaseModel):
+ id: str = pydantic.Field(alias="_id")
+ name: str
+ created_at: dt.datetime
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ allow_population_by_field_name = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/test_set_simple_response.py b/agenta-cli/agenta/client/backend/types/test_set_simple_response.py
new file mode 100644
index 0000000000..b6ac04a871
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/test_set_simple_response.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class TestSetSimpleResponse(pydantic.BaseModel):
+ id: str
+ name: str
+ created_at: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/trace.py b/agenta-cli/agenta/client/backend/types/trace.py
new file mode 100644
index 0000000000..9e06a32be5
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/trace.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .feedback import Feedback
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class Trace(pydantic.BaseModel):
+ app_id: typing.Optional[str]
+ variant_id: typing.Optional[str]
+ cost: typing.Optional[float]
+ latency: float
+ status: str
+ token_consumption: typing.Optional[int]
+ tags: typing.Optional[typing.List[str]]
+ start_time: dt.datetime
+ end_time: dt.datetime
+ trace_id: str
+ spans: typing.List[str]
+ feedbacks: typing.Optional[typing.List[Feedback]]
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/uri.py b/agenta-cli/agenta/client/backend/types/uri.py
new file mode 100644
index 0000000000..608f89ef6f
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/uri.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class Uri(pydantic.BaseModel):
+ uri: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/validation_error.py b/agenta-cli/agenta/client/backend/types/validation_error.py
new file mode 100644
index 0000000000..ffd8cc872f
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/validation_error.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .validation_error_loc_item import ValidationErrorLocItem
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class ValidationError(pydantic.BaseModel):
+ loc: typing.List[ValidationErrorLocItem]
+ msg: str
+ type: str
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/validation_error_loc_item.py b/agenta-cli/agenta/client/backend/types/validation_error_loc_item.py
new file mode 100644
index 0000000000..9a0a83fef5
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/validation_error_loc_item.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ValidationErrorLocItem = typing.Union[str, int]
diff --git a/agenta-cli/agenta/client/backend/types/variant_action.py b/agenta-cli/agenta/client/backend/types/variant_action.py
new file mode 100644
index 0000000000..a614ac00f5
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/variant_action.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .variant_action_enum import VariantActionEnum
+
+try:
+ import pydantic.v1 as pydantic # type: ignore
+except ImportError:
+ import pydantic # type: ignore
+
+
+class VariantAction(pydantic.BaseModel):
+ action: VariantActionEnum
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {
+ "by_alias": True,
+ "exclude_unset": True,
+ **kwargs,
+ }
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/agenta-cli/agenta/client/backend/types/variant_action_enum.py b/agenta-cli/agenta/client/backend/types/variant_action_enum.py
new file mode 100644
index 0000000000..6f5f8c19c5
--- /dev/null
+++ b/agenta-cli/agenta/client/backend/types/variant_action_enum.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class VariantActionEnum(str, enum.Enum):
+ """
+ An enumeration.
+ """
+
+ START = "START"
+ STOP = "STOP"
+
+ def visit(
+ self, start: typing.Callable[[], T_Result], stop: typing.Callable[[], T_Result]
+ ) -> T_Result:
+ if self is VariantActionEnum.START:
+ return start()
+ if self is VariantActionEnum.STOP:
+ return stop()
diff --git a/agenta-cli/agenta/client/exceptions.py b/agenta-cli/agenta/client/exceptions.py
new file mode 100644
index 0000000000..ac6fafe529
--- /dev/null
+++ b/agenta-cli/agenta/client/exceptions.py
@@ -0,0 +1,2 @@
+class APIRequestError(Exception):
+ """Exception to be raised when an API request fails."""
diff --git a/agenta-cli/agenta/config.py b/agenta-cli/agenta/config.py
index a072adf9a6..10e5e1965a 100644
--- a/agenta-cli/agenta/config.py
+++ b/agenta-cli/agenta/config.py
@@ -1,4 +1,4 @@
-from pydantic import BaseSettings
+from pydantic.v1 import BaseSettings
import os
import toml
diff --git a/agenta-cli/agenta/sdk/__init__.py b/agenta-cli/agenta/sdk/__init__.py
index b10b8c1e17..ebd87f40ba 100644
--- a/agenta-cli/agenta/sdk/__init__.py
+++ b/agenta-cli/agenta/sdk/__init__.py
@@ -12,6 +12,7 @@
TextParam,
MessagesInput,
FileInputURL,
+ BinaryParam,
)
from .agenta_init import Config, init
diff --git a/agenta-cli/agenta/sdk/agenta_decorator.py b/agenta-cli/agenta/sdk/agenta_decorator.py
index d47a45f719..677b9c0ae2 100644
--- a/agenta-cli/agenta/sdk/agenta_decorator.py
+++ b/agenta-cli/agenta/sdk/agenta_decorator.py
@@ -26,6 +26,7 @@
TextParam,
MessagesInput,
FileInputURL,
+ BinaryParam,
)
app = FastAPI()
@@ -352,6 +353,7 @@ def override_schema(openapi_schema: dict, func_name: str, endpoint: str, params:
- The default value for DictInput instance
- The default value for MessagesParam instance
- The default value for FileInputURL instance
+ - The default value for BinaryParam instance
- ... [PLEASE ADD AT EACH CHANGE]
Args:
@@ -424,3 +426,6 @@ def find_in_schema(schema: dict, param_name: str, xparam: str):
):
subschema = find_in_schema(schema_to_override, param_name, "file_url")
subschema["default"] = "https://example.com"
+ if isinstance(param_val, BinaryParam):
+ subschema = find_in_schema(schema_to_override, param_name, "bool")
+ subschema["default"] = param_val.default
diff --git a/agenta-cli/agenta/sdk/agenta_init.py b/agenta-cli/agenta/sdk/agenta_init.py
index 30590e7fbb..6803fecd80 100644
--- a/agenta-cli/agenta/sdk/agenta_init.py
+++ b/agenta-cli/agenta/sdk/agenta_init.py
@@ -1,13 +1,26 @@
import os
import logging
from typing import Any, Optional
-from agenta.client import client
from .utils.globals import set_global
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
+from agenta.client.backend.client import AgentaApi
+from agenta.client.exceptions import APIRequestError
+
+BACKEND_URL_SUFFIX = os.environ.get("BACKEND_URL_SUFFIX", "api")
+CLIENT_API_KEY = os.environ.get("AGENTA_API_KEY")
+CLIENT_HOST = os.environ.get("AGENTA_HOST", "http://localhost")
+
+# initialize the client with the backend url and api key
+backend_url = f"{CLIENT_HOST}/{BACKEND_URL_SUFFIX}"
+client = AgentaApi(
+ base_url=backend_url,
+ api_key=CLIENT_API_KEY if CLIENT_API_KEY else "",
+)
+
class AgentaSingleton:
"""Singleton class to save all the "global variables" for the sdk."""
@@ -61,23 +74,31 @@ def init(
f"Warning: Your configuration will not be saved permanently since app_name and base_name are not provided."
)
else:
- app_id = client.get_app_by_name(
- app_name=app_name, host=host, api_key=api_key
- )
- base_id = client.get_base_by_app_id_and_name(
- app_id=app_id, base_name=base_name, host=host, api_key=api_key
- )
+ try:
+ get_app_id = client.list_apps(app_name=app_name)
+ app_id = get_app_id.app_id
+
+ if not app_id:
+ raise APIRequestError(
+ f"App with name {app_name} does not exist on the server."
+ )
+
+ get_base_id = client.list_bases(app_id=app_id, base_name=base_name)
+ base_id = get_base_id.base_id
+ except Exception as ex:
+ raise APIRequestError(
+ f"Failed to get base id and/or app_id from the server with error: {ex}"
+ )
self.base_id = base_id
self.host = host
self.api_key = api_key
- self.config = Config(base_id=base_id, host=host, api_key=api_key)
+ self.config = Config(base_id=base_id, host=host)
class Config:
- def __init__(self, base_id, host, api_key):
+ def __init__(self, base_id, host):
self.base_id = base_id
self.host = host
- self.api_key = api_key
if base_id is None or host is None:
self.persist = False
else:
@@ -113,13 +134,11 @@ def push(self, config_name: str, overwrite=True, **kwargs):
if not self.persist:
return
try:
- client.save_variant_config(
+ client.save_config(
base_id=self.base_id,
config_name=config_name,
parameters=kwargs,
overwrite=overwrite,
- host=self.host,
- api_key=self.api_key,
)
except Exception as ex:
logger.warning(
@@ -137,18 +156,13 @@ def pull(self, config_name: str = "default", environment_name: str = None):
if self.persist:
try:
if environment_name:
- config = client.fetch_variant_config(
- base_id=self.base_id,
- host=self.host,
- api_key=self.api_key,
- environment_name=environment_name,
+ config = client.get_config(
+ base_id=self.base_id, environment_name=environment_name
)
else:
- config = client.fetch_variant_config(
+ config = client.get_config(
base_id=self.base_id,
- host=self.host,
- api_key=self.api_key,
config_name=config_name,
)
except Exception as ex:
diff --git a/agenta-cli/agenta/sdk/types.py b/agenta-cli/agenta/sdk/types.py
index 8c22032bf8..408c6ade3e 100644
--- a/agenta-cli/agenta/sdk/types.py
+++ b/agenta-cli/agenta/sdk/types.py
@@ -1,7 +1,7 @@
import json
from typing import Any, Dict, List
-from pydantic import BaseModel, Extra, HttpUrl
+from pydantic import BaseModel, Extra, HttpUrl, Field
class InFile:
@@ -29,6 +29,22 @@ def __modify_schema__(cls, field_schema):
field_schema.update({"x-parameter": "text"})
+class BinaryParam(int):
+ def __new__(cls, value: bool = False):
+ instance = super().__new__(cls, int(value))
+ instance.default = value
+ return instance
+
+ @classmethod
+ def __modify_schema__(cls, field_schema):
+ field_schema.update(
+ {
+ "x-parameter": "bool",
+ "type": "boolean",
+ }
+ )
+
+
class IntParam(int):
def __new__(cls, default: int = 6, minval: float = 1, maxval: float = 10):
instance = super().__new__(cls, default)
diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock
index 940e1ffff7..2d6fb497ac 100644
--- a/agenta-cli/poetry.lock
+++ b/agenta-cli/poetry.lock
@@ -1,52 +1,54 @@
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
+[[package]]
+name = "annotated-types"
+version = "0.6.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"},
+ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"},
+]
+
[[package]]
name = "anyio"
-version = "3.6.2"
+version = "3.7.1"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
-python-versions = ">=3.6.2"
+python-versions = ">=3.7"
files = [
- {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"},
- {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"},
+ {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"},
+ {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"},
]
[package.dependencies]
+exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
[package.extras]
-doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
-test = ["contextlib2", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "uvloop (>=0.15)"]
-trio = ["trio (>=0.16,<0.22)"]
-
-[[package]]
-name = "appnope"
-version = "0.1.3"
-description = "Disable App Nap on macOS >= 10.9"
-optional = false
-python-versions = "*"
-files = [
- {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"},
- {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"},
-]
+doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"]
+test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (<0.22)"]
[[package]]
name = "asttokens"
-version = "2.2.1"
+version = "2.4.1"
description = "Annotate AST trees with source code positions"
optional = false
python-versions = "*"
files = [
- {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"},
- {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"},
+ {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"},
+ {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"},
]
[package.dependencies]
-six = "*"
+six = ">=1.12.0"
[package.extras]
-test = ["astroid", "pytest"]
+astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"]
+test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"]
[[package]]
name = "atomicwrites"
@@ -76,17 +78,6 @@ docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-
tests = ["attrs[tests-no-zope]", "zope-interface"]
tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-[[package]]
-name = "backcall"
-version = "0.2.0"
-description = "Specifications for callback functions passed in to an API"
-optional = false
-python-versions = "*"
-files = [
- {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"},
- {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
-]
-
[[package]]
name = "backoff"
version = "2.2.1"
@@ -100,108 +91,123 @@ files = [
[[package]]
name = "certifi"
-version = "2023.5.7"
+version = "2023.11.17"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"},
- {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"},
+ {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"},
+ {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"},
]
[[package]]
name = "charset-normalizer"
-version = "3.1.0"
+version = "3.3.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
- {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"},
- {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"},
+ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
+ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
]
[[package]]
name = "click"
-version = "8.1.3"
+version = "8.1.7"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
files = [
- {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
- {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
]
[package.dependencies]
@@ -231,13 +237,13 @@ files = [
[[package]]
name = "docker"
-version = "6.1.1"
+version = "6.1.3"
description = "A Python library for the Docker Engine API."
optional = false
python-versions = ">=3.7"
files = [
- {file = "docker-6.1.1-py3-none-any.whl", hash = "sha256:8308b23d3d0982c74f7aa0a3abd774898c0c4fba006e9c3bde4f68354e470fe2"},
- {file = "docker-6.1.1.tar.gz", hash = "sha256:5ec18b9c49d48ee145a5b5824bb126dc32fc77931e18444783fc07a7724badc0"},
+ {file = "docker-6.1.3-py3-none-any.whl", hash = "sha256:aecd2277b8bf8e506e484f6ab7aec39abe0038e29fa4a6d3ba86c3fe01844ed9"},
+ {file = "docker-6.1.3.tar.gz", hash = "sha256:aa6d17830045ba5ef0168d5eaa34d37beeb113948c413affe1d5991fc11f9a20"},
]
[package.dependencies]
@@ -251,69 +257,82 @@ websocket-client = ">=0.32.0"
ssh = ["paramiko (>=2.4.3)"]
[[package]]
-name = "executing"
+name = "exceptiongroup"
version = "1.2.0"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"},
+ {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "executing"
+version = "2.0.1"
description = "Get the currently executing AST node of a frame, and other information"
optional = false
-python-versions = "*"
+python-versions = ">=3.5"
files = [
- {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"},
- {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"},
+ {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"},
+ {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"},
]
[package.extras]
-tests = ["asttokens", "littleutils", "pytest", "rich"]
+tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"]
[[package]]
name = "fastapi"
-version = "0.95.1"
+version = "0.105.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "fastapi-0.95.1-py3-none-any.whl", hash = "sha256:a870d443e5405982e1667dfe372663abf10754f246866056336d7f01c21dab07"},
- {file = "fastapi-0.95.1.tar.gz", hash = "sha256:9569f0a381f8a457ec479d90fa01005cfddaae07546eb1f3fa035bc4797ae7d5"},
+ {file = "fastapi-0.105.0-py3-none-any.whl", hash = "sha256:f19ebf6fdc82a3281d10f2cb4774bdfa90238e3b40af3525a0c09fd08ad1c480"},
+ {file = "fastapi-0.105.0.tar.gz", hash = "sha256:4d12838819aa52af244580675825e750ad67c9df4614f557a769606af902cf22"},
]
[package.dependencies]
-pydantic = ">=1.6.2,<1.7 || >1.7,<1.7.1 || >1.7.1,<1.7.2 || >1.7.2,<1.7.3 || >1.7.3,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0"
-starlette = ">=0.26.1,<0.27.0"
+anyio = ">=3.7.1,<4.0.0"
+pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0"
+starlette = ">=0.27.0,<0.28.0"
+typing-extensions = ">=4.8.0"
[package.extras]
-all = ["email-validator (>=1.1.1)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
-dev = ["pre-commit (>=2.17.0,<3.0.0)", "ruff (==0.0.138)", "uvicorn[standard] (>=0.12.0,<0.21.0)"]
-doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-markdownextradata-plugin (>=0.1.7,<0.3.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pyyaml (>=5.3.1,<7.0.0)", "typer-cli (>=0.0.13,<0.0.14)", "typer[all] (>=0.6.1,<0.8.0)"]
-test = ["anyio[trio] (>=3.2.1,<4.0.0)", "black (==23.1.0)", "coverage[toml] (>=6.5.0,<8.0)", "databases[sqlite] (>=0.3.2,<0.7.0)", "email-validator (>=1.1.1,<2.0.0)", "flask (>=1.1.2,<3.0.0)", "httpx (>=0.23.0,<0.24.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.982)", "orjson (>=3.2.1,<4.0.0)", "passlib[bcrypt] (>=1.7.2,<2.0.0)", "peewee (>=3.13.3,<4.0.0)", "pytest (>=7.1.3,<8.0.0)", "python-jose[cryptography] (>=3.3.0,<4.0.0)", "python-multipart (>=0.0.5,<0.0.7)", "pyyaml (>=5.3.1,<7.0.0)", "ruff (==0.0.138)", "sqlalchemy (>=1.3.18,<1.4.43)", "types-orjson (==3.6.2)", "types-ujson (==5.7.0.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0,<6.0.0)"]
+all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
[[package]]
name = "idna"
-version = "3.4"
+version = "3.6"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
files = [
- {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
- {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+ {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"},
+ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"},
]
[[package]]
name = "importlib-metadata"
-version = "6.7.0"
+version = "6.11.0"
description = "Read metadata from Python packages"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"},
- {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"},
+ {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"},
+ {file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"},
]
[package.dependencies]
zipp = ">=0.5"
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
-testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
[[package]]
name = "iniconfig"
@@ -344,61 +363,59 @@ tomli = {version = "*", markers = "python_version > \"3.6\" and python_version <
[[package]]
name = "ipython"
-version = "8.13.2"
+version = "8.18.1"
description = "IPython: Productive Interactive Computing"
optional = false
python-versions = ">=3.9"
files = [
- {file = "ipython-8.13.2-py3-none-any.whl", hash = "sha256:ffca270240fbd21b06b2974e14a86494d6d29290184e788275f55e0b55914926"},
- {file = "ipython-8.13.2.tar.gz", hash = "sha256:7dff3fad32b97f6488e02f87b970f309d082f758d7b7fc252e3b19ee0e432dbb"},
+ {file = "ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397"},
+ {file = "ipython-8.18.1.tar.gz", hash = "sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27"},
]
[package.dependencies]
-appnope = {version = "*", markers = "sys_platform == \"darwin\""}
-backcall = "*"
colorama = {version = "*", markers = "sys_platform == \"win32\""}
decorator = "*"
+exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
jedi = ">=0.16"
matplotlib-inline = "*"
pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
-pickleshare = "*"
-prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0"
+prompt-toolkit = ">=3.0.41,<3.1.0"
pygments = ">=2.4.0"
stack-data = "*"
traitlets = ">=5"
typing-extensions = {version = "*", markers = "python_version < \"3.10\""}
[package.extras]
-all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
+all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
black = ["black"]
-doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
+doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
kernel = ["ipykernel"]
nbconvert = ["nbconvert"]
nbformat = ["nbformat"]
notebook = ["ipywidgets", "notebook"]
parallel = ["ipyparallel"]
qtconsole = ["qtconsole"]
-test = ["pytest (<7.1)", "pytest-asyncio", "testpath"]
-test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"]
+test = ["pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath"]
+test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath", "trio"]
[[package]]
name = "jedi"
-version = "0.18.2"
+version = "0.19.1"
description = "An autocompletion tool for Python that can be used for text editors."
optional = false
python-versions = ">=3.6"
files = [
- {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"},
- {file = "jedi-0.18.2.tar.gz", hash = "sha256:bae794c30d07f6d910d32a7048af09b5a39ed740918da923c6b780790ebac612"},
+ {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"},
+ {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"},
]
[package.dependencies]
-parso = ">=0.8.0,<0.9.0"
+parso = ">=0.8.3,<0.9.0"
[package.extras]
docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
-qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
-testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
+qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
+testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
[[package]]
name = "matplotlib-inline"
@@ -427,13 +444,13 @@ files = [
[[package]]
name = "packaging"
-version = "23.1"
+version = "23.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.7"
files = [
- {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
- {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
+ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
+ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
]
[[package]]
@@ -453,38 +470,27 @@ testing = ["docopt", "pytest (<6.0.0)"]
[[package]]
name = "pexpect"
-version = "4.8.0"
+version = "4.9.0"
description = "Pexpect allows easy control of interactive console applications."
optional = false
python-versions = "*"
files = [
- {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"},
- {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"},
+ {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"},
+ {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"},
]
[package.dependencies]
ptyprocess = ">=0.5"
-[[package]]
-name = "pickleshare"
-version = "0.7.5"
-description = "Tiny 'shelve'-like database with concurrency support"
-optional = false
-python-versions = "*"
-files = [
- {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"},
- {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"},
-]
-
[[package]]
name = "pluggy"
-version = "1.0.0"
+version = "1.3.0"
description = "plugin and hook calling mechanisms for python"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
files = [
- {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
- {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
+ {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
+ {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
]
[package.extras]
@@ -493,13 +499,13 @@ testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "posthog"
-version = "3.0.2"
+version = "3.1.0"
description = "Integrate PostHog into any python application."
optional = false
python-versions = "*"
files = [
- {file = "posthog-3.0.2-py2.py3-none-any.whl", hash = "sha256:a8c0af6f2401fbe50f90e68c4143d0824b54e872de036b1c2f23b5abb39d88ce"},
- {file = "posthog-3.0.2.tar.gz", hash = "sha256:701fba6e446a4de687c6e861b587e7b7741955ad624bf34fe013c06a0fec6fb3"},
+ {file = "posthog-3.1.0-py2.py3-none-any.whl", hash = "sha256:acd033530bdfc275dce5587f205f62378991ecb9b7cd5479e79c7f4ac575d319"},
+ {file = "posthog-3.1.0.tar.gz", hash = "sha256:db17a2c511e18757aec12b6632ddcc1fa318743dad88a4666010467a3d9468da"},
]
[package.dependencies]
@@ -512,17 +518,17 @@ six = ">=1.5"
[package.extras]
dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"]
sentry = ["django", "sentry-sdk"]
-test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest"]
+test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest", "pytest-timeout"]
[[package]]
name = "prompt-toolkit"
-version = "3.0.38"
+version = "3.0.43"
description = "Library for building powerful interactive command lines in Python"
optional = false
python-versions = ">=3.7.0"
files = [
- {file = "prompt_toolkit-3.0.38-py3-none-any.whl", hash = "sha256:45ea77a2f7c60418850331366c81cf6b5b9cf4c7fd34616f733c5427e6abbb1f"},
- {file = "prompt_toolkit-3.0.38.tar.gz", hash = "sha256:23ac5d50538a9a38c8bde05fecb47d0b403ecd0662857a86f886f798563d5b9b"},
+ {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"},
+ {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"},
]
[package.dependencies]
@@ -566,69 +572,154 @@ files = [
[[package]]
name = "pydantic"
-version = "1.10.7"
-description = "Data validation and settings management using python type hints"
+version = "2.5.3"
+description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pydantic-1.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e79e999e539872e903767c417c897e729e015872040e56b96e67968c3b918b2d"},
- {file = "pydantic-1.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01aea3a42c13f2602b7ecbbea484a98169fb568ebd9e247593ea05f01b884b2e"},
- {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:516f1ed9bc2406a0467dd777afc636c7091d71f214d5e413d64fef45174cfc7a"},
- {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae150a63564929c675d7f2303008d88426a0add46efd76c3fc797cd71cb1b46f"},
- {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ecbbc51391248116c0a055899e6c3e7ffbb11fb5e2a4cd6f2d0b93272118a209"},
- {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f4a2b50e2b03d5776e7f21af73e2070e1b5c0d0df255a827e7c632962f8315af"},
- {file = "pydantic-1.10.7-cp310-cp310-win_amd64.whl", hash = "sha256:a7cd2251439988b413cb0a985c4ed82b6c6aac382dbaff53ae03c4b23a70e80a"},
- {file = "pydantic-1.10.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:68792151e174a4aa9e9fc1b4e653e65a354a2fa0fed169f7b3d09902ad2cb6f1"},
- {file = "pydantic-1.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe2507b8ef209da71b6fb5f4e597b50c5a34b78d7e857c4f8f3115effaef5fe"},
- {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a86d8c8db68086f1e30a530f7d5f83eb0685e632e411dbbcf2d5c0150e8dcd"},
- {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75ae19d2a3dbb146b6f324031c24f8a3f52ff5d6a9f22f0683694b3afcb16fb"},
- {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:464855a7ff7f2cc2cf537ecc421291b9132aa9c79aef44e917ad711b4a93163b"},
- {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:193924c563fae6ddcb71d3f06fa153866423ac1b793a47936656e806b64e24ca"},
- {file = "pydantic-1.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:b4a849d10f211389502059c33332e91327bc154acc1845f375a99eca3afa802d"},
- {file = "pydantic-1.10.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cc1dde4e50a5fc1336ee0581c1612215bc64ed6d28d2c7c6f25d2fe3e7c3e918"},
- {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0cfe895a504c060e5d36b287ee696e2fdad02d89e0d895f83037245218a87fe"},
- {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:670bb4683ad1e48b0ecb06f0cfe2178dcf74ff27921cdf1606e527d2617a81ee"},
- {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:950ce33857841f9a337ce07ddf46bc84e1c4946d2a3bba18f8280297157a3fd1"},
- {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c15582f9055fbc1bfe50266a19771bbbef33dd28c45e78afbe1996fd70966c2a"},
- {file = "pydantic-1.10.7-cp37-cp37m-win_amd64.whl", hash = "sha256:82dffb306dd20bd5268fd6379bc4bfe75242a9c2b79fec58e1041fbbdb1f7914"},
- {file = "pydantic-1.10.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c7f51861d73e8b9ddcb9916ae7ac39fb52761d9ea0df41128e81e2ba42886cd"},
- {file = "pydantic-1.10.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6434b49c0b03a51021ade5c4daa7d70c98f7a79e95b551201fff682fc1661245"},
- {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64d34ab766fa056df49013bb6e79921a0265204c071984e75a09cbceacbbdd5d"},
- {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:701daea9ffe9d26f97b52f1d157e0d4121644f0fcf80b443248434958fd03dc3"},
- {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf135c46099ff3f919d2150a948ce94b9ce545598ef2c6c7bf55dca98a304b52"},
- {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0f85904f73161817b80781cc150f8b906d521fa11e3cdabae19a581c3606209"},
- {file = "pydantic-1.10.7-cp38-cp38-win_amd64.whl", hash = "sha256:9f6f0fd68d73257ad6685419478c5aece46432f4bdd8d32c7345f1986496171e"},
- {file = "pydantic-1.10.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c230c0d8a322276d6e7b88c3f7ce885f9ed16e0910354510e0bae84d54991143"},
- {file = "pydantic-1.10.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:976cae77ba6a49d80f461fd8bba183ff7ba79f44aa5cfa82f1346b5626542f8e"},
- {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d45fc99d64af9aaf7e308054a0067fdcd87ffe974f2442312372dfa66e1001d"},
- {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2a5ebb48958754d386195fe9e9c5106f11275867051bf017a8059410e9abf1f"},
- {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:abfb7d4a7cd5cc4e1d1887c43503a7c5dd608eadf8bc615413fc498d3e4645cd"},
- {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:80b1fab4deb08a8292d15e43a6edccdffa5377a36a4597bb545b93e79c5ff0a5"},
- {file = "pydantic-1.10.7-cp39-cp39-win_amd64.whl", hash = "sha256:d71e69699498b020ea198468e2480a2f1e7433e32a3a99760058c6520e2bea7e"},
- {file = "pydantic-1.10.7-py3-none-any.whl", hash = "sha256:0cd181f1d0b1d00e2b705f1bf1ac7799a2d938cce3376b8007df62b29be3c2c6"},
- {file = "pydantic-1.10.7.tar.gz", hash = "sha256:cfc83c0678b6ba51b0532bea66860617c4cd4251ecf76e9846fa5a9f3454e97e"},
+ {file = "pydantic-2.5.3-py3-none-any.whl", hash = "sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4"},
+ {file = "pydantic-2.5.3.tar.gz", hash = "sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a"},
]
[package.dependencies]
-typing-extensions = ">=4.2.0"
+annotated-types = ">=0.4.0"
+pydantic-core = "2.14.6"
+typing-extensions = ">=4.6.1"
[package.extras]
-dotenv = ["python-dotenv (>=0.10.4)"]
-email = ["email-validator (>=1.0.3)"]
+email = ["email-validator (>=2.0.0)"]
+
+[[package]]
+name = "pydantic-core"
+version = "2.14.6"
+description = ""
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pydantic_core-2.14.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66"},
+ {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590"},
+ {file = "pydantic_core-2.14.6-cp310-none-win32.whl", hash = "sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7"},
+ {file = "pydantic_core-2.14.6-cp310-none-win_amd64.whl", hash = "sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622"},
+ {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2"},
+ {file = "pydantic_core-2.14.6-cp311-none-win32.whl", hash = "sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2"},
+ {file = "pydantic_core-2.14.6-cp311-none-win_amd64.whl", hash = "sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23"},
+ {file = "pydantic_core-2.14.6-cp311-none-win_arm64.whl", hash = "sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91"},
+ {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c"},
+ {file = "pydantic_core-2.14.6-cp312-none-win32.whl", hash = "sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786"},
+ {file = "pydantic_core-2.14.6-cp312-none-win_amd64.whl", hash = "sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40"},
+ {file = "pydantic_core-2.14.6-cp312-none-win_arm64.whl", hash = "sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8"},
+ {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e"},
+ {file = "pydantic_core-2.14.6-cp37-none-win32.whl", hash = "sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6"},
+ {file = "pydantic_core-2.14.6-cp37-none-win_amd64.whl", hash = "sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1"},
+ {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60"},
+ {file = "pydantic_core-2.14.6-cp38-none-win32.whl", hash = "sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe"},
+ {file = "pydantic_core-2.14.6-cp38-none-win_amd64.whl", hash = "sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9"},
+ {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411"},
+ {file = "pydantic_core-2.14.6-cp39-none-win32.whl", hash = "sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975"},
+ {file = "pydantic_core-2.14.6-cp39-none-win_amd64.whl", hash = "sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9"},
+ {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95"},
+ {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277"},
+ {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670"},
+ {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e"},
+ {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d"},
+ {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d"},
+ {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94"},
+ {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66"},
+ {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2"},
+ {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24"},
+ {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd"},
+ {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8"},
+ {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f"},
+ {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a"},
+ {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda"},
+ {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145"},
+ {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1"},
+ {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e"},
+ {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052"},
+ {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba"},
+ {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4"},
+ {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d"},
+ {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2"},
+ {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03"},
+ {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f"},
+ {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a"},
+ {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf"},
+ {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556"},
+ {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341"},
+ {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e"},
+ {file = "pydantic_core-2.14.6.tar.gz", hash = "sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pygments"
-version = "2.15.1"
+version = "2.17.2"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.7"
files = [
- {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"},
- {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"},
+ {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"},
+ {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"},
]
[package.extras]
plugins = ["importlib-metadata"]
+windows-terminal = ["colorama (>=0.4.6)"]
[[package]]
name = "pytest"
@@ -738,13 +829,13 @@ docs = ["Sphinx (>=3.3,<4.0)", "sphinx-autobuild (>=2020.9.1,<2021.0.0)", "sphin
[[package]]
name = "requests"
-version = "2.30.0"
+version = "2.31.0"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.7"
files = [
- {file = "requests-2.30.0-py3-none-any.whl", hash = "sha256:10e94cc4f3121ee6da529d358cdaeaff2f1c409cd377dbc72b825852f2f7e294"},
- {file = "requests-2.30.0.tar.gz", hash = "sha256:239d7d4458afcb28a692cdd298d87542235f4ca8d36d03a15bfc128a6559a2f4"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
]
[package.dependencies]
@@ -781,13 +872,13 @@ files = [
[[package]]
name = "stack-data"
-version = "0.6.2"
+version = "0.6.3"
description = "Extract data from python stack frames and tracebacks for informative displays"
optional = false
python-versions = "*"
files = [
- {file = "stack_data-0.6.2-py3-none-any.whl", hash = "sha256:cbb2a53eb64e5785878201a97ed7c7b94883f48b87bfb0bbe8b623c74679e4a8"},
- {file = "stack_data-0.6.2.tar.gz", hash = "sha256:32d2dd0376772d01b6cb9fc996f3c8b57a357089dec328ed4b6553d037eaf815"},
+ {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"},
+ {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"},
]
[package.dependencies]
@@ -800,13 +891,13 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
[[package]]
name = "starlette"
-version = "0.26.1"
+version = "0.27.0"
description = "The little ASGI library that shines."
optional = false
python-versions = ">=3.7"
files = [
- {file = "starlette-0.26.1-py3-none-any.whl", hash = "sha256:e87fce5d7cbdde34b76f0ac69013fd9d190d581d80681493016666e6f96c6d5e"},
- {file = "starlette-0.26.1.tar.gz", hash = "sha256:41da799057ea8620e4667a3e69a5b1923ebd32b1819c8fa75634bbe8d8bea9bd"},
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
]
[package.dependencies]
@@ -840,90 +931,89 @@ files = [
[[package]]
name = "traitlets"
-version = "5.9.0"
+version = "5.14.0"
description = "Traitlets Python configuration system"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"},
- {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"},
+ {file = "traitlets-5.14.0-py3-none-any.whl", hash = "sha256:f14949d23829023013c47df20b4a76ccd1a85effb786dc060f34de7948361b33"},
+ {file = "traitlets-5.14.0.tar.gz", hash = "sha256:fcdaa8ac49c04dfa0ed3ee3384ef6dfdb5d6f3741502be247279407679296772"},
]
[package.extras]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
-test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"]
+test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"]
[[package]]
name = "typing-extensions"
-version = "4.5.0"
-description = "Backported and Experimental Type Hints for Python 3.7+"
+version = "4.9.0"
+description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"},
- {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"},
+ {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"},
+ {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"},
]
[[package]]
name = "urllib3"
-version = "2.0.2"
+version = "2.1.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "urllib3-2.0.2-py3-none-any.whl", hash = "sha256:d055c2f9d38dc53c808f6fdc8eab7360b6fdbbde02340ed25cfbcd817c62469e"},
- {file = "urllib3-2.0.2.tar.gz", hash = "sha256:61717a1095d7e155cdb737ac7bb2f4324a858a1e2e6466f6d03ff630ca68d3cc"},
+ {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"},
+ {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
-secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "wcwidth"
-version = "0.2.6"
+version = "0.2.12"
description = "Measures the displayed width of unicode strings in a terminal"
optional = false
python-versions = "*"
files = [
- {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"},
- {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"},
+ {file = "wcwidth-0.2.12-py2.py3-none-any.whl", hash = "sha256:f26ec43d96c8cbfed76a5075dac87680124fa84e0855195a6184da9c187f133c"},
+ {file = "wcwidth-0.2.12.tar.gz", hash = "sha256:f01c104efdf57971bcb756f054dd58ddec5204dd15fa31d6503ea57947d97c02"},
]
[[package]]
name = "websocket-client"
-version = "1.5.1"
+version = "1.7.0"
description = "WebSocket client for Python with low level API options"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "websocket-client-1.5.1.tar.gz", hash = "sha256:3f09e6d8230892547132177f575a4e3e73cfdf06526e20cc02aa1c3b47184d40"},
- {file = "websocket_client-1.5.1-py3-none-any.whl", hash = "sha256:cdf5877568b7e83aa7cf2244ab56a3213de587bbe0ce9d8b9600fc77b455d89e"},
+ {file = "websocket-client-1.7.0.tar.gz", hash = "sha256:10e511ea3a8c744631d3bd77e61eb17ed09304c413ad42cf6ddfa4c7787e8fe6"},
+ {file = "websocket_client-1.7.0-py3-none-any.whl", hash = "sha256:f4c3d22fec12a2461427a29957ff07d35098ee2d976d3ba244e688b8b4057588"},
]
[package.extras]
-docs = ["Sphinx (>=3.4)", "sphinx-rtd-theme (>=0.5)"]
+docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"]
optional = ["python-socks", "wsaccel"]
test = ["websockets"]
[[package]]
name = "zipp"
-version = "3.15.0"
+version = "3.17.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
- {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
+ {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"},
+ {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"},
]
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "1a41135d3e71717b16a1d1d3d1b8523274ae2f816ca9ffceea585a69bc6420dd"
+content-hash = "0abc9dc5b617ce798c5af8972d45920e340aee2f399061c8ebd10c8bd0e58bbd"
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index 7b5762ab0e..96d62f930f 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.6.5"
+version = "0.6.9"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
@@ -19,14 +19,15 @@ keywords = ["LLMOps", "LLM", "evaluation", "prompt engineering"]
python = "^3.9"
docker = "^6.1.1"
click = "^8.1.3"
-fastapi = "^0.95.1"
+fastapi = ">=0.95.1"
toml = "^0.10.2"
questionary = "^1.10.0"
-ipdb = "^0.13.13"
+ipdb = ">=0.13"
python-dotenv = "^1.0.0"
python-multipart = "^0.0.6"
importlib-metadata = "^6.7.0"
-posthog = "^3.0.2"
+posthog = "^3.1.0"
+pydantic = ">=2.0"
[tool.poetry.dev-dependencies]
pytest = "^6.2"
diff --git a/agenta-web/src/components/AppSelector/AppSelector.tsx b/agenta-web/src/components/AppSelector/AppSelector.tsx
index e32e2b1b69..cca4ebfe6d 100644
--- a/agenta-web/src/components/AppSelector/AppSelector.tsx
+++ b/agenta-web/src/components/AppSelector/AppSelector.tsx
@@ -246,7 +246,8 @@ const AppSelector: React.FC = () => {
}
const appNameExist = useMemo(
- () => apps.some((app: GenericObject) => app.app_name === newApp),
+ () =>
+ apps.some((app: GenericObject) => app.app_name.toLowerCase() === newApp.toLowerCase()),
[apps, newApp],
)
diff --git a/agenta-web/src/components/ChatInputs/ChatInputs.tsx b/agenta-web/src/components/ChatInputs/ChatInputs.tsx
index d1ccada975..879d902ea2 100644
--- a/agenta-web/src/components/ChatInputs/ChatInputs.tsx
+++ b/agenta-web/src/components/ChatInputs/ChatInputs.tsx
@@ -1,6 +1,7 @@
import {ChatMessage, ChatRole} from "@/lib/Types"
import {MinusOutlined, PlusOutlined} from "@ant-design/icons"
import {Button, Input, Select, Space, Tooltip} from "antd"
+import {cloneDeep} from "lodash"
import React, {useEffect, useRef, useState} from "react"
import {createUseStyles} from "react-jss"
import {useUpdateEffect} from "usehooks-ts"
@@ -63,7 +64,7 @@ const ChatInputs: React.FC = ({
}) => {
const classes = useStyles()
const [messages, setMessages] = useState(
- value || defaultValue || [getDefaultNewMessage()],
+ cloneDeep(value || defaultValue || [getDefaultNewMessage()]),
)
const onChangeRef = useRef(onChange)
@@ -77,6 +78,9 @@ const ChatInputs: React.FC = ({
const newMessages = [...messages]
newMessages[index].role = role
setMessages(newMessages)
+ if (onChangeRef.current) {
+ onChangeRef.current(cloneDeep(newMessages))
+ }
}
const handleInputChange = (index: number, event: React.ChangeEvent) => {
@@ -84,28 +88,40 @@ const ChatInputs: React.FC = ({
const newMessages = [...messages]
newMessages[index].content = value
setMessages(newMessages)
+ if (onChangeRef.current) {
+ onChangeRef.current(cloneDeep(newMessages))
+ }
}
const handleDelete = (index: number) => {
- setMessages((prev) => prev.filter((_, i) => i !== index))
+ const newMessages = messages.filter((_, i) => i !== index)
+ setMessages(newMessages)
+ if (onChangeRef.current) {
+ onChangeRef.current(cloneDeep(newMessages))
+ }
}
const handleAdd = () => {
- setMessages((prev) => prev.concat([getDefaultNewMessage()]))
+ const newMessages = messages.concat([getDefaultNewMessage()])
+ setMessages(newMessages)
+ if (onChangeRef.current) {
+ onChangeRef.current(cloneDeep(newMessages))
+ }
}
useEffect(() => {
onChangeRef.current = onChange
}, [onChange])
- useUpdateEffect(() => {
- if (onChangeRef.current) {
- onChangeRef.current(messages)
- }
- }, [messages])
+ // disabled for now (to be reverted if there are issues after this change)
+ // useUpdateEffect(() => {
+ // if (onChangeRef.current) {
+ // onChangeRef.current(cloneDeep(messages))
+ // }
+ // }, [messages])
useUpdateEffect(() => {
- if (Array.isArray(value)) setMessages(value)
+ if (Array.isArray(value)) setMessages(cloneDeep(value))
}, [JSON.stringify(value)])
return (
diff --git a/agenta-web/src/components/EvaluationTable/AICritiqueEvaluationTable.tsx b/agenta-web/src/components/EvaluationTable/AICritiqueEvaluationTable.tsx
new file mode 100644
index 0000000000..2dbc2aab3b
--- /dev/null
+++ b/agenta-web/src/components/EvaluationTable/AICritiqueEvaluationTable.tsx
@@ -0,0 +1,569 @@
+import {useState, useEffect} from "react"
+import type {ColumnType} from "antd/es/table"
+import {CaretRightOutlined, LineChartOutlined} from "@ant-design/icons"
+import {
+ Button,
+ Card,
+ Col,
+ Input,
+ Row,
+ Space,
+ Spin,
+ Statistic,
+ Table,
+ Tag,
+ Typography,
+ message,
+} from "antd"
+import {Evaluation} from "@/lib/Types"
+import {
+ updateEvaluationScenario,
+ callVariant,
+ fetchEvaluationResults,
+ updateEvaluation,
+ evaluateAICritiqueForEvalScenario,
+} from "@/lib/services/api"
+import {useVariants} from "@/lib/hooks/useVariant"
+import {useRouter} from "next/router"
+import {EvaluationFlow, EvaluationType} from "@/lib/enums"
+import {batchExecute, getApikeys} from "@/lib/helpers/utils"
+import {createUseStyles} from "react-jss"
+import {exportAICritiqueEvaluationData} from "@/lib/helpers/evaluate"
+import SecondaryButton from "../SecondaryButton/SecondaryButton"
+import {useAppTheme} from "../Layout/ThemeContextProvider"
+import {contentToChatMessageString, testsetRowToChatMessages} from "@/lib/helpers/testset"
+import ParamsForm from "../Playground/ParamsForm/ParamsForm"
+
+const {Title} = Typography
+
+interface AICritiqueEvaluationTableProps {
+ evaluation: Evaluation
+ columnsCount: number
+ evaluationScenarios: AICritiqueEvaluationTableRow[]
+}
+
+interface AICritiqueEvaluationTableRow {
+ id?: string
+ inputs: {
+ input_name: string
+ input_value: string
+ }[]
+ outputs: {
+ variant_id: string
+ variant_output: string
+ }[]
+ columnData0: string
+ correctAnswer: string
+ score: string
+ evaluationFlow: EvaluationFlow
+}
+
+type StyleProps = {
+ themeMode: "dark" | "light"
+}
+/**
+ *
+ * @param evaluation - Evaluation object
+ * @param evaluationScenarios - Evaluation rows
+ * @param columnsCount - Number of variants to compare face to face (per default 2)
+ * @returns
+ */
+
+const useStyles = createUseStyles({
+ appVariant: {
+ backgroundColor: "rgb(201 255 216)",
+ color: "rgb(0 0 0)",
+ padding: 4,
+ borderRadius: 5,
+ },
+ inputTestContainer: {
+ display: "flex",
+ justifyContent: "space-between",
+ },
+ inputTest: {
+ backgroundColor: "rgb(201 255 216)",
+ color: "rgb(0 0 0)",
+ padding: 4,
+ borderRadius: 5,
+ },
+ recordInput: {
+ marginBottom: 10,
+ },
+ tag: {
+ fontSize: "14px",
+ },
+ card: ({themeMode}: StyleProps) => ({
+ marginTop: 16,
+ width: "100%",
+ border: "1px solid #ccc",
+ marginRight: "24px",
+ marginBottom: 30,
+ background: themeMode === "light" ? "rgb(246 253 245)" : "#000000",
+ "& .ant-card-head": {
+ minHeight: 44,
+ padding: "0px 12px",
+ },
+ "& .ant-card-body": {
+ padding: "4px 16px",
+ border: "0px solid #ccc",
+ },
+ }),
+ cardTextarea: {
+ height: 120,
+ padding: "0px 0px",
+ },
+ row: {marginBottom: 20},
+ evaluationResult: ({themeMode}: StyleProps) => ({
+ padding: "30px 10px",
+ marginBottom: 20,
+ border: "1px solid #ccc",
+ background: themeMode === "light" ? "rgb(244 244 244)" : "#000000",
+ color: themeMode === "light" ? "#000" : "#fff",
+ borderRadius: 5,
+ }),
+ h3: {
+ marginTop: 0,
+ },
+ resultDataRow: {
+ maxWidth: "100%",
+ overflowX: "auto",
+ whiteSpace: "nowrap",
+ },
+ resultDataCol: {
+ display: "inline-block",
+ },
+ resultDataCard: {
+ width: 200,
+ margin: "0 4px",
+ },
+ stat: {
+ "& .ant-statistic-content-value": {
+ color: "#3f8600",
+ },
+ },
+ inputTestBtn: {
+ width: "100%",
+ display: "flex",
+ justifyContent: "flex-end",
+ "& button": {
+ marginLeft: 10,
+ },
+ marginTop: "0.75rem",
+ },
+})
+
+const AICritiqueEvaluationTable: React.FC = ({
+ evaluation,
+ evaluationScenarios,
+ columnsCount,
+}) => {
+ const {appTheme} = useAppTheme()
+ const classes = useStyles({themeMode: appTheme} as StyleProps)
+ const router = useRouter()
+ const appId = router.query.app_id as string
+
+ const variants = evaluation.variants
+
+ const variantData = useVariants(appId, variants)
+
+ const [rows, setRows] = useState([])
+ const [evaluationPromptTemplate, setEvaluationPromptTemplate] = useState(
+ evaluation.evaluationTypeSettings.evaluationPromptTemplate ||
+ `We have an LLM App that we want to evaluate its outputs.
+Based on the prompt and the parameters provided below evaluate the output based on the evaluation strategy below:
+
+Evaluation strategy: 0 to 10 0 is very bad and 10 is very good.
+
+Prompt: {llm_app_prompt_template}
+Inputs: {inputs}
+Correct Answer:{correct_answer}
+Evaluate this: {app_variant_output}
+
+Answer ONLY with one of the given grading or evaluation options.
+`,
+ )
+
+ const [shouldFetchResults, setShouldFetchResults] = useState(false)
+ const [evaluationStatus, setEvaluationStatus] = useState(evaluation.status)
+ const [evaluationResults, setEvaluationResults] = useState(null)
+
+ useEffect(() => {
+ if (
+ variantData &&
+ variantData[0] &&
+ variantData[0].inputParams &&
+ variantData[0].inputParams.length > 0
+ ) {
+ const llmAppInputs = variantData[0].inputParams
+ .map((param) => `${param.name}: {${param.name}}`)
+ .join(", ")
+ setEvaluationPromptTemplate(evaluationPromptTemplate.replace("{inputs}", llmAppInputs))
+ }
+ }, [variantData])
+
+ useEffect(() => {
+ if (evaluationScenarios) {
+ setRows(evaluationScenarios)
+ }
+ }, [evaluationScenarios])
+
+ useEffect(() => {
+ if (evaluationStatus === EvaluationFlow.EVALUATION_FINISHED && shouldFetchResults) {
+ fetchEvaluationResults(evaluation.id)
+ .then((data) => setEvaluationResults(data))
+ .catch((err) => console.error("Failed to fetch results:", err))
+ .then(() => {
+ updateEvaluation(evaluation.id, {
+ status: EvaluationFlow.EVALUATION_FINISHED,
+ evaluation_type_settings: {
+ evaluation_prompt_template: evaluationPromptTemplate,
+ },
+ })
+ })
+ .catch((err) => console.error("Failed to fetch results:", err))
+ }
+ }, [evaluationStatus, evaluation.id])
+
+ const handleInputChange = (value: any, name: string, rowIndex: any) => {
+ const newRows = [...rows]
+ const ip = newRows[rowIndex].inputs.find((ip) => ip.input_name === name)
+ if (ip) ip.input_value = value
+ setRows(newRows)
+ }
+
+ const runAllEvaluations = async () => {
+ try {
+ setEvaluationStatus(EvaluationFlow.EVALUATION_STARTED)
+ await batchExecute(rows.map((_, rowIndex) => () => runEvaluation(rowIndex)))
+ setEvaluationStatus(EvaluationFlow.EVALUATION_FINISHED)
+ console.log("All evaluations finished.")
+ } catch (err) {
+ console.error("An error occurred:", err)
+ setEvaluationStatus(EvaluationFlow.EVALUATION_FAILED)
+ }
+ }
+
+ const runEvaluation = async (rowIndex: number) => {
+ try {
+ setEvaluationStatus(EvaluationFlow.EVALUATION_STARTED)
+
+ const inputParamsDict = rows[rowIndex].inputs.reduce(
+ (acc: {[key: string]: any}, item) => {
+ acc[item.input_name] = item.input_value
+ return acc
+ },
+ {},
+ )
+
+ const columnsDataNames = ["columnData0"]
+ let idx = 0
+
+ for (const columnName of columnsDataNames) {
+ setRowValue(rowIndex, "evaluationFlow", EvaluationFlow.COMPARISON_RUN_STARTED)
+
+ let result = await callVariant(
+ inputParamsDict,
+ variantData[idx].inputParams!,
+ variantData[idx].optParams!,
+ appId || "",
+ variants[idx].baseId || "",
+ variantData[idx].isChatVariant
+ ? testsetRowToChatMessages(evaluation.testset.csvdata[rowIndex], false)
+ : [],
+ )
+
+ if (variantData[idx].isChatVariant) {
+ result = contentToChatMessageString(result)
+ }
+
+ setRowValue(rowIndex, columnName as any, result)
+ await evaluate(rowIndex)
+ setShouldFetchResults(true)
+
+ if (rowIndex === rows.length - 1) {
+ message.success("Evaluation Results Saved")
+ }
+
+ idx++
+ }
+
+ setEvaluationStatus(EvaluationFlow.EVALUATION_FINISHED)
+ } catch (error) {
+ console.error("Error during evaluation:", error)
+ setEvaluationStatus(EvaluationFlow.EVALUATION_FAILED)
+ message.error("Failed to run evaluation")
+ }
+ }
+
+ const evaluate = async (rowNumber: number) => {
+ const evaluation_scenario_id = rows[rowNumber].id
+ const outputVariantX = rows[rowNumber].columnData0
+
+ if (evaluation_scenario_id) {
+ const data = {
+ outputs: [{variant_id: variants[0].variantId, variant_output: outputVariantX}],
+ }
+
+ const aiCritiqueScoreResponse = await evaluateAICritiqueForEvalScenario({
+ correct_answer: rows[rowNumber].correctAnswer,
+ llm_app_prompt_template: evaluation.llmAppPromptTemplate,
+ inputs: rows[rowNumber].inputs,
+ outputs: data.outputs,
+ evaluation_prompt_template: evaluationPromptTemplate,
+ open_ai_key: getApikeys(),
+ })
+
+ try {
+ const responseData = await updateEvaluationScenario(
+ evaluation.id,
+ evaluation_scenario_id,
+ {...data, score: aiCritiqueScoreResponse.data},
+ evaluation.evaluationType as EvaluationType,
+ )
+ setRowValue(rowNumber, "evaluationFlow", EvaluationFlow.EVALUATION_FINISHED)
+ setRowValue(rowNumber, "score", aiCritiqueScoreResponse.data)
+ } catch (err) {
+ console.error(err)
+ }
+ }
+ }
+
+ const setRowValue = (
+ rowIndex: number,
+ columnKey: keyof AICritiqueEvaluationTableRow,
+ value: any,
+ ) => {
+ const newRows = [...rows]
+ newRows[rowIndex][columnKey] = value as never
+ setRows(newRows)
+ }
+
+ const dynamicColumns: ColumnType[] = Array.from(
+ {length: columnsCount},
+ (_, i) => {
+ const columnKey = `columnData${i}`
+
+ return {
+ title: (
+
+ App Variant:
+
+ {variants ? variants[i].variantName : ""}
+
+
+ ),
+ dataIndex: columnKey,
+ key: columnKey,
+ width: "30%",
+ render: (text: any, record: AICritiqueEvaluationTableRow, rowIndex: number) => {
+ if (
+ record.evaluationFlow === EvaluationFlow.COMPARISON_RUN_STARTED &&
+ evaluationStatus === EvaluationFlow.EVALUATION_STARTED
+ ) {
+ return (
+
+
+
+ )
+ }
+ if (
+ record.evaluationFlow === EvaluationFlow.COMPARISON_RUN_STARTED &&
+ evaluationStatus === EvaluationFlow.EVALUATION_FAILED
+ ) {
+ return
+ }
+ if (record.outputs && record.outputs.length > 0) {
+ const outputValue = record.outputs.find(
+ (output: any) => output.variant_id === variants[i].variantId,
+ )?.variant_output
+ return {outputValue}
+ }
+ return text
+ },
+ }
+ },
+ )
+
+ const columns = [
+ {
+ key: "1",
+ width: "30%",
+ title: (
+
+
+ Inputs (Test set:
+ {evaluation.testset.name}
+ )
+
+
+ ),
+ dataIndex: "inputs",
+ render: (text: any, record: AICritiqueEvaluationTableRow, rowIndex: number) => (
+
+ {evaluation.testset.testsetChatColumn ? (
+ evaluation.testset.csvdata[rowIndex][
+ evaluation.testset.testsetChatColumn
+ ] || " - "
+ ) : (
+
+ handleInputChange(value, name, rowIndex)
+ }
+ inputParams={
+ variantData[0].inputParams?.map((item) => ({
+ ...item,
+ value: record.inputs.find((ip) => ip.input_name === item.name)
+ ?.input_value,
+ })) || []
+ }
+ />
+ )}
+
+
+ runEvaluation(rowIndex)}
+ icon={ }
+ >
+ Run
+
+
+
+ ),
+ },
+ ...dynamicColumns,
+ {
+ title: "Correct Answer",
+ dataIndex: "correctAnswer",
+ key: "correctAnswer",
+ width: "30%",
+
+ render: (text: any, record: any, rowIndex: number) => {record.correctAnswer}
,
+ },
+ {
+ title: "Evaluation",
+ dataIndex: "evaluation",
+ key: "score",
+ width: 200,
+ align: "center" as "left" | "right" | "center",
+ render: (score: string, record: any) => {
+ if (
+ record.evaluationFlow === EvaluationFlow.COMPARISON_RUN_STARTED &&
+ evaluationStatus === EvaluationFlow.EVALUATION_STARTED
+ ) {
+ return
+ }
+ if (
+ record.evaluationFlow === EvaluationFlow.COMPARISON_RUN_STARTED &&
+ evaluationStatus === EvaluationFlow.EVALUATION_FAILED
+ ) {
+ return
+ }
+ let tagColor = ""
+
+ return (
+
+
+
+ {score !== "" && (
+
+ {record.score}
+
+ )}
+
+
+
+ )
+ },
+ },
+ ]
+
+ const onChangeEvaluationPromptTemplate = (e: any) => {
+ setEvaluationPromptTemplate(e.target.value)
+ }
+
+ return (
+
+
AI Critique Evaluation
+
+
+
+
+
+
+
+
+
+ }
+ size="large"
+ >
+ Run Evaluation
+
+ exportAICritiqueEvaluationData(evaluation, rows)}
+ disabled={evaluationStatus !== EvaluationFlow.EVALUATION_FINISHED}
+ >
+ Export results
+
+
+
+
+
+
+
+ {evaluationStatus === EvaluationFlow.EVALUATION_FAILED && (
+ Failed to run evaluation
+ )}
+
+ {evaluationStatus === EvaluationFlow.EVALUATION_INITIALIZED && (
+ Run evaluation to see results!
+ )}
+
+ {evaluationStatus === EvaluationFlow.EVALUATION_STARTED && }
+
+ {evaluationStatus === EvaluationFlow.EVALUATION_FINISHED &&
+ evaluationResults &&
+ evaluationResults.results_data && (
+
+
Results Data:
+
+ {Object.entries(evaluationResults.results_data).map(
+ ([key, value], index) => (
+
+
+
+
+
+ ),
+ )}
+
+
+ )}
+
+
+
+
+ )
+}
+
+export default AICritiqueEvaluationTable
diff --git a/agenta-web/src/components/Playground/Playground.tsx b/agenta-web/src/components/Playground/Playground.tsx
index 539f6cd0c2..4a8bcc3bc2 100644
--- a/agenta-web/src/components/Playground/Playground.tsx
+++ b/agenta-web/src/components/Playground/Playground.tsx
@@ -14,6 +14,7 @@ import {DndContext, PointerSensor, useSensor} from "@dnd-kit/core"
import {arrayMove, SortableContext, horizontalListSortingStrategy} from "@dnd-kit/sortable"
import DraggableTabNode from "../DraggableTabNode/DraggableTabNode"
import {useLocalStorage} from "usehooks-ts"
+import TestContextProvider from "./TestContextProvider"
const Playground: React.FC = () => {
const router = useRouter()
@@ -258,8 +259,7 @@ const Playground: React.FC = () => {
return (
{contextHolder}
-
-
+
{
)}
/>
)}
-
+
>
+ isRunning: boolean[]
+ setIsRunning: DispatchWithCallback>
+}>({testList: [{}], setTestList: () => {}, isRunning: [], setIsRunning: () => {}})
+
+const TestContextProvider: React.FC = (props) => {
+ const [testList, setTestList] = useState([{_id: randString(6)}])
+ const [isRunning, setIsRunning] = useStateCallback([])
+
+ return (
+
+ {props.children}
+
+ )
+}
+
+export default TestContextProvider
diff --git a/agenta-web/src/components/Playground/ViewNavigation.tsx b/agenta-web/src/components/Playground/ViewNavigation.tsx
index a3438b84e0..bae3dddd6d 100644
--- a/agenta-web/src/components/Playground/ViewNavigation.tsx
+++ b/agenta-web/src/components/Playground/ViewNavigation.tsx
@@ -248,6 +248,7 @@ const ViewNavigation: React.FC = ({
optParams={optParams}
variant={variant}
isChatVariant={!!isChatVariant}
+ compareMode={compareMode}
/>
diff --git a/agenta-web/src/components/Playground/Views/ParametersCards.tsx b/agenta-web/src/components/Playground/Views/ParametersCards.tsx
index e18a6a570d..aaffb16bb5 100644
--- a/agenta-web/src/components/Playground/Views/ParametersCards.tsx
+++ b/agenta-web/src/components/Playground/Views/ParametersCards.tsx
@@ -1,8 +1,8 @@
-import {Row, Card, Slider, Select, InputNumber, Col, Input, Button} from "antd"
import React from "react"
-import {Parameter, InputParameter} from "@/lib/Types"
-import {renameVariables} from "@/lib/helpers/utils"
import {createUseStyles} from "react-jss"
+import {renameVariables} from "@/lib/helpers/utils"
+import {Parameter, InputParameter} from "@/lib/Types"
+import {Row, Card, Slider, Select, InputNumber, Col, Input, Button, Switch} from "antd"
const useStyles = createUseStyles({
row1: {
@@ -63,7 +63,7 @@ const useStyles = createUseStyles({
interface ModelParametersProps {
optParams: Parameter[] | null
onChange: (param: Parameter, value: number | string) => void
- handleParamChange: (name: string, value: number | string) => void
+ handleParamChange: (name: string, value: number | string | boolean) => void
}
export const ModelParameters: React.FC = ({
@@ -72,6 +72,9 @@ export const ModelParameters: React.FC = ({
handleParamChange,
}) => {
const classes = useStyles()
+ const handleCheckboxChange = (paramName: string, checked: boolean) => {
+ handleParamChange(paramName, checked)
+ }
return (
<>
{optParams?.some((param) => !param.input && param.type === "number") && (
@@ -80,10 +83,11 @@ export const ModelParameters: React.FC = ({
{optParams
?.filter(
(param) =>
- !param.input &&
- (param.type === "number" ||
- param.type === "integer" ||
- param.type === "array"),
+ (!param.input &&
+ (param.type === "number" ||
+ param.type === "integer" ||
+ param.type === "array")) ||
+ param.type === "boolean",
)
.map((param, index) => (
@@ -136,6 +140,14 @@ export const ModelParameters: React.FC = ({
))}
)}
+ {param.type === "boolean" && (
+
+ handleCheckboxChange(param.name, checked)
+ }
+ />
+ )}
{param.type === "number" && (
diff --git a/agenta-web/src/components/Playground/Views/TestView.tsx b/agenta-web/src/components/Playground/Views/TestView.tsx
index cd0fe16951..aa9b2fbfee 100644
--- a/agenta-web/src/components/Playground/Views/TestView.tsx
+++ b/agenta-web/src/components/Playground/Views/TestView.tsx
@@ -1,4 +1,4 @@
-import React, {useEffect, useState} from "react"
+import React, {useContext, useEffect, useState} from "react"
import {Button, Input, Card, Row, Col, Space, Form} from "antd"
import {CaretRightOutlined, PlusOutlined} from "@ant-design/icons"
import {callVariant} from "@/lib/services/api"
@@ -15,6 +15,8 @@ import {getDefaultNewMessage} from "@/components/ChatInputs/ChatInputs"
import {v4 as uuidv4} from "uuid"
import {testsetRowToChatMessages} from "@/lib/helpers/testset"
import ParamsForm from "../ParamsForm/ParamsForm"
+import {TestContext} from "../TestContextProvider"
+import {isEqual} from "lodash"
const {TextArea} = Input
const LOADING_TEXT = "Loading..."
@@ -88,6 +90,7 @@ interface TestViewProps {
inputParams: Parameter[] | null
optParams: Parameter[] | null
isChatVariant?: boolean
+ compareMode: boolean
}
interface BoxComponentProps {
@@ -99,6 +102,7 @@ interface BoxComponentProps {
onAddToTestset: (params: Record) => void
onDelete?: () => void
isChatVariant?: boolean
+ variant: Variant
}
const BoxComponent: React.FC = ({
@@ -110,6 +114,7 @@ const BoxComponent: React.FC = ({
onAddToTestset,
onDelete,
isChatVariant = false,
+ variant,
}) => {
const classes = useStylesBox()
const loading = result === LOADING_TEXT
@@ -156,7 +161,7 @@ const BoxComponent: React.FC = ({
/>
-
+
}
@@ -173,6 +178,7 @@ const BoxComponent: React.FC = ({
/>
}
@@ -198,10 +204,22 @@ const BoxComponent: React.FC = ({
)
}
-const App: React.FC = ({inputParams, optParams, variant, isChatVariant}) => {
+const App: React.FC = ({
+ inputParams,
+ optParams,
+ variant,
+ isChatVariant,
+ compareMode,
+}) => {
const router = useRouter()
const appId = router.query.app_id as unknown as string
- const [testList, setTestList] = useState([{}])
+ const {
+ testList: _testList,
+ setTestList: _setTestList,
+ isRunning,
+ setIsRunning,
+ } = useContext(TestContext)
+ const [testList, setTestList] = useState(_testList)
const [resultsList, setResultsList] = useState(testList.map(() => ""))
const [params, setParams] = useState | null>(null)
const classes = useStylesApp()
@@ -216,6 +234,10 @@ const App: React.FC = ({inputParams, optParams, variant, isChatVa
})
}, [testList])
+ useEffect(() => {
+ setTestList(_testList)
+ }, [JSON.stringify(_testList)])
+
const setResultForIndex = (value: string, index: number) => {
if (isChatVariant) {
setTestList((prevState) =>
@@ -254,9 +276,27 @@ const App: React.FC = ({inputParams, optParams, variant, isChatVa
const handleRun = async (index: number) => {
try {
+ const testItem = testList[index]
+ if (compareMode && !isRunning[index]) {
+ setIsRunning(
+ (prevState) => {
+ const newState = [...prevState]
+ newState[index] = true
+ return newState
+ },
+ () => {
+ document
+ .querySelectorAll(`.testview-run-button-${testItem._id}`)
+ .forEach((btn) => {
+ if (btn.parentElement?.id !== variant.variantId) {
+ ;(btn as HTMLButtonElement).click()
+ }
+ })
+ },
+ )
+ }
setResultForIndex(LOADING_TEXT, index)
- const testItem = testList[index]
const res = await callVariant(
isChatVariant ? removeKeys(testItem, ["chat"]) : testItem,
inputParams || [],
@@ -274,6 +314,12 @@ const App: React.FC = ({inputParams, optParams, variant, isChatVa
"\n---------------------\n\nPlease update your code, and re-serve it using cli and try again.\n\nFor more information please read https://docs.agenta.ai/howto/how-to-debug\n\nIf you believe this is a bug, please create a new issue here: https://github.com/Agenta-AI/agenta/issues/new?title=Issue%20in%20playground",
index,
)
+ } finally {
+ setIsRunning((prevState) => {
+ const newState = [...prevState]
+ newState[index] = false
+ return newState
+ })
}
}
@@ -287,23 +333,28 @@ const App: React.FC = ({inputParams, optParams, variant, isChatVa
}
const handleAddRow = () => {
- setTestList([...testList, {_id: randString(6)}])
+ _setTestList([...testList, {_id: randString(6)}])
setResultsList([...resultsList, ""])
}
const handleDeleteRow = (testIndex: number) => {
- setTestList((prevTestList) => prevTestList.filter((_, index) => index !== testIndex))
+ _setTestList((prevTestList) => prevTestList.filter((_, index) => index !== testIndex))
setResultsList((prevResultsList) =>
prevResultsList.filter((_, index) => index !== testIndex),
)
}
const handleInputParamChange = (paramName: string, value: any, index: number) => {
- setTestList((prevState) => {
- const newState = [...prevState]
- newState[index] = {...newState[index], [paramName]: value}
- return newState
- })
+ const newState = [...testList]
+ newState[index] = {...newState[index], [paramName]: value}
+ setTestList(newState)
+
+ if (
+ !isEqual(_testList[index][paramName], value) &&
+ !isEqual(testList[index][paramName], value)
+ ) {
+ _setTestList(newState)
+ }
}
const onLoadTests = (tests: Record[], shouldReplace: boolean) => {
@@ -313,9 +364,9 @@ const App: React.FC = ({inputParams, optParams, variant, isChatVa
_id: randString(6),
}))
if (shouldReplace) {
- setTestList(testsList)
+ _setTestList(testsList)
} else {
- setTestList((prev) => [...prev, ...testsList])
+ _setTestList((prev) => [...prev, ...testsList])
}
}
@@ -355,6 +406,7 @@ const App: React.FC = ({inputParams, optParams, variant, isChatVa
onAddToTestset={setParams}
onDelete={testList.length >= 2 ? () => handleDeleteRow(index) : undefined}
isChatVariant={isChatVariant}
+ variant={variant}
/>
))}
= (value?: T) => void
-type DispatchWithCallback = (value: T, callback?: Callback) => void
+export type DispatchWithCallback = (value: T, callback?: Callback) => void
/**
* This hook mimcs the setState behaviour of class components. An optional callback can be passed
diff --git a/agenta-web/src/lib/helpers/openapi_parser.ts b/agenta-web/src/lib/helpers/openapi_parser.ts
index 54d7dd34b7..02a3100d68 100644
--- a/agenta-web/src/lib/helpers/openapi_parser.ts
+++ b/agenta-web/src/lib/helpers/openapi_parser.ts
@@ -63,6 +63,8 @@ const determineType = (xParam: any): string => {
return "number"
case "dict":
return "object"
+ case "bool":
+ return "boolean"
case "int":
return "integer"
case "file_url":
diff --git a/docker-compose.gh.yml b/docker-compose.gh.yml
index 35e3642b42..d1fa48b55a 100644
--- a/docker-compose.gh.yml
+++ b/docker-compose.gh.yml
@@ -9,6 +9,7 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
networks:
- agenta-network
+ restart: always
agenta-backend:
container_name: agenta-backend-1
@@ -49,9 +50,12 @@ services:
- "traefik.http.routers.backend.service=backend"
networks:
- agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
depends_on:
mongo:
condition: service_healthy
+ restart: always
agenta-web:
container_name: agenta-web-1
@@ -69,6 +73,8 @@ services:
- NEXT_PUBLIC_AGENTA_API_URL=${DOMAIN_NAME:-http://localhost}
- NEXT_PUBLIC_FF=oss
- NEXT_PUBLIC_TELEMETRY_TRACKING_ENABLED=true
+ restart: always
+
mongo:
image: mongo:5.0
environment:
@@ -85,6 +91,7 @@ services:
interval: 10s
timeout: 10s
retries: 20
+ restart: always
mongo_express:
image: mongo-express
@@ -99,6 +106,7 @@ services:
depends_on:
mongo:
condition: service_healthy
+ restart: always
redis:
image: redis:latest
@@ -106,6 +114,7 @@ services:
- agenta-network
volumes:
- redis_data:/data
+ restart: always
networks:
agenta-network:
diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml
index ff4dbdbac3..7c30d63a78 100644
--- a/docker-compose.prod.yml
+++ b/docker-compose.prod.yml
@@ -9,6 +9,7 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
networks:
- agenta-network
+ restart: always
backend:
build: ./agenta-backend
@@ -34,6 +35,8 @@ services:
- "traefik.http.routers.backend.service=backend"
networks:
- agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
command:
[
"uvicorn",
@@ -51,6 +54,7 @@ services:
depends_on:
mongo:
condition: service_healthy
+ restart: always
agenta-web:
build:
@@ -69,6 +73,7 @@ services:
- "traefik.http.services.agenta-web.loadbalancer.server.port=3000"
environment:
- NEXT_PUBLIC_POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7
+ restart: always
mongo:
image: mongo:5.0
@@ -86,6 +91,7 @@ services:
interval: 10s
timeout: 10s
retries: 20
+ restart: always
mongo_express:
image: mongo-express
@@ -100,6 +106,7 @@ services:
depends_on:
mongo:
condition: service_healthy
+ restart: always
redis:
image: redis:latest
@@ -107,6 +114,7 @@ services:
- agenta-network
volumes:
- redis_data:/data
+ restart: always
networks:
agenta-network:
diff --git a/docker-compose.yml b/docker-compose.yml
index 13e7f93d72..e7c416aee2 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -9,6 +9,7 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
networks:
- agenta-network
+ restart: always
backend:
build: ./agenta-backend
@@ -58,6 +59,7 @@ services:
depends_on:
mongo:
condition: service_healthy
+ restart: always
agenta-web:
build:
@@ -77,6 +79,7 @@ services:
- "traefik.http.services.agenta-web.loadbalancer.server.port=3000"
environment:
- NEXT_PUBLIC_POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7
+ restart: always
mongo:
image: mongo:5.0
@@ -94,6 +97,7 @@ services:
interval: 10s
timeout: 10s
retries: 20
+ restart: always
mongo_express:
image: mongo-express:0.54.0
@@ -108,6 +112,7 @@ services:
depends_on:
mongo:
condition: service_healthy
+ restart: always
redis:
image: redis:latest
@@ -115,6 +120,7 @@ services:
- agenta-network
volumes:
- redis_data:/data
+ restart: always
rabbitmq:
image: rabbitmq:3-management
diff --git a/docs/advanced_guides/custom_applications.mdx b/docs/advanced_guides/custom_applications.mdx
new file mode 100644
index 0000000000..6f00f5a7c3
--- /dev/null
+++ b/docs/advanced_guides/custom_applications.mdx
@@ -0,0 +1,202 @@
+---
+title: 'Creating Custom Applications'
+description: 'Learn how to use your custom application with Agenta'
+---
+
+Agenta comes with several pre-built template LLM applications for common use cases, such as single prompt and chatbot. However, you can also create your own custom application with Agenta. This could be a RAG application, a custom agent, a chain of prompts, or any custom logic.
+
+This guide will show you how to create a custom application and use it with Agenta.
+
+ We recommend reading ["How does Agenta work"](/developer_guides/how_does_agenta_work) beforehand to familiarize yourself with the main concepts of Agenta.
+
+## How to create a custom application in Agenta ?
+
+To add your custom application in Agenta, you need to write the application code using the Agenta SDK, then add the application to Agenta using the CLI.
+
+The [Agenta SDK](/developer_guides/sdk/quick_start) takes care of specifying the configuration of your application (prompts, model parameters, chunk size, etc.), and integrates it with Agenta. The [Agenta CLI](/developer_guides/cli/quick-usage) takes care of building the application image, deploying it, and exposing it to Agenta.
+
+## Converting an existing application to Agenta
+
+Writing an application for Agenta involves adding a few lines of code to your existing application.
+
+Let's consider the code of a simple application that calls OpenAI to generate a text for a blog post.
+
+```python myapp.py before adding agenta
+
+from openai import OpenAI
+
+client = OpenAI()
+
+def generate(subject: str):
+    prompt = "Write a blog post about {subject}"
+ formatted_prompt = prompt.format(subject=subject)
+ chat_completion = client.chat.completions.create(
+ model="gpt-3.5-turbo", messages=[{"role": "user", "content": prompt}]
+ )
+ return chat_completion.choices[0].message.content
+```
+
+As you can see, the application is a simple function that takes a blog post subject as input, formats the prompt using f-strings, then calls gpt-3.5-turbo with the formatted prompt and returns its output.
+
+To use the application in Agenta, we need to add a few lines of code. Here is the end result. We will go over each change in detail in the next sections.
+
+```python myapp.py after adding agenta
+import agenta as ag
+from openai import OpenAI
+
+ag.init()
+ag.config.register_default(prompt=ag.TextParam("Write a blog post about {subject}"))
+client = OpenAI()
+
+@ag.entrypoint
+def generate(subject: str):
+ formatted_prompt = ag.config.prompt.format(subject=subject)
+ chat_completion = client.chat.completions.create(
+ model="gpt-3.5-turbo", messages=[{"role": "user", "content": prompt}]
+ )
+ return chat_completion.choices[0].message.content
+```
+
+Below are the modifications we made to the code:
+
+### Importing and initializing the agenta SDK
+```python
+import agenta as ag
+ag.init()
+```
+
+1. We added the `ag.init()` call to initialize the application. Note that this call is always needed before using any other agenta function.
+
+### Specifying the default configuration
+```python
+ag.config.register_default(prompt=ag.TextParam("Write a blog post about {subject}"))
+```
+
+Here, we informed Agenta that the configuration for this application is a single parameter of type text, and its default value is `"Write a blog post about {subject}"`.
+
+This tells Agenta how to render the playground for this application. In this case, the playground will have a single text input with the default value `"Write a blog post about {subject}"`.
+
+### Specifying the entrypoint of the application
+```python
+@ag.entrypoint
+def generate(subject: str):
+```
+
+We added the `@ag.entrypoint` decorator to the main function of the application. This decorator informs Agenta that this function is the entry point to the application. It converts it (using FastAPI) into an API endpoint, allowing it to be used from the web interface.
+
+### Using the configuration in the application
+
+```python
+formatted_prompt = ag.config.prompt.format(subject=subject)
+```
+
+Instead of using the variable `prompt` directly, we are now using `ag.config.prompt`. This line tells the application to use the value set in Agenta. Here, Agenta acts as a management system for the app configuration (a prompt management system). This allows you to change the application's configuration from the web interface without modifying the code.
+
+When you call `ag.config.<parameter_name>`, the Agenta SDK calls the backend and retrieves the value of the variable for the requested variant.
+
+## Adding the requirements and environment variables
+
+Before serving the application in Agenta using the CLI, we need to add the application's requirements to the requirements.txt file.
+
+```python requirements.txt
+agenta
+openai
+```
+
+Additionally, we need to add the .env file with any required environment variables. In this case, we need to add the OpenAI API key.
+
+```bash .env
+OPENAI_API_KEY=sk-...
+```
+
+The Agenta SDK will automatically load the environment variables from the .env file.
+
+Both these files need to be in the same folder as the application code.
+
+## Serving the application
+
+To serve the application, we first need to initialize the project in Agenta. We run the following command in the folder containing the application code and the rest of the files.
+
+```bash
+agenta init
+```
+
+This command will prompt you to provide the name of the application, the host for Agenta (Agenta cloud), and whether to start from a blank project (yes in this case since we wrote the code) or to populate the folder with a template application (no in this case).
+
+After running this command, you should see a new config.toml file containing the application's configuration in the folder. Additionally, you should see a new empty project in the Agenta web UI.
+
+Now, we can serve the application by running the following command.
+
+```bash
+agenta variant serve myapp.py
+```
+
+This command will serve the application in Agenta. The application is now added to the Agenta web interface and can be used from there.
+
+ Under the hood, this command will build an image for the application, deploy a container with the image, and expose a REST API to the application which is used by Agenta to communicate with the application.
+
+## Using the application in agenta
+
+The application should now be visible in Agenta. A new application variant is always created under the name `<filename>.default`. Variants are always named in this format `<filename>.<configuration_name>`. This allows you to determine which source code was used to create the application (`<filename>`). When first created, we always create a 'default' configuration. This is the configuration specified in the code (when using `register_default`).
+
+## Adding other parameters
+
+We are not limited to one configuration parameter in the playground. We can add as many as we'd like. These parameters can be prompts (TextParam), numbers (FloatParam, IntParam), or dropdowns (MultipleChoiceParam). You can read more about the types of parameters in the [parameters](/developer_guides/sdk/config_datatypes) section.
+
+Here is a modified version of the application that adds a new parameter `temperature` to the playground.
+
+
+```python
+import agenta as ag
+from openai import OpenAI
+
+ag.init()
+ag.config.register_default(prompt=ag.TextParam("Write a blog post about {subject}"),
+                           temperature=ag.FloatParam(0.2))
+client = OpenAI()
+
+@ag.entrypoint
+def generate(subject: str):
+ formatted_prompt = ag.config.prompt.format(subject=subject)
+ chat_completion = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ temperature=ag.config.temperature,
+        messages=[{"role": "user", "content": formatted_prompt}]
+ )
+ return chat_completion.choices[0].message.content
+
+```
+
+## Where to go from here?
+
+Agenta provides the flexibility to add any LLM application to the platform, so that you collaborate on the prompt engineering, evaluation, and the management of the application's entire lifecycle all from one place.
+
+We've merely touched on what Agenta can do. You're not limited to apps that consist of a single file or function. You can create chains of prompts, or even agents. The SDK also allows you to track costs and log traces of your application.
+
+More information about the SDK can be found in the [SDK section in the developer guide](/developer_guides/sdk/quick_start). You can also explore a growing list of templates and tutorials in the [cookbook section](/cookbook/list_templates).
+
+Finally, our team is always ready to assist you with any custom application. Simply reach out to us on Slack, or book a call to discuss your use case in detail.
+You can read more about the SDK in the [SDK section in the developer guide](/developer_guides/sdk/quick_start). You can also check the growing list of templates and tutorials in the [cookbook section](/cookbook/list_templates). Lastly, please note that our team is always available to help you with any custom application — just reach out to us on [Slack](https://join.slack.com/t/agenta-hq/shared_invite/zt-1zsafop5i-Y7~ZySbhRZvKVPV5DO_7IA) or [book a call](https://cal.com/mahmoud-mabrouk-ogzgey/demo) to discuss your use case in detail.
+
+
+
+
+
+
+
+ Learn how to use the SDK to create custom applications.
+
+
+
+
+
+
+
diff --git a/docs/advanced_guides/using_agenta_from_cli.mdx b/docs/advanced_guides/using_agenta_from_cli.mdx
new file mode 100644
index 0000000000..10e085558b
--- /dev/null
+++ b/docs/advanced_guides/using_agenta_from_cli.mdx
@@ -0,0 +1,52 @@
+---
+title: 'Using Agenta from CLI'
+description: 'Create, experiment, and evaluate your applications all from the CLI'
+---
+
+Agenta was designed for use both from the CLI and from the web interface. This guide explains the basics of using Agenta from the CLI. For more details, refer to the [CLI developer guide](/developer_guides/cli/quick-usage).
+
+## Installation
+The agenta CLI can be easily installed through pip:
+```bash
+pip install -U agenta
+````
+
+## Creating an application
+
+
+
+To create an application in Agenta, first, initialize an empty project. Run the following command in the folder containing your application code :
+
+```bash
+agenta init
+```
+
+This will prompt you for the project name, the Agenta host, and the API key (if using the cloud or enterprise version).
+
+
+Running `agenta init` creates a blank project in Agenta and generates a config.toml file in that folder, which contains all the information about your project.
+
+
+
+With the project created, we need to add the first app variant to it.
+This can be done by running the following command:
+ ```bash
+ agenta variant serve app.py
+ ```
+
+This will create a new app variant in Agenta under the name `filename.default`. Here, `filename` is the name of the codebase containing the app logic, while `default` is a default configuration created for that codebase. Each new app variant created from the web interface or from the CLI will always have the name format `<filename>.<configuration_name>`.
+
+ Running this command will [create a container for the application](/developer_guides/how_does_agenta_work) with a REST API endpoint. This endpoint is what is used by the agenta web interface to communicate with the application.
+
+The CLI will also display the URL of the endpoint, which can be used to test the application.
+
+
+
+## Adding a new configuration
+
+In addition to the default configuration specified in the code, you can add more configurations to the application from the CLI.
+
+Configurations are specified in toml files and always named `<configuration_name>.toml`. To add a new configuration, run the following command:
+
+@devgenix please continue here
+
diff --git a/docs/advanced_guides/using_custom_evaluators.mdx b/docs/advanced_guides/using_custom_evaluators.mdx
new file mode 100644
index 0000000000..03cc648344
--- /dev/null
+++ b/docs/advanced_guides/using_custom_evaluators.mdx
@@ -0,0 +1,5 @@
+---
+title: 'Custom Evaluation'
+---
+
+ This page is under construction. Please reach out to us on [Slack](https://join.slack.com/t/agenta-hq/shared_invite/zt-1zsafop5i-Y7~ZySbhRZvKVPV5DO_7IA) **#support** channel, [Book a call](https://cal.com/mahmoud-mabrouk-ogzgey/demo), or through [email](mailto:team@agenta.ai) if you need help with using custom evaluation.
\ No newline at end of file
diff --git a/docs/basic_guides/automatic_evaluation.mdx b/docs/basic_guides/automatic_evaluation.mdx
new file mode 100644
index 0000000000..26ac847e5e
--- /dev/null
+++ b/docs/basic_guides/automatic_evaluation.mdx
@@ -0,0 +1,5 @@
+---
+title: 'Automatic Evaluation'
+---
+
+ This page is under construction. Please reach out to us on [Slack](https://join.slack.com/t/agenta-hq/shared_invite/zt-1zsafop5i-Y7~ZySbhRZvKVPV5DO_7IA) **#support** channel, [Book a call](https://cal.com/mahmoud-mabrouk-ogzgey/demo), or through [email](mailto:team@agenta.ai) if you need help with using automatic evaluation.
\ No newline at end of file
diff --git a/docs/basic_guides/creating_an_app.mdx b/docs/basic_guides/creating_an_app.mdx
new file mode 100644
index 0000000000..b04ef6df6f
--- /dev/null
+++ b/docs/basic_guides/creating_an_app.mdx
@@ -0,0 +1,42 @@
+---
+title: 'Creating an LLM App'
+---
+
+You can create applications in Agenta either from the web interface or from code. This guide will focus on creating an application using a template from the UI. You can read more about creating a custom application using code [here](/advanced_guides/custom_applications)
+
+## Step-by-step Guide
+
+1. **Navigate to the main page**: This is where you can create a new application.
+
+
+
+
+2. **Choose a template**: Currently, we offer templates for single prompt applications and chat applications.
+
+
+
+
+### Single Prompt Application
+
+The single prompt application is useful for single turn LLM applications such as question answering, text generation, entity extraction, and classification, etc.
+
+This template is based on the OpenAI specification and uses both the system prompt and user prompt:
+
+- **System Prompt**: The system prompt is a message written by the developer to guide the model's behavior. It provides a high-level instruction, such as defining the model's role or setting specific goals.
+
+- **User Prompt**: The user message is the message sent to the model by the user.
+
+While you could write the same application using only the user prompt or the system prompt, it is best to experiment with both approaches. The system prompt is typically used for high-level instructions.
+
+
+
+
+### Chat Application
+Like the single prompt application, the chat application is based on the OpenAI specification and uses both the system prompt and user prompt, but it is designed for multi-turn applications like chatbots.
+
+
+
+
+## Next steps
+
+Now that you've created an application, you can learn how to do [prompt engineering in the playground](/basic_guides/prompt_engineering).
\ No newline at end of file
diff --git a/docs/basic_guides/deployment.mdx b/docs/basic_guides/deployment.mdx
new file mode 100644
index 0000000000..8b4f1ab8a1
--- /dev/null
+++ b/docs/basic_guides/deployment.mdx
@@ -0,0 +1,40 @@
+---
+title: 'Deployment'
+description: 'Learn how to integrate Agenta with your application'
+---
+
+After using the playground to find a good configuration for your application, it's time to deploy the application.
+By deploying the application, you can integrate it with a short code snippet in your application. You can later change the configuration from the UI without having to update the code.
+
+
+Agenta provides a way to deploy an application to multiple environments: development, staging, and production. Each environment has its own unique configuration.
+
+## Deploying an application to an environment:
+
+1. Navigate to the playground for the variant you want to deploy.
+2. Click on 'Publish'. Make sure the variant is saved before deployment.
+
+
+
+
+3. Select the environment and click on "Publish"
+
+
+
+
+The application is now deployed to the chosen environment and is accessible as an API endpoint.
+
+## Accessing the API endpoint:
+
+1. Go to the 'API endpoint' section.
+2. Choose the environment where the application was deployed.
+
+
+
+
+3. Here, you'll find the application code in various languages, which can be embedded in your application code.
+
+
+
+
+Additionally, you can use the configuration in your application code without using Agenta deployment. This can be achieved by using the Python SDK or the REST API. More information can be found on the page 'Integrating Agenta with Your Application'.
\ No newline at end of file
diff --git a/docs/basic_guides/human_evaluation.mdx b/docs/basic_guides/human_evaluation.mdx
new file mode 100644
index 0000000000..9545bcd8d0
--- /dev/null
+++ b/docs/basic_guides/human_evaluation.mdx
@@ -0,0 +1,5 @@
+---
+title: 'Human Evaluation'
+---
+
+ This page is under construction. Please reach out to us on [Slack](https://join.slack.com/t/agenta-hq/shared_invite/zt-1zsafop5i-Y7~ZySbhRZvKVPV5DO_7IA) **#support** channel, [Book a call](https://cal.com/mahmoud-mabrouk-ogzgey/demo), or through [email](mailto:team@agenta.ai) if you need help with using human evaluation.
\ No newline at end of file
diff --git a/docs/basic_guides/prompt_engineering.mdx b/docs/basic_guides/prompt_engineering.mdx
new file mode 100644
index 0000000000..3aa3b660c0
--- /dev/null
+++ b/docs/basic_guides/prompt_engineering.mdx
@@ -0,0 +1,55 @@
+---
+title: 'Prompt Engineering'
+description: 'Using the playground for prompt engineering.'
+---
+
+The agenta playground is a platform that lets you create, modify, and compare different prompts and configurations for your LLM application.
+
+
+## Prompt Templates
+An LLM takes a prompt and returns a completion. To make the prompt reusable, we need to be able to dynamically modify it based on a set of inputs. A prompt template is a prompt that can be dynamically modified based on a set of inputs.
+
+For instance, a prompt to create a blog post might look like this:
+```
+Write a blog post about {subject}
+```
+
+Here, the input is the subject of the blog post. The prompt can be reused for different subjects.
+
+The LLM app templates in agenta use the f-string templating language, with prompt variables referenced using a curly bracket syntax.
+
+## Adding new inputs
+
+To add new inputs to the LLM app, navigate to the playground and modify the inputs under "Modify Parameters."
+
+
+
+
+After adding an input, incorporate it into the prompt templates using the curly bracket syntax.
+
+## Creating a variant
+
+You can create a new variant of an application by clicking on "Add Variant" in the "Side-by-side" view or the "+" tab in the "Tab view".
+
+
+
+
+You'll then see a window where you can select the source variant to use as a template and provide a new name for your variant.
+
+After creating a variant, you can alter its parameters. Remember to save your changes before deploying the variant.
+
+## Testing a variant
+
+Fill in the inputs in the cards and click "Run" to test a variant dynamically.
+
+You can also load a test set to populate the playground with a set of inputs. Then you can click "Run all" to run all the inputs in the test set.
+
+
+
+
+## Comparing variants side by side
+
+Click on the "Side-by-side" tab to compare variants. From the dropdowns, select the variants you wish to compare. This allows you to view the results of multiple variants simultaneously. When using a chat application, you can interact with different variants in parallel.
+
+
+
diff --git a/docs/basic_guides/team_management.mdx b/docs/basic_guides/team_management.mdx
new file mode 100644
index 0000000000..9e21617229
--- /dev/null
+++ b/docs/basic_guides/team_management.mdx
@@ -0,0 +1,20 @@
+---
+title: 'Team Management'
+---
+
+ Role-based access control and audit trails are currently under development.
+
+
+## Inviting users
+
+To add new users to your workspace, click on settings in the lower left corner of the screen. Then select the "Workspace" tab. You can invite new users by entering their email addresses.
+
+
+
+
+## Switching workspaces
+
+If you're a member of multiple workspaces, you can switch between them by clicking the workspace name in the lower left corner of the screen. Remember, all applications in a workspace are shared among all its members.
+
+
+
\ No newline at end of file
diff --git a/docs/basic_guides/test_sets.mdx b/docs/basic_guides/test_sets.mdx
new file mode 100644
index 0000000000..dbd98116f5
--- /dev/null
+++ b/docs/basic_guides/test_sets.mdx
@@ -0,0 +1,131 @@
+---
+title: 'Test Sets'
+---
+
+This guide will help you create, edit, and use test sets effectively.
+
+Test sets in Agenta can be loaded in the playground, used in evaluations, or for conducting human evaluations/annotations.
+
+# Creating a Test Set
+
+You can create a test set in Agenta using various methods: the API, the UI, by uploading a CSV, or directly from the playground.
+
+## Creating a Test Set from the Playground
+
+Creating a test set is simple while experimenting with your application directly from the playground:
+
+1. Navigate to the Playground.
+2. Enter an input and click "Run."
+3. Click on "Add to test set".
+
+The inputs and outputs from the playground will be displayed in the drawer. You can modify inputs and correct answers if necessary. Select an existing test set to add to, or choose "+Add new" if needed, then click "Add."
+
+
+
+
+
+When adding chat history, you have the option to include all turns from the history. For example:
+
+- User: Hi
+- Assistant: Hi, how can I help you?
+- User: I would like to book a table
+- Assistant: Sure, for how many people?
+
+If you select "Turn by Turn," two rows will be added to the test set: one for "Hi/Hi, how can I help you?" and another for "Hi/Hi, how can I help you?/I would like to book a table/Sure, for how many people?"
+
+
+## The Test Set Schema
+
+A test set in Agenta should have specific columns based on the input names of your application. For example, if your application takes a text and instruction as input, the test set should have two columns: "text" and "instruction." Optionally, you can include the correct answer under the column name "correct_answer."
+
+### Test Set Schema for Chat Applications
+
+For chat applications, format the chat column in the inputs as a list of messages:
+
+```json
+[
+ {"content": "message.", "role": "user"},
+ {"content": "message.", "role": "assistant"}
+ // Add more messages if necessary
+]
+
+```
+
+The "correct_answer" should follow a specific format as well:
+
+```json
+{"content": "message.", "role": "assistant"}
+```
+
+## Creating/Editing a Test Set from the UI
+
+To create or edit a test set from the UI:
+
+1. Go to "Test sets."
+2. Choose "Create a test set with UI."
+3. Name your test set and specify the columns for input types.
+4. Add the dataset.
+
+Remember to click "Save test set."
+
+Additional UI Features:
+
+- **Add Rows**: For new data entries.
+- **Rename Columns**: By clicking the pen icon above a column.
+- **Add Columns**: Using the '+' sign in the table header.
+
+
+## Creating a Test Set from a CSV or JSON
+To create a test set from a CSV or JSON file:
+
+Go to "Test sets," click "Upload test sets," and select either CSV or JSON.
+
+
+### CSV Format
+We use CSV with "," as a separator and '"' as a quote character. The first row should contain the header with the column names. Each input name should have its column, and the correct answer should be under the "correct_answer" column. Here's an example of a valid CSV:
+
+```csv
+text,instruction,correct_answer
+Hello,How are you?,I'm good.
+"Tell me a joke.",Make me laugh,"Sure, here's one:..."
+```
+### JSON Format
+
+The test set should be in JSON format with specific requirements:
+
+1. A JSON file with an array of rows.
+2. Each row in the array should be an object with column header names as keys and row data as values. Here's an example of a valid JSON file:
+```json
+[{ "recipe_name": "Chicken Parmesan","correct_answer": "Chicken" },
+{ "recipe_name": "a, special, recipe","correct_answer": "Beef" }]
+```
+
+## Creating a Test Set Using the API
+
+You can upload a test set using the Agenta API. Here's a high-level overview of how to do it:
+
+**HTTP Request:**
+
+```
+POST /testsets/{app_id}/
+
+```
+
+**Request Body:**
+
+```json
+{
+ "name": "testsetname",
+ "csvdata": [
+ {"column1": "row1col1",
+ "column2": "row1col2"},
+ {"column1": "row2col1",
+ "column2": "row2col2"
+ }
+ ]
+}
+
+```
+
+If you are using the API for the cloud, you should add an `Authorization: Bearer <your Agenta API key>` header to the request.
+
diff --git a/docs/changelog/main.mdx b/docs/changelog/main.mdx
new file mode 100644
index 0000000000..f0da6c86a4
--- /dev/null
+++ b/docs/changelog/main.mdx
@@ -0,0 +1,74 @@
+---
+title: "Changelog"
+---
+
+## v0.6.6 - Improving Side-by-side Comparison in the Playground
+*19th December 2023*
+- Enhanced the side-by-side comparison in the playground for better user experience
+
+## v0.6.5 - Resolved Batch Logic Issue in Evaluation
+*18th December 2023*
+- Resolved an issue with batch logic in evaluation (users can now run extensive evaluations)
+
+## v0.6.4 - Comprehensive Updates and Bug Fixes
+*12th December 2023*
+- Incorporated all chat turns to the chat set
+- Rectified self-hosting documentation
+- Introduced asynchronous support for applications
+- Added 'register_default' alias
+- Fixed a bug in the side-by-side feature
+
+## v0.6.3 - Integrated File Input and UI Enhancements
+*12th December 2023*
+- Integrated file input feature in the SDK
+- Provided an example that includes images
+- Upgraded the human evaluation view to present larger inputs
+- Fixed issues related to data overwriting in the cloud
+- Implemented UI enhancements to the side bar
+
+## v0.6.2 - Minor Adjustments for Better Performance
+*7th December 2023*
+- Made minor adjustments
+
+## v0.6.1 - Bug Fix for Application Saving
+*7th December 2023*
+- Resolved a bug related to saving the application
+
+## v0.6.0 - Introduction of Chat-based Applications
+*1st December 2023*
+- Introduced chat-based applications
+- Fixed a bug in 'export csv' feature in auto evaluation
+
+## v0.5.8 - Multiple UI and CSV Reader Fixes
+*1st December 2023*
+- Fixed a bug impacting the csv reader
+- Addressed an issue of variant overwriting
+- Made tabs draggable for better UI navigation
+- Implemented support for multiple LLM keys in the UI
+
+## v0.5.7 - Enhanced Self-hosting and Mistral Model Tutorial
+*17th November 2023*
+- Enhanced and simplified self-hosting feature
+- Added a tutorial for the Mistral model
+- Resolved a race condition issue in deployment
+- Fixed an issue with saving in the playground
+
+## v0.5.6 - Sentry Integration and User Communication Improvements
+*12th November 2023*
+- Enhanced bug tracking with Sentry integration in the cloud
+- Integrated Intercom for better user communication in the cloud
+- Upgraded to the latest version of OpenAI
+- Cleaned up files post serving in CLI
+
+## v0.5.5 - Cypress Tests and UI Improvements
+*2nd November 2023*
+- Conducted extensive Cypress tests for improved application stability
+- Added a collapsible sidebar for better navigation
+- Improved error handling mechanisms
+- Added documentation for the evaluation feature
+
+## v0.5 - Launch of SDK Version 2 and Cloud-hosted Version
+*23rd October 2023*
+- Launched SDK version 2
+- Launched the cloud-hosted version
+- Completed a comprehensive refactoring of the application
diff --git a/docs/cookbook/extract_job_information.mdx b/docs/cookbook/extract_job_information.mdx
new file mode 100644
index 0000000000..19f6d634fb
--- /dev/null
+++ b/docs/cookbook/extract_job_information.mdx
@@ -0,0 +1,49 @@
+---
+title: "Extraction using OpenAI Functions and Langchain"
+---
+
+This template is designed to extract job information (company name, job title, salary range) from a job description. It uses OpenAI Functions and Langchain.
+
+## Code base
+
+The code base can be found in the [GitHub repository](https://github.com/Agenta-AI/job_extractor_template).
+
+## How to use
+### 0. Prerequisites
+- Install the agenta CLI
+```bash
+pip install agenta-cli
+```
+- Either create an account in [agenta cloud](https://cloud.agenta.ai/) or [self-host agenta](/self-host/host-locally)
+
+### 1. Clone the repository
+
+```bash
+git clone https://github.com/Agenta-AI/job_extractor_template
+```
+
+### 2. Initialize the project
+
+```bash
+agenta init
+```
+
+### 3. Setup your openAI API key
+Create a .env file by copying the .env.example file and add your openAI API key to it.
+```bash
+OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxx
+```
+
+### 4. Deploy the application to agenta
+
+```bash
+agenta variant serve app.py
+```
+
+### 5. Experiment with the prompts in a playground and evaluate different variants
+
diff --git a/docs/cookbook/list_templates.mdx b/docs/cookbook/list_templates.mdx
new file mode 100644
index 0000000000..418fcbfd84
--- /dev/null
+++ b/docs/cookbook/list_templates.mdx
@@ -0,0 +1,32 @@
+---
+title: "Templates by Architecture"
+description: "A collection of templates and tutorials indexed by architecture."
+---
+
+# Tutorials
+## ๐ Text Generation
+### [Single Prompt Application using OpenAI and Langchain](/tutorials/first-app-with-langchain)
+ Text Generation OpenAI Langchain
+
+Learn how to use our SDK to deploy an application with agenta. The application we will create uses OpenAI and Langchain. The application generates LinkedIn outreach messages to investors based on a startup name and idea.
+### [Use Mistral from Huggingface for a Summarization Task](/tutorials/deploy-mistral-model)
+ Text Generation Mistral Hugging Face
+
+Learn how to use a custom model with agenta.
+
+## Retrieval Augmented Generation (RAG)
+### [RAG Application with LlamaIndex](/tutorials/build-rag-application)
+ Sales OpenAI RAG LlamaIndex
+
+Learn how to create a RAG application with LlamaIndex and use it in agenta. You will create a playground in agenta where you can experiment with the parameters of the RAG application, test it and compare different versions.
+
+# Templates
+## โ๏ธ Extraction
+
+These templates extract data in a structured format from an unstructured source.
+### [Extraction using OpenAI Functions and Langchain](/cookbook/extract_job_information)
+
+ HR OpenAI Functions Langchain
+
+
+Extracts job information (company name, job title, salary range) from a job description. Uses OpenAI Functions and Langchain.
diff --git a/docs/cookbook/list_templates_by_technology.mdx b/docs/cookbook/list_templates_by_technology.mdx
new file mode 100644
index 0000000000..bc7f38810f
--- /dev/null
+++ b/docs/cookbook/list_templates_by_technology.mdx
@@ -0,0 +1,19 @@
+---
+title: "Templates by Technology"
+description: "A collection of templates and tutorials indexed by the used framework and model provider."
+---
+
+ This page is a work in progress. Please note that some of the entries are redundant.
+
+## Langchain
+### [Extraction using OpenAI Functions and Langchain](/templates/extract_job_information)
+Extracts job information (company name, job title, salary range) from a job description. Uses OpenAI Functions and Langchain.
+
+## LlamaIndex
+### [RAG Application with LlamaIndex](/tutorials/build-rag-application)
+Learn how to create a RAG application with LlamaIndex and use it in agenta. You will create a playground in agenta where you can experiment with the parameters of the RAG application, test it and compare different versions. The application takes a sales transcript and answers questions based on it.
+
+
+## OpenAI
+### [Extraction using OpenAI Functions and Langchain](/templates/extract_job_information)
+Extracts job information (company name, job title, salary range) from a job description. Uses OpenAI Functions and Langchain.
diff --git a/docs/cookbook/list_templates_by_use_case.mdx b/docs/cookbook/list_templates_by_use_case.mdx
new file mode 100644
index 0000000000..9fa5954236
--- /dev/null
+++ b/docs/cookbook/list_templates_by_use_case.mdx
@@ -0,0 +1,15 @@
+---
+title: "Templates by Use Case"
+description: "A collection of templates and tutorials indexed by use case."
+---
+ This page is a work in progress. Please note that some of the entries are redundant.
+
+## Human Resources
+### [Extraction using OpenAI Functions and Langchain](/cookbook/extract_job_information)
+Extracts job information (company name, job title, salary range) from a job description. Uses OpenAI Functions and Langchain.
+
+## Sales
+### [Single Prompt Application using OpenAI and Langchain](/tutorials/first-app-with-langchain)
+Learn how to use our SDK to deploy an application with agenta. The application we will create uses OpenAI and Langchain. The application generates LinkedIn outreach messages to investors based on a startup name and idea.
+### [RAG Application with LlamaIndex](/tutorials/build-rag-application)
+Learn how to create a RAG application with LlamaIndex and use it in agenta. You will create a playground in agenta where you can experiment with the parameters of the RAG application, test it and compare different versions. The application takes a sales transcript and answers questions based on it.
diff --git a/docs/conceptual/lifecycle.mdx b/docs/depractated/conceptual/lifecycle.mdx
similarity index 100%
rename from docs/conceptual/lifecycle.mdx
rename to docs/depractated/conceptual/lifecycle.mdx
diff --git a/docs/howto/creating-multiple-app-variants.mdx b/docs/depractated/howto/creating-multiple-app-variants.mdx
similarity index 100%
rename from docs/howto/creating-multiple-app-variants.mdx
rename to docs/depractated/howto/creating-multiple-app-variants.mdx
diff --git a/docs/howto/disabling-anonymized-tracking.mdx b/docs/depractated/howto/disabling-anonymized-tracking.mdx
similarity index 100%
rename from docs/howto/disabling-anonymized-tracking.mdx
rename to docs/depractated/howto/disabling-anonymized-tracking.mdx
diff --git a/docs/howto/how-to-compare-apps.mdx b/docs/depractated/howto/how-to-compare-apps.mdx
similarity index 100%
rename from docs/howto/how-to-compare-apps.mdx
rename to docs/depractated/howto/how-to-compare-apps.mdx
diff --git a/docs/howto/how-to-debug.mdx b/docs/depractated/howto/how-to-debug.mdx
similarity index 100%
rename from docs/howto/how-to-debug.mdx
rename to docs/depractated/howto/how-to-debug.mdx
diff --git a/docs/howto/how-to-evaluate-apps.mdx b/docs/depractated/howto/how-to-evaluate-apps.mdx
similarity index 100%
rename from docs/howto/how-to-evaluate-apps.mdx
rename to docs/depractated/howto/how-to-evaluate-apps.mdx
diff --git a/docs/howto/how-to-use-playground.mdx b/docs/depractated/howto/how-to-use-playground.mdx
similarity index 100%
rename from docs/howto/how-to-use-playground.mdx
rename to docs/depractated/howto/how-to-use-playground.mdx
diff --git a/docs/howto/how-to-use-sdk-to-create-custom-application.mdx b/docs/depractated/howto/how-to-use-sdk-to-create-custom-application.mdx
similarity index 100%
rename from docs/howto/how-to-use-sdk-to-create-custom-application.mdx
rename to docs/depractated/howto/how-to-use-sdk-to-create-custom-application.mdx
diff --git a/docs/howto/use-a-custom-llm.mdx b/docs/depractated/howto/use-a-custom-llm.mdx
similarity index 87%
rename from docs/howto/use-a-custom-llm.mdx
rename to docs/depractated/howto/use-a-custom-llm.mdx
index 668177e95c..6d8972b3c0 100644
--- a/docs/howto/use-a-custom-llm.mdx
+++ b/docs/depractated/howto/use-a-custom-llm.mdx
@@ -15,7 +15,7 @@ import requests
default_prompt = "Please write a joke about {subject}"
url = "https:///generate"
-ag.config.default(prompt=default_prompt,
+ag.config.register_default(prompt=default_prompt,
temperature=0.8)
@ag.entrypoint
@@ -31,7 +31,7 @@ def generate(subject:str)->str:
The above code is a simple LLM app that generates jokes about a given subject, using a vLLM hosted model. It is structured as follows:
-`ag.config.default` sets the default values for the configuration of the LLM application. In this example, the default prompt is "Please write a joke about {subject}", and the temperature is set at 0.8.
+`ag.config.register_default` sets the default values for the configuration of the LLM application. In this example, the default prompt is "Please write a joke about {subject}", and the temperature is set at 0.8.
The `@ag.entrypoint` decorator marks the function that will be called. The function `generate` accepts a subject as input and returns a joke as output. It calls the vLLM hosted model using the requests library.
diff --git a/docs/learn/concepts.mdx b/docs/depractated/learn/concepts.mdx
similarity index 100%
rename from docs/learn/concepts.mdx
rename to docs/depractated/learn/concepts.mdx
diff --git a/docs/learn/evaluating_llm_apps.mdx b/docs/depractated/learn/evaluating_llm_apps.mdx
similarity index 100%
rename from docs/learn/evaluating_llm_apps.mdx
rename to docs/depractated/learn/evaluating_llm_apps.mdx
diff --git a/docs/learn/llm_app_architectures.mdx b/docs/depractated/learn/llm_app_architectures.mdx
similarity index 100%
rename from docs/learn/llm_app_architectures.mdx
rename to docs/depractated/learn/llm_app_architectures.mdx
diff --git a/docs/learn/the_llmops_workflow.mdx b/docs/depractated/learn/the_llmops_workflow.mdx
similarity index 100%
rename from docs/learn/the_llmops_workflow.mdx
rename to docs/depractated/learn/the_llmops_workflow.mdx
diff --git a/docs/quickstart/getting-started-code.mdx b/docs/depractated/quickstart/getting-started-code.mdx
similarity index 97%
rename from docs/quickstart/getting-started-code.mdx
rename to docs/depractated/quickstart/getting-started-code.mdx
index bcea8e32be..4b9042eb81 100644
--- a/docs/quickstart/getting-started-code.mdx
+++ b/docs/depractated/quickstart/getting-started-code.mdx
@@ -9,7 +9,7 @@ sidebarTitle: 'Creating LLM Apps using code'
Prefer video tutorial? Watch our 4-minute video [here](https://youtu.be/nggaRwDZM-0).
-VIDEO
+VIDEO
## Introduction
diff --git a/docs/quickstart/how-agenta-works.mdx b/docs/depractated/quickstart/how-agenta-works.mdx
similarity index 100%
rename from docs/quickstart/how-agenta-works.mdx
rename to docs/depractated/quickstart/how-agenta-works.mdx
diff --git a/docs/quickstart/installation.mdx b/docs/depractated/quickstart/installation.mdx
similarity index 95%
rename from docs/quickstart/installation.mdx
rename to docs/depractated/quickstart/installation.mdx
index 0b700af772..4f23d5b401 100644
--- a/docs/quickstart/installation.mdx
+++ b/docs/depractated/quickstart/installation.mdx
@@ -3,7 +3,7 @@ title: Installation
description: 'How to install the Agenta CLI on your machine'
---
- This guide helps you install Agenta on your local machine. If you're looking to set it up on a server for multiple users, head over to [this guide](/installation/self-hosting/).
+ This guide helps you install Agenta on your local machine. If you're looking to set it up on a server for multiple users, head over to [this guide](/self-host/host-remotely).
# Installing Agenta locally
To install Agenta, you need first to install the python package containing both the SDK and the CLI. Then, you need to install the web platform.
@@ -57,6 +57,6 @@ Open your browser and go to [http://localhost](http://localhost). If you see the
## What's next?
You're all set to start using Agenta!
-
+
Click here to build your first LLM app in just 1 minute.
\ No newline at end of file
diff --git a/docs/cli/init.mdx b/docs/developer_guides/cli/init.mdx
similarity index 100%
rename from docs/cli/init.mdx
rename to docs/developer_guides/cli/init.mdx
diff --git a/docs/cli/install.mdx b/docs/developer_guides/cli/install.mdx
similarity index 100%
rename from docs/cli/install.mdx
rename to docs/developer_guides/cli/install.mdx
diff --git a/docs/cli/quick-usage.mdx b/docs/developer_guides/cli/quick-usage.mdx
similarity index 100%
rename from docs/cli/quick-usage.mdx
rename to docs/developer_guides/cli/quick-usage.mdx
diff --git a/docs/cli/variant_list.mdx b/docs/developer_guides/cli/variant_list.mdx
similarity index 100%
rename from docs/cli/variant_list.mdx
rename to docs/developer_guides/cli/variant_list.mdx
diff --git a/docs/cli/variant_remove.mdx b/docs/developer_guides/cli/variant_remove.mdx
similarity index 100%
rename from docs/cli/variant_remove.mdx
rename to docs/developer_guides/cli/variant_remove.mdx
diff --git a/docs/cli/variant_serve.mdx b/docs/developer_guides/cli/variant_serve.mdx
similarity index 100%
rename from docs/cli/variant_serve.mdx
rename to docs/developer_guides/cli/variant_serve.mdx
diff --git a/docs/contributing/api_reference.mdx b/docs/developer_guides/contributing/api_reference.mdx
similarity index 100%
rename from docs/contributing/api_reference.mdx
rename to docs/developer_guides/contributing/api_reference.mdx
diff --git a/docs/contributing/community.mdx b/docs/developer_guides/contributing/community.mdx
similarity index 100%
rename from docs/contributing/community.mdx
rename to docs/developer_guides/contributing/community.mdx
diff --git a/docs/contributing/development-mode.mdx b/docs/developer_guides/contributing/development-mode.mdx
similarity index 100%
rename from docs/contributing/development-mode.mdx
rename to docs/developer_guides/contributing/development-mode.mdx
diff --git a/docs/contributing/file-issue.mdx b/docs/developer_guides/contributing/file-issue.mdx
similarity index 100%
rename from docs/contributing/file-issue.mdx
rename to docs/developer_guides/contributing/file-issue.mdx
diff --git a/docs/contributing/getting-started.mdx b/docs/developer_guides/contributing/getting-started.mdx
similarity index 97%
rename from docs/contributing/getting-started.mdx
rename to docs/developer_guides/contributing/getting-started.mdx
index a95454765b..4c4425f7be 100644
--- a/docs/contributing/getting-started.mdx
+++ b/docs/developer_guides/contributing/getting-started.mdx
@@ -28,7 +28,7 @@ To maintain code quality, we adhere to certain formatting and linting rules:
## Contribution Steps
-1. **Pick an Issue:** Start by selecting an issue from our issue tracker. Choose one that matches your skill set and begin coding. For more on this, read our [Creating an Issue Guide](file-issue).
+1. **Pick an Issue:** Start by selecting an issue from our issue tracker. Choose one that matches your skill set and begin coding. For more on this, read our [Creating an Issue Guide](/contributing/file-issue).
2. **Fork & Pull Request:** Fork our repository, create a new branch, add your changes, and submit a pull request. Ensure your code aligns with our standards and includes appropriate unit tests.
diff --git a/docs/contributing/technical_details.mdx b/docs/developer_guides/contributing/technical_details.mdx
similarity index 100%
rename from docs/contributing/technical_details.mdx
rename to docs/developer_guides/contributing/technical_details.mdx
diff --git a/docs/contributing/terminology.mdx b/docs/developer_guides/contributing/terminology.mdx
similarity index 100%
rename from docs/contributing/terminology.mdx
rename to docs/developer_guides/contributing/terminology.mdx
diff --git a/docs/developer_guides/how_does_agenta_work.mdx b/docs/developer_guides/how_does_agenta_work.mdx
new file mode 100644
index 0000000000..b4519d395e
--- /dev/null
+++ b/docs/developer_guides/how_does_agenta_work.mdx
@@ -0,0 +1,55 @@
+---
+title: "How does Agenta work?"
+description: "An overview of the architecture and main concepts of Agenta"
+---
+
+
+## What problem does Agenta solve?
+
+To build a robust LLM application, you need to:
+1. **Rapidly experiment and evaluate** various prompts, models, and architectures/workflows (RAG, chain-of-prompts, etc.).
+2. **Collaborate with non-developers**, such as product managers or domain experts.
+
+While some tools exist that help with the first point via a user interface, they are typically limited to pre-built single-prompt applications and fail to accommodate custom workflows or application logic.
+
+## How does Agenta solve this problem?
+
+Agenta creates a playground in the UI from your LLM applications, regardless of the workflow (RAG, chain-of-prompts, custom logic) or the framework (Langchain, Llama_index, OpenAI calls) in use.
+
+This enables the entire team to collaborate on prompt engineering and experimentation with the application parameters (prompts, models, chunk size, etc.). It also allows them to manage all aspects of the app development lifecycle from the UI: comparing different configurations, evaluating the application, deploying it, and more.
+
+## How does Agenta achieve this?
+
+1. **Microservice-based Applications**:
+
+Agenta treats each application as a microservice. Creating a new application in Agenta automatically generates a container with a REST API. This is true whether the application is created using a pre-built template from the UI, or from the CLI using custom application code. Agenta handles the creation of Docker images and container deployment. This means that all interactions with the application (either from the UI, during evaluations, or post-deployment) occur with the container.
+
+2. **Separation of Logic and Configuration**:
+
+Agenta separates the application logic from the configuration. The application logic refers to the code that defines the application, whether it's a simple prompt, chain of prompts, RAG, etc. The configuration refers to the parameters used in the application logic, such as the prompt, model, chunk size, etc. In the application code, you specify which configuration does the application use. This configuration can be modified from the UI in the playground or the CLI.
+
+## Agenta architecture
+
+
+
+
+
+### The Application
+
+The application describes the logic written in Python code. An application can be created from a pre-built template in the UI or from code in the CLI. In either case, a new container with the application code is launched. The application can then be accessed via a REST API.
+
+Each application has a default configuration specified in its code. This default configuration can be overridden by the user in the UI or the CLI. Additionally, the user can create new configurations from the UI or the CLI. Each new configuration results in the creation of a new application variant, which is a combination of the application logic and a configuration. A single project can house many variants encompassing multiple application logics and configurations.
+
+## The Backend
+Agenta's backend manages applications and configurations. It is responsible for building images, deploying containers, and managing configurations and prompts for the application.
+
+## The Frontend / UI
+The frontend provides tools to create new applications from a template, create and edit configurations, run evaluations, and deploy applications to different environments (e.g., staging, production, etc.).
+
+## The CLI
+The CLI offers the same capabilities as the frontend. Additionally, it allows for the creation of custom applications not available as templates. When serving a new application from the CLI, Agenta handles container creation and deployment. After creating a new application, users can edit its configuration and evaluate it in the UI.
+
+## The SDK
+The SDK is a Python library used to create new applications from code. It manages the saving of the default configuration, creation of the REST API, and necessary actions to create a playground and integrate the application with the Agenta platform.
+
+
diff --git a/docs/reference/backend_api/add-variant-from-base-and-config.mdx b/docs/developer_guides/reference/backend_api/add-variant-from-base-and-config.mdx
similarity index 100%
rename from docs/reference/backend_api/add-variant-from-base-and-config.mdx
rename to docs/developer_guides/reference/backend_api/add-variant-from-base-and-config.mdx
diff --git a/docs/reference/backend_api/add-variant-from-image.mdx b/docs/developer_guides/reference/backend_api/add-variant-from-image.mdx
similarity index 100%
rename from docs/reference/backend_api/add-variant-from-image.mdx
rename to docs/developer_guides/reference/backend_api/add-variant-from-image.mdx
diff --git a/docs/reference/backend_api/build-image.mdx b/docs/developer_guides/reference/backend_api/build-image.mdx
similarity index 100%
rename from docs/reference/backend_api/build-image.mdx
rename to docs/developer_guides/reference/backend_api/build-image.mdx
diff --git a/docs/reference/backend_api/construct-app-container-url.mdx b/docs/developer_guides/reference/backend_api/construct-app-container-url.mdx
similarity index 100%
rename from docs/reference/backend_api/construct-app-container-url.mdx
rename to docs/developer_guides/reference/backend_api/construct-app-container-url.mdx
diff --git a/docs/reference/backend_api/container-templates.mdx b/docs/developer_guides/reference/backend_api/container-templates.mdx
similarity index 100%
rename from docs/reference/backend_api/container-templates.mdx
rename to docs/developer_guides/reference/backend_api/container-templates.mdx
diff --git a/docs/reference/backend_api/create-app-and-variant-from-template.mdx b/docs/developer_guides/reference/backend_api/create-app-and-variant-from-template.mdx
similarity index 100%
rename from docs/reference/backend_api/create-app-and-variant-from-template.mdx
rename to docs/developer_guides/reference/backend_api/create-app-and-variant-from-template.mdx
diff --git a/docs/reference/backend_api/create-app.mdx b/docs/developer_guides/reference/backend_api/create-app.mdx
similarity index 100%
rename from docs/reference/backend_api/create-app.mdx
rename to docs/developer_guides/reference/backend_api/create-app.mdx
diff --git a/docs/reference/backend_api/create-custom-evaluation.mdx b/docs/developer_guides/reference/backend_api/create-custom-evaluation.mdx
similarity index 100%
rename from docs/reference/backend_api/create-custom-evaluation.mdx
rename to docs/developer_guides/reference/backend_api/create-custom-evaluation.mdx
diff --git a/docs/reference/backend_api/create-evaluation-scenario.mdx b/docs/developer_guides/reference/backend_api/create-evaluation-scenario.mdx
similarity index 100%
rename from docs/reference/backend_api/create-evaluation-scenario.mdx
rename to docs/developer_guides/reference/backend_api/create-evaluation-scenario.mdx
diff --git a/docs/reference/backend_api/create-evaluation.mdx b/docs/developer_guides/reference/backend_api/create-evaluation.mdx
similarity index 100%
rename from docs/reference/backend_api/create-evaluation.mdx
rename to docs/developer_guides/reference/backend_api/create-evaluation.mdx
diff --git a/docs/reference/backend_api/create-feedback.mdx b/docs/developer_guides/reference/backend_api/create-feedback.mdx
similarity index 100%
rename from docs/reference/backend_api/create-feedback.mdx
rename to docs/developer_guides/reference/backend_api/create-feedback.mdx
diff --git a/docs/reference/backend_api/create-span.mdx b/docs/developer_guides/reference/backend_api/create-span.mdx
similarity index 100%
rename from docs/reference/backend_api/create-span.mdx
rename to docs/developer_guides/reference/backend_api/create-span.mdx
diff --git a/docs/reference/backend_api/create-testset.mdx b/docs/developer_guides/reference/backend_api/create-testset.mdx
similarity index 100%
rename from docs/reference/backend_api/create-testset.mdx
rename to docs/developer_guides/reference/backend_api/create-testset.mdx
diff --git a/docs/reference/backend_api/create-trace.mdx b/docs/developer_guides/reference/backend_api/create-trace.mdx
similarity index 100%
rename from docs/reference/backend_api/create-trace.mdx
rename to docs/developer_guides/reference/backend_api/create-trace.mdx
diff --git a/docs/reference/backend_api/create_doc_from_openai.sh b/docs/developer_guides/reference/backend_api/create_doc_from_openai.sh
similarity index 100%
rename from docs/reference/backend_api/create_doc_from_openai.sh
rename to docs/developer_guides/reference/backend_api/create_doc_from_openai.sh
diff --git a/docs/reference/backend_api/delete-evaluations.mdx b/docs/developer_guides/reference/backend_api/delete-evaluations.mdx
similarity index 100%
rename from docs/reference/backend_api/delete-evaluations.mdx
rename to docs/developer_guides/reference/backend_api/delete-evaluations.mdx
diff --git a/docs/reference/backend_api/delete-testsets.mdx b/docs/developer_guides/reference/backend_api/delete-testsets.mdx
similarity index 100%
rename from docs/reference/backend_api/delete-testsets.mdx
rename to docs/developer_guides/reference/backend_api/delete-testsets.mdx
diff --git a/docs/reference/backend_api/deploy-to-environment.mdx b/docs/developer_guides/reference/backend_api/deploy-to-environment.mdx
similarity index 100%
rename from docs/reference/backend_api/deploy-to-environment.mdx
rename to docs/developer_guides/reference/backend_api/deploy-to-environment.mdx
diff --git a/docs/reference/backend_api/evaluate-ai-critique.mdx b/docs/developer_guides/reference/backend_api/evaluate-ai-critique.mdx
similarity index 100%
rename from docs/reference/backend_api/evaluate-ai-critique.mdx
rename to docs/developer_guides/reference/backend_api/evaluate-ai-critique.mdx
diff --git a/docs/reference/backend_api/execute-custom-evaluation.mdx b/docs/developer_guides/reference/backend_api/execute-custom-evaluation.mdx
similarity index 100%
rename from docs/reference/backend_api/execute-custom-evaluation.mdx
rename to docs/developer_guides/reference/backend_api/execute-custom-evaluation.mdx
diff --git a/docs/reference/backend_api/fetch-evaluation-scenarios.mdx b/docs/developer_guides/reference/backend_api/fetch-evaluation-scenarios.mdx
similarity index 100%
rename from docs/reference/backend_api/fetch-evaluation-scenarios.mdx
rename to docs/developer_guides/reference/backend_api/fetch-evaluation-scenarios.mdx
diff --git a/docs/reference/backend_api/fetch-evaluation.mdx b/docs/developer_guides/reference/backend_api/fetch-evaluation.mdx
similarity index 100%
rename from docs/reference/backend_api/fetch-evaluation.mdx
rename to docs/developer_guides/reference/backend_api/fetch-evaluation.mdx
diff --git a/docs/reference/backend_api/fetch-list-evaluations.mdx b/docs/developer_guides/reference/backend_api/fetch-list-evaluations.mdx
similarity index 100%
rename from docs/reference/backend_api/fetch-list-evaluations.mdx
rename to docs/developer_guides/reference/backend_api/fetch-list-evaluations.mdx
diff --git a/docs/reference/backend_api/fetch-results.mdx b/docs/developer_guides/reference/backend_api/fetch-results.mdx
similarity index 100%
rename from docs/reference/backend_api/fetch-results.mdx
rename to docs/developer_guides/reference/backend_api/fetch-results.mdx
diff --git a/docs/reference/backend_api/get-config.mdx b/docs/developer_guides/reference/backend_api/get-config.mdx
similarity index 100%
rename from docs/reference/backend_api/get-config.mdx
rename to docs/developer_guides/reference/backend_api/get-config.mdx
diff --git a/docs/reference/backend_api/get-custom-evaluation-names.mdx b/docs/developer_guides/reference/backend_api/get-custom-evaluation-names.mdx
similarity index 100%
rename from docs/reference/backend_api/get-custom-evaluation-names.mdx
rename to docs/developer_guides/reference/backend_api/get-custom-evaluation-names.mdx
diff --git a/docs/reference/backend_api/get-custom-evaluation.mdx b/docs/developer_guides/reference/backend_api/get-custom-evaluation.mdx
similarity index 100%
rename from docs/reference/backend_api/get-custom-evaluation.mdx
rename to docs/developer_guides/reference/backend_api/get-custom-evaluation.mdx
diff --git a/docs/reference/backend_api/get-evaluation-scenario-score-router.mdx b/docs/developer_guides/reference/backend_api/get-evaluation-scenario-score-router.mdx
similarity index 100%
rename from docs/reference/backend_api/get-evaluation-scenario-score-router.mdx
rename to docs/developer_guides/reference/backend_api/get-evaluation-scenario-score-router.mdx
diff --git a/docs/reference/backend_api/get-feedback.mdx b/docs/developer_guides/reference/backend_api/get-feedback.mdx
similarity index 100%
rename from docs/reference/backend_api/get-feedback.mdx
rename to docs/developer_guides/reference/backend_api/get-feedback.mdx
diff --git a/docs/reference/backend_api/get-feedbacks.mdx b/docs/developer_guides/reference/backend_api/get-feedbacks.mdx
similarity index 100%
rename from docs/reference/backend_api/get-feedbacks.mdx
rename to docs/developer_guides/reference/backend_api/get-feedbacks.mdx
diff --git a/docs/reference/backend_api/get-spans-of-trace.mdx b/docs/developer_guides/reference/backend_api/get-spans-of-trace.mdx
similarity index 100%
rename from docs/reference/backend_api/get-spans-of-trace.mdx
rename to docs/developer_guides/reference/backend_api/get-spans-of-trace.mdx
diff --git a/docs/reference/backend_api/get-trace.mdx b/docs/developer_guides/reference/backend_api/get-trace.mdx
similarity index 100%
rename from docs/reference/backend_api/get-trace.mdx
rename to docs/developer_guides/reference/backend_api/get-trace.mdx
diff --git a/docs/reference/backend_api/get-traces.mdx b/docs/developer_guides/reference/backend_api/get-traces.mdx
similarity index 100%
rename from docs/reference/backend_api/get-traces.mdx
rename to docs/developer_guides/reference/backend_api/get-traces.mdx
diff --git a/docs/reference/backend_api/get-user-organization.mdx b/docs/developer_guides/reference/backend_api/get-user-organization.mdx
similarity index 100%
rename from docs/reference/backend_api/get-user-organization.mdx
rename to docs/developer_guides/reference/backend_api/get-user-organization.mdx
diff --git a/docs/reference/backend_api/get-variant-by-env.mdx b/docs/developer_guides/reference/backend_api/get-variant-by-env.mdx
similarity index 100%
rename from docs/reference/backend_api/get-variant-by-env.mdx
rename to docs/developer_guides/reference/backend_api/get-variant-by-env.mdx
diff --git a/docs/reference/backend_api/import-testset.mdx b/docs/developer_guides/reference/backend_api/import-testset.mdx
similarity index 100%
rename from docs/reference/backend_api/import-testset.mdx
rename to docs/developer_guides/reference/backend_api/import-testset.mdx
diff --git a/docs/reference/backend_api/list-app-variants.mdx b/docs/developer_guides/reference/backend_api/list-app-variants.mdx
similarity index 100%
rename from docs/reference/backend_api/list-app-variants.mdx
rename to docs/developer_guides/reference/backend_api/list-app-variants.mdx
diff --git a/docs/reference/backend_api/list-apps.mdx b/docs/developer_guides/reference/backend_api/list-apps.mdx
similarity index 100%
rename from docs/reference/backend_api/list-apps.mdx
rename to docs/developer_guides/reference/backend_api/list-apps.mdx
diff --git a/docs/reference/backend_api/list-bases.mdx b/docs/developer_guides/reference/backend_api/list-bases.mdx
similarity index 100%
rename from docs/reference/backend_api/list-bases.mdx
rename to docs/developer_guides/reference/backend_api/list-bases.mdx
diff --git a/docs/reference/backend_api/list-custom-evaluations.mdx b/docs/developer_guides/reference/backend_api/list-custom-evaluations.mdx
similarity index 100%
rename from docs/reference/backend_api/list-custom-evaluations.mdx
rename to docs/developer_guides/reference/backend_api/list-custom-evaluations.mdx
diff --git a/docs/reference/backend_api/list-environments.mdx b/docs/developer_guides/reference/backend_api/list-environments.mdx
similarity index 100%
rename from docs/reference/backend_api/list-environments.mdx
rename to docs/developer_guides/reference/backend_api/list-environments.mdx
diff --git a/docs/reference/backend_api/list-organizations.mdx b/docs/developer_guides/reference/backend_api/list-organizations.mdx
similarity index 100%
rename from docs/reference/backend_api/list-organizations.mdx
rename to docs/developer_guides/reference/backend_api/list-organizations.mdx
diff --git a/docs/reference/backend_api/openapi.json b/docs/developer_guides/reference/backend_api/openapi.json
similarity index 100%
rename from docs/reference/backend_api/openapi.json
rename to docs/developer_guides/reference/backend_api/openapi.json
diff --git a/docs/reference/backend_api/remove-app.mdx b/docs/developer_guides/reference/backend_api/remove-app.mdx
similarity index 100%
rename from docs/reference/backend_api/remove-app.mdx
rename to docs/developer_guides/reference/backend_api/remove-app.mdx
diff --git a/docs/reference/backend_api/remove-variant.mdx b/docs/developer_guides/reference/backend_api/remove-variant.mdx
similarity index 100%
rename from docs/reference/backend_api/remove-variant.mdx
rename to docs/developer_guides/reference/backend_api/remove-variant.mdx
diff --git a/docs/reference/backend_api/restart-docker-container.mdx b/docs/developer_guides/reference/backend_api/restart-docker-container.mdx
similarity index 100%
rename from docs/reference/backend_api/restart-docker-container.mdx
rename to docs/developer_guides/reference/backend_api/restart-docker-container.mdx
diff --git a/docs/reference/backend_api/save-config.mdx b/docs/developer_guides/reference/backend_api/save-config.mdx
similarity index 100%
rename from docs/reference/backend_api/save-config.mdx
rename to docs/developer_guides/reference/backend_api/save-config.mdx
diff --git a/docs/reference/backend_api/start-variant.mdx b/docs/developer_guides/reference/backend_api/start-variant.mdx
similarity index 100%
rename from docs/reference/backend_api/start-variant.mdx
rename to docs/developer_guides/reference/backend_api/start-variant.mdx
diff --git a/docs/reference/backend_api/testsets/get-testset.mdx b/docs/developer_guides/reference/backend_api/testsets/get-testset.mdx
similarity index 100%
rename from docs/reference/backend_api/testsets/get-testset.mdx
rename to docs/developer_guides/reference/backend_api/testsets/get-testset.mdx
diff --git a/docs/reference/backend_api/testsets/get-testsets.mdx b/docs/developer_guides/reference/backend_api/testsets/get-testsets.mdx
similarity index 100%
rename from docs/reference/backend_api/testsets/get-testsets.mdx
rename to docs/developer_guides/reference/backend_api/testsets/get-testsets.mdx
diff --git a/docs/reference/backend_api/update-custom-evaluation.mdx b/docs/developer_guides/reference/backend_api/update-custom-evaluation.mdx
similarity index 100%
rename from docs/reference/backend_api/update-custom-evaluation.mdx
rename to docs/developer_guides/reference/backend_api/update-custom-evaluation.mdx
diff --git a/docs/reference/backend_api/update-evaluation-router.mdx b/docs/developer_guides/reference/backend_api/update-evaluation-router.mdx
similarity index 100%
rename from docs/reference/backend_api/update-evaluation-router.mdx
rename to docs/developer_guides/reference/backend_api/update-evaluation-router.mdx
diff --git a/docs/reference/backend_api/update-evaluation-scenario-router.mdx b/docs/developer_guides/reference/backend_api/update-evaluation-scenario-router.mdx
similarity index 100%
rename from docs/reference/backend_api/update-evaluation-scenario-router.mdx
rename to docs/developer_guides/reference/backend_api/update-evaluation-scenario-router.mdx
diff --git a/docs/reference/backend_api/update-evaluation-scenario-score-router.mdx b/docs/developer_guides/reference/backend_api/update-evaluation-scenario-score-router.mdx
similarity index 100%
rename from docs/reference/backend_api/update-evaluation-scenario-score-router.mdx
rename to docs/developer_guides/reference/backend_api/update-evaluation-scenario-score-router.mdx
diff --git a/docs/reference/backend_api/update-feedback.mdx b/docs/developer_guides/reference/backend_api/update-feedback.mdx
similarity index 100%
rename from docs/reference/backend_api/update-feedback.mdx
rename to docs/developer_guides/reference/backend_api/update-feedback.mdx
diff --git a/docs/reference/backend_api/update-testset.mdx b/docs/developer_guides/reference/backend_api/update-testset.mdx
similarity index 100%
rename from docs/reference/backend_api/update-testset.mdx
rename to docs/developer_guides/reference/backend_api/update-testset.mdx
diff --git a/docs/reference/backend_api/update-trace-status.mdx b/docs/developer_guides/reference/backend_api/update-trace-status.mdx
similarity index 100%
rename from docs/reference/backend_api/update-trace-status.mdx
rename to docs/developer_guides/reference/backend_api/update-trace-status.mdx
diff --git a/docs/reference/backend_api/update-variant-image.mdx b/docs/developer_guides/reference/backend_api/update-variant-image.mdx
similarity index 100%
rename from docs/reference/backend_api/update-variant-image.mdx
rename to docs/developer_guides/reference/backend_api/update-variant-image.mdx
diff --git a/docs/reference/backend_api/update-variant-parameters.mdx b/docs/developer_guides/reference/backend_api/update-variant-parameters.mdx
similarity index 100%
rename from docs/reference/backend_api/update-variant-parameters.mdx
rename to docs/developer_guides/reference/backend_api/update-variant-parameters.mdx
diff --git a/docs/reference/backend_api/upload-file.mdx b/docs/developer_guides/reference/backend_api/upload-file.mdx
similarity index 100%
rename from docs/reference/backend_api/upload-file.mdx
rename to docs/developer_guides/reference/backend_api/upload-file.mdx
diff --git a/docs/reference/backend_api/user-profile.mdx b/docs/developer_guides/reference/backend_api/user-profile.mdx
similarity index 100%
rename from docs/reference/backend_api/user-profile.mdx
rename to docs/developer_guides/reference/backend_api/user-profile.mdx
diff --git a/docs/reference/backend_api/webhook-example-fake.mdx b/docs/developer_guides/reference/backend_api/webhook-example-fake.mdx
similarity index 100%
rename from docs/reference/backend_api/webhook-example-fake.mdx
rename to docs/developer_guides/reference/backend_api/webhook-example-fake.mdx
diff --git a/docs/sdk/config_datatypes.mdx b/docs/developer_guides/sdk/config_datatypes.mdx
similarity index 66%
rename from docs/sdk/config_datatypes.mdx
rename to docs/developer_guides/sdk/config_datatypes.mdx
index 3f0de2e078..88f3577295 100644
--- a/docs/sdk/config_datatypes.mdx
+++ b/docs/developer_guides/sdk/config_datatypes.mdx
@@ -12,7 +12,7 @@ There are multiple data types that can be used for the parameters in the configu
This display a text area widget in the playground.
```python
-agenta.config.default(prompt_system = ag.TextParam("You are an expert in geography."),
+agenta.config.register_default(prompt_system = ag.TextParam("You are an expert in geography."),
prompt_user = ag.TextParam("What is the capital of {country}?"))
```
@@ -21,7 +21,7 @@ agenta.config.default(prompt_system = ag.TextParam("You are an expert in geograp
This displays a slider widget in the playground with a step 1 (integers).
```python
-agenta.config.default(intval1 = ag.IntParam(default=1, minval=0, maxval=2),
+agenta.config.register_default(intval1 = ag.IntParam(default=1, minval=0, maxval=2),
intval2 = ag.IntParam(1)
```
@@ -31,11 +31,21 @@ This displays a slider widget in the playground with a step 0.1 (float).
```python
-agenta.config.default(temperature = ag.IntParam(default=0.5, minval=0, maxval=2),
+agenta.config.register_default(temperature = ag.IntParam(default=0.5, minval=0, maxval=2),
temperature2 = ag.IntParam(0.5)
```
+### BinaryParam
+This displays a binary switch in the playground.
+
+
+```python
+agenta.config.register_default(temperature = ag.FloatParam(default=0.5, minval=0, maxval=2),
+                     force_json = ag.BinaryParam())
+```
+
+ For now, the binary parameter is always initialized to `False` and can only be changed from the playground.
## Data types for inputs
Inputs in contrast to parameters are given as argument to the function decorated with `@agenta.entrypoint`. They are not part of the configuration but instead are the input in the call to the LLM app.
diff --git a/docs/sdk/config_default.mdx b/docs/developer_guides/sdk/config_default.mdx
similarity index 63%
rename from docs/sdk/config_default.mdx
rename to docs/developer_guides/sdk/config_default.mdx
index fdfea3b474..00d0823226 100644
--- a/docs/sdk/config_default.mdx
+++ b/docs/developer_guides/sdk/config_default.mdx
@@ -1,22 +1,27 @@
---
-title: "config.default()"
+title: "config.register_default()"
description: "Register the default configuration for your application"
---
-`agenta.config.default(**kwargs)`
+```python
+agenta.config.register_default(**kwargs)
+#alias
+agenta.config.default(**kwargs)
+```
+
Set the default configuration for your variant. For instance if you set
```python
-agenta.config.default(prompt = ag.TextParam("Hello world"))
+agenta.config.register_default(prompt = ag.TextParam("Hello world"))
```
This will set the default value of the prompt to "Hello World". This means that the default configuration (the configuration in the variant variant_name.default) will have the value "Hello World".
-agenta.config.default should be used to set the parameters that you are planning to test and iterate on in the playground. For instance if your application has two different prompts in addition to temperature, you might want to use the following:
+agenta.config.register_default should be used to set the parameters that you are planning to test and iterate on in the playground. For instance if your application has two different prompts in addition to temperature, you might want to use the following:
```python
-agenta.config.default(prompt1 = ag.TextParam("my prompt1"),
+agenta.config.register_default(prompt1 = ag.TextParam("my prompt1"),
prompt2 = ag.TextParam("my prompt2"),
temperature = ag.FloatParam(0.5),
temperature2 = ag.FloatParam(0.5))
diff --git a/docs/sdk/config_object.mdx b/docs/developer_guides/sdk/config_object.mdx
similarity index 100%
rename from docs/sdk/config_object.mdx
rename to docs/developer_guides/sdk/config_object.mdx
diff --git a/docs/sdk/config_pull.mdx b/docs/developer_guides/sdk/config_pull.mdx
similarity index 100%
rename from docs/sdk/config_pull.mdx
rename to docs/developer_guides/sdk/config_pull.mdx
diff --git a/docs/sdk/config_push.mdx b/docs/developer_guides/sdk/config_push.mdx
similarity index 100%
rename from docs/sdk/config_push.mdx
rename to docs/developer_guides/sdk/config_push.mdx
diff --git a/docs/sdk/init.mdx b/docs/developer_guides/sdk/init.mdx
similarity index 100%
rename from docs/sdk/init.mdx
rename to docs/developer_guides/sdk/init.mdx
diff --git a/docs/sdk/quick_start.mdx b/docs/developer_guides/sdk/quick_start.mdx
similarity index 82%
rename from docs/sdk/quick_start.mdx
rename to docs/developer_guides/sdk/quick_start.mdx
index 3907013df6..fbf4e6b399 100644
--- a/docs/sdk/quick_start.mdx
+++ b/docs/developer_guides/sdk/quick_start.mdx
@@ -8,7 +8,7 @@ The agenta SDK allows you to experiment with AI applications with LLMs (and in t
The most commenly used functions are:
- `agenta.init` - initialize your variant
-- `agenta.config.default` - set the default configuration
+- `agenta.config.register_default` - set the default configuration
diff --git a/docs/tutorials/a-more-complicated-tutorial-draft.mdx b/docs/developer_guides/tutorials/a-more-complicated-tutorial-draft.mdx
similarity index 99%
rename from docs/tutorials/a-more-complicated-tutorial-draft.mdx
rename to docs/developer_guides/tutorials/a-more-complicated-tutorial-draft.mdx
index 98dc178aac..9fb087365c 100644
--- a/docs/tutorials/a-more-complicated-tutorial-draft.mdx
+++ b/docs/developer_guides/tutorials/a-more-complicated-tutorial-draft.mdx
@@ -7,9 +7,9 @@ In this tutorial, we'll lead you through the process of creating your first Lang
Let's begin.
-## Prerequisites
+## Installation
-This guide assumes you have completed the installation process. If not, please follow our [installation guide](/installation).
+Run `pip install agenta` to install the Agenta CLI.
## 1. Project Initialization
diff --git a/docs/developer_guides/tutorials/build-rag-application.mdx b/docs/developer_guides/tutorials/build-rag-application.mdx
new file mode 100644
index 0000000000..5297df5bc6
--- /dev/null
+++ b/docs/developer_guides/tutorials/build-rag-application.mdx
@@ -0,0 +1,314 @@
+---
+title: RAG application with LlamaIndex
+description: Build a playground to experiment and evaluate with your RAG application
+---
+
+Retrieval Augmented Generation (RAG) is a very useful architecture for grounding the LLM application with your own knowledge base. However, it is not easy to build a robust RAG application that does not hallucinate and answers truthfully.
+
+In this tutorial, we will show how to use a RAG application built with [LlamaIndex](https://www.llamaindex.ai/). We will create a playground based on the RAG application allowing us to quickly test different configurations in a live playground. Then we will evaluate different variants of the RAG application with the playground.
+
+
+[You can find the full code for this tutorial here](https://github.com/Agenta-AI/qa_llama_index_playground)
+
+Let's get started
+
+## What are we building?
+
+Our goal is to build a RAG application. The application takes a transcript of a conversation and a question then returns the answer.
+
+We want to quickly iterate on the configuration of the RAG application and evaluate the performance of each configuration.
+
+Here is a list of parameters we would like to experiment with in the playground:
+
+- How to split the transcript: the separator, the chunk size, and the overlap, and the text splitter to use in LlamaIndex (`TokenTextSplitter` or `SentenceSplitter`)
+- The embedding model to be used (`Davinci`, `Curie`, `Babbage`, `ADA`, `Text_embed_ada_002`)
+- The embedding mode: similarity mode or text search mode
+- The LLM model to be used to generate the final response (`gpt3.5-turbo`, `gpt4`...)
+
+After finishing, we will have a playground where we can experiment with these different parameters live, and compare the outputs between different configuration side-by-side.
+
+In addition, we will be able to run evaluations on the different versions to score them, and later deploy the best version to production, without any overhead.
+
+## Installation and Setup
+
+First, let's make sure that you have the latest version of agenta installed.
+```bash
+pip install -U agenta
+```
+
+Now let's initialize our project
+
+```bash
+agenta init
+```
+
+## Write the core application
+
+The idea behind agenta is to disentangle the core application code from the parameters. So first let's write the core code of the application using some default parameters. Then we will extract the parameters, add them to the configuration and add the agenta lines of code.
+
+### The core application
+
+Let's start by writing a simple application with LlamaIndex.
+
+
+```python
+from llama_index import Document, ServiceContext, VectorStoreIndex
+from llama_index.embeddings.openai import (
+ OpenAIEmbedding,
+ OpenAIEmbeddingMode,
+ OpenAIEmbeddingModelType,
+)
+from llama_index.langchain_helpers.text_splitter import (
+ TokenTextSplitter,
+)
+from llama_index.llms import OpenAI
+from llama_index.text_splitter import TokenTextSplitter
+
+
+def answer_qa(transcript: str, question: str):
+ text_splitter = TokenTextSplitter(
+ separator="\n",
+ chunk_size=1024,
+ chunk_overlap=20,
+ )
+ service_context = ServiceContext.from_defaults(
+ llm=OpenAI(temperature=0.9, model="gpt-3.5-turbo"),
+ embed_model=OpenAIEmbedding(
+ mode=OpenAIEmbeddingMode.SIMILARITY_MODE,
+ model=OpenAIEmbeddingModelType.ADA,
+ ),
+ node_parser=text_splitter,
+ )
+ # build a vector store index from the transcript as message documents
+ index = VectorStoreIndex.from_documents(
+ documents=[Document(text=transcript)], service_context=service_context
+ )
+
+ query_engine = index.as_query_engine(
+ service_context=service_context, response_mode="simple_summarize"
+ )
+
+ response = query_engine.query(question)
+ return response
+
+
+if __name__ == "__main__":
+ with open("transcript", "r") as f:
+ transcript = f.read()
+ question = "What do they say about blackfriday?"
+ response = answer_qa(transcript, question)
+ print(response)
+```
+
+If you are not familiar with LlamaIndex, I encourage you to read the docs [here](https://docs.llamaindex.ai).
+
+However, here is a quick explanation of what is happening in the code above:
+
+```python
+ text_splitter = TokenTextSplitter(
+ separator="\n",
+ chunk_size=1024,
+ chunk_overlap=20,
+ )
+ service_context = ServiceContext.from_defaults(
+ llm=OpenAI(temperature=0.9, model="gpt-3.5-turbo"),
+ embed_model=OpenAIEmbedding(
+ mode=OpenAIEmbeddingMode.SIMILARITY_MODE,
+ model=OpenAIEmbeddingModelType.ADA,
+ ),
+ node_parser=text_splitter,
+ )
+ # build a vector store index from the transcript as message documents
+ index = VectorStoreIndex.from_documents(
+ documents=[Document(text=transcript)], service_context=service_context
+ )
+```
+
+This part is responsible for ingesting the data and building the index. We specify how the input text should be split into chunks in the `text_splitter`, then which model to use for embedding and in the response in `service_context`.
+
+```python
+ query_engine = index.as_query_engine(
+ service_context=service_context, response_mode="simple_summarize"
+ )
+
+ response = query_engine.query(question)
+```
+
+This part is responsible for querying the index and generating the response. We specify the response mode to be `simple_summarize` which is one of the [response modes](https://docs.llamaindex.ai/en/stable/module_guides/deploying/query_engine/response_modes.html) in LlamaIndex. This response mode Truncates all text chunks to fit into a single LLM prompt.
+
+Basically, we are taking the transcript of the call, chunking it and embedding it, then later querying it using the simple_summarize technique, which first embeds the question, retrieves the most similar chunk, creates a prompt for it and summarizes it using the LLM model.
+
+
+## Make it into an agenta application
+
+Now that we have the core application, let's serve it to the agenta platform. In this first step we would not add the parameters yet, we will do that in the next step. We will just add it to agenta to be able to use it in the playground, evaluate it and deploy it.
+
+For this we need three things:
+1. Modifying the code to initialize agenta and specify the entrypoint to the code (which will be converted to an endpoint)
+2. Add a requirements.txt file
+3. Adding the environment variables to a `.env` file
+
+### Modifying the code
+
+We just need to add the following lines to initialize agenta and specify the entrypoint to the code (which will be converted to an endpoint)
+
+```python
+import agenta as ag
+
+ag.init() # This initializes agenta
+
+@ag.entrypoint()
+def answer_qa(transcript: str, question: str):
+ # the rest of the code
+```
+
+`ag.init()` initializes agenta while `@ag.entrypoint()` is a wrapper around Fastapi that creates an entrypoint.
+
+### Adding a requirements.txt file
+
+We need to add a requirements.txt file to specify the dependencies of our application. In our case, we need to add `llama_index` and `agenta` to the requirements.txt file.
+
+```txt
+llama_index
+agenta
+```
+
+### Adding the environment variables to a `.env` file
+
+We need to add the environment variables to a `.env` file. In our case, we need to add the following variables:
+
+```bash
+OPENAI_API_KEY=
+```
+
+### Serving the application to agenta
+Finally, we need to serve the application to agenta. For this we need to run the following command:
+
+```bash
+pip install -U agenta
+agenta init
+agenta variant serve app.py
+```
+
+`agenta init` initializes the llm application in the folder. It creates a `config.yaml` file that contains the configuration of the application.
+
+`agenta variant serve app.py` serves the application to agenta. It sends the code to the platform, which builds a Docker image and deploys the endpoint. Additionally, it is added to the UI.
+
+You should see the following outputs at success of the command:
+
+```bash
+Congratulations! ๐
+Your app has been deployed locally as an API. ๐ You can access it here: https:////.lambda-url.eu-central-1.on.aws/
+
+Read the API documentation. ๐ It's available at: https:////.lambda-url.eu-central-1.on.aws/docs
+
+Start experimenting with your app in the playground. ๐ฎ Go to: https://cloud.agenta.ai/apps//playground
+```
+
+Now you can jump to agenta and find a playground where you can interact with the application.
+
+
+
+
+
+# Adding parameters to the playground
+
+The version we have deployed to the playground does not have any parameters. We can test it, evaluate it, but we cannot modify it and test different configurations.
+
+Let's add a few parameters to the application to improve our playground and serve it again to agenta.
+
+To add a configuration to the application, we just need to register the default in the code after calling `agenta.init()`. When defining the parameters, we need to provide the type to render them correctly in the playground.
+
+```python
+ag.config.register_default(
+    chunk_size=ag.IntParam(1024, 256, 4096),
+    chunk_overlap=ag.IntParam(20, 0, 100),
+    temperature=ag.FloatParam(0.9, 0.0, 1.0),
+ model=ag.MultipleChoiceParam(
+ "gpt-3.5-turbo", ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"]),
+ response_mode=ag.MultipleChoiceParam(
+ "simple_summarize", ["simple_summarize", "refine", "compact", "tree_summarize", "accumulate", "compact_accumulate"]),
+)
+```
+
+What we did here is to add the parameters, and specify the type of each parameter. `IntParam` are integers with a default value, a minimum, maximum in that order. They are rendered as a slider in the playground. `MultipleChoiceParam` are multiple choice parameters with a default value and a list of choices. They are rendered as a dropdown in the playground.
+
+We chose here to select the most important parameters in a RAG. The chunk size, the chunk overlap, the temperature of the LLM model, the LLM model itself, and the response mode (you can see the [documentation of LlamaIndex](https://docs.llamaindex.ai/en/stable/module_guides/deploying/query_engine/response_modes.html) for more details about the response mode).
+
+To use the configuration in the code, you use the variable as `ag.config.<parameter_name>` anywhere in the code. For instance:
+
+```python
+ text_splitter = TokenTextSplitter(
+ separator="\n",
+ chunk_size=ag.config.chunk_size,
+ chunk_overlap=ag.config.chunk_overlap,
+ )
+```
+
+# Putting it all together
+
+Here is how our final code looks like:
+
+```python
+import agenta as ag
+from llama_index import Document, ServiceContext, VectorStoreIndex
+from llama_index.embeddings.openai import (
+ OpenAIEmbedding,
+ OpenAIEmbeddingMode,
+ OpenAIEmbeddingModelType,
+)
+from llama_index.langchain_helpers.text_splitter import (
+ TokenTextSplitter,
+)
+from llama_index.llms import OpenAI
+from llama_index.text_splitter import TokenTextSplitter
+
+ag.init()
+ag.config.register_default(
+ chunk_size=ag.IntParam(1024, 256, 4096),
+ chunk_overlap=ag.IntParam(20, 0, 100),
+    temperature=ag.FloatParam(0.9, 0.0, 1.0),
+ model=ag.MultipleChoiceParam(
+ "gpt-3.5-turbo", ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"]),
+ response_mode=ag.MultipleChoiceParam(
+ "simple_summarize", ["simple_summarize", "refine", "compact", "tree_summarize", "accumulate", "compact_accumulate"]),
+)
+
+@ag.entrypoint
+def answer_qa(transcript: str, question: str):
+ text_splitter = TokenTextSplitter(
+ separator="\n",
+ chunk_size=ag.config.chunk_size,
+ chunk_overlap=ag.config.chunk_overlap,
+ )
+ service_context = ServiceContext.from_defaults(
+ llm=OpenAI(temperature=ag.config.temperature, model=ag.config.model),
+ embed_model=OpenAIEmbedding(
+ mode=OpenAIEmbeddingMode.SIMILARITY_MODE,
+ model=OpenAIEmbeddingModelType.ADA,
+ ),
+ node_parser=text_splitter,
+ )
+ # build a vector store index from the transcript as message documents
+ index = VectorStoreIndex.from_documents(
+ documents=[Document(text=transcript)], service_context=service_context
+ )
+
+ query_engine = index.as_query_engine(
+ service_context=service_context, response_mode=ag.config.response_mode
+ )
+
+ response = query_engine.query(question)
+ return response
+```
+
+Now let's serve it to agenta again:
+
+```bash
+agenta variant serve app.py
+```
+
+
+[You can find the full code for this tutorial here](https://github.com/Agenta-AI/qa_llama_index_playground)
+
+
diff --git a/docs/tutorials/deploy-mistral-model.mdx b/docs/developer_guides/tutorials/deploy-mistral-model.mdx
similarity index 98%
rename from docs/tutorials/deploy-mistral-model.mdx
rename to docs/developer_guides/tutorials/deploy-mistral-model.mdx
index 10ba7a131f..da7e6730fc 100644
--- a/docs/tutorials/deploy-mistral-model.mdx
+++ b/docs/developer_guides/tutorials/deploy-mistral-model.mdx
@@ -1,5 +1,5 @@
---
-title: Deploy Mistral-7B from Hugging Face
+title: Use Mistral-7B from Hugging Face
description: How to deploy an LLM application using Mistral-7B from Hugging Face'
---
@@ -39,7 +39,7 @@ API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1
headers = {"Authorization": "Bearer [Your_Token]"}
ag.init()
-ag.config.default(
+ag.config.register_default(
prompt_template=ag.TextParam("Summarize the following text: {text}"),
)
@@ -116,7 +116,7 @@ agenta variant serve app.py
headers = {"Authorization": f"Bearer [Your_Token]"}
ag.init()
- ag.config.default(
+ ag.config.register_default(
prompt_template=ag.TextParam("Summarize the following text: {text}")
)
diff --git a/docs/tutorials/first-app-with-langchain.mdx b/docs/developer_guides/tutorials/first-app-with-langchain.mdx
similarity index 93%
rename from docs/tutorials/first-app-with-langchain.mdx
rename to docs/developer_guides/tutorials/first-app-with-langchain.mdx
index 4aea792e9d..b6ae630570 100644
--- a/docs/tutorials/first-app-with-langchain.mdx
+++ b/docs/developer_guides/tutorials/first-app-with-langchain.mdx
@@ -4,9 +4,9 @@ title: Simple App with Langchain
This tutorial guides you through writing of your first LLM app using Langchain and Agenta. The objective is to create an app that can produce a persuasive startup pitch, using the startup's name and core idea. By the end of this tutorial, your app will be set up locally and ready for testing and refinement in the playground.
-## Prerequisites
+## Installation
-This guide assumes you have completed the installation process. If not, please follow our [installation guide](/installation).
+Run `pip install agenta` to install the Agenta SDK.
## 1. Project Initialization
@@ -92,7 +92,7 @@ default_prompt = """
startup idea: {startup_idea}"""
ag.init()
-ag.config.default(prompt_template=default_prompt, temperature=0.5)
+ag.config.register_default(prompt_template=default_prompt, temperature=0.5)
@ag.entrypoint
def generate(
@@ -113,7 +113,7 @@ Let's examine how we modified the original code.
```python
ag.init()
-ag.config.default(prompt_template=ag.TextParam(default_prompt), temperature=ag.FloatParam(0.5))
+ag.config.register_default(prompt_template=ag.TextParam(default_prompt), temperature=ag.FloatParam(0.5))
```
These two lines initialize agenta, then set a default configuration for the app.
diff --git a/docs/quickstart/getting-started-ui.mdx b/docs/getting_started/getting-started-ui.mdx
similarity index 93%
rename from docs/quickstart/getting-started-ui.mdx
rename to docs/getting_started/getting-started-ui.mdx
index abd83629c2..9100a0e663 100644
--- a/docs/quickstart/getting-started-ui.mdx
+++ b/docs/getting_started/getting-started-ui.mdx
@@ -7,7 +7,7 @@ description: 'Create and deploy your first LLM app in one minute'
Want a video tutorial instead? We have a 4-minute video for you. [Watch it here](https://youtu.be/plPVrHXQ-DU).
-VIDEO
+VIDEO
## Introduction
@@ -57,4 +57,4 @@ You can now find the API endpoint in the "Endpoints" menu. Copy and paste the co
- Congratulations! You've created your first LLM application. Feel free to modify it, explore its parameters, and discover Agenta's features. Your next steps could include [building an application using your own code](getting-started-code.mdx), or following one of our UI-based tutorials.
+ Congratulations! You've created your first LLM application. Feel free to modify it, explore its parameters, and discover Agenta's features. Your next steps could include [building an application using your own code](/quickstart/getting-started-code.mdx), or following one of our UI-based tutorials.
diff --git a/docs/getting_started/introduction.mdx b/docs/getting_started/introduction.mdx
new file mode 100644
index 0000000000..285ef86737
--- /dev/null
+++ b/docs/getting_started/introduction.mdx
@@ -0,0 +1,73 @@
+---
+title: What is Agenta?
+description: 'The open-source end-to-end LLMOps platform.'
+---
+
+
+
+Agenta is an open-source platform that helps **developers** and **product teams** build robust AI applications powered by LLMs. It offers all the tools for **prompt management and evaluation**.
+
+### With Agenta, you can:
+
+1. Rapidly [**experiment** and **compare** prompts](/basic_guides/prompt_engineering) on [any LLM workflow](/advanced_guides/custom_applications) (chain-of-prompts, Retrieval Augmented Generation (RAG), LLM agents...)
+2. Rapidly [**create test sets**](/basic_guides/test_sets) and **golden datasets** for evaluation
+3. [**Evaluate** your application](/basic_guides/automatic_evaluation) with pre-existing or [**custom evaluators**](/advanced_guides/using_custom_evaluators)
+4. [**Annotate** and **A/B test**](/basic_guides/human_evaluation) your applications with **human feedback**
+5. [**Collaborate with product teams**](/basic_guides/team_management) for prompt engineering and evaluation
+6. [**Deploy your application**](/basic_guides/deployment) in one-click in the UI, through CLI, or through github workflows.
+
+Agenta focuses on increasing the speed of the development cycle of LLM applications by increasing the speed of experimentation.
+
+## How is Agenta different?
+
+### Works with any LLM app workflow
+Agenta enables prompt engineering and evaluation on any LLM app architecture, such as **Chain of Prompts**, **RAG**, or **LLM agents**. It is compatible with any framework like **Langchain** or **LlamaIndex**, and works with any model provider, such as **OpenAI**, **Cohere**, or **local models**.
+
+[Jump here](/advanced_guides/custom_applications) to see how to use your own custom application with Agenta and [here](/developer_guides/how_does_agenta_work) to understand more how Agenta works.
+
+### Enable collaboration between developers and product teams
+
+Agenta empowers **non-developers** to iterate on the configuration of any custom LLM application, evaluate it, annotate it, A/B test it, and deploy it, all within the user interface.
+
+By **adding a few lines to your application code**, you can create a prompt playground that allows non-developers to experiment with prompts for your application and use all the tools within Agenta.
+
+
+## Next Steps
+
+
+
+
+ Create and deploy your first app from the UI in under 2 minutes.
+
+
+
+ Write a custom LLM app and evaluate it in 10 minutes.
+
+
+
+
+## Getting Help
+
+If you have questions or need support, here's how you can reach us.
+
+
+
+ Book a call with a founder for one-on-one guidance on using Agenta.
+
+
+
+ Use the #support channel on Slack to ask questions and get assistance with Agenta.
+
+
diff --git a/docs/images/basic_guides/00_main_page_dark.png b/docs/images/basic_guides/00_main_page_dark.png
new file mode 100644
index 0000000000..f0e0fe92fc
Binary files /dev/null and b/docs/images/basic_guides/00_main_page_dark.png differ
diff --git a/docs/images/basic_guides/00_main_page_light.png b/docs/images/basic_guides/00_main_page_light.png
new file mode 100644
index 0000000000..52e45e9e8e
Binary files /dev/null and b/docs/images/basic_guides/00_main_page_light.png differ
diff --git a/docs/images/basic_guides/01_choose_template_dark.png b/docs/images/basic_guides/01_choose_template_dark.png
new file mode 100644
index 0000000000..301c8807c0
Binary files /dev/null and b/docs/images/basic_guides/01_choose_template_dark.png differ
diff --git a/docs/images/basic_guides/01_choose_template_light.png b/docs/images/basic_guides/01_choose_template_light.png
new file mode 100644
index 0000000000..f35743daa4
Binary files /dev/null and b/docs/images/basic_guides/01_choose_template_light.png differ
diff --git a/docs/images/basic_guides/02_single_prompt_playground_dark.png b/docs/images/basic_guides/02_single_prompt_playground_dark.png
new file mode 100644
index 0000000000..c426c006d9
Binary files /dev/null and b/docs/images/basic_guides/02_single_prompt_playground_dark.png differ
diff --git a/docs/images/basic_guides/02_single_prompt_playground_light.png b/docs/images/basic_guides/02_single_prompt_playground_light.png
new file mode 100644
index 0000000000..20b6974222
Binary files /dev/null and b/docs/images/basic_guides/02_single_prompt_playground_light.png differ
diff --git a/docs/images/basic_guides/03_chat_app_playground_dark.png b/docs/images/basic_guides/03_chat_app_playground_dark.png
new file mode 100644
index 0000000000..ca75e704a3
Binary files /dev/null and b/docs/images/basic_guides/03_chat_app_playground_dark.png differ
diff --git a/docs/images/basic_guides/03_chat_app_playground_light.png b/docs/images/basic_guides/03_chat_app_playground_light.png
new file mode 100644
index 0000000000..d6470eda99
Binary files /dev/null and b/docs/images/basic_guides/03_chat_app_playground_light.png differ
diff --git a/docs/images/basic_guides/04_save_before_deployment_dark.png b/docs/images/basic_guides/04_save_before_deployment_dark.png
new file mode 100644
index 0000000000..a7f3a10422
Binary files /dev/null and b/docs/images/basic_guides/04_save_before_deployment_dark.png differ
diff --git a/docs/images/basic_guides/04_save_before_deployment_light.png b/docs/images/basic_guides/04_save_before_deployment_light.png
new file mode 100644
index 0000000000..19205eeaa4
Binary files /dev/null and b/docs/images/basic_guides/04_save_before_deployment_light.png differ
diff --git a/docs/images/basic_guides/05_select_environment_to_publish_dark.png b/docs/images/basic_guides/05_select_environment_to_publish_dark.png
new file mode 100644
index 0000000000..450606c742
Binary files /dev/null and b/docs/images/basic_guides/05_select_environment_to_publish_dark.png differ
diff --git a/docs/images/basic_guides/05_select_environment_to_publish_light.png b/docs/images/basic_guides/05_select_environment_to_publish_light.png
new file mode 100644
index 0000000000..bcce033dc6
Binary files /dev/null and b/docs/images/basic_guides/05_select_environment_to_publish_light.png differ
diff --git a/docs/images/basic_guides/06_choose_evironment_to_deploy_dark.png b/docs/images/basic_guides/06_choose_evironment_to_deploy_dark.png
new file mode 100644
index 0000000000..3a7516052f
Binary files /dev/null and b/docs/images/basic_guides/06_choose_evironment_to_deploy_dark.png differ
diff --git a/docs/images/basic_guides/06_choose_evironment_to_deploy_light.png b/docs/images/basic_guides/06_choose_evironment_to_deploy_light.png
new file mode 100644
index 0000000000..a177fcc780
Binary files /dev/null and b/docs/images/basic_guides/06_choose_evironment_to_deploy_light.png differ
diff --git a/docs/images/basic_guides/07_select_app_code_dark.png b/docs/images/basic_guides/07_select_app_code_dark.png
new file mode 100644
index 0000000000..1a7b93720d
Binary files /dev/null and b/docs/images/basic_guides/07_select_app_code_dark.png differ
diff --git a/docs/images/basic_guides/07_select_app_code_light.png b/docs/images/basic_guides/07_select_app_code_light.png
new file mode 100644
index 0000000000..024e378f5d
Binary files /dev/null and b/docs/images/basic_guides/07_select_app_code_light.png differ
diff --git a/docs/images/basic_guides/08_add_new_input_dark.png b/docs/images/basic_guides/08_add_new_input_dark.png
new file mode 100644
index 0000000000..74cb015ccb
Binary files /dev/null and b/docs/images/basic_guides/08_add_new_input_dark.png differ
diff --git a/docs/images/basic_guides/08_add_new_input_light.png b/docs/images/basic_guides/08_add_new_input_light.png
new file mode 100644
index 0000000000..7a96724740
Binary files /dev/null and b/docs/images/basic_guides/08_add_new_input_light.png differ
diff --git a/docs/images/basic_guides/09_add_variant_dark.png b/docs/images/basic_guides/09_add_variant_dark.png
new file mode 100644
index 0000000000..e15194959d
Binary files /dev/null and b/docs/images/basic_guides/09_add_variant_dark.png differ
diff --git a/docs/images/basic_guides/09_add_variant_light.png b/docs/images/basic_guides/09_add_variant_light.png
new file mode 100644
index 0000000000..d1390840e5
Binary files /dev/null and b/docs/images/basic_guides/09_add_variant_light.png differ
diff --git a/docs/images/basic_guides/10_testing_a_variant_dark.png b/docs/images/basic_guides/10_testing_a_variant_dark.png
new file mode 100644
index 0000000000..905e4d4798
Binary files /dev/null and b/docs/images/basic_guides/10_testing_a_variant_dark.png differ
diff --git a/docs/images/basic_guides/10_testing_a_variant_light.png b/docs/images/basic_guides/10_testing_a_variant_light.png
new file mode 100644
index 0000000000..227dccbccf
Binary files /dev/null and b/docs/images/basic_guides/10_testing_a_variant_light.png differ
diff --git a/docs/images/basic_guides/11_side_by_side_chat_mode_dark.png b/docs/images/basic_guides/11_side_by_side_chat_mode_dark.png
new file mode 100644
index 0000000000..93677ca92b
Binary files /dev/null and b/docs/images/basic_guides/11_side_by_side_chat_mode_dark.png differ
diff --git a/docs/images/basic_guides/11_side_by_side_chat_mode_light.png b/docs/images/basic_guides/11_side_by_side_chat_mode_light.png
new file mode 100644
index 0000000000..705c432e39
Binary files /dev/null and b/docs/images/basic_guides/11_side_by_side_chat_mode_light.png differ
diff --git a/docs/images/basic_guides/12_workspace_config_dark.png b/docs/images/basic_guides/12_workspace_config_dark.png
new file mode 100644
index 0000000000..c9aa37fcfe
Binary files /dev/null and b/docs/images/basic_guides/12_workspace_config_dark.png differ
diff --git a/docs/images/basic_guides/12_workspace_config_light.png b/docs/images/basic_guides/12_workspace_config_light.png
new file mode 100644
index 0000000000..16a72b1b41
Binary files /dev/null and b/docs/images/basic_guides/12_workspace_config_light.png differ
diff --git a/docs/images/basic_guides/13_workspace_tab_dark.png b/docs/images/basic_guides/13_workspace_tab_dark.png
new file mode 100644
index 0000000000..18ad95ff2a
Binary files /dev/null and b/docs/images/basic_guides/13_workspace_tab_dark.png differ
diff --git a/docs/images/basic_guides/13_workspace_tab_light.png b/docs/images/basic_guides/13_workspace_tab_light.png
new file mode 100644
index 0000000000..745474e74a
Binary files /dev/null and b/docs/images/basic_guides/13_workspace_tab_light.png differ
diff --git a/docs/images/basic_guides/14_playground_drawer_dark.png b/docs/images/basic_guides/14_playground_drawer_dark.png
new file mode 100644
index 0000000000..db85d23918
Binary files /dev/null and b/docs/images/basic_guides/14_playground_drawer_dark.png differ
diff --git a/docs/images/basic_guides/14_playground_drawer_light.png b/docs/images/basic_guides/14_playground_drawer_light.png
new file mode 100644
index 0000000000..a55ee90a68
Binary files /dev/null and b/docs/images/basic_guides/14_playground_drawer_light.png differ
diff --git a/docs/images/tutorial-rag-application/tut_llama_index_1.png b/docs/images/tutorial-rag-application/tut_llama_index_1.png
new file mode 100644
index 0000000000..66a33a7027
Binary files /dev/null and b/docs/images/tutorial-rag-application/tut_llama_index_1.png differ
diff --git a/docs/learn/architecture.mdx b/docs/learn/architecture.mdx
deleted file mode 100644
index 5f3e66804e..0000000000
--- a/docs/learn/architecture.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: "System Architecture"
-description: "An overview of the architecture of Agenta"
----
-
-
-
-
-
-## Concepts behind agenta
-
-The main concept behind agenta is the separation of the application logic and configuration. The application logic refers to the code that defines the application, whether it's a simple prompt, chain of prompts, RAG, etc. The configuration refers to the parameters that define the application logic, such as the prompt, model, chunk size, etc.
-
-By separating the application logic from the configuration. We simplify the process of iterating on application variants, and enable the collaboration between developers (working from code/cli) and non-developers (working from the UI).
-
-## Architecture Components
-
-### The Application
-
-The application describes the logic written in Python code. An application can be created from a pre-built template in the UI or from code in the CLI. In either case, a new container with the application code is launched. The application can then be accessed via a REST API.
-
-Each application has a default configuration specified in its code. This default configuration can be overridden by the user in the UI or the CLI. Additionally, the user can create new configurations from the UI or the CLI. Each new configuration results in the creation of a new application variant, which is a combination of the application logic and a configuration. A single project can house many variants encompassing multiple application logics and configurations.
-
-## The Backend
-Agenta's backend manages applications and configurations. It is responsible for building images, deploying containers, and managing configurations and prompts for the application.
-
-## The Frontend / UI
-The frontend provides tools to create new applications from a template, create and edit configurations, run evaluations, and deploy applications to different environments (e.g., staging, production, etc.).
-
-## The CLI
-The CLI offers the same capabilities as the frontend. Additionally, it allows for the creation of custom applications not available as templates. When serving a new application from the CLI, Agenta handles container creation and deployment. After creating a new application, users can edit its configuration and evaluate it in the UI.
-
-## The SDK
-The SDK is a Python library used to create new applications from code. It manages the saving of the default configuration, creation of the REST API, and necessary actions to create a playground and integrate the application with the Agenta platform.
diff --git a/docs/learn/prompt_engineering.mdx b/docs/learn/prompt_engineering.mdx
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/docs/mint.json b/docs/mint.json
index b6c2ff4b35..4d39930e84 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -21,7 +21,12 @@
"suggestEdit": true,
"raiseIssue": true
},
- "topbarLinks": [],
+ "topbarLinks": [
+ {
+ "name": "Request a demo",
+ "url": "https://cal.com/mahmoud-mabrouk-ogzgey/demo"
+ }
+ ],
"topbarCtaButton": {
"name": "Start for Free",
"url": "https://cloud.agenta.ai"
@@ -50,87 +55,106 @@
],
"tabs": [
{
- "name": "Reference",
- "url": "reference"
+ "name": "For Developers",
+ "url": "developer_guides"
+ },
+ {
+ "name": "Cookbook",
+ "url": "cookbook"
+ },
+ {
+ "name": "Changelog",
+ "url": "changelog"
+ },
+ {
+ "name": "Roadmap",
+ "url": "https://github.com/orgs/Agenta-AI/projects/13/views/1"
}
],
"navigation": [
{
"group": "Getting Started",
"pages": [
- "quickstart/introduction",
- "quickstart/how-agenta-works",
- "quickstart/getting-started-ui",
- "quickstart/getting-started-code"
+ "getting_started/introduction",
+ "getting_started/getting-started-ui"
]
},
{
- "group": "Tutorials",
+ "group": "Basic Guides",
"pages": [
- "tutorials/first-app-with-langchain",
- "tutorials/deploy-mistral-model"
+ "basic_guides/creating_an_app",
+ "basic_guides/prompt_engineering",
+ "basic_guides/test_sets",
+ "basic_guides/automatic_evaluation",
+ "basic_guides/deployment",
+ "basic_guides/team_management"
]
},
{
- "group": "How-to Guides",
+ "group": "Advanced Guides",
"pages": [
- "howto/use-a-custom-llm",
- "howto/creating-multiple-app-variants",
- "howto/how-to-debug"
+ "advanced_guides/custom_applications",
+ "advanced_guides/using_agenta_from_cli"
]
},
{
- "group": "Learn",
- "pages": [
- "learn/the_llmops_workflow",
- "learn/evaluating_llm_apps",
- "learn/architecture"
- ]
- },
- {
- "group": "Python SDK",
+ "group": "Self-host agenta",
"pages": [
- "sdk/quick_start",
+ "self-host/host-locally",
{
- "group": "Core Functions",
+ "group": "Deploy Remotely",
"pages": [
- "sdk/init",
- "sdk/config_object",
- "sdk/config_default",
- "sdk/config_pull",
- "sdk/config_push",
- "sdk/config_datatypes"
+ "self-host/host-remotely",
+ "self-host/host-on-aws",
+ "self-host/host-on-gcp",
+ "self-host/host-on-kubernetes"
]
}
]
},
{
- "group": "Command Line",
+ "group": "Introduction",
+ "pages": [
+ "developer_guides/how_does_agenta_work"
+ ]
+ },
+ {
+ "group": "Tutorials",
+ "pages": [
+ "developer_guides/tutorials/first-app-with-langchain",
+ "developer_guides/tutorials/build-rag-application",
+ "developer_guides/tutorials/deploy-mistral-model"
+ ]
+ },
+ {
+ "group": "Python SDK",
"pages": [
- "cli/install",
- "cli/quick-usage",
+ "developer_guides/sdk/quick_start",
{
- "group": "Core commands",
+ "group": "Core Functions",
"pages": [
- "cli/init",
- "cli/variant_serve",
- "cli/variant_list",
- "cli/variant_remove"
+ "developer_guides/sdk/init",
+ "developer_guides/sdk/config_object",
+ "developer_guides/sdk/config_default",
+ "developer_guides/sdk/config_pull",
+ "developer_guides/sdk/config_push",
+ "developer_guides/sdk/config_datatypes"
]
}
]
},
{
- "group": "Self-host agenta",
+ "group": "Command Line",
"pages": [
- "self-host/host-locally",
+ "developer_guides/cli/install",
+ "developer_guides/cli/quick-usage",
{
- "group": "Deploy Remotely",
+ "group": "Core commands",
"pages": [
- "self-host/host-remotely",
- "self-host/host-on-aws",
- "self-host/host-on-gcp",
- "self-host/host-on-kubernetes"
+ "developer_guides/cli/init",
+ "developer_guides/cli/variant_serve",
+ "developer_guides/cli/variant_list",
+ "developer_guides/cli/variant_remove"
]
}
]
@@ -138,9 +162,9 @@
{
"group": "Contributing",
"pages": [
- "contributing/getting-started",
- "contributing/development-mode",
- "contributing/file-issue"
+ "developer_guides/contributing/getting-started",
+ "developer_guides/contributing/development-mode",
+ "developer_guides/contributing/file-issue"
]
},
{
@@ -149,74 +173,87 @@
{
"group": "Backend API",
"pages": [
- "reference/backend_api/user-profile",
- "reference/backend_api/list-app-variants",
- "reference/backend_api/get-variant-by-env",
- "reference/backend_api/list-apps",
- "reference/backend_api/create-app",
- "reference/backend_api/add-variant-from-image",
- "reference/backend_api/remove-app",
- "reference/backend_api/create-app-and-variant-from-template",
- "reference/backend_api/list-environments",
- "reference/backend_api/add-variant-from-base-and-config",
- "reference/backend_api/start-variant",
- "reference/backend_api/remove-variant",
- "reference/backend_api/update-variant-parameters",
- "reference/backend_api/update-variant-image",
- "reference/backend_api/fetch-list-evaluations",
- "reference/backend_api/create-evaluation",
- "reference/backend_api/delete-evaluations",
- "reference/backend_api/fetch-evaluation",
- "reference/backend_api/update-evaluation-router",
- "reference/backend_api/fetch-evaluation-scenarios",
- "reference/backend_api/create-evaluation-scenario",
- "reference/backend_api/update-evaluation-scenario-router",
- "reference/backend_api/evaluate-ai-critique",
- "reference/backend_api/get-evaluation-scenario-score-router",
- "reference/backend_api/update-evaluation-scenario-score-router",
- "reference/backend_api/fetch-results",
- "reference/backend_api/create-custom-evaluation",
- "reference/backend_api/update-custom-evaluation",
- "reference/backend_api/list-custom-evaluations",
- "reference/backend_api/get-custom-evaluation",
- "reference/backend_api/get-custom-evaluation-names",
- "reference/backend_api/execute-custom-evaluation",
- "reference/backend_api/webhook-example-fake",
- "reference/backend_api/upload-file",
- "reference/backend_api/import-testset",
- "reference/backend_api/create-testset",
+ "developer_guides/reference/backend_api/user-profile",
+ "developer_guides/reference/backend_api/list-app-variants",
+ "developer_guides/reference/backend_api/get-variant-by-env",
+ "developer_guides/reference/backend_api/list-apps",
+ "developer_guides/reference/backend_api/create-app",
+ "developer_guides/reference/backend_api/add-variant-from-image",
+ "developer_guides/reference/backend_api/remove-app",
+ "developer_guides/reference/backend_api/create-app-and-variant-from-template",
+ "developer_guides/reference/backend_api/list-environments",
+ "developer_guides/reference/backend_api/add-variant-from-base-and-config",
+ "developer_guides/reference/backend_api/start-variant",
+ "developer_guides/reference/backend_api/remove-variant",
+ "developer_guides/reference/backend_api/update-variant-parameters",
+ "developer_guides/reference/backend_api/update-variant-image",
+ "developer_guides/reference/backend_api/fetch-list-evaluations",
+ "developer_guides/reference/backend_api/create-evaluation",
+ "developer_guides/reference/backend_api/delete-evaluations",
+ "developer_guides/reference/backend_api/fetch-evaluation",
+ "developer_guides/reference/backend_api/update-evaluation-router",
+ "developer_guides/reference/backend_api/fetch-evaluation-scenarios",
+ "developer_guides/reference/backend_api/create-evaluation-scenario",
+ "developer_guides/reference/backend_api/update-evaluation-scenario-router",
+ "developer_guides/reference/backend_api/evaluate-ai-critique",
+ "developer_guides/reference/backend_api/get-evaluation-scenario-score-router",
+ "developer_guides/reference/backend_api/update-evaluation-scenario-score-router",
+ "developer_guides/reference/backend_api/fetch-results",
+ "developer_guides/reference/backend_api/create-custom-evaluation",
+ "developer_guides/reference/backend_api/update-custom-evaluation",
+ "developer_guides/reference/backend_api/list-custom-evaluations",
+ "developer_guides/reference/backend_api/get-custom-evaluation",
+ "developer_guides/reference/backend_api/get-custom-evaluation-names",
+ "developer_guides/reference/backend_api/execute-custom-evaluation",
+ "developer_guides/reference/backend_api/webhook-example-fake",
+ "developer_guides/reference/backend_api/upload-file",
+ "developer_guides/reference/backend_api/import-testset",
+ "developer_guides/reference/backend_api/create-testset",
{
"group": "testsets",
"pages": [
- "reference/backend_api/testsets/get-testset",
- "reference/backend_api/testsets/get-testsets"
+ "developer_guides/reference/backend_api/testsets/get-testset",
+ "developer_guides/reference/backend_api/testsets/get-testsets"
]
},
- "reference/backend_api/update-testset",
- "reference/backend_api/delete-testsets",
- "reference/backend_api/build-image",
- "reference/backend_api/restart-docker-container",
- "reference/backend_api/container-templates",
- "reference/backend_api/construct-app-container-url",
- "reference/backend_api/deploy-to-environment",
- "reference/backend_api/create-trace",
- "reference/backend_api/get-traces",
- "reference/backend_api/get-trace",
- "reference/backend_api/update-trace-status",
- "reference/backend_api/create-span",
- "reference/backend_api/get-spans-of-trace",
- "reference/backend_api/get-feedbacks",
- "reference/backend_api/create-feedback",
- "reference/backend_api/get-feedback",
- "reference/backend_api/update-feedback",
- "reference/backend_api/list-organizations",
- "reference/backend_api/get-user-organization",
- "reference/backend_api/list-bases",
- "reference/backend_api/get-config",
- "reference/backend_api/save-config"
+ "developer_guides/reference/backend_api/update-testset",
+ "developer_guides/reference/backend_api/delete-testsets",
+ "developer_guides/reference/backend_api/build-image",
+ "developer_guides/reference/backend_api/restart-docker-container",
+ "developer_guides/reference/backend_api/container-templates",
+ "developer_guides/reference/backend_api/construct-app-container-url",
+ "developer_guides/reference/backend_api/deploy-to-environment",
+ "developer_guides/reference/backend_api/create-trace",
+ "developer_guides/reference/backend_api/get-traces",
+ "developer_guides/reference/backend_api/get-trace",
+ "developer_guides/reference/backend_api/update-trace-status",
+ "developer_guides/reference/backend_api/create-span",
+ "developer_guides/reference/backend_api/get-spans-of-trace",
+ "developer_guides/reference/backend_api/get-feedbacks",
+ "developer_guides/reference/backend_api/create-feedback",
+ "developer_guides/reference/backend_api/get-feedback",
+ "developer_guides/reference/backend_api/update-feedback",
+ "developer_guides/reference/backend_api/list-organizations",
+ "developer_guides/reference/backend_api/get-user-organization",
+ "developer_guides/reference/backend_api/list-bases",
+ "developer_guides/reference/backend_api/get-config",
+ "developer_guides/reference/backend_api/save-config"
]
}
]
+ },
+ {
+ "group": "Changelog",
+ "pages": [
+ "changelog/main"
+ ]
+ },
+ {
+ "group": "Cookbook",
+ "pages": [
+ "cookbook/list_templates",
+ "cookbook/extract_job_information"
+ ]
}
],
"api": {
diff --git a/docs/quickstart/introduction.mdx b/docs/quickstart/introduction.mdx
deleted file mode 100644
index cc762fc16b..0000000000
--- a/docs/quickstart/introduction.mdx
+++ /dev/null
@@ -1,69 +0,0 @@
----
-title: What is Agenta?
-description: 'The open source end-to-end LLMOps platform.'
----
-
-
-
-Agenta is an open-source end-to-end platform that helps **developers** and **product teams** build and maintain robust AI applications powered with Large Language Models (LLMs). .
-
-With agenta, you can:
-
-1. Quickly **experiment** and **compare** prompts, configurations and complex pipelines (chains, Retrieval Augmented Generation (RAG), agents...)
-2. Quickly **create test sets** and **golden datasets** for evaluation
-3. **Evaluate** your applications with pre-existing or **custom Evaluators**
-4. **Annotate** and **A/B test** your applications with human feedback
-5. **Collaborate with product teams** for prompt engineering and evaluation
-6. **Deploy** your application in one-click in the UI, through CLI, or through github workflows.
-
-Agenta focuses on increasing the speed of the development cycle of LLM applications by increasing the speed of iteration.
-
-Agenta integrates with all frameworks and model providers in the ecosystem, such as [Langchain](https://langchain.com), [LlamaIndex](https://www.llamaindex.ai/), [OpenAI](https://openai.com), [Cohere](https://cohere.ai), [Mistral](https://mistral.ai/), [Huggingface](https://huggingface.co/), and self-hosted open source LLMs such as the one served using [vLLM](https://github.com/vllm-project/vllm)
-
-
-## Next Steps
-
-
-
- Learn the main concepts behind agenta.
-
-
-
- Create and deploy your first app from the UI in under 2 minutes.
-
-
-
- Write a custom LLM app and evaluate it in 10 minutes.
-
-
-
-
-## Getting Help
-
-If you have questions or need support, here's how you can reach us. We'd also โค๏ธ your support.
-
-
-
- Book a call with a founder for one-on-one guidance on using agenta.
-
-
-
- Use the #support channel on Slack to ask questions and get assistance with Agenta.
-
-
diff --git a/docs/self-host/host-locally.mdx b/docs/self-host/host-locally.mdx
index d48f599bfc..798c7848ec 100644
--- a/docs/self-host/host-locally.mdx
+++ b/docs/self-host/host-locally.mdx
@@ -51,6 +51,6 @@ Open your browser and go to [http://localhost](http://localhost). If you see the
## What's next?
You're all set to start using Agenta!
-
+
Click here to build your first LLM app in just 1 minute.
diff --git a/examples/baby_name_generator/app.py b/examples/baby_name_generator/app.py
index b51c96ee95..49e4aced68 100644
--- a/examples/baby_name_generator/app.py
+++ b/examples/baby_name_generator/app.py
@@ -1,8 +1,8 @@
+from agenta import FloatParam, TextParam
import agenta as ag
from openai import OpenAI
client = OpenAI()
-from agenta import FloatParam, TextParam
default_prompt = (
"Give me 10 names for a baby from this country {country} with gender {gender}!!!!"
diff --git a/examples/baby_name_generator/app_async.py b/examples/baby_name_generator/app_async.py
new file mode 100644
index 0000000000..d28335aa9e
--- /dev/null
+++ b/examples/baby_name_generator/app_async.py
@@ -0,0 +1,34 @@
+from agenta import FloatParam, TextParam
+import agenta as ag
+from openai import AsyncOpenAI
+
+client = AsyncOpenAI()
+
+default_prompt = (
+ "Give me 10 names for a baby from this country {country} with gender {gender}!!!!"
+)
+
+ag.init()
+ag.config.default(
+ temperature=FloatParam(0.2), prompt_template=TextParam(default_prompt)
+)
+
+
+@ag.entrypoint
+async def generate(country: str, gender: str) -> str:
+ """
+ Generate a baby name based on the given country and gender.
+
+ Args:
+ country (str): The country to generate the name from.
+ gender (str): The gender of the baby.
+
+ Returns:
+ str: The generated baby name.
+ """
+ prompt = ag.config.prompt_template.format(country=country, gender=gender)
+
+ chat_completion = await client.chat.completions.create(
+ model="gpt-3.5-turbo", messages=[{"role": "user", "content": prompt}]
+ )
+ return chat_completion.choices[0].message.content
diff --git a/examples/chat_json_format/app.py b/examples/chat_json_format/app.py
new file mode 100644
index 0000000000..a47f659c35
--- /dev/null
+++ b/examples/chat_json_format/app.py
@@ -0,0 +1,42 @@
+import agenta as ag
+from openai import OpenAI
+
+client = OpenAI()
+
+SYSTEM_PROMPT = "You have expertise in offering technical ideas to startups. Responses should be in json."
+GPT_FORMAT_RESPONSE = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview"]
+CHAT_LLM_GPT = [
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k-0613",
+ "gpt-4",
+] + GPT_FORMAT_RESPONSE
+
+ag.init()
+ag.config.default(
+ temperature=ag.FloatParam(0.2),
+ model=ag.MultipleChoiceParam("gpt-3.5-turbo", CHAT_LLM_GPT),
+ max_tokens=ag.IntParam(-1, -1, 4000),
+ prompt_system=ag.TextParam(SYSTEM_PROMPT),
+ force_json_response=ag.BinaryParam(False),
+)
+
+
+@ag.entrypoint
+def chat(inputs: ag.MessagesInput = ag.MessagesInput()):
+ messages = [{"role": "system", "content": ag.config.prompt_system}] + inputs
+ max_tokens = ag.config.max_tokens if ag.config.max_tokens != -1 else None
+ response_format = (
+ {"type": "json_object"}
+ if ag.config.force_json_response and ag.config.model in GPT_FORMAT_RESPONSE
+ else {"type": "text"}
+ )
+ chat_completion = client.chat.completions.create(
+ model=ag.config.model,
+ messages=messages,
+ temperature=ag.config.temperature,
+ max_tokens=max_tokens,
+ response_format=response_format,
+ )
+ return chat_completion.choices[0].message.content
diff --git a/examples/chat_json_format/requirements.txt b/examples/chat_json_format/requirements.txt
new file mode 100644
index 0000000000..310f162cec
--- /dev/null
+++ b/examples/chat_json_format/requirements.txt
@@ -0,0 +1,2 @@
+agenta
+openai
\ No newline at end of file
diff --git a/examples/job_info_extractor/README.md b/examples/job_info_extractor/README.md
index 757455e2ca..4b0b51cc61 100644
--- a/examples/job_info_extractor/README.md
+++ b/examples/job_info_extractor/README.md
@@ -1,9 +1,49 @@
-# Using this template
+# Extraction using OpenAI Functions and Langchain
-Please make sure to create a `.env` file with your OpenAI API key before running the app.
-OPENAI_API_KEY=sk-xxxxxxx
-You can find your keys here:
-https://platform.openai.com/account/api-keys
+This template is designed to extract job information (company name, job
+title, salary range) from a job description. It uses OpenAI Functions and
+Langchain. It runs with Agenta.
+[Agenta](https://github.com/agenta-ai/agenta) is an open-source LLMOps
+platform that allows you to 1) quickly experiment with and compare
+configurations for LLM apps, 2) evaluate prompts and workflows, and 3) deploy
+applications easily.
+
+## How to use
+### 0. Prerequisites
+- Install the agenta CLI
+```bash
+pip install agenta-cli
+```
+- Either create an account in [agenta cloud](https://cloud.agenta.ai/) or
+[self-host agenta](/self-host/host-locally)
+
+### 1. Clone the repository
+
+```bash
+git clone https://github.com/Agenta-AI/job_extractor_template
+```
+
+### 2. Initialize the project
+
+```bash
+agenta init
+```
+
+### 3. Set up your OpenAI API key
+Create a .env file by copying the .env.example file and add your OpenAI
+API key to it.
+```bash
+OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxx
+```
+
+### 4. Deploy the application to agenta
+
+```bash
+agenta variant serve app.py
+```
+
+### 5. Experiment with the prompts in a playground and evaluate different variants in agenta
+
+https://github.com/Agenta-AI/job_extractor_template/assets/4510758/30271188-8d46-4d02-8207-ddb60ad0e284
-Go back to the [Getting started tutorial](https://docs.agenta.ai/getting-started) to continue
\ No newline at end of file
diff --git a/examples/job_info_extractor/app.py b/examples/job_info_extractor/app.py
index 4ccd38a583..8d4dd21941 100644
--- a/examples/job_info_extractor/app.py
+++ b/examples/job_info_extractor/app.py
@@ -11,11 +11,17 @@
from pydantic import BaseModel, Field
-default_prompt = "What is a good name for a company that makes {product}?"
+CHAT_LLM_GPT = [
+ "gpt-3.5-turbo-16k-0613",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo",
+ "gpt-4",
+]
ag.init()
ag.config.default(
- prompt_template=ag.TextParam(default_prompt),
system_message=ag.TextParam(
"You are a world class algorithm for extracting information in structured formats."
),
@@ -26,10 +32,11 @@
company_desc_message=ag.TextParam("The name of the company"),
position_desc_message=ag.TextParam("The name of the position"),
salary_range_desc_message=ag.TextParam("The salary range of the position"),
- temperature=ag.FloatParam(0.5),
- top_p=ag.FloatParam(1.0),
+ temperature=ag.FloatParam(0.9),
+ top_p=ag.FloatParam(0.9),
presence_penalty=ag.FloatParam(0.0),
frequency_penalty=ag.FloatParam(0.0),
+ model=ag.MultipleChoiceParam("gpt-3.5-turbo-0613", CHAT_LLM_GPT),
)
@@ -50,7 +57,7 @@ def generate(
) -> str:
"""Extract information from a job description"""
llm = ChatOpenAI(
- model="gpt-3.5-turbo-0613",
+ model=ag.config.model,
temperature=ag.config.temperature,
top_p=ag.config.top_p,
presence_penalty=ag.config.presence_penalty,