From 96f26cc3a0d9279185ae29c78839a00ea9f0514f Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Sun, 26 May 2024 13:09:15 +0100
Subject: [PATCH 001/268] added evaluation error modal component
---
.../EvaluationErrorModal.tsx | 61 +++++++++++++++++++
1 file changed, 61 insertions(+)
create mode 100644 agenta-web/src/components/pages/evaluations/EvaluationErrorModal/EvaluationErrorModal.tsx
diff --git a/agenta-web/src/components/pages/evaluations/EvaluationErrorModal/EvaluationErrorModal.tsx b/agenta-web/src/components/pages/evaluations/EvaluationErrorModal/EvaluationErrorModal.tsx
new file mode 100644
index 0000000000..4262353296
--- /dev/null
+++ b/agenta-web/src/components/pages/evaluations/EvaluationErrorModal/EvaluationErrorModal.tsx
@@ -0,0 +1,61 @@
+import {JSSTheme} from "@/lib/Types"
+import {ExclamationCircleOutlined} from "@ant-design/icons"
+import {Modal, Typography} from "antd"
+import React from "react"
+import {createUseStyles} from "react-jss"
+
+interface EvaluationErrorModalProps {
+ isErrorModalOpen: boolean
+ setIsErrorModalOpen: (value: React.SetStateAction) => void
+ modalErrorMsg: {
+ message: string
+ stackTrace: string
+ }
+}
+
+const useStyles = createUseStyles((theme: JSSTheme) => ({
+ errModalStackTrace: {
+ "& code": {
+ display: "block",
+ },
+ },
+}))
+
+const EvaluationErrorModal = ({
+ isErrorModalOpen,
+ setIsErrorModalOpen,
+ modalErrorMsg,
+}: EvaluationErrorModalProps) => {
+ const classes = useStyles()
+
+ return (
+
+
+ Error
+ >
+ }
+ onCancel={() => setIsErrorModalOpen(false)}
+ >
+
+ Failed to invoke the LLM application with the following exception:
+
+ {modalErrorMsg.message && (
+
+ {modalErrorMsg.message}
+
+ )}
+ {modalErrorMsg.stackTrace && (
+
+ {modalErrorMsg.stackTrace}
+
+ )}
+
+ )
+}
+
+export default EvaluationErrorModal
From cc4fe9cc0f34b56aa1d50804f58e1f37c11d0900 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Sun, 26 May 2024 13:56:03 +0100
Subject: [PATCH 002/268] improved error handling in evaluation views
---
.../cellRenderers/cellRenderers.tsx | 37 +++++++++++++----
.../evaluationCompare/EvaluationCompare.tsx | 41 ++++++++++++++++++-
.../evaluationResults/EvaluationResults.tsx | 17 +++++++-
.../EvaluationScenarios.tsx | 11 +++++
4 files changed, 95 insertions(+), 11 deletions(-)
diff --git a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
index 94a468f599..17f3b9de26 100644
--- a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
+++ b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
@@ -13,7 +13,7 @@ import {
InfoCircleOutlined,
} from "@ant-design/icons"
import {ICellRendererParams} from "ag-grid-community"
-import {GlobalToken, Space, Tooltip, Typography, message, theme} from "antd"
+import {Button, GlobalToken, Space, Tooltip, Typography, message, theme} from "antd"
import dayjs from "dayjs"
import relativeTime from "dayjs/plugin/relativeTime"
import duration from "dayjs/plugin/duration"
@@ -137,19 +137,40 @@ export function LongTextCellRenderer(params: ICellRendererParams, output?: any)
}
export const ResultRenderer = React.memo(
- (params: ICellRendererParams<_EvaluationScenario> & {config: EvaluatorConfig}) => {
+ (
+ params: ICellRendererParams<_EvaluationScenario> & {
+ config: EvaluatorConfig
+ setIsErrorModalOpen: React.Dispatch>
+ setModalErrorMsg: React.Dispatch<
+ React.SetStateAction<{
+ message: string
+ stackTrace: string
+ }>
+ >
+ },
+ ) => {
+ const {setIsErrorModalOpen, setModalErrorMsg} = params
const result = params.data?.results.find(
(item) => item.evaluator_config === params.config.id,
)?.result
- let errorMsg = ""
- if (result?.type === "error") {
- errorMsg = `${result?.error?.message}\n${result?.error?.stacktrace}`
+ if (result?.type === "error" && result.error) {
+ setModalErrorMsg({message: result.error.message, stackTrace: result.error.stacktrace})
}
- return (
-
- {errorMsg || getTypedValue(result)}
+ return result?.type === "error" && result.error ? (
+
+ Failed to invoke LLM app{" "}
+ setIsErrorModalOpen(true)}
+ >
+ (more details)
+
+ ) : (
+ {getTypedValue(result)}
)
},
(prev, next) => prev.value === next.value,
diff --git a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
index 050c6d091d..b3d13d32f6 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
@@ -9,7 +9,7 @@ import {
_EvaluationScenario,
} from "@/lib/Types"
import {fetchAllComparisonResults} from "@/services/evaluations"
-import {ColDef} from "ag-grid-community"
+import {ColDef, ValueGetterParams} from "ag-grid-community"
import {AgGridReact} from "ag-grid-react"
import {Button, Dropdown, DropdownProps, Space, Spin, Switch, Tag, Tooltip, Typography} from "antd"
import React, {useEffect, useMemo, useRef, useState} from "react"
@@ -33,6 +33,7 @@ import {evaluatorsAtom} from "@/lib/atoms/evaluation"
import CompareOutputDiff from "@/components/CompareOutputDiff/CompareOutputDiff"
import {formatCurrency, formatLatency} from "@/lib/helpers/formatters"
import {useLocalStorage} from "usehooks-ts"
+import EvaluationErrorModal from "../EvaluationErrorModal/EvaluationErrorModal"
const useStyles = createUseStyles((theme: JSSTheme) => ({
table: {
@@ -89,6 +90,8 @@ const EvaluationCompareMode: React.FC = () => {
const [evaluators] = useAtom(evaluatorsAtom)
const gridRef = useRef>()
const [filterColsDropdown, setFilterColsDropdown] = useState(false)
+ const [modalErrorMsg, setModalErrorMsg] = useState({message: "", stackTrace: ""})
+ const [isErrorModalOpen, setIsErrorModalOpen] = useState(false)
const handleOpenChange: DropdownProps["onOpenChange"] = (nextOpen, info) => {
if (info.source === "trigger" || nextOpen) {
@@ -254,6 +257,36 @@ const EvaluationCompareMode: React.FC = () => {
hide:
!evalIds.includes(variant.evaluationId) ||
hiddenVariants.includes(config.name),
+ cellRenderer: (params: ValueGetterParams) => {
+ const result = params.data?.variants
+ .find((item) => item.evaluationId === variant.evaluationId)
+ ?.evaluatorConfigs.find(
+ (item) => item.evaluatorConfig.id === config.id,
+ )?.result
+
+ if (result?.error && result.type === "error") {
+ setModalErrorMsg({
+ message: result.error.message,
+ stackTrace: result.error.stacktrace,
+ })
+ }
+
+ return result?.type === "error" && result.error ? (
+
+ Failed to invoke LLM app{" "}
+ setIsErrorModalOpen(true)}
+ >
+ (more details)
+
+
+ ) : (
+ {getTypedValue(result)}
+ )
+ },
valueGetter: (params) => {
return getTypedValue(
params.data?.variants
@@ -504,6 +537,12 @@ const EvaluationCompareMode: React.FC = () => {
/>
+
+
)
}
diff --git a/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx b/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
index 6e33e3b2c6..16ea2f7add 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
@@ -1,9 +1,9 @@
import React, {useEffect, useMemo, useRef, useState} from "react"
import {AgGridReact} from "ag-grid-react"
import {useAppTheme} from "@/components/Layout/ThemeContextProvider"
-import {ColDef} from "ag-grid-community"
+import {ColDef, ValueGetterParams} from "ag-grid-community"
import {createUseStyles} from "react-jss"
-import {Button, Dropdown, DropdownProps, Space, Spin, Tag, Tooltip, theme} from "antd"
+import {Button, Dropdown, DropdownProps, Space, Spin, Tag, Tooltip, Typography, theme} from "antd"
import {
CheckOutlined,
DeleteOutlined,
@@ -266,6 +266,19 @@ const EvaluationResults: React.FC = () => {
),
autoHeaderHeight: true,
...getFilterParams("number"),
+ cellRenderer: (params: ValueGetterParams<_Evaluation, any>) => {
+ const result = params.data?.aggregated_results.find(
+ (item) => item.evaluator_config.id === config.id,
+ )?.result
+
+ return result?.error ? (
+
+ Error
+
+ ) : (
+ {getTypedValue(result)}
+ )
+ },
valueGetter: (params) =>
getTypedValue(
params.data?.aggregated_results.find(
diff --git a/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx b/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
index e375c46e9d..d3d9bd2529 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
@@ -24,6 +24,7 @@ import {evaluatorsAtom} from "@/lib/atoms/evaluation"
import CompareOutputDiff from "@/components/CompareOutputDiff/CompareOutputDiff"
import {formatCurrency, formatLatency} from "@/lib/helpers/formatters"
import {useLocalStorage} from "usehooks-ts"
+import EvaluationErrorModal from "../EvaluationErrorModal/EvaluationErrorModal"
const useStyles = createUseStyles((theme: JSSTheme) => ({
infoRow: {
@@ -57,6 +58,8 @@ const EvaluationScenarios: React.FC = () => {
const gridRef = useRef>()
const evalaution = scenarios[0]?.evaluation
const [showDiff, setShowDiff] = useLocalStorage("showDiff", "show")
+ const [modalErrorMsg, setModalErrorMsg] = useState({message: "", stackTrace: ""})
+ const [isErrorModalOpen, setIsErrorModalOpen] = useState(false)
const colDefs = useMemo(() => {
const colDefs: ColDef<_EvaluationScenario>[] = []
@@ -137,6 +140,8 @@ const EvaluationScenarios: React.FC = () => {
cellRenderer: ResultRenderer,
cellRendererParams: {
config,
+ setIsErrorModalOpen,
+ setModalErrorMsg,
},
valueGetter: (params) => {
return params.data?.results[index].result.value
@@ -272,6 +277,12 @@ const EvaluationScenarios: React.FC = () => {
/>
+
+
)
}
From 2ec19f53bfac2d064308c7f8cc0f27cff587eb97 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Mon, 27 May 2024 00:45:17 +0100
Subject: [PATCH 003/268] renamed Evaluation error components
---
.../EvaluationErrorModal.tsx | 0
.../EvaluationErrorText.tsx | 25 +++++++++++++++++++
2 files changed, 25 insertions(+)
rename agenta-web/src/components/pages/evaluations/{EvaluationErrorModal => EvaluationErrorProps}/EvaluationErrorModal.tsx (100%)
create mode 100644 agenta-web/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx
diff --git a/agenta-web/src/components/pages/evaluations/EvaluationErrorModal/EvaluationErrorModal.tsx b/agenta-web/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorModal.tsx
similarity index 100%
rename from agenta-web/src/components/pages/evaluations/EvaluationErrorModal/EvaluationErrorModal.tsx
rename to agenta-web/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorModal.tsx
diff --git a/agenta-web/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx b/agenta-web/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx
new file mode 100644
index 0000000000..d08a41261f
--- /dev/null
+++ b/agenta-web/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx
@@ -0,0 +1,25 @@
+import {Button, Typography} from "antd"
+import React from "react"
+
+interface EvaluationErrorTextProps {
+ text: string
+ setIsErrorModalOpen: (value: React.SetStateAction) => void
+}
+
+const EvaluationErrorText = ({text, setIsErrorModalOpen}: EvaluationErrorTextProps) => {
+ return (
+
+ {text}{" "}
+ setIsErrorModalOpen(true)}
+ >
+ (more details)
+
+
+ )
+}
+
+export default EvaluationErrorText
From eeea808cbc87fbed013d517825a5ebfbe9866587 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Mon, 27 May 2024 00:49:36 +0100
Subject: [PATCH 004/268] removed duplicate code
---
.../cellRenderers/cellRenderers.tsx | 18 ++++++------------
.../evaluationCompare/EvaluationCompare.tsx | 18 ++++++------------
2 files changed, 12 insertions(+), 24 deletions(-)
diff --git a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
index 17f3b9de26..2a9cf5f977 100644
--- a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
+++ b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
@@ -13,7 +13,7 @@ import {
InfoCircleOutlined,
} from "@ant-design/icons"
import {ICellRendererParams} from "ag-grid-community"
-import {Button, GlobalToken, Space, Tooltip, Typography, message, theme} from "antd"
+import {GlobalToken, Space, Tooltip, Typography, message, theme} from "antd"
import dayjs from "dayjs"
import relativeTime from "dayjs/plugin/relativeTime"
import duration from "dayjs/plugin/duration"
@@ -21,6 +21,7 @@ import Link from "next/link"
import React, {useCallback, useEffect, useState} from "react"
import {createUseStyles} from "react-jss"
import {getTypedValue} from "@/lib/helpers/evaluate"
+import EvaluationErrorText from "../EvaluationErrorProps/EvaluationErrorText"
dayjs.extend(relativeTime)
dayjs.extend(duration)
@@ -158,17 +159,10 @@ export const ResultRenderer = React.memo(
}
return result?.type === "error" && result.error ? (
-
- Failed to invoke LLM app{" "}
- setIsErrorModalOpen(true)}
- >
- (more details)
-
-
+
) : (
{getTypedValue(result)}
)
diff --git a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
index b3d13d32f6..12088ef9c8 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
@@ -33,7 +33,8 @@ import {evaluatorsAtom} from "@/lib/atoms/evaluation"
import CompareOutputDiff from "@/components/CompareOutputDiff/CompareOutputDiff"
import {formatCurrency, formatLatency} from "@/lib/helpers/formatters"
import {useLocalStorage} from "usehooks-ts"
-import EvaluationErrorModal from "../EvaluationErrorModal/EvaluationErrorModal"
+import EvaluationErrorModal from "../EvaluationErrorProps/EvaluationErrorModal"
+import EvaluationErrorText from "../EvaluationErrorProps/EvaluationErrorText"
const useStyles = createUseStyles((theme: JSSTheme) => ({
table: {
@@ -272,17 +273,10 @@ const EvaluationCompareMode: React.FC = () => {
}
return result?.type === "error" && result.error ? (
-
- Failed to invoke LLM app{" "}
- setIsErrorModalOpen(true)}
- >
- (more details)
-
-
+
) : (
{getTypedValue(result)}
)
From d3e1dc19f1808ea2533dee61ee34c5d04f1d5c36 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 31 May 2024 18:45:01 +0200
Subject: [PATCH 005/268] add postgres and pgadmin to docker compose
---
docker-compose.yml | 35 +++++++++++++++++++++++++++++++++++
1 file changed, 35 insertions(+)
diff --git a/docker-compose.yml b/docker-compose.yml
index 2832585b27..f9dfc894fd 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -160,6 +160,41 @@ services:
networks:
- agenta-network
+ postgres:
+ image: postgres:16.2
+ container_name: postgres
+ restart: always
+ environment:
+ POSTGRES_USER: username
+ POSTGRES_PASSWORD: password
+ ports:
+ - "5432:5432"
+ networks:
+ - agenta-network
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U username"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ pgadmin:
+ image: dpage/pgadmin4
+ restart: always
+ environment:
+ PGADMIN_DEFAULT_EMAIL: "admin@example.com"
+ PGADMIN_DEFAULT_PASSWORD: "password"
+ PGADMIN_SERVER_HOST: "postgres"
+ PGADMIN_SERVER_PORT: 5432
+ PGADMIN_SERVER_USER: "username"
+ PGADMIN_SERVER_PASSWORD: "password"
+ ports:
+ - "5050:80"
+ networks:
+ - agenta-network
+ depends_on:
+ postgres:
+ condition: service_healthy
+
networks:
agenta-network:
name: agenta-network
From dde0dad1ef8d409245970410e1ee2649d31932ad Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 3 Jun 2024 20:54:36 +0200
Subject: [PATCH 006/268] integrate sqlalchemy
---
agenta-backend/agenta_backend/main.py | 5 +-
.../agenta_backend/models/db_engine.py | 70 ++--
.../agenta_backend/services/db_manager.py | 25 +-
agenta-backend/poetry.lock | 386 +++++++-----------
agenta-backend/pyproject.toml | 3 +-
docker-compose.yml | 1 +
6 files changed, 208 insertions(+), 282 deletions(-)
diff --git a/agenta-backend/agenta_backend/main.py b/agenta-backend/agenta_backend/main.py
index 316cf5b824..d23ce2ec63 100644
--- a/agenta-backend/agenta_backend/main.py
+++ b/agenta-backend/agenta_backend/main.py
@@ -17,7 +17,7 @@
health_router,
)
from agenta_backend.utils.common import isEE, isCloudProd, isCloudDev, isOss, isCloudEE
-from agenta_backend.models.db_engine import DBEngine
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.open_api import open_api_tags_metadata
if isEE() or isCloudProd():
@@ -27,6 +27,7 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
+from sqlalchemy.ext.asyncio import AsyncSession
from celery import Celery
@@ -52,7 +53,7 @@ async def lifespan(application: FastAPI, cache=True):
application: FastAPI application.
cache: A boolean value that indicates whether to use the cached data or not.
"""
- await DBEngine().init_db()
+ await db_engine.init_db()
await templates_manager.update_and_sync_templates(cache=cache)
yield
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index 6534d51b6b..375bb13d75 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -1,11 +1,8 @@
import os
import logging
-from typing import List
-
-from pymongo import MongoClient
-from beanie import init_beanie, Document
-from motor.motor_asyncio import AsyncIOMotorClient
-
+from contextlib import asynccontextmanager
+from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
+from sqlalchemy.orm import sessionmaker
from agenta_backend.utils.common import isCloudEE
if isCloudEE():
@@ -52,8 +49,7 @@
AppVariantRevisionsDB,
)
-# Define Document Models
-document_models: List[Document] = [
+models = [
AppDB,
UserDB,
ImageDB,
@@ -73,52 +69,52 @@
]
if isCloudEE():
- document_models = document_models + [SpanDB, OrganizationDB, WorkspaceDB, APIKeyDB]
-
+ models.extend([SpanDB, OrganizationDB, WorkspaceDB, APIKeyDB])
# Configure and set logging level
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
-
class DBEngine:
"""
- Database engine to initialize Beanie and return the engine based on mode.
+ Database engine to initialize SQLAlchemy and return the engine based on mode.
"""
def __init__(self) -> None:
self.mode = os.environ.get("DATABASE_MODE", "v2")
- self.db_url = os.environ["MONGODB_URI"]
-
- async def initialize_client(self):
- return AsyncIOMotorClient(self.db_url)
+ self.db_url = os.environ["DATABASE_URL"] # Use SQLAlchemy compatible database URL
+ self.engine = create_async_engine(self.db_url, echo=True)
+ self.async_session = sessionmaker(
+ self.engine, expire_on_commit=False, class_=AsyncSession
+ )
async def init_db(self):
"""
- Initialize Beanie based on the mode and store the engine.
+ Initialize the database based on the mode and create all tables.
"""
-
- client = await self.initialize_client()
- db_name = self._get_database_name(self.mode)
- await init_beanie(database=client[db_name], document_models=document_models)
- logger.info(f"Using {db_name} database...")
-
- def _get_database_name(self, mode: str) -> str:
+ async with self.engine.begin() as conn:
+ # Drop all existing tables (if needed)
+ # await conn.run_sync(Base.metadata.drop_all)
+ # Create tables
+ for model in models:
+ await conn.run_sync(model.metadata.create_all)
+ logger.info(f"Using {self.mode} database...")
+
+ async def remove_db(self) -> None:
"""
- Determine the appropriate database name based on the mode.
+ Remove the database based on the mode.
"""
- if mode in ("test", "default", "v2"):
- return f"agenta_{mode}"
+ async with self.engine.begin() as conn:
+ for model in models:
+ await conn.run_sync(model.metadata.drop_all)
- return f"agenta_{mode}"
- def remove_db(self) -> None:
- """
- Remove the database based on the mode.
- """
+ @asynccontextmanager
+ async def get_session(self):
+ session = self.async_session()
+ try:
+ yield session
+ finally:
+ await session.close()
- client = MongoClient(self.db_url)
- if self.mode == "default":
- client.drop_database("agenta")
- else:
- client.drop_database(f"agenta_{self.mode}")
+db_engine = DBEngine()
\ No newline at end of file
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index dc813026c3..00efd06e22 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -12,6 +12,11 @@
from agenta_backend.utils.common import isCloudEE
from agenta_backend.services.json_importer_helper import get_json
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.future import select
+from sqlalchemy.exc import NoResultFound
+from agenta_backend.models.db_engine import db_engine
+
from agenta_backend.models.api.api_models import (
App,
Template,
@@ -1693,13 +1698,21 @@ async def add_template(**kwargs: dict) -> str:
**kwargs (dict): Keyword arguments containing the template data.
Returns:
- template_id (Str): The Id of the created template.
+ template_id (str): The Id of the created template.
"""
- existing_template = await TemplateDB.find_one(TemplateDB.tag_id == kwargs["tag_id"])
- if existing_template is None:
- db_template = TemplateDB(**kwargs)
- await db_template.create()
- return str(db_template.id)
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(TemplateDB).filter_by(tag_id=kwargs["tag_id"]))
+ existing_template = result.scalars().one_or_none()
+
+ if existing_template is None:
+ db_template = TemplateDB(**kwargs)
+ session.add(db_template)
+ await session.commit()
+ await session.refresh(db_template)
+ return str(db_template.id)
+ else:
+ return str(existing_template.id)
async def add_zip_template(key, value):
diff --git a/agenta-backend/poetry.lock b/agenta-backend/poetry.lock
index 09fe882771..ba2006d68f 100644
--- a/agenta-backend/poetry.lock
+++ b/agenta-backend/poetry.lock
@@ -1,10 +1,9 @@
-# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "aioboto3"
version = "12.4.0"
description = "Async boto3 wrapper"
-category = "main"
optional = false
python-versions = "<4.0,>=3.8"
files = [
@@ -23,7 +22,6 @@ s3cse = ["cryptography (>=2.3.1)"]
name = "aiobotocore"
version = "2.12.3"
description = "Async client for aws services using botocore and aiohttp"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -46,7 +44,6 @@ boto3 = ["boto3 (>=1.34.41,<1.34.70)"]
name = "aiodocker"
version = "0.21.0"
description = "Docker API client for asyncio"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -62,7 +59,6 @@ typing-extensions = ">=3.6.5"
name = "aiohttp"
version = "3.9.5"
description = "Async http client/server framework (asyncio)"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -159,7 +155,6 @@ speedups = ["Brotli", "aiodns", "brotlicffi"]
name = "aioitertools"
version = "0.11.0"
description = "itertools and builtins for AsyncIO and mixed iterables"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -174,7 +169,6 @@ typing_extensions = {version = ">=4.0", markers = "python_version < \"3.10\""}
name = "aiosignal"
version = "1.3.1"
description = "aiosignal: a list of registered asynchronous callbacks"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -189,7 +183,6 @@ frozenlist = ">=1.1.0"
name = "aiosmtplib"
version = "1.1.6"
description = "asyncio SMTP client"
-category = "main"
optional = false
python-versions = ">=3.5.2,<4.0.0"
files = [
@@ -205,7 +198,6 @@ uvloop = ["uvloop (>=0.13,<0.15)"]
name = "amqp"
version = "5.2.0"
description = "Low-level AMQP client for Python (fork of amqplib)."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -220,7 +212,6 @@ vine = ">=5.0.0,<6.0.0"
name = "anyio"
version = "3.7.1"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -242,7 +233,6 @@ trio = ["trio (<0.22)"]
name = "appdirs"
version = "1.4.4"
description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -254,7 +244,6 @@ files = [
name = "asgiref"
version = "3.8.1"
description = "ASGI specs, helper code, and adapters"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -272,7 +261,6 @@ tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"]
name = "async-timeout"
version = "4.0.3"
description = "Timeout context manager for asyncio programs"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -284,7 +272,6 @@ files = [
name = "asyncer"
version = "0.0.2"
description = "Asyncer, async and await, focused on developer experience."
-category = "main"
optional = false
python-versions = ">=3.6.2,<4.0.0"
files = [
@@ -295,11 +282,67 @@ files = [
[package.dependencies]
anyio = ">=3.4.0,<4.0.0"
+[[package]]
+name = "asyncpg"
+version = "0.29.0"
+description = "An asyncio PostgreSQL driver"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "asyncpg-0.29.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72fd0ef9f00aeed37179c62282a3d14262dbbafb74ec0ba16e1b1864d8a12169"},
+ {file = "asyncpg-0.29.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:52e8f8f9ff6e21f9b39ca9f8e3e33a5fcdceaf5667a8c5c32bee158e313be385"},
+ {file = "asyncpg-0.29.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e6823a7012be8b68301342ba33b4740e5a166f6bbda0aee32bc01638491a22"},
+ {file = "asyncpg-0.29.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:746e80d83ad5d5464cfbf94315eb6744222ab00aa4e522b704322fb182b83610"},
+ {file = "asyncpg-0.29.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ff8e8109cd6a46ff852a5e6bab8b0a047d7ea42fcb7ca5ae6eaae97d8eacf397"},
+ {file = "asyncpg-0.29.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:97eb024685b1d7e72b1972863de527c11ff87960837919dac6e34754768098eb"},
+ {file = "asyncpg-0.29.0-cp310-cp310-win32.whl", hash = "sha256:5bbb7f2cafd8d1fa3e65431833de2642f4b2124be61a449fa064e1a08d27e449"},
+ {file = "asyncpg-0.29.0-cp310-cp310-win_amd64.whl", hash = "sha256:76c3ac6530904838a4b650b2880f8e7af938ee049e769ec2fba7cd66469d7772"},
+ {file = "asyncpg-0.29.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4900ee08e85af01adb207519bb4e14b1cae8fd21e0ccf80fac6aa60b6da37b4"},
+ {file = "asyncpg-0.29.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a65c1dcd820d5aea7c7d82a3fdcb70e096f8f70d1a8bf93eb458e49bfad036ac"},
+ {file = "asyncpg-0.29.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b52e46f165585fd6af4863f268566668407c76b2c72d366bb8b522fa66f1870"},
+ {file = "asyncpg-0.29.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc600ee8ef3dd38b8d67421359779f8ccec30b463e7aec7ed481c8346decf99f"},
+ {file = "asyncpg-0.29.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:039a261af4f38f949095e1e780bae84a25ffe3e370175193174eb08d3cecab23"},
+ {file = "asyncpg-0.29.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6feaf2d8f9138d190e5ec4390c1715c3e87b37715cd69b2c3dfca616134efd2b"},
+ {file = "asyncpg-0.29.0-cp311-cp311-win32.whl", hash = "sha256:1e186427c88225ef730555f5fdda6c1812daa884064bfe6bc462fd3a71c4b675"},
+ {file = "asyncpg-0.29.0-cp311-cp311-win_amd64.whl", hash = "sha256:cfe73ffae35f518cfd6e4e5f5abb2618ceb5ef02a2365ce64f132601000587d3"},
+ {file = "asyncpg-0.29.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6011b0dc29886ab424dc042bf9eeb507670a3b40aece3439944006aafe023178"},
+ {file = "asyncpg-0.29.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b544ffc66b039d5ec5a7454667f855f7fec08e0dfaf5a5490dfafbb7abbd2cfb"},
+ {file = "asyncpg-0.29.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d84156d5fb530b06c493f9e7635aa18f518fa1d1395ef240d211cb563c4e2364"},
+ {file = "asyncpg-0.29.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54858bc25b49d1114178d65a88e48ad50cb2b6f3e475caa0f0c092d5f527c106"},
+ {file = "asyncpg-0.29.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bde17a1861cf10d5afce80a36fca736a86769ab3579532c03e45f83ba8a09c59"},
+ {file = "asyncpg-0.29.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:37a2ec1b9ff88d8773d3eb6d3784dc7e3fee7756a5317b67f923172a4748a175"},
+ {file = "asyncpg-0.29.0-cp312-cp312-win32.whl", hash = "sha256:bb1292d9fad43112a85e98ecdc2e051602bce97c199920586be83254d9dafc02"},
+ {file = "asyncpg-0.29.0-cp312-cp312-win_amd64.whl", hash = "sha256:2245be8ec5047a605e0b454c894e54bf2ec787ac04b1cb7e0d3c67aa1e32f0fe"},
+ {file = "asyncpg-0.29.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0009a300cae37b8c525e5b449233d59cd9868fd35431abc470a3e364d2b85cb9"},
+ {file = "asyncpg-0.29.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cad1324dbb33f3ca0cd2074d5114354ed3be2b94d48ddfd88af75ebda7c43cc"},
+ {file = "asyncpg-0.29.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:012d01df61e009015944ac7543d6ee30c2dc1eb2f6b10b62a3f598beb6531548"},
+ {file = "asyncpg-0.29.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000c996c53c04770798053e1730d34e30cb645ad95a63265aec82da9093d88e7"},
+ {file = "asyncpg-0.29.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e0bfe9c4d3429706cf70d3249089de14d6a01192d617e9093a8e941fea8ee775"},
+ {file = "asyncpg-0.29.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:642a36eb41b6313ffa328e8a5c5c2b5bea6ee138546c9c3cf1bffaad8ee36dd9"},
+ {file = "asyncpg-0.29.0-cp38-cp38-win32.whl", hash = "sha256:a921372bbd0aa3a5822dd0409da61b4cd50df89ae85150149f8c119f23e8c408"},
+ {file = "asyncpg-0.29.0-cp38-cp38-win_amd64.whl", hash = "sha256:103aad2b92d1506700cbf51cd8bb5441e7e72e87a7b3a2ca4e32c840f051a6a3"},
+ {file = "asyncpg-0.29.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5340dd515d7e52f4c11ada32171d87c05570479dc01dc66d03ee3e150fb695da"},
+ {file = "asyncpg-0.29.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e17b52c6cf83e170d3d865571ba574577ab8e533e7361a2b8ce6157d02c665d3"},
+ {file = "asyncpg-0.29.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f100d23f273555f4b19b74a96840aa27b85e99ba4b1f18d4ebff0734e78dc090"},
+ {file = "asyncpg-0.29.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48e7c58b516057126b363cec8ca02b804644fd012ef8e6c7e23386b7d5e6ce83"},
+ {file = "asyncpg-0.29.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f9ea3f24eb4c49a615573724d88a48bd1b7821c890c2effe04f05382ed9e8810"},
+ {file = "asyncpg-0.29.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8d36c7f14a22ec9e928f15f92a48207546ffe68bc412f3be718eedccdf10dc5c"},
+ {file = "asyncpg-0.29.0-cp39-cp39-win32.whl", hash = "sha256:797ab8123ebaed304a1fad4d7576d5376c3a006a4100380fb9d517f0b59c1ab2"},
+ {file = "asyncpg-0.29.0-cp39-cp39-win_amd64.whl", hash = "sha256:cce08a178858b426ae1aa8409b5cc171def45d4293626e7aa6510696d46decd8"},
+ {file = "asyncpg-0.29.0.tar.gz", hash = "sha256:d1c49e1f44fffafd9a55e1a9b101590859d881d639ea2922516f5d9c512d354e"},
+]
+
+[package.dependencies]
+async-timeout = {version = ">=4.0.3", markers = "python_version < \"3.12.0\""}
+
+[package.extras]
+docs = ["Sphinx (>=5.3.0,<5.4.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"]
+test = ["flake8 (>=6.1,<7.0)", "uvloop (>=0.15.3)"]
+
[[package]]
name = "attrs"
version = "23.2.0"
description = "Classes Without Boilerplate"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -319,7 +362,6 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p
name = "backoff"
version = "2.2.1"
description = "Function decoration for backoff and retry"
-category = "main"
optional = false
python-versions = ">=3.7,<4.0"
files = [
@@ -331,7 +373,6 @@ files = [
name = "beanie"
version = "1.26.0"
description = "Asynchronous Python ODM for MongoDB"
-category = "main"
optional = false
python-versions = "<4.0,>=3.7"
files = [
@@ -356,7 +397,6 @@ test = ["asgi-lifespan (>=1.0.1)", "dnspython (>=2.1.0)", "fastapi (>=0.100)", "
name = "billiard"
version = "4.2.0"
description = "Python multiprocessing fork with improvements and bugfixes"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -368,7 +408,6 @@ files = [
name = "boto3"
version = "1.34.69"
description = "The AWS SDK for Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -388,7 +427,6 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
name = "botocore"
version = "1.34.69"
description = "Low-level, data-driven core of boto 3."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -411,7 +449,6 @@ crt = ["awscrt (==0.19.19)"]
name = "cachetools"
version = "5.3.3"
description = "Extensible memoizing collections and decorators"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -423,7 +460,6 @@ files = [
name = "celery"
version = "5.4.0"
description = "Distributed Task Queue."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -480,7 +516,6 @@ zstd = ["zstandard (==0.22.0)"]
name = "certifi"
version = "2024.2.2"
description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -492,7 +527,6 @@ files = [
name = "cffi"
version = "1.16.0"
description = "Foreign Function Interface for Python calling C code."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -557,7 +591,6 @@ pycparser = "*"
name = "charset-normalizer"
version = "3.3.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -657,7 +690,6 @@ files = [
name = "click"
version = "8.1.7"
description = "Composable command line interface toolkit"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -672,7 +704,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""}
name = "click-didyoumean"
version = "0.3.1"
description = "Enables git-like *did-you-mean* feature in click"
-category = "main"
optional = false
python-versions = ">=3.6.2"
files = [
@@ -687,7 +718,6 @@ click = ">=7"
name = "click-plugins"
version = "1.1.1"
description = "An extension module for click to enable registering CLI commands via setuptools entry-points."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -705,7 +735,6 @@ dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"]
name = "click-repl"
version = "0.3.0"
description = "REPL plugin for Click"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -724,7 +753,6 @@ testing = ["pytest (>=7.2.1)", "pytest-cov (>=4.0.0)", "tox (>=4.4.3)"]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
@@ -736,7 +764,6 @@ files = [
name = "cryptography"
version = "42.0.7"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -791,7 +818,6 @@ test-randomorder = ["pytest-randomly"]
name = "dataclasses-json"
version = "0.6.6"
description = "Easily serialize dataclasses to and from JSON."
-category = "main"
optional = false
python-versions = "<4.0,>=3.7"
files = [
@@ -807,7 +833,6 @@ typing-inspect = ">=0.4.0,<1"
name = "datasets"
version = "2.19.1"
description = "HuggingFace community-driven open-source library of datasets"
-category = "main"
optional = false
python-versions = ">=3.8.0"
files = [
@@ -852,7 +877,6 @@ vision = ["Pillow (>=6.2.1)"]
name = "deprecated"
version = "1.2.13"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
@@ -870,7 +894,6 @@ dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version
name = "dill"
version = "0.3.8"
description = "serialize all of Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -886,7 +909,6 @@ profile = ["gprof2dot (>=2022.7.29)"]
name = "distro"
version = "1.9.0"
description = "Distro - an OS platform information API"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -898,7 +920,6 @@ files = [
name = "dnspython"
version = "2.6.1"
description = "DNS toolkit"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -919,7 +940,6 @@ wmi = ["wmi (>=1.5.1)"]
name = "docker"
version = "7.1.0"
description = "A Python library for the Docker Engine API."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -942,7 +962,6 @@ websockets = ["websocket-client (>=1.3.0)"]
name = "exceptiongroup"
version = "1.2.1"
description = "Backport of PEP 654 (exception groups)"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -957,7 +976,6 @@ test = ["pytest (>=6)"]
name = "faker"
version = "23.3.0"
description = "Faker is a Python package that generates fake data for you."
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -972,7 +990,6 @@ python-dateutil = ">=2.4"
name = "fastapi"
version = "0.109.2"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -992,7 +1009,6 @@ all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)"
name = "filelock"
version = "3.14.0"
description = "A platform independent file lock."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1009,7 +1025,6 @@ typing = ["typing-extensions (>=4.8)"]
name = "frozenlist"
version = "1.4.1"
description = "A list-like structure which implements collections.abc.MutableSequence"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1096,7 +1111,6 @@ files = [
name = "fsspec"
version = "2024.3.1"
description = "File-system specification"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1135,7 +1149,6 @@ tqdm = ["tqdm"]
name = "google-auth"
version = "2.29.0"
description = "Google Authentication Library"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1159,7 +1172,6 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"]
name = "greenlet"
version = "3.0.3"
description = "Lightweight in-process concurrent programming"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1231,7 +1243,6 @@ test = ["objgraph", "psutil"]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1243,7 +1254,6 @@ files = [
name = "httpcore"
version = "0.17.3"
description = "A minimal low-level HTTP client."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1255,17 +1265,16 @@ files = [
anyio = ">=3.0,<5.0"
certifi = "*"
h11 = ">=0.13,<0.15"
-sniffio = ">=1.0.0,<2.0.0"
+sniffio = "==1.*"
[package.extras]
http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (>=1.0.0,<2.0.0)"]
+socks = ["socksio (==1.*)"]
[[package]]
name = "httpx"
version = "0.24.1"
description = "The next generation HTTP client."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1281,15 +1290,14 @@ sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
-cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (>=1.0.0,<2.0.0)"]
+socks = ["socksio (==1.*)"]
[[package]]
name = "huggingface-hub"
version = "0.23.2"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
-category = "main"
optional = false
python-versions = ">=3.8.0"
files = [
@@ -1324,7 +1332,6 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t
name = "idna"
version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -1336,7 +1343,6 @@ files = [
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1348,7 +1354,6 @@ files = [
name = "jmespath"
version = "1.0.1"
description = "JSON Matching Expressions"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1360,7 +1365,6 @@ files = [
name = "jsonpatch"
version = "1.33"
description = "Apply JSON-Patches (RFC 6902)"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
@@ -1375,7 +1379,6 @@ jsonpointer = ">=1.9"
name = "jsonpointer"
version = "2.4"
description = "Identify specific nodes in a JSON document (RFC 6901)"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
@@ -1387,7 +1390,6 @@ files = [
name = "kombu"
version = "5.3.7"
description = "Messaging library for Python."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1421,7 +1423,6 @@ zookeeper = ["kazoo (>=2.8.0)"]
name = "kubernetes"
version = "28.1.0"
description = "Kubernetes python client"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -1439,7 +1440,7 @@ requests = "*"
requests-oauthlib = "*"
six = ">=1.9.0"
urllib3 = ">=1.24.2,<2.0"
-websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.0 || >=0.43.0"
+websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0"
[package.extras]
adal = ["adal (>=1.0.2)"]
@@ -1448,7 +1449,6 @@ adal = ["adal (>=1.0.2)"]
name = "langchain"
version = "0.2.1"
description = "Building applications with LLMs through composability"
-category = "main"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
@@ -1487,7 +1487,6 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
name = "langchain-community"
version = "0.2.1"
description = "Community contributed LangChain integrations."
-category = "main"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
@@ -1515,7 +1514,6 @@ extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.
name = "langchain-core"
version = "0.2.3"
description = "Building applications with LLMs through composability"
-category = "main"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
@@ -1538,7 +1536,6 @@ extended-testing = ["jinja2 (>=3,<4)"]
name = "langchain-openai"
version = "0.1.8"
description = "An integration package connecting OpenAI and LangChain"
-category = "main"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
@@ -1555,7 +1552,6 @@ tiktoken = ">=0.7,<1"
name = "langchain-text-splitters"
version = "0.2.0"
description = "LangChain text splitting utilities"
-category = "main"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
@@ -1573,7 +1569,6 @@ extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"]
name = "langsmith"
version = "0.1.65"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
-category = "main"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
@@ -1590,7 +1585,6 @@ requests = ">=2,<3"
name = "lazy-model"
version = "0.2.0"
description = ""
-category = "main"
optional = false
python-versions = ">=3.7,<4.0"
files = [
@@ -1605,7 +1599,6 @@ pydantic = ">=1.9.0"
name = "marshmallow"
version = "3.21.2"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1625,7 +1618,6 @@ tests = ["pytest", "pytz", "simplejson"]
name = "motor"
version = "3.4.0"
description = "Non-blocking MongoDB driver for Tornado or asyncio"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1650,7 +1642,6 @@ zstd = ["pymongo[zstd] (>=4.5,<5)"]
name = "multidict"
version = "6.0.5"
description = "multidict implementation"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1750,7 +1741,6 @@ files = [
name = "multiprocess"
version = "0.70.16"
description = "better multiprocessing and multithreading in Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1775,7 +1765,6 @@ dill = ">=0.3.8"
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -1787,7 +1776,6 @@ files = [
name = "nest-asyncio"
version = "1.6.0"
description = "Patch asyncio to allow nested event loops"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -1799,7 +1787,6 @@ files = [
name = "newrelic"
version = "9.10.0"
description = "New Relic Python Agent"
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
@@ -1841,7 +1828,6 @@ infinite-tracing = ["grpcio", "protobuf"]
name = "numpy"
version = "1.26.4"
description = "Fundamental package for array computing in Python"
-category = "main"
optional = false
python-versions = ">=3.9"
files = [
@@ -1887,7 +1873,6 @@ files = [
name = "oauthlib"
version = "3.2.2"
description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -1904,7 +1889,6 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
name = "openai"
version = "1.30.5"
description = "The official Python library for the openai API"
-category = "main"
optional = false
python-versions = ">=3.7.1"
files = [
@@ -1928,7 +1912,6 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
name = "orjson"
version = "3.10.3"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1984,7 +1967,6 @@ files = [
name = "packaging"
version = "23.2"
description = "Core utilities for Python packages"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1996,7 +1978,6 @@ files = [
name = "pandas"
version = "2.2.2"
description = "Powerful data structures for data analysis, time series, and statistics"
-category = "main"
optional = false
python-versions = ">=3.9"
files = [
@@ -2070,7 +2051,6 @@ xml = ["lxml (>=4.9.2)"]
name = "phonenumbers"
version = "8.12.48"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2082,7 +2062,6 @@ files = [
name = "pkce"
version = "1.0.3"
description = "PKCE Pyhton generator."
-category = "main"
optional = false
python-versions = ">=3"
files = [
@@ -2094,7 +2073,6 @@ files = [
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -2110,7 +2088,6 @@ testing = ["pytest", "pytest-benchmark"]
name = "prompt-toolkit"
version = "3.0.45"
description = "Library for building powerful interactive command lines in Python"
-category = "main"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -2125,7 +2102,6 @@ wcwidth = "*"
name = "pyarrow"
version = "16.1.0"
description = "Python library for Apache Arrow"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -2174,7 +2150,6 @@ numpy = ">=1.16.6"
name = "pyarrow-hotfix"
version = "0.6"
description = ""
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -2186,7 +2161,6 @@ files = [
name = "pyasn1"
version = "0.6.0"
description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -2198,7 +2172,6 @@ files = [
name = "pyasn1-modules"
version = "0.4.0"
description = "A collection of ASN.1-based protocols modules"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -2213,7 +2186,6 @@ pyasn1 = ">=0.4.6,<0.7.0"
name = "pycparser"
version = "2.22"
description = "C parser in Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -2225,7 +2197,6 @@ files = [
name = "pycryptodome"
version = "3.10.4"
description = "Cryptographic library for Python"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
@@ -2265,7 +2236,6 @@ files = [
name = "pydantic"
version = "1.10.15"
description = "Data validation and settings management using python type hints"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -2318,7 +2288,6 @@ email = ["email-validator (>=1.0.3)"]
name = "pyjwt"
version = "2.8.0"
description = "JSON Web Token implementation in Python"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -2339,7 +2308,6 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
name = "pymongo"
version = "4.7.2"
description = "Python driver for MongoDB "
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -2421,7 +2389,6 @@ zstd = ["zstandard"]
name = "pysbd"
version = "0.3.4"
description = "pysbd (Python Sentence Boundary Disambiguation) is a rule-based sentence boundary detection that works out-of-the-box across many languages."
-category = "main"
optional = false
python-versions = ">=3"
files = [
@@ -2432,7 +2399,6 @@ files = [
name = "pytest"
version = "7.4.4"
description = "pytest: simple powerful testing with Python"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -2455,7 +2421,6 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no
name = "pytest-asyncio"
version = "0.21.2"
description = "Pytest support for asyncio"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -2474,7 +2439,6 @@ testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -2492,7 +2456,6 @@ dev = ["pre-commit", "pytest-asyncio", "tox"]
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
@@ -2507,7 +2470,6 @@ six = ">=1.5"
name = "python-http-client"
version = "3.3.7"
description = "HTTP REST client, simplified for Python"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
@@ -2519,7 +2481,6 @@ files = [
name = "python-multipart"
version = "0.0.7"
description = "A streaming multipart parser for Python"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -2534,7 +2495,6 @@ dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatc
name = "pytz"
version = "2024.1"
description = "World timezone definitions, modern and historical"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2546,7 +2506,6 @@ files = [
name = "pywin32"
version = "306"
description = "Python for Window Extensions"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2570,7 +2529,6 @@ files = [
name = "pyyaml"
version = "6.0.1"
description = "YAML parser and emitter for Python"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -2579,6 +2537,7 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -2586,8 +2545,16 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
+ {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@@ -2604,6 +2571,7 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -2611,6 +2579,7 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@@ -2620,7 +2589,6 @@ files = [
name = "ragas"
version = "0.1.9"
description = ""
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2647,7 +2615,6 @@ all = ["sentence-transformers"]
name = "redis"
version = "4.6.0"
description = "Python client for Redis database and key-value store"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -2666,7 +2633,6 @@ ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"
name = "regex"
version = "2024.5.15"
description = "Alternative regular expression module, to replace re."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -2755,7 +2721,6 @@ files = [
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -2777,7 +2742,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
name = "requests-file"
version = "2.1.0"
description = "File transport adapter for Requests"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2792,7 +2756,6 @@ requests = ">=1.0.0"
name = "requests-oauthlib"
version = "2.0.0"
description = "OAuthlib authentication support for Requests."
-category = "main"
optional = false
python-versions = ">=3.4"
files = [
@@ -2811,7 +2774,6 @@ rsa = ["oauthlib[signedtoken] (>=3.0.0)"]
name = "restrictedpython"
version = "6.2"
description = "RestrictedPython is a defined subset of the Python language which allows to provide a program input into a trusted environment."
-category = "main"
optional = false
python-versions = ">=3.6, <3.12"
files = [
@@ -2827,7 +2789,6 @@ test = ["pytest", "pytest-mock"]
name = "rsa"
version = "4.9"
description = "Pure-Python RSA implementation"
-category = "main"
optional = false
python-versions = ">=3.6,<4"
files = [
@@ -2842,7 +2803,6 @@ pyasn1 = ">=0.1.3"
name = "s3transfer"
version = "0.10.1"
description = "An Amazon S3 Transfer Manager"
-category = "main"
optional = false
python-versions = ">= 3.8"
files = [
@@ -2860,7 +2820,6 @@ crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"]
name = "sendgrid"
version = "6.11.0"
description = "Twilio SendGrid library for Python"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
@@ -2876,7 +2835,6 @@ starkbank-ecdsa = ">=2.0.1"
name = "sentry-sdk"
version = "1.45.0"
description = "Python client for Sentry (https://sentry.io)"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2925,7 +2883,6 @@ tornado = ["tornado (>=5)"]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -2937,7 +2894,6 @@ files = [
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -2947,116 +2903,95 @@ files = [
[[package]]
name = "sqlalchemy"
-version = "1.4.41"
+version = "2.0.30"
description = "Database Abstraction Library"
-category = "main"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
-files = [
- {file = "SQLAlchemy-1.4.41-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:13e397a9371ecd25573a7b90bd037db604331cf403f5318038c46ee44908c44d"},
- {file = "SQLAlchemy-1.4.41-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2d6495f84c4fd11584f34e62f9feec81bf373787b3942270487074e35cbe5330"},
- {file = "SQLAlchemy-1.4.41-cp27-cp27m-win32.whl", hash = "sha256:e570cfc40a29d6ad46c9aeaddbdcee687880940a3a327f2c668dd0e4ef0a441d"},
- {file = "SQLAlchemy-1.4.41-cp27-cp27m-win_amd64.whl", hash = "sha256:5facb7fd6fa8a7353bbe88b95695e555338fb038ad19ceb29c82d94f62775a05"},
- {file = "SQLAlchemy-1.4.41-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f37fa70d95658763254941ddd30ecb23fc4ec0c5a788a7c21034fc2305dab7cc"},
- {file = "SQLAlchemy-1.4.41-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:361f6b5e3f659e3c56ea3518cf85fbdae1b9e788ade0219a67eeaaea8a4e4d2a"},
- {file = "SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0990932f7cca97fece8017414f57fdd80db506a045869d7ddf2dda1d7cf69ecc"},
- {file = "SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cd767cf5d7252b1c88fcfb58426a32d7bd14a7e4942497e15b68ff5d822b41ad"},
- {file = "SQLAlchemy-1.4.41-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5102fb9ee2c258a2218281adcb3e1918b793c51d6c2b4666ce38c35101bb940e"},
- {file = "SQLAlchemy-1.4.41-cp310-cp310-win32.whl", hash = "sha256:2082a2d2fca363a3ce21cfa3d068c5a1ce4bf720cf6497fb3a9fc643a8ee4ddd"},
- {file = "SQLAlchemy-1.4.41-cp310-cp310-win_amd64.whl", hash = "sha256:e4b12e3d88a8fffd0b4ca559f6d4957ed91bd4c0613a4e13846ab8729dc5c251"},
- {file = "SQLAlchemy-1.4.41-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:90484a2b00baedad361402c257895b13faa3f01780f18f4a104a2f5c413e4536"},
- {file = "SQLAlchemy-1.4.41-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b67fc780cfe2b306180e56daaa411dd3186bf979d50a6a7c2a5b5036575cbdbb"},
- {file = "SQLAlchemy-1.4.41-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad2b727fc41c7f8757098903f85fafb4bf587ca6605f82d9bf5604bd9c7cded"},
- {file = "SQLAlchemy-1.4.41-cp311-cp311-win32.whl", hash = "sha256:59bdc291165b6119fc6cdbc287c36f7f2859e6051dd923bdf47b4c55fd2f8bd0"},
- {file = "SQLAlchemy-1.4.41-cp311-cp311-win_amd64.whl", hash = "sha256:d2e054aed4645f9b755db85bc69fc4ed2c9020c19c8027976f66576b906a74f1"},
- {file = "SQLAlchemy-1.4.41-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:4ba7e122510bbc07258dc42be6ed45997efdf38129bde3e3f12649be70683546"},
- {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0dcf127bb99458a9d211e6e1f0f3edb96c874dd12f2503d4d8e4f1fd103790b"},
- {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e16c2be5cb19e2c08da7bd3a87fed2a0d4e90065ee553a940c4fc1a0fb1ab72b"},
- {file = "SQLAlchemy-1.4.41-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5ebeeec5c14533221eb30bad716bc1fd32f509196318fb9caa7002c4a364e4c"},
- {file = "SQLAlchemy-1.4.41-cp36-cp36m-win32.whl", hash = "sha256:3e2ef592ac3693c65210f8b53d0edcf9f4405925adcfc031ff495e8d18169682"},
- {file = "SQLAlchemy-1.4.41-cp36-cp36m-win_amd64.whl", hash = "sha256:eb30cf008850c0a26b72bd1b9be6730830165ce049d239cfdccd906f2685f892"},
- {file = "SQLAlchemy-1.4.41-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:c23d64a0b28fc78c96289ffbd0d9d1abd48d267269b27f2d34e430ea73ce4b26"},
- {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eb8897367a21b578b26f5713833836f886817ee2ffba1177d446fa3f77e67c8"},
- {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:14576238a5f89bcf504c5f0a388d0ca78df61fb42cb2af0efe239dc965d4f5c9"},
- {file = "SQLAlchemy-1.4.41-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:639e1ae8d48b3c86ffe59c0daa9a02e2bfe17ca3d2b41611b30a0073937d4497"},
- {file = "SQLAlchemy-1.4.41-cp37-cp37m-win32.whl", hash = "sha256:0005bd73026cd239fc1e8ccdf54db58b6193be9a02b3f0c5983808f84862c767"},
- {file = "SQLAlchemy-1.4.41-cp37-cp37m-win_amd64.whl", hash = "sha256:5323252be2bd261e0aa3f33cb3a64c45d76829989fa3ce90652838397d84197d"},
- {file = "SQLAlchemy-1.4.41-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:05f0de3a1dc3810a776275763764bb0015a02ae0f698a794646ebc5fb06fad33"},
- {file = "SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0002e829142b2af00b4eaa26c51728f3ea68235f232a2e72a9508a3116bd6ed0"},
- {file = "SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:22ff16cedab5b16a0db79f1bc99e46a6ddececb60c396562e50aab58ddb2871c"},
- {file = "SQLAlchemy-1.4.41-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccfd238f766a5bb5ee5545a62dd03f316ac67966a6a658efb63eeff8158a4bbf"},
- {file = "SQLAlchemy-1.4.41-cp38-cp38-win32.whl", hash = "sha256:58bb65b3274b0c8a02cea9f91d6f44d0da79abc993b33bdedbfec98c8440175a"},
- {file = "SQLAlchemy-1.4.41-cp38-cp38-win_amd64.whl", hash = "sha256:ce8feaa52c1640de9541eeaaa8b5fb632d9d66249c947bb0d89dd01f87c7c288"},
- {file = "SQLAlchemy-1.4.41-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:199a73c31ac8ea59937cc0bf3dfc04392e81afe2ec8a74f26f489d268867846c"},
- {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676d51c9f6f6226ae8f26dc83ec291c088fe7633269757d333978df78d931ab"},
- {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:036d8472356e1d5f096c5e0e1a7e0f9182140ada3602f8fff6b7329e9e7cfbcd"},
- {file = "SQLAlchemy-1.4.41-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2307495d9e0ea00d0c726be97a5b96615035854972cc538f6e7eaed23a35886c"},
- {file = "SQLAlchemy-1.4.41-cp39-cp39-win32.whl", hash = "sha256:9c56e19780cd1344fcd362fd6265a15f48aa8d365996a37fab1495cae8fcd97d"},
- {file = "SQLAlchemy-1.4.41-cp39-cp39-win_amd64.whl", hash = "sha256:f5fa526d027d804b1f85cdda1eb091f70bde6fb7d87892f6dd5a48925bc88898"},
- {file = "SQLAlchemy-1.4.41.tar.gz", hash = "sha256:0292f70d1797e3c54e862e6f30ae474014648bc9c723e14a2fda730adb0a9791"},
-]
-
-[package.dependencies]
-greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and platform_machine == \"aarch64\" or python_version >= \"3\" and platform_machine == \"ppc64le\" or python_version >= \"3\" and platform_machine == \"x86_64\" or python_version >= \"3\" and platform_machine == \"amd64\" or python_version >= \"3\" and platform_machine == \"AMD64\" or python_version >= \"3\" and platform_machine == \"win32\" or python_version >= \"3\" and platform_machine == \"WIN32\""}
-
-[package.extras]
-aiomysql = ["aiomysql", "greenlet (!=0.4.17)"]
-aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"]
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3b48154678e76445c7ded1896715ce05319f74b1e73cf82d4f8b59b46e9c0ddc"},
+ {file = "SQLAlchemy-2.0.30-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2753743c2afd061bb95a61a51bbb6a1a11ac1c44292fad898f10c9839a7f75b2"},
+ {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7bfc726d167f425d4c16269a9a10fe8630ff6d14b683d588044dcef2d0f6be7"},
+ {file = "SQLAlchemy-2.0.30-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f61ada6979223013d9ab83a3ed003ded6959eae37d0d685db2c147e9143797"},
+ {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a365eda439b7a00732638f11072907c1bc8e351c7665e7e5da91b169af794af"},
+ {file = "SQLAlchemy-2.0.30-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bba002a9447b291548e8d66fd8c96a6a7ed4f2def0bb155f4f0a1309fd2735d5"},
+ {file = "SQLAlchemy-2.0.30-cp310-cp310-win32.whl", hash = "sha256:0138c5c16be3600923fa2169532205d18891b28afa817cb49b50e08f62198bb8"},
+ {file = "SQLAlchemy-2.0.30-cp310-cp310-win_amd64.whl", hash = "sha256:99650e9f4cf3ad0d409fed3eec4f071fadd032e9a5edc7270cd646a26446feeb"},
+ {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:955991a09f0992c68a499791a753523f50f71a6885531568404fa0f231832aa0"},
+ {file = "SQLAlchemy-2.0.30-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f69e4c756ee2686767eb80f94c0125c8b0a0b87ede03eacc5c8ae3b54b99dc46"},
+ {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69c9db1ce00e59e8dd09d7bae852a9add716efdc070a3e2068377e6ff0d6fdaa"},
+ {file = "SQLAlchemy-2.0.30-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1429a4b0f709f19ff3b0cf13675b2b9bfa8a7e79990003207a011c0db880a13"},
+ {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:efedba7e13aa9a6c8407c48facfdfa108a5a4128e35f4c68f20c3407e4376aa9"},
+ {file = "SQLAlchemy-2.0.30-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16863e2b132b761891d6c49f0a0f70030e0bcac4fd208117f6b7e053e68668d0"},
+ {file = "SQLAlchemy-2.0.30-cp311-cp311-win32.whl", hash = "sha256:2ecabd9ccaa6e914e3dbb2aa46b76dede7eadc8cbf1b8083c94d936bcd5ffb49"},
+ {file = "SQLAlchemy-2.0.30-cp311-cp311-win_amd64.whl", hash = "sha256:0b3f4c438e37d22b83e640f825ef0f37b95db9aa2d68203f2c9549375d0b2260"},
+ {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5a79d65395ac5e6b0c2890935bad892eabb911c4aa8e8015067ddb37eea3d56c"},
+ {file = "SQLAlchemy-2.0.30-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9a5baf9267b752390252889f0c802ea13b52dfee5e369527da229189b8bd592e"},
+ {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cb5a646930c5123f8461f6468901573f334c2c63c795b9af350063a736d0134"},
+ {file = "SQLAlchemy-2.0.30-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:296230899df0b77dec4eb799bcea6fbe39a43707ce7bb166519c97b583cfcab3"},
+ {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c62d401223f468eb4da32627bffc0c78ed516b03bb8a34a58be54d618b74d472"},
+ {file = "SQLAlchemy-2.0.30-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3b69e934f0f2b677ec111b4d83f92dc1a3210a779f69bf905273192cf4ed433e"},
+ {file = "SQLAlchemy-2.0.30-cp312-cp312-win32.whl", hash = "sha256:77d2edb1f54aff37e3318f611637171e8ec71472f1fdc7348b41dcb226f93d90"},
+ {file = "SQLAlchemy-2.0.30-cp312-cp312-win_amd64.whl", hash = "sha256:b6c7ec2b1f4969fc19b65b7059ed00497e25f54069407a8701091beb69e591a5"},
+ {file = "SQLAlchemy-2.0.30-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5a8e3b0a7e09e94be7510d1661339d6b52daf202ed2f5b1f9f48ea34ee6f2d57"},
+ {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b60203c63e8f984df92035610c5fb76d941254cf5d19751faab7d33b21e5ddc0"},
+ {file = "SQLAlchemy-2.0.30-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1dc3eabd8c0232ee8387fbe03e0a62220a6f089e278b1f0aaf5e2d6210741ad"},
+ {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:40ad017c672c00b9b663fcfcd5f0864a0a97828e2ee7ab0c140dc84058d194cf"},
+ {file = "SQLAlchemy-2.0.30-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e42203d8d20dc704604862977b1470a122e4892791fe3ed165f041e4bf447a1b"},
+ {file = "SQLAlchemy-2.0.30-cp37-cp37m-win32.whl", hash = "sha256:2a4f4da89c74435f2bc61878cd08f3646b699e7d2eba97144030d1be44e27584"},
+ {file = "SQLAlchemy-2.0.30-cp37-cp37m-win_amd64.whl", hash = "sha256:b6bf767d14b77f6a18b6982cbbf29d71bede087edae495d11ab358280f304d8e"},
+ {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc0c53579650a891f9b83fa3cecd4e00218e071d0ba00c4890f5be0c34887ed3"},
+ {file = "SQLAlchemy-2.0.30-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:311710f9a2ee235f1403537b10c7687214bb1f2b9ebb52702c5aa4a77f0b3af7"},
+ {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:408f8b0e2c04677e9c93f40eef3ab22f550fecb3011b187f66a096395ff3d9fd"},
+ {file = "SQLAlchemy-2.0.30-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37a4b4fb0dd4d2669070fb05b8b8824afd0af57587393015baee1cf9890242d9"},
+ {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a943d297126c9230719c27fcbbeab57ecd5d15b0bd6bfd26e91bfcfe64220621"},
+ {file = "SQLAlchemy-2.0.30-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0a089e218654e740a41388893e090d2e2c22c29028c9d1353feb38638820bbeb"},
+ {file = "SQLAlchemy-2.0.30-cp38-cp38-win32.whl", hash = "sha256:fa561138a64f949f3e889eb9ab8c58e1504ab351d6cf55259dc4c248eaa19da6"},
+ {file = "SQLAlchemy-2.0.30-cp38-cp38-win_amd64.whl", hash = "sha256:7d74336c65705b986d12a7e337ba27ab2b9d819993851b140efdf029248e818e"},
+ {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8c62fe2480dd61c532ccafdbce9b29dacc126fe8be0d9a927ca3e699b9491a"},
+ {file = "SQLAlchemy-2.0.30-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2383146973a15435e4717f94c7509982770e3e54974c71f76500a0136f22810b"},
+ {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8409de825f2c3b62ab15788635ccaec0c881c3f12a8af2b12ae4910a0a9aeef6"},
+ {file = "SQLAlchemy-2.0.30-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0094c5dc698a5f78d3d1539853e8ecec02516b62b8223c970c86d44e7a80f6c7"},
+ {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:edc16a50f5e1b7a06a2dcc1f2205b0b961074c123ed17ebda726f376a5ab0953"},
+ {file = "SQLAlchemy-2.0.30-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f7703c2010355dd28f53deb644a05fc30f796bd8598b43f0ba678878780b6e4c"},
+ {file = "SQLAlchemy-2.0.30-cp39-cp39-win32.whl", hash = "sha256:1f9a727312ff6ad5248a4367358e2cf7e625e98b1028b1d7ab7b806b7d757513"},
+ {file = "SQLAlchemy-2.0.30-cp39-cp39-win_amd64.whl", hash = "sha256:a0ef36b28534f2a5771191be6edb44cc2673c7b2edf6deac6562400288664221"},
+ {file = "SQLAlchemy-2.0.30-py3-none-any.whl", hash = "sha256:7108d569d3990c71e26a42f60474b4c02c8586c4681af5fd67e51a044fdea86a"},
+ {file = "SQLAlchemy-2.0.30.tar.gz", hash = "sha256:2b1708916730f4830bc69d6f49d37f7698b5bd7530aca7f04f785f8849e95255"},
+]
+
+[package.dependencies]
+greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""}
+typing-extensions = ">=4.6.0"
+
+[package.extras]
+aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"]
+aioodbc = ["aioodbc", "greenlet (!=0.4.17)"]
+aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"]
asyncio = ["greenlet (!=0.4.17)"]
-asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"]
-mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"]
+asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"]
+mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"]
mssql = ["pyodbc"]
mssql-pymssql = ["pymssql"]
mssql-pyodbc = ["pyodbc"]
-mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"]
-mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"]
+mypy = ["mypy (>=0.910)"]
+mysql = ["mysqlclient (>=1.4.0)"]
mysql-connector = ["mysql-connector-python"]
-oracle = ["cx-oracle (>=7)", "cx-oracle (>=7,<8)"]
+oracle = ["cx_oracle (>=8)"]
+oracle-oracledb = ["oracledb (>=1.0.1)"]
postgresql = ["psycopg2 (>=2.7)"]
postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"]
-postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"]
+postgresql-pg8000 = ["pg8000 (>=1.29.1)"]
+postgresql-psycopg = ["psycopg (>=3.0.7)"]
postgresql-psycopg2binary = ["psycopg2-binary"]
postgresql-psycopg2cffi = ["psycopg2cffi"]
-pymysql = ["pymysql", "pymysql (<1)"]
-sqlcipher = ["sqlcipher3-binary"]
-
-[[package]]
-name = "sqlalchemy2-stubs"
-version = "0.0.2a38"
-description = "Typing Stubs for SQLAlchemy 1.4"
-category = "main"
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "sqlalchemy2-stubs-0.0.2a38.tar.gz", hash = "sha256:861d722abeb12f13eacd775a9f09379b11a5a9076f469ccd4099961b95800f9e"},
- {file = "sqlalchemy2_stubs-0.0.2a38-py3-none-any.whl", hash = "sha256:b62aa46943807287550e2033dafe07564b33b6a815fbaa3c144e396f9cc53bcb"},
-]
-
-[package.dependencies]
-typing-extensions = ">=3.7.4"
-
-[[package]]
-name = "sqlmodel"
-version = "0.0.8"
-description = "SQLModel, SQL databases in Python, designed for simplicity, compatibility, and robustness."
-category = "main"
-optional = false
-python-versions = ">=3.6.1,<4.0.0"
-files = [
- {file = "sqlmodel-0.0.8-py3-none-any.whl", hash = "sha256:0fd805719e0c5d4f22be32eb3ffc856eca3f7f20e8c7aa3e117ad91684b518ee"},
- {file = "sqlmodel-0.0.8.tar.gz", hash = "sha256:3371b4d1ad59d2ffd0c530582c2140b6c06b090b32af9b9c6412986d7b117036"},
-]
-
-[package.dependencies]
-pydantic = ">=1.8.2,<2.0.0"
-SQLAlchemy = ">=1.4.17,<=1.4.41"
-sqlalchemy2-stubs = "*"
+postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
+pymysql = ["pymysql"]
+sqlcipher = ["sqlcipher3_binary"]
[[package]]
name = "starkbank-ecdsa"
version = "2.2.0"
description = "A lightweight and fast pure python ECDSA library"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -3067,7 +3002,6 @@ files = [
name = "starlette"
version = "0.36.3"
description = "The little ASGI library that shines."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -3086,7 +3020,6 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7
name = "supertokens-python"
version = "0.15.3"
description = "SuperTokens SDK for Python"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -3101,7 +3034,7 @@ Deprecated = "1.2.13"
httpx = ">=0.15.0,<0.25.0"
phonenumbers = "8.12.48"
pkce = "1.0.3"
-pycryptodome = ">=3.10.0,<3.11.0"
+pycryptodome = "==3.10.*"
PyJWT = {version = ">=2.6.0,<3.0.0", extras = ["crypto"]}
tldextract = "3.1.0"
twilio = "7.9.1"
@@ -3117,7 +3050,6 @@ flask = ["Flask", "flask-cors", "python-dotenv (==0.19.2)"]
name = "tenacity"
version = "8.3.0"
description = "Retry code until it succeeds"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -3133,7 +3065,6 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"]
name = "tiktoken"
version = "0.7.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -3186,7 +3117,6 @@ blobfile = ["blobfile (>=2)"]
name = "tldextract"
version = "3.1.0"
description = "Accurately separate the TLD from the registered domain and subdomains of a URL, using the Public Suffix List. By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well."
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -3204,7 +3134,6 @@ requests-file = ">=1.4"
name = "toml"
version = "0.10.2"
description = "Python Library for Tom's Obvious, Minimal Language"
-category = "main"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -3216,7 +3145,6 @@ files = [
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -3228,7 +3156,6 @@ files = [
name = "tqdm"
version = "4.66.4"
description = "Fast, Extensible Progress Meter"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -3249,7 +3176,6 @@ telegram = ["requests"]
name = "twilio"
version = "7.9.1"
description = "Twilio API client and TwiML generator"
-category = "main"
optional = false
python-versions = ">=3.6.0"
files = [
@@ -3266,7 +3192,6 @@ requests = ">=2.0.0"
name = "typing-extensions"
version = "4.12.0"
description = "Backported and Experimental Type Hints for Python 3.8+"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -3278,7 +3203,6 @@ files = [
name = "typing-inspect"
version = "0.9.0"
description = "Runtime inspection utilities for typing module."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -3294,7 +3218,6 @@ typing-extensions = ">=3.7.4"
name = "tzdata"
version = "2024.1"
description = "Provider of IANA time zone data"
-category = "main"
optional = false
python-versions = ">=2"
files = [
@@ -3306,7 +3229,6 @@ files = [
name = "urllib3"
version = "1.26.18"
description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [
@@ -3323,7 +3245,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
name = "uvicorn"
version = "0.22.0"
description = "The lightning-fast ASGI server."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -3342,7 +3263,6 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)",
name = "vine"
version = "5.1.0"
description = "Python promises."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -3354,7 +3274,6 @@ files = [
name = "watchdog"
version = "3.0.0"
description = "Filesystem events monitoring"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -3397,7 +3316,6 @@ watchmedo = ["PyYAML (>=3.10)"]
name = "wcwidth"
version = "0.2.13"
description = "Measures the displayed width of unicode strings in a terminal"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -3409,7 +3327,6 @@ files = [
name = "websocket-client"
version = "1.8.0"
description = "WebSocket client for Python with low level API options"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -3426,7 +3343,6 @@ test = ["websockets"]
name = "wrapt"
version = "1.16.0"
description = "Module for decorators, wrappers and monkey patching."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -3506,7 +3422,6 @@ files = [
name = "xxhash"
version = "3.4.1"
description = "Python binding for xxHash"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -3624,7 +3539,6 @@ files = [
name = "yarl"
version = "1.9.4"
description = "Yet another URL library"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -3727,4 +3641,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "1ec6041622ce147bb8c2784049018a6b3c46d302deb653642425e8a0eedd2a87"
+content-hash = "598257da4f2afc86ccf88f0163dae694720ea7249af3ef88a880167bcb0a5b78"
diff --git a/agenta-backend/pyproject.toml b/agenta-backend/pyproject.toml
index d90db61083..040832fa10 100644
--- a/agenta-backend/pyproject.toml
+++ b/agenta-backend/pyproject.toml
@@ -13,7 +13,6 @@ pydantic = "^1.10.7"
docker = "7.1.0"
toml = "^0.10.2"
uvicorn = "^0.22.0"
-sqlmodel = "^0.0.8"
motor = "^3.1.2"
python-multipart = "^0.0.7"
backoff = "^2.2.1"
@@ -35,6 +34,8 @@ newrelic = "^9.8.0"
aioboto3 = "^12.4.0"
ragas = "^0.1.8"
openai = "^1.30.4"
+sqlalchemy = "^2.0.30"
+asyncpg = "^0.29.0"
[tool.poetry.group.dev.dependencies]
pytest = "^7.3.1"
diff --git a/docker-compose.yml b/docker-compose.yml
index f9dfc894fd..9e28377c0b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -15,6 +15,7 @@ services:
build: ./agenta-backend
environment:
- MONGODB_URI=mongodb://username:password@mongo:27017
+ - DATABASE_URL=postgresql+asyncpg://username:password@postgres:5432
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=development
- DATABASE_MODE=v2
From 10e5aad535abfe5b4d20613cbda36d245a44230f Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 3 Jun 2024 20:55:32 +0200
Subject: [PATCH 007/268] converting schema to sqlalchemy
---
.../agenta_backend/models/db_models.py | 559 ++++++++++--------
1 file changed, 313 insertions(+), 246 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 85877bb20c..905f3498bd 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -1,303 +1,370 @@
-from datetime import datetime, timezone
-from typing import Any, Dict, List, Optional
-
+from datetime import datetime
from pydantic import BaseModel, Field
-from beanie import Document, Link, PydanticObjectId
-
-
-class UserDB(Document):
- uid: str = Field(default="0", unique=True, index=True)
- username: str = Field(default="agenta")
- email: str = Field(default="demo@agenta.ai", unique=True)
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "users"
-
-
-class ImageDB(Document):
- """Defines the info needed to get an image and connect it to the app variant"""
-
- type: Optional[str] = Field(default="image")
- template_uri: Optional[str]
- docker_id: Optional[str] = Field(index=True)
- tags: Optional[str]
- deletable: bool = Field(default=True)
- user: Link[UserDB]
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "docker_images"
-
-
-class AppDB(Document):
- app_name: str
- user: Link[UserDB]
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "app_db"
-
-
-class DeploymentDB(Document):
- app: Link[AppDB]
- user: Link[UserDB]
- container_name: Optional[str]
- container_id: Optional[str]
- uri: Optional[str]
- status: str
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "deployments"
-
-
-class VariantBaseDB(Document):
- app: Link[AppDB]
- user: Link[UserDB]
- base_name: str
- image: Link[ImageDB]
- deployment: Optional[PydanticObjectId] # Link to deployment
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "bases"
-
+from typing import Any, Dict, List, Optional
+from sqlalchemy import (
+ Column,
+ String,
+ Integer,
+ DateTime,
+ Boolean,
+ ForeignKey,
+ JSON,
+ Text,
+ Float,
+)
+from sqlalchemy.orm import relationship, declarative_base
+
+Base = declarative_base()
+
+
+class UserDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ uid = Column(String, unique=True, index=True, default="0")
+ username = Column(String, default="agenta")
+ email = Column(String, unique=True, default="demo@agenta.ai")
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "users"
+
+
+class ImageDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ type = Column(String, default="image")
+ template_uri = Column(String)
+ docker_id = Column(String, index=True)
+ tags = Column(String)
+ deletable = Column(Boolean, default=True)
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "docker_images"
+
+
+class AppDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ app_name = Column(String)
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "app_db"
+
+
+class DeploymentDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ app_id = Column(Integer, ForeignKey("app_db.id"))
+ app = relationship("AppDB")
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ container_name = Column(String)
+ container_id = Column(String)
+ uri = Column(String)
+ status = Column(String)
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "deployments"
+
+
+class VariantBaseDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ app_id = Column(Integer, ForeignKey("app_db.id"))
+ app = relationship("AppDB")
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ base_name = Column(String)
+ image_id = Column(Integer, ForeignKey("docker_images.id"))
+ image = relationship("ImageDB")
+ deployment_id = Column(Integer) # reference to deployment, can be nullable
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "bases"
+
+
+class AppVariantDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ app_id = Column(Integer, ForeignKey("app_db.id"))
+ app = relationship("AppDB")
+ variant_name = Column(String)
+ revision = Column(Integer)
+ image_id = Column(Integer, ForeignKey("docker_images.id"))
+ image = relationship("ImageDB")
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB", foreign_keys=[user_id])
+ modified_by_id = Column(Integer, ForeignKey("users.id"))
+ modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
+ parameters = Column(JSON, default=dict) # deprecated
+ previous_variant_name = Column(String) # deprecated
+ base_name = Column(String)
+ base_id = Column(Integer, ForeignKey("bases.id"))
+ base = relationship("VariantBaseDB")
+ config_name = Column(String)
+ config_id = Column(Integer, ForeignKey("config.id"))
+ config = Column(JSON)
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+ is_deleted = Column(Boolean, default=False) # deprecated
+
+ __tablename__ = "app_variants"
+
+
+class AppVariantRevisionsDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ variant_id = Column(Integer, ForeignKey("app_variants.id"))
+ variant = relationship("AppVariantDB")
+ revision = Column(Integer)
+ modified_by_id = Column(Integer, ForeignKey("users.id"))
+ modified_by = relationship("UserDB")
+ base_id = Column(Integer, ForeignKey("bases.id"))
+ base = relationship("VariantBaseDB")
+ config_id = Column(Integer, ForeignKey("config.id"))
+ config = Column(JSON)
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "app_variant_revisions"
+
+
+class AppEnvironmentDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ app_id = Column(Integer, ForeignKey("app_db.id"))
+ app = relationship("AppDB")
+ name = Column(String)
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ revision = Column(Integer)
+ deployed_app_variant_id = Column(Integer) # reference to app_variant
+ deployed_app_variant_revision_id = Column(
+ Integer, ForeignKey("app_variant_revisions.id")
+ )
+ deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
+ deployment_id = Column(Integer) # reference to deployment
+ created_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "environments"
+
+
+class AppEnvironmentRevisionDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ environment_id = Column(Integer, ForeignKey("environments.id"))
+ environment = relationship("AppEnvironmentDB")
+ revision = Column(Integer)
+ modified_by_id = Column(Integer, ForeignKey("users.id"))
+ modified_by = relationship("UserDB")
+ deployed_app_variant_revision_id = Column(
+ Integer
+ ) # reference to app_variant_revision
+ deployment_id = Column(Integer) # reference to deployment
+ created_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "environments_revisions"
+
+
+class TemplateDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ type = Column(String, default="image")
+ template_uri = Column(String)
+ tag_id = Column(Integer)
+ name = Column(String, unique=True)
+ repo_name = Column(String)
+ title = Column(String)
+ description = Column(String)
+ size = Column(Integer)
+ digest = Column(String) # sha256 hash of image digest
+ last_pushed = Column(String)
+
+ __tablename__ = "templates"
+
+
+class TestSetDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ name = Column(String)
+ app_id = Column(Integer, ForeignKey("app_db.id"))
+ app = relationship("AppDB")
+ csvdata = Column(JSON) # List of dictionaries
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "testsets"
+
+
+class EvaluatorConfigDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ app_id = Column(Integer, ForeignKey("app_db.id"))
+ app = relationship("AppDB")
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ name = Column(String)
+ evaluator_key = Column(String)
+ settings_values = Column(JSON, default=dict)
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "evaluators_configs"
+
+
+class HumanEvaluationDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ app_id = Column(Integer, ForeignKey("app_db.id"))
+ app = relationship("AppDB")
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ status = Column(String)
+ evaluation_type = Column(String)
+ variants = Column(JSON) # List of PydanticObjectId
+ variants_revisions = Column(JSON) # List of PydanticObjectId
+ testset_id = Column(Integer, ForeignKey("testsets.id"))
+ testset = relationship("TestSetDB")
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "human_evaluations"
+
+
+class HumanEvaluationScenarioDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ evaluation_id = Column(Integer, ForeignKey("human_evaluations.id"))
+ evaluation = relationship("HumanEvaluationDB")
+ inputs = Column(JSON) # List of HumanEvaluationScenarioInput
+ outputs = Column(JSON) # List of HumanEvaluationScenarioOutput
+ vote = Column(String)
+ score = Column(JSON) # Any type
+ correct_answer = Column(String)
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+ is_pinned = Column(Boolean)
+ note = Column(String)
+
+ __tablename__ = "human_evaluations_scenarios"
+
+
+class EvaluationDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ app_id = Column(Integer, ForeignKey("app_db.id"))
+ app = relationship("AppDB")
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ status = Column(JSON) # Result type
+ testset_id = Column(Integer, ForeignKey("testsets.id"))
+ testset = relationship("TestSetDB")
+ variant = Column(Integer) # PydanticObjectId
+ variant_revision = Column(Integer) # PydanticObjectId
+ evaluators_configs = Column(JSON) # List of PydanticObjectId
+ aggregated_results = Column(JSON) # List of AggregatedResult
+ average_cost = Column(JSON) # Result type
+ total_cost = Column(JSON) # Result type
+ average_latency = Column(JSON) # Result type
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "new_evaluations"
+
+
+class EvaluationScenarioDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ user_id = Column(Integer, ForeignKey("users.id"))
+ user = relationship("UserDB")
+ evaluation_id = Column(Integer, ForeignKey("new_evaluations.id"))
+ evaluation = relationship("EvaluationDB")
+ variant_id = Column(Integer) # PydanticObjectId
+ inputs = Column(JSON) # List of EvaluationScenarioInputDB
+ outputs = Column(JSON) # List of EvaluationScenarioOutputDB
+ correct_answers = Column(JSON) # List of CorrectAnswer
+ is_pinned = Column(Boolean)
+ note = Column(String)
+ evaluators_configs = Column(JSON) # List of PydanticObjectId
+ results = Column(JSON) # List of EvaluationScenarioResult
+ latency = Column(Integer)
+ cost = Column(Integer)
+ created_at = Column(DateTime, default=datetime.utcnow)
+ updated_at = Column(DateTime, default=datetime.utcnow)
+
+ __tablename__ = "new_evaluation_scenarios"
class ConfigDB(BaseModel):
config_name: str
parameters: Dict[str, Any] = Field(default_factory=dict)
-
-class AppVariantDB(Document):
- app: Link[AppDB]
- variant_name: str
- revision: int
- image: Link[ImageDB]
- user: Link[UserDB]
- modified_by: Link[UserDB]
- parameters: Dict[str, Any] = Field(default=dict) # TODO: deprecated. remove
- previous_variant_name: Optional[str] # TODO: deprecated. remove
- base_name: Optional[str]
- base: Link[VariantBaseDB]
- config_name: Optional[str]
- config: ConfigDB
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- is_deleted: bool = Field( # TODO: deprecated. remove
- default=False
- ) # soft deletion for using the template variants
-
- class Settings:
- name = "app_variants"
-
-
-class AppVariantRevisionsDB(Document):
- variant: Link[AppVariantDB]
- revision: int
- modified_by: Link[UserDB]
- base: Link[VariantBaseDB]
- config: ConfigDB
- created_at: datetime
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "app_variant_revisions"
-
-
-class AppEnvironmentDB(Document):
- app: Link[AppDB]
- name: str
- user: Link[UserDB]
- revision: int
- deployed_app_variant: Optional[PydanticObjectId]
- deployed_app_variant_revision: Optional[Link[AppVariantRevisionsDB]]
- deployment: Optional[PydanticObjectId] # reference to deployment
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "environments"
-
-
-class AppEnvironmentRevisionDB(Document):
- environment: Link[AppEnvironmentDB]
- revision: int
- modified_by: Link[UserDB]
- deployed_app_variant_revision: Optional[PydanticObjectId]
- deployment: Optional[PydanticObjectId] # reference to deployment
- created_at: datetime
-
- class Settings:
- name = "environments_revisions"
-
-
-class TemplateDB(Document):
- type: Optional[str] = Field(default="image")
- template_uri: Optional[str]
- tag_id: Optional[int]
- name: str = Field(unique=True) # tag name of image
- repo_name: Optional[str]
- title: str
- description: str
- size: Optional[int]
- digest: Optional[str] # sha256 hash of image digest
- last_pushed: Optional[datetime]
-
- class Settings:
- name = "templates"
-
-
-class TestSetDB(Document):
- name: str
- app: Link[AppDB]
- csvdata: List[Dict[str, str]]
- user: Link[UserDB]
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "testsets"
-
-
-class EvaluatorConfigDB(Document):
- app: Link[AppDB]
- user: Link[UserDB]
- name: str
- evaluator_key: str
- settings_values: Dict[str, Any] = Field(default=dict)
- created_at: datetime = Field(default=datetime.now(timezone.utc))
- updated_at: datetime = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "evaluators_configs"
-
+ class Config:
+ arbitrary_types_allowed = True
class Error(BaseModel):
message: str
stacktrace: Optional[str] = None
+ class Config:
+ arbitrary_types_allowed = True
class Result(BaseModel):
type: str
value: Optional[Any] = None
error: Optional[Error] = None
+ class Config:
+ arbitrary_types_allowed = True
class InvokationResult(BaseModel):
result: Result
cost: Optional[float] = None
latency: Optional[float] = None
+ class Config:
+ arbitrary_types_allowed = True
class EvaluationScenarioResult(BaseModel):
- evaluator_config: PydanticObjectId
+ evaluator_config: int # Assuming this should be an ID reference
result: Result
+ class Config:
+ arbitrary_types_allowed = True
class AggregatedResult(BaseModel):
- evaluator_config: PydanticObjectId
+ evaluator_config: int # Assuming this should be an ID reference
result: Result
+ class Config:
+ arbitrary_types_allowed = True
class EvaluationScenarioInputDB(BaseModel):
name: str
type: str
value: str
+ class Config:
+ arbitrary_types_allowed = True
class EvaluationScenarioOutputDB(BaseModel):
result: Result
cost: Optional[float] = None
latency: Optional[float] = None
+ class Config:
+ arbitrary_types_allowed = True
class HumanEvaluationScenarioInput(BaseModel):
input_name: str
input_value: str
+ class Config:
+ arbitrary_types_allowed = True
class HumanEvaluationScenarioOutput(BaseModel):
variant_id: str
variant_output: str
-
-class HumanEvaluationDB(Document):
- app: Link[AppDB]
- user: Link[UserDB]
- status: str
- evaluation_type: str
- variants: List[PydanticObjectId]
- variants_revisions: List[PydanticObjectId]
- testset: Link[TestSetDB]
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "human_evaluations"
-
-
-class HumanEvaluationScenarioDB(Document):
- user: Link[UserDB]
- evaluation: Link[HumanEvaluationDB]
- inputs: List[HumanEvaluationScenarioInput]
- outputs: List[HumanEvaluationScenarioOutput]
- vote: Optional[str]
- score: Optional[Any]
- correct_answer: Optional[str]
- created_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- updated_at: Optional[datetime] = Field(default=datetime.now(timezone.utc))
- is_pinned: Optional[bool]
- note: Optional[str]
-
- class Settings:
- name = "human_evaluations_scenarios"
-
-
-class EvaluationDB(Document):
- app: Link[AppDB]
- user: Link[UserDB]
- status: Result
- testset: Link[TestSetDB]
- variant: PydanticObjectId
- variant_revision: PydanticObjectId
- evaluators_configs: List[PydanticObjectId]
- aggregated_results: List[AggregatedResult]
- average_cost: Optional[Result] = None
- total_cost: Optional[Result] = None
- average_latency: Optional[Result] = None
- created_at: datetime = Field(default=datetime.now(timezone.utc))
- updated_at: datetime = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "new_evaluations"
+ class Config:
+ arbitrary_types_allowed = True
class CorrectAnswer(BaseModel):
key: str
- value: str
-
-
-class EvaluationScenarioDB(Document):
- user: Link[UserDB]
- evaluation: Link[EvaluationDB]
- variant_id: PydanticObjectId
- inputs: List[EvaluationScenarioInputDB]
- outputs: List[EvaluationScenarioOutputDB]
- correct_answers: Optional[List[CorrectAnswer]]
- is_pinned: Optional[bool]
- note: Optional[str]
- evaluators_configs: List[PydanticObjectId]
- results: List[EvaluationScenarioResult]
- latency: Optional[int] = None
- cost: Optional[int] = None
- created_at: datetime = Field(default=datetime.now(timezone.utc))
- updated_at: datetime = Field(default=datetime.now(timezone.utc))
-
- class Settings:
- name = "new_evaluation_scenarios"
+ value: str
\ No newline at end of file
From b9b17fcd97ccab206a4a93b531109dbbf69c8bcf Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 3 Jun 2024 21:09:02 +0200
Subject: [PATCH 008/268] format
---
agenta-backend/agenta_backend/models/db_engine.py | 9 ++++++---
agenta-backend/agenta_backend/models/db_models.py | 12 +++++++++++-
agenta-backend/agenta_backend/services/db_manager.py | 4 +++-
3 files changed, 20 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index 375bb13d75..4744e0e362 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -75,6 +75,7 @@
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
+
class DBEngine:
"""
Database engine to initialize SQLAlchemy and return the engine based on mode.
@@ -82,7 +83,9 @@ class DBEngine:
def __init__(self) -> None:
self.mode = os.environ.get("DATABASE_MODE", "v2")
- self.db_url = os.environ["DATABASE_URL"] # Use SQLAlchemy compatible database URL
+ self.db_url = os.environ[
+ "DATABASE_URL"
+ ]
self.engine = create_async_engine(self.db_url, echo=True)
self.async_session = sessionmaker(
self.engine, expire_on_commit=False, class_=AsyncSession
@@ -108,7 +111,6 @@ async def remove_db(self) -> None:
for model in models:
await conn.run_sync(model.metadata.drop_all)
-
@asynccontextmanager
async def get_session(self):
session = self.async_session()
@@ -117,4 +119,5 @@ async def get_session(self):
finally:
await session.close()
-db_engine = DBEngine()
\ No newline at end of file
+
+db_engine = DBEngine()
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 905f3498bd..302d75ba48 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -290,6 +290,7 @@ class EvaluationScenarioDB(Base):
__tablename__ = "new_evaluation_scenarios"
+
class ConfigDB(BaseModel):
config_name: str
parameters: Dict[str, Any] = Field(default_factory=dict)
@@ -297,6 +298,7 @@ class ConfigDB(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class Error(BaseModel):
message: str
stacktrace: Optional[str] = None
@@ -304,6 +306,7 @@ class Error(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class Result(BaseModel):
type: str
value: Optional[Any] = None
@@ -312,6 +315,7 @@ class Result(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class InvokationResult(BaseModel):
result: Result
cost: Optional[float] = None
@@ -320,6 +324,7 @@ class InvokationResult(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class EvaluationScenarioResult(BaseModel):
evaluator_config: int # Assuming this should be an ID reference
result: Result
@@ -327,6 +332,7 @@ class EvaluationScenarioResult(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class AggregatedResult(BaseModel):
evaluator_config: int # Assuming this should be an ID reference
result: Result
@@ -334,6 +340,7 @@ class AggregatedResult(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class EvaluationScenarioInputDB(BaseModel):
name: str
type: str
@@ -342,6 +349,7 @@ class EvaluationScenarioInputDB(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class EvaluationScenarioOutputDB(BaseModel):
result: Result
cost: Optional[float] = None
@@ -350,6 +358,7 @@ class EvaluationScenarioOutputDB(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class HumanEvaluationScenarioInput(BaseModel):
input_name: str
input_value: str
@@ -357,6 +366,7 @@ class HumanEvaluationScenarioInput(BaseModel):
class Config:
arbitrary_types_allowed = True
+
class HumanEvaluationScenarioOutput(BaseModel):
variant_id: str
variant_output: str
@@ -367,4 +377,4 @@ class Config:
class CorrectAnswer(BaseModel):
key: str
- value: str
\ No newline at end of file
+ value: str
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 00efd06e22..bb15c6d34e 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1702,7 +1702,9 @@ async def add_template(**kwargs: dict) -> str:
"""
async with db_engine.get_session() as session:
- result = await session.execute(select(TemplateDB).filter_by(tag_id=kwargs["tag_id"]))
+ result = await session.execute(
+ select(TemplateDB).filter_by(tag_id=kwargs["tag_id"])
+ )
existing_template = result.scalars().one_or_none()
if existing_template is None:
From 6b2a11ab9c5be720881adbf78e9cba760f89459f Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 11:39:59 +0200
Subject: [PATCH 009/268] add db_mode
---
agenta-backend/agenta_backend/models/db_engine.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index 4744e0e362..e573b29620 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -83,9 +83,7 @@ class DBEngine:
def __init__(self) -> None:
self.mode = os.environ.get("DATABASE_MODE", "v2")
- self.db_url = os.environ[
- "DATABASE_URL"
- ]
+ self.db_url = f"{os.environ.get('DATABASE_URL')}/{self.mode}"
self.engine = create_async_engine(self.db_url, echo=True)
self.async_session = sessionmaker(
self.engine, expire_on_commit=False, class_=AsyncSession
From dba05974261204d33f84380cf957bee088a14536 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 12:20:35 +0200
Subject: [PATCH 010/268] fixes so that the app starts without errors
---
.../agenta_backend/models/db_engine.py | 2 +-
.../agenta_backend/models/db_models.py | 3 +--
.../agenta_backend/services/db_manager.py | 23 +++++++++++++------
3 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index e573b29620..564d8e0ddc 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -83,7 +83,7 @@ class DBEngine:
def __init__(self) -> None:
self.mode = os.environ.get("DATABASE_MODE", "v2")
- self.db_url = f"{os.environ.get('DATABASE_URL')}/{self.mode}"
+ self.db_url = f"{os.environ.get('DATABASE_URL')}/agenta_{self.mode}"
self.engine = create_async_engine(self.db_url, echo=True)
self.async_session = sessionmaker(
self.engine, expire_on_commit=False, class_=AsyncSession
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 302d75ba48..02674e5de8 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -104,7 +104,7 @@ class AppVariantDB(Base):
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
config_name = Column(String)
- config_id = Column(Integer, ForeignKey("config.id"))
+ config_id = Column(JSON)
config = Column(JSON)
created_at = Column(DateTime, default=datetime.utcnow)
updated_at = Column(DateTime, default=datetime.utcnow)
@@ -122,7 +122,6 @@ class AppVariantRevisionsDB(Base):
modified_by = relationship("UserDB")
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
- config_id = Column(Integer, ForeignKey("config.id"))
config = Column(JSON)
created_at = Column(DateTime, default=datetime.utcnow)
updated_at = Column(DateTime, default=datetime.utcnow)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index bb15c6d34e..1d03f02642 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1783,15 +1783,24 @@ async def remove_old_template_from_db(tag_ids: list) -> None:
tag_ids -- list of template IDs you want to keep
"""
- templates_to_delete = []
- templates: List[TemplateDB] = await TemplateDB.find().to_list()
+ async with db_engine.get_session() as session:
+ # Fetch all templates with tag_id in tag_ids
+ templates = await session.execute(
+ select(TemplateDB)
+ )
+ templates = templates.scalars().all()
+
+ # Filter templates to delete
+ templates_to_delete = [
+ template for template in templates if template.tag_id not in tag_ids
+ ]
- for temp in templates:
- if temp.tag_id not in tag_ids:
- templates_to_delete.append(temp)
+ # Delete each template
+ for template in templates_to_delete:
+ await session.delete(template)
- for template in templates_to_delete:
- await template.delete()
+ # Commit the changes
+ await session.commit()
async def get_templates() -> List[Template]:
From e35fa6c203274282690f3e4cdb8f1be5ee2c469b Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 14:39:40 +0200
Subject: [PATCH 011/268] format
---
agenta-backend/agenta_backend/services/db_manager.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 1d03f02642..264646f393 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1785,9 +1785,7 @@ async def remove_old_template_from_db(tag_ids: list) -> None:
async with db_engine.get_session() as session:
# Fetch all templates with tag_id in tag_ids
- templates = await session.execute(
- select(TemplateDB)
- )
+ templates = await session.execute(select(TemplateDB))
templates = templates.scalars().all()
# Filter templates to delete
From 80533429ade672025f347bf9d402a5d4bff479d2 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 15:46:50 +0200
Subject: [PATCH 012/268] fix timezone
---
.../agenta_backend/models/db_models.py | 118 +++++++++++++-----
1 file changed, 88 insertions(+), 30 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 02674e5de8..f9264d03d9 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timezone
from pydantic import BaseModel, Field
from typing import Any, Dict, List, Optional
from sqlalchemy import (
@@ -22,8 +22,12 @@ class UserDB(Base):
uid = Column(String, unique=True, index=True, default="0")
username = Column(String, default="agenta")
email = Column(String, unique=True, default="demo@agenta.ai")
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "users"
@@ -37,8 +41,12 @@ class ImageDB(Base):
deletable = Column(Boolean, default=True)
user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("UserDB")
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "docker_images"
@@ -48,8 +56,12 @@ class AppDB(Base):
app_name = Column(String)
user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("UserDB")
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "app_db"
@@ -64,8 +76,12 @@ class DeploymentDB(Base):
container_id = Column(String)
uri = Column(String)
status = Column(String)
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "deployments"
@@ -80,8 +96,12 @@ class VariantBaseDB(Base):
image_id = Column(Integer, ForeignKey("docker_images.id"))
image = relationship("ImageDB")
deployment_id = Column(Integer) # reference to deployment, can be nullable
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "bases"
@@ -106,8 +126,12 @@ class AppVariantDB(Base):
config_name = Column(String)
config_id = Column(JSON)
config = Column(JSON)
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
is_deleted = Column(Boolean, default=False) # deprecated
__tablename__ = "app_variants"
@@ -123,8 +147,12 @@ class AppVariantRevisionsDB(Base):
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
config = Column(JSON)
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "app_variant_revisions"
@@ -143,7 +171,9 @@ class AppEnvironmentDB(Base):
)
deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
deployment_id = Column(Integer) # reference to deployment
- created_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "environments"
@@ -159,7 +189,9 @@ class AppEnvironmentRevisionDB(Base):
Integer
) # reference to app_variant_revision
deployment_id = Column(Integer) # reference to deployment
- created_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "environments_revisions"
@@ -175,7 +207,9 @@ class TemplateDB(Base):
description = Column(String)
size = Column(Integer)
digest = Column(String) # sha256 hash of image digest
- last_pushed = Column(String)
+ last_pushed = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "templates"
@@ -188,8 +222,12 @@ class TestSetDB(Base):
csvdata = Column(JSON) # List of dictionaries
user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("UserDB")
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "testsets"
@@ -203,8 +241,12 @@ class EvaluatorConfigDB(Base):
name = Column(String)
evaluator_key = Column(String)
settings_values = Column(JSON, default=dict)
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "evaluators_configs"
@@ -221,8 +263,12 @@ class HumanEvaluationDB(Base):
variants_revisions = Column(JSON) # List of PydanticObjectId
testset_id = Column(Integer, ForeignKey("testsets.id"))
testset = relationship("TestSetDB")
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "human_evaluations"
@@ -238,8 +284,12 @@ class HumanEvaluationScenarioDB(Base):
vote = Column(String)
score = Column(JSON) # Any type
correct_answer = Column(String)
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
is_pinned = Column(Boolean)
note = Column(String)
@@ -262,8 +312,12 @@ class EvaluationDB(Base):
average_cost = Column(JSON) # Result type
total_cost = Column(JSON) # Result type
average_latency = Column(JSON) # Result type
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "new_evaluations"
@@ -284,8 +338,12 @@ class EvaluationScenarioDB(Base):
results = Column(JSON) # List of EvaluationScenarioResult
latency = Column(Integer)
cost = Column(Integer)
- created_at = Column(DateTime, default=datetime.utcnow)
- updated_at = Column(DateTime, default=datetime.utcnow)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
__tablename__ = "new_evaluation_scenarios"
From 6a52de20283a41abfcc9391b6c05e2fb9cbe108c Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 16:23:22 +0200
Subject: [PATCH 013/268] add a helper to convert a value to utc
---
.../agenta_backend/services/helpers.py | 23 +++++++++++++++++--
.../services/templates_manager.py | 12 ++++++----
2 files changed, 28 insertions(+), 7 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/helpers.py b/agenta-backend/agenta_backend/services/helpers.py
index e3b9bf0810..7b9510a0b7 100644
--- a/agenta-backend/agenta_backend/services/helpers.py
+++ b/agenta-backend/agenta_backend/services/helpers.py
@@ -1,6 +1,6 @@
import json
-from typing import List, Dict, Any, Tuple
-from datetime import datetime, timedelta
+from typing import List, Dict, Any, Tuple, Union
+from datetime import datetime, timedelta, timezone
def format_inputs(list_of_dictionaries: List[Dict[str, Any]]) -> Dict:
@@ -57,3 +57,22 @@ def include_dynamic_values(json_data: Dict, inputs: Dict[str, Any]) -> Dict:
json_data = json_data.replace(f"{key}", value)
return json_data
+
+
+def convert_to_utc_datetime(dt: Union[datetime, str, None]) -> datetime:
+ """
+ Converts a datetime object, a datetime string, or None into a UTC timezone-aware datetime object.
+
+ Args:
+ dt (Union[datetime, str, None]): The input datetime, which can be a datetime object, a string, or None.
+
+ Returns:
+ datetime: A UTC timezone-aware datetime object.
+ """
+ if dt is None:
+ return datetime.now(timezone.utc)
+ if isinstance(dt, str):
+ return datetime.fromisoformat(dt).astimezone(timezone.utc)
+ if dt.tzinfo is None:
+ return dt.replace(tzinfo=timezone.utc)
+ return dt
diff --git a/agenta-backend/agenta_backend/services/templates_manager.py b/agenta-backend/agenta_backend/services/templates_manager.py
index 6a43e13fbc..3c9f074fd5 100644
--- a/agenta-backend/agenta_backend/services/templates_manager.py
+++ b/agenta-backend/agenta_backend/services/templates_manager.py
@@ -10,6 +10,10 @@
from agenta_backend.services import db_manager
from agenta_backend.utils.common import isCloud, isOss
+from datetime import datetime, timezone
+
+from agenta_backend.services.helpers import convert_to_utc_datetime
+
if isCloud() or isOss():
from agenta_backend.services import container_manager
@@ -37,6 +41,8 @@ async def update_and_sync_templates(cache: bool = True) -> None:
if temp["name"] in list(templates_info.keys()):
templates_ids_not_to_remove.append(int(temp["id"]))
temp_info = templates_info[temp["name"]]
+ last_pushed = convert_to_utc_datetime(temp.get("last_pushed"))
+
template_id = await db_manager.add_template(
**{
"tag_id": int(temp["id"]),
@@ -50,11 +56,7 @@ async def update_and_sync_templates(cache: bool = True) -> None:
else temp["size"]
),
"digest": temp["digest"],
- "last_pushed": (
- temp["images"][0]["last_pushed"]
- if not temp.get("last_pushed", None)
- else temp["last_pushed"]
- ),
+ "last_pushed": last_pushed,
}
)
print(f"Template {template_id} added to the database.")
From 84bf0314a294778f8e9a4c92bd0f30535bc575fe Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 17:31:31 +0200
Subject: [PATCH 014/268] remove deprecated columns and add new tables
 HumanEvaluationScenarioInputsDB and HumanEvaluationScenarioOutputsDB
---
.../agenta_backend/models/db_models.py | 39 +++++++++++++------
1 file changed, 27 insertions(+), 12 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index f9264d03d9..ef4e96e15d 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -32,6 +32,7 @@ class UserDB(Base):
__tablename__ = "users"
+# TODO: Rename ImageDB to DockerImageDB ?
class ImageDB(Base):
id = Column(Integer, primary_key=True, autoincrement=True)
type = Column(String, default="image")
@@ -118,8 +119,6 @@ class AppVariantDB(Base):
user = relationship("UserDB", foreign_keys=[user_id])
modified_by_id = Column(Integer, ForeignKey("users.id"))
modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
- parameters = Column(JSON, default=dict) # deprecated
- previous_variant_name = Column(String) # deprecated
base_name = Column(String)
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
@@ -132,7 +131,6 @@ class AppVariantDB(Base):
updated_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- is_deleted = Column(Boolean, default=False) # deprecated
__tablename__ = "app_variants"
@@ -146,7 +144,7 @@ class AppVariantRevisionsDB(Base):
modified_by = relationship("UserDB")
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
- config = Column(JSON)
+ config = Column(JSON) # TODO: Use table ConfigDB
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -240,7 +238,7 @@ class EvaluatorConfigDB(Base):
user = relationship("UserDB")
name = Column(String)
evaluator_key = Column(String)
- settings_values = Column(JSON, default=dict)
+ settings_values = Column(JSON, default=dict) # TODO: Check
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -259,8 +257,8 @@ class HumanEvaluationDB(Base):
user = relationship("UserDB")
status = Column(String)
evaluation_type = Column(String)
- variants = Column(JSON) # List of PydanticObjectId
- variants_revisions = Column(JSON) # List of PydanticObjectId
+ variants = Column(JSON) # List of PydanticObjectId # TODO: Check
+ variants_revisions = Column(JSON) # List of PydanticObjectId TODO: Check
testset_id = Column(Integer, ForeignKey("testsets.id"))
testset = relationship("TestSetDB")
created_at = Column(
@@ -279,10 +277,10 @@ class HumanEvaluationScenarioDB(Base):
user = relationship("UserDB")
evaluation_id = Column(Integer, ForeignKey("human_evaluations.id"))
evaluation = relationship("HumanEvaluationDB")
- inputs = Column(JSON) # List of HumanEvaluationScenarioInput
- outputs = Column(JSON) # List of HumanEvaluationScenarioOutput
+ inputs = relationship("HumanEvaluationScenarioInputsDB", backref="scenario")
+ outputs = relationship("HumanEvaluationScenarioOutputsDB", backref="scenario")
vote = Column(String)
- score = Column(JSON) # Any type
+ score = Column(JSON)
correct_answer = Column(String)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -292,10 +290,27 @@ class HumanEvaluationScenarioDB(Base):
)
is_pinned = Column(Boolean)
note = Column(String)
-
__tablename__ = "human_evaluations_scenarios"
+class HumanEvaluationScenarioInputsDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ scenario_id = Column(Integer, ForeignKey("human_evaluations_scenarios.id"))
+ input_name = Column(String)
+ input_value = Column(String)
+
+ __tablename__ = "human_evaluations_scenarios_inputs"
+
+
+class HumanEvaluationScenarioOutputsDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ scenario_id = Column(Integer, ForeignKey("human_evaluations_scenarios.id"))
+ variant_id = Column(String)
+ variant_output = Column(String)
+
+ __tablename__ = "human_evaluations_scenarios_outputs"
+
+
class EvaluationDB(Base):
id = Column(Integer, primary_key=True, autoincrement=True)
app_id = Column(Integer, ForeignKey("app_db.id"))
@@ -307,7 +322,7 @@ class EvaluationDB(Base):
testset = relationship("TestSetDB")
variant = Column(Integer) # PydanticObjectId
variant_revision = Column(Integer) # PydanticObjectId
- evaluators_configs = Column(JSON) # List of PydanticObjectId
+ evaluators_configs = Column(JSON) # List of PydanticObjectId # TODO: Check
aggregated_results = Column(JSON) # List of AggregatedResult
average_cost = Column(JSON) # Result type
total_cost = Column(JSON) # Result type
From aeb04e2a01e8b0ce3a80c2183af1c4da988747e9 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 19:10:10 +0200
Subject: [PATCH 015/268] move some models to shared models
---
.../agenta_backend/models/api/api_models.py | 2 +-
.../agenta_backend/models/converters.py | 7 +-
.../agenta_backend/models/db_models.py | 111 ++++--------------
.../agenta_backend/models/shared_models.py | 52 ++++++++
.../services/aggregation_service.py | 2 +-
.../agenta_backend/services/db_manager.py | 11 +-
.../services/evaluation_service.py | 2 +-
.../services/evaluators_service.py | 2 +-
.../services/llm_apps_service.py | 2 +-
.../agenta_backend/tasks/evaluations.py | 6 +-
10 files changed, 94 insertions(+), 103 deletions(-)
create mode 100644 agenta-backend/agenta_backend/models/shared_models.py
diff --git a/agenta-backend/agenta_backend/models/api/api_models.py b/agenta-backend/agenta_backend/models/api/api_models.py
index fc2c34f313..e01568001c 100644
--- a/agenta-backend/agenta_backend/models/api/api_models.py
+++ b/agenta-backend/agenta_backend/models/api/api_models.py
@@ -4,7 +4,7 @@
from pydantic import BaseModel, Field
-from agenta_backend.models.db_models import ConfigDB
+from agenta_backend.models.shared_models import ConfigDB
class PaginationParam(BaseModel):
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index cb73ea5976..7fdbb55c72 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -71,9 +71,7 @@
from agenta_backend.models.db_models import (
TemplateDB,
- AggregatedResult,
AppVariantRevisionsDB,
- EvaluationScenarioResult,
)
from agenta_backend.models.api.api_models import (
App,
@@ -86,6 +84,11 @@
WithPagination,
)
+from agenta_backend.models.shared_models import (
+ AggregatedResult,
+ EvaluationScenarioResult,
+)
+
from fastapi import Depends
from beanie import Link, PydanticObjectId as ObjectId
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index ef4e96e15d..5fe0d0850b 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -299,7 +299,7 @@ class HumanEvaluationScenarioInputsDB(Base):
input_name = Column(String)
input_value = Column(String)
- __tablename__ = "human_evaluations_scenarios_inputs"
+ __tablename__ = "human_evaluation_scenario_inputs"
class HumanEvaluationScenarioOutputsDB(Base):
@@ -308,7 +308,7 @@ class HumanEvaluationScenarioOutputsDB(Base):
variant_id = Column(String)
variant_output = Column(String)
- __tablename__ = "human_evaluations_scenarios_outputs"
+ __tablename__ = "human_evaluation_scenario_outputs"
class EvaluationDB(Base):
@@ -334,18 +334,18 @@ class EvaluationDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "new_evaluations"
+ __tablename__ = "evaluations"
class EvaluationScenarioDB(Base):
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("UserDB")
- evaluation_id = Column(Integer, ForeignKey("new_evaluations.id"))
+ evaluation_id = Column(Integer, ForeignKey("evaluations.id"))
evaluation = relationship("EvaluationDB")
variant_id = Column(Integer) # PydanticObjectId
- inputs = Column(JSON) # List of EvaluationScenarioInputDB
- outputs = Column(JSON) # List of EvaluationScenarioOutputDB
+ inputs = relationship("EvaluationScenarioInputDB", backref="scenario")
+ outputs = relationship("EvaluationScenarioOutputDB", backref="scenario")
correct_answers = Column(JSON) # List of CorrectAnswer
is_pinned = Column(Boolean)
note = Column(String)
@@ -360,93 +360,24 @@ class EvaluationScenarioDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "new_evaluation_scenarios"
+ __tablename__ = "evaluation_scenarios"
-class ConfigDB(BaseModel):
- config_name: str
- parameters: Dict[str, Any] = Field(default_factory=dict)
-
- class Config:
- arbitrary_types_allowed = True
-
-
-class Error(BaseModel):
- message: str
- stacktrace: Optional[str] = None
-
- class Config:
- arbitrary_types_allowed = True
-
-
-class Result(BaseModel):
- type: str
- value: Optional[Any] = None
- error: Optional[Error] = None
-
- class Config:
- arbitrary_types_allowed = True
-
-
-class InvokationResult(BaseModel):
- result: Result
- cost: Optional[float] = None
- latency: Optional[float] = None
-
- class Config:
- arbitrary_types_allowed = True
-
-
-class EvaluationScenarioResult(BaseModel):
- evaluator_config: int # Assuming this should be an ID reference
- result: Result
-
- class Config:
- arbitrary_types_allowed = True
-
-
-class AggregatedResult(BaseModel):
- evaluator_config: int # Assuming this should be an ID reference
- result: Result
-
- class Config:
- arbitrary_types_allowed = True
-
-
-class EvaluationScenarioInputDB(BaseModel):
- name: str
- type: str
- value: str
-
- class Config:
- arbitrary_types_allowed = True
-
-
-class EvaluationScenarioOutputDB(BaseModel):
- result: Result
- cost: Optional[float] = None
- latency: Optional[float] = None
-
- class Config:
- arbitrary_types_allowed = True
-
-
-class HumanEvaluationScenarioInput(BaseModel):
- input_name: str
- input_value: str
-
- class Config:
- arbitrary_types_allowed = True
-
+class EvaluationScenarioInputDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ scenario_id = Column(Integer, ForeignKey("evaluation_scenarios.id"))
+ name = Column(String)
+ type = Column(String)
+ value = Column(String)
-class HumanEvaluationScenarioOutput(BaseModel):
- variant_id: str
- variant_output: str
+ __tablename__ = "evaluation_scenario_inputs"
- class Config:
- arbitrary_types_allowed = True
+class EvaluationScenarioOutputDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ scenario_id = Column(Integer, ForeignKey("evaluation_scenarios.id"))
+ result = Column(JSON) # Result type
+ cost = Column(Float)
+ latency = Column(Float)
-class CorrectAnswer(BaseModel):
- key: str
- value: str
+ __tablename__ = "evaluation_scenario_outputs"
diff --git a/agenta-backend/agenta_backend/models/shared_models.py b/agenta-backend/agenta_backend/models/shared_models.py
new file mode 100644
index 0000000000..e5a8ffb724
--- /dev/null
+++ b/agenta-backend/agenta_backend/models/shared_models.py
@@ -0,0 +1,52 @@
+from pydantic import BaseModel, Field
+from typing import Any, Dict, List, Optional
+
+
+class ConfigDB(BaseModel):
+ config_name: str
+ parameters: Dict[str, Any] = Field(default_factory=dict)
+
+
+class Error(BaseModel):
+ message: str
+ stacktrace: Optional[str] = None
+
+
+class Result(BaseModel):
+ type: str
+ value: Optional[Any] = None
+ error: Optional[Error] = None
+
+
+class InvokationResult(BaseModel):
+ result: Result
+ cost: Optional[float] = None
+ latency: Optional[float] = None
+
+
+class EvaluationScenarioResult(BaseModel):
+ evaluator_config: int
+ result: Result
+
+ class Config:
+ arbitrary_types_allowed = True
+
+
+class AggregatedResult(BaseModel):
+ evaluator_config: int
+ result: Result
+
+
+class CorrectAnswer(BaseModel):
+ key: str
+ value: str
+
+
+class HumanEvaluationScenarioInput(BaseModel):
+ input_name: str
+ input_value: str
+
+
+class HumanEvaluationScenarioOutput(BaseModel):
+ variant_id: str
+ variant_output: str
diff --git a/agenta-backend/agenta_backend/services/aggregation_service.py b/agenta-backend/agenta_backend/services/aggregation_service.py
index d1120f1b53..b55c2e1995 100644
--- a/agenta-backend/agenta_backend/services/aggregation_service.py
+++ b/agenta-backend/agenta_backend/services/aggregation_service.py
@@ -2,7 +2,7 @@
import traceback
from typing import List, Optional
-from agenta_backend.models.db_models import InvokationResult, Result, Error
+from agenta_backend.models.shared_models import InvokationResult, Result, Error
def aggregate_ai_critique(results: List[Result]) -> Result:
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 264646f393..f5069fda7f 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -63,16 +63,19 @@
HumanEvaluationScenarioDB,
)
from agenta_backend.models.db_models import (
- ConfigDB,
- CorrectAnswer,
TemplateDB,
- AggregatedResult,
AppVariantRevisionsDB,
- EvaluationScenarioResult,
EvaluationScenarioInputDB,
EvaluationScenarioOutputDB,
)
+from agenta_backend.models.shared_models import (
+ ConfigDB,
+ CorrectAnswer,
+ AggregatedResult,
+ EvaluationScenarioResult,
+)
+
from beanie.operators import In
from beanie import PydanticObjectId as ObjectId
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index 7afa2ff194..96b7f904c1 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -41,7 +41,7 @@
HumanEvaluationScenarioDB,
)
-from agenta_backend.models.db_models import (
+from agenta_backend.models.shared_models import (
HumanEvaluationScenarioInput,
HumanEvaluationScenarioOutput,
Result,
diff --git a/agenta-backend/agenta_backend/services/evaluators_service.py b/agenta-backend/agenta_backend/services/evaluators_service.py
index d73a4229d9..710affb89e 100644
--- a/agenta-backend/agenta_backend/services/evaluators_service.py
+++ b/agenta-backend/agenta_backend/services/evaluators_service.py
@@ -6,7 +6,7 @@
import httpx
from openai import OpenAI
-from agenta_backend.models.db_models import Error, Result
+from agenta_backend.models.shared_models import Error, Result
from agenta_backend.services.security import sandbox
logger = logging.getLogger(__name__)
diff --git a/agenta-backend/agenta_backend/services/llm_apps_service.py b/agenta-backend/agenta_backend/services/llm_apps_service.py
index 2accbfe509..93654b3729 100644
--- a/agenta-backend/agenta_backend/services/llm_apps_service.py
+++ b/agenta-backend/agenta_backend/services/llm_apps_service.py
@@ -6,7 +6,7 @@
from typing import Any, Dict, List
-from agenta_backend.models.db_models import InvokationResult, Result, Error
+from agenta_backend.models.shared_models import InvokationResult, Result, Error
from agenta_backend.utils import common
# Set logger
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 5a79eb0740..a788ae4f1f 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -17,11 +17,13 @@
)
from agenta_backend.models.api.evaluation_model import EvaluationStatusEnum
from agenta_backend.models.db_models import (
- AggregatedResult,
AppDB,
- CorrectAnswer,
EvaluationScenarioInputDB,
EvaluationScenarioOutputDB,
+)
+from agenta_backend.models.shared_models import (
+ AggregatedResult,
+ CorrectAnswer,
EvaluationScenarioResult,
InvokationResult,
Error,
From 4d7e0c2fda697902734af59b6034d2633492c20d Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 19:33:15 +0200
Subject: [PATCH 016/268] cleanup
---
agenta-backend/agenta_backend/models/shared_models.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/shared_models.py b/agenta-backend/agenta_backend/models/shared_models.py
index e5a8ffb724..876fa1702e 100644
--- a/agenta-backend/agenta_backend/models/shared_models.py
+++ b/agenta-backend/agenta_backend/models/shared_models.py
@@ -28,9 +28,6 @@ class EvaluationScenarioResult(BaseModel):
evaluator_config: int
result: Result
- class Config:
- arbitrary_types_allowed = True
-
class AggregatedResult(BaseModel):
evaluator_config: int
From 0234a7bf2c2d1f08f5335f27d07fd9defd5e7d10 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 4 Jun 2024 19:33:42 +0200
Subject: [PATCH 017/268] move config to a table
---
.../agenta_backend/models/db_models.py | 20 ++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 5fe0d0850b..24089341e0 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -123,8 +123,10 @@ class AppVariantDB(Base):
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
config_name = Column(String)
- config_id = Column(JSON)
- config = Column(JSON)
+ config_id = Column(Integer, ForeignKey("configs.id"))
+ config = relationship(
+ "ConfigDB", primaryjoin="AppVariantDB.config_id == ConfigDB.id"
+ )
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -144,7 +146,10 @@ class AppVariantRevisionsDB(Base):
modified_by = relationship("UserDB")
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
- config = Column(JSON) # TODO: Use table ConfigDB
+ config_id = Column(Integer, ForeignKey("configs.id"))
+ config = relationship(
+ "ConfigDB", primaryjoin="AppVariantRevisionsDB.config_id == ConfigDB.id"
+ )
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -381,3 +386,12 @@ class EvaluationScenarioOutputDB(Base):
latency = Column(Float)
__tablename__ = "evaluation_scenario_outputs"
+
+
+# TODO: better name?
+class ConfigDB(Base):
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ config_name = Column(String, nullable=False)
+ parameters = Column(JSON, nullable=False, default=dict)
+
+ __tablename__ = "configs"
From cc06cf6b447f3ee4afd6458de3361a3fdd9a3ab8 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 5 Jun 2024 09:26:01 +0200
Subject: [PATCH 018/268] adjust schemas for evaluations
---
.../agenta_backend/models/db_models.py | 36 ++++++++++++-------
1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 24089341e0..b625b93264 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -237,13 +237,19 @@ class TestSetDB(Base):
class EvaluatorConfigDB(Base):
id = Column(Integer, primary_key=True, autoincrement=True)
+ evaluation_id = Column(Integer, ForeignKey("evaluations.id"))
+ evaluation = relationship("EvaluationDB", back_populates="evaluator_configs")
+ evaluation_scenario_id = Column(Integer, ForeignKey("evaluation_scenarios.id"))
+ evaluation_scenario = relationship(
+ "EvaluationScenarioDB", back_populates="evaluator_configs"
+ )
app_id = Column(Integer, ForeignKey("app_db.id"))
app = relationship("AppDB")
user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("UserDB")
name = Column(String)
evaluator_key = Column(String)
- settings_values = Column(JSON, default=dict) # TODO: Check
+ settings_values = Column(JSON, default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -262,8 +268,10 @@ class HumanEvaluationDB(Base):
user = relationship("UserDB")
status = Column(String)
evaluation_type = Column(String)
- variants = Column(JSON) # List of PydanticObjectId # TODO: Check
- variants_revisions = Column(JSON) # List of PydanticObjectId TODO: Check
+ variant_id = Column(Integer, ForeignKey("app_variants.id"))
+ variant = relationship("AppVariantDB")
+ variant_revision_id = Column(Integer, ForeignKey("app_variant_revisions.id"))
+ variant_revision = relationship("AppVariantRevisionsDB")
testset_id = Column(Integer, ForeignKey("testsets.id"))
testset = relationship("TestSetDB")
created_at = Column(
@@ -322,16 +330,18 @@ class EvaluationDB(Base):
app = relationship("AppDB")
user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("UserDB")
- status = Column(JSON) # Result type
+ status = Column(JSON)
testset_id = Column(Integer, ForeignKey("testsets.id"))
testset = relationship("TestSetDB")
- variant = Column(Integer) # PydanticObjectId
- variant_revision = Column(Integer) # PydanticObjectId
- evaluators_configs = Column(JSON) # List of PydanticObjectId # TODO: Check
+ variant_id = Column(Integer, ForeignKey("app_variants.id"))
+ variant = relationship("AppVariantDB")
+ variant_revision_id = Column(Integer, ForeignKey("app_variant_revisions.id"))
+ variant_revision = relationship("AppVariantRevisionsDB")
+ evaluator_configs = relationship("EvaluatorConfigDB", back_populates="evaluation")
aggregated_results = Column(JSON) # List of AggregatedResult
- average_cost = Column(JSON) # Result type
- total_cost = Column(JSON) # Result type
- average_latency = Column(JSON) # Result type
+ average_cost = Column(JSON)
+ total_cost = Column(JSON)
+ average_latency = Column(JSON)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -354,7 +364,9 @@ class EvaluationScenarioDB(Base):
correct_answers = Column(JSON) # List of CorrectAnswer
is_pinned = Column(Boolean)
note = Column(String)
- evaluators_configs = Column(JSON) # List of PydanticObjectId
+ evaluator_configs = relationship(
+ "EvaluatorConfigDB", back_populates="evaluation_scenario"
+ ) # One-to-many relationship
results = Column(JSON) # List of EvaluationScenarioResult
latency = Column(Integer)
cost = Column(Integer)
@@ -381,7 +393,7 @@ class EvaluationScenarioInputDB(Base):
class EvaluationScenarioOutputDB(Base):
id = Column(Integer, primary_key=True, autoincrement=True)
scenario_id = Column(Integer, ForeignKey("evaluation_scenarios.id"))
- result = Column(JSON) # Result type
+ result = Column(JSON)
cost = Column(Float)
latency = Column(Float)
From 5408d14b584deb3750c07caa23daea05e7a0e873 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 5 Jun 2024 09:28:47 +0200
Subject: [PATCH 019/268] Add a relation EvaluationScenarioDB > AppVariantDB
---
agenta-backend/agenta_backend/models/db_models.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index b625b93264..428d6d2be0 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -358,7 +358,8 @@ class EvaluationScenarioDB(Base):
user = relationship("UserDB")
evaluation_id = Column(Integer, ForeignKey("evaluations.id"))
evaluation = relationship("EvaluationDB")
- variant_id = Column(Integer) # PydanticObjectId
+ variant_id = Column(Integer, ForeignKey("app_variants.id"))
+ variant = relationship("AppVariantDB")
inputs = relationship("EvaluationScenarioInputDB", backref="scenario")
outputs = relationship("EvaluationScenarioOutputDB", backref="scenario")
correct_answers = Column(JSON) # List of CorrectAnswer
@@ -366,7 +367,7 @@ class EvaluationScenarioDB(Base):
note = Column(String)
evaluator_configs = relationship(
"EvaluatorConfigDB", back_populates="evaluation_scenario"
- ) # One-to-many relationship
+ )
results = Column(JSON) # List of EvaluationScenarioResult
latency = Column(Integer)
cost = Column(Integer)
From 7c736f00dfba946db71e621e2ff9bf68c282f0f0 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 5 Jun 2024 10:04:15 +0200
Subject: [PATCH 020/268] remove config db
---
.../agenta_backend/models/db_models.py | 19 ++++---------------
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 428d6d2be0..de8360f38b 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -124,9 +124,8 @@ class AppVariantDB(Base):
base = relationship("VariantBaseDB")
config_name = Column(String)
config_id = Column(Integer, ForeignKey("configs.id"))
- config = relationship(
- "ConfigDB", primaryjoin="AppVariantDB.config_id == ConfigDB.id"
- )
+ config_name = Column(String, nullable=False)
+ parameters = Column(JSON, nullable=False, default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -147,9 +146,8 @@ class AppVariantRevisionsDB(Base):
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
config_id = Column(Integer, ForeignKey("configs.id"))
- config = relationship(
- "ConfigDB", primaryjoin="AppVariantRevisionsDB.config_id == ConfigDB.id"
- )
+ config_name = Column(String, nullable=False)
+ parameters = Column(JSON, nullable=False, default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -399,12 +397,3 @@ class EvaluationScenarioOutputDB(Base):
latency = Column(Float)
__tablename__ = "evaluation_scenario_outputs"
-
-
-# TODO: better name?
-class ConfigDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- config_name = Column(String, nullable=False)
- parameters = Column(JSON, nullable=False, default=dict)
-
- __tablename__ = "configs"
From 26832055c4106453bba0c1b2c2ad2a4300858bda Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 5 Jun 2024 10:28:22 +0200
Subject: [PATCH 021/268] fix config
---
agenta-backend/agenta_backend/models/db_models.py | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index de8360f38b..b041249932 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -122,10 +122,8 @@ class AppVariantDB(Base):
base_name = Column(String)
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
- config_name = Column(String)
- config_id = Column(Integer, ForeignKey("configs.id"))
config_name = Column(String, nullable=False)
- parameters = Column(JSON, nullable=False, default=dict)
+ config_parameters = Column(JSON, nullable=False, default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -145,9 +143,8 @@ class AppVariantRevisionsDB(Base):
modified_by = relationship("UserDB")
base_id = Column(Integer, ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
- config_id = Column(Integer, ForeignKey("configs.id"))
config_name = Column(String, nullable=False)
- parameters = Column(JSON, nullable=False, default=dict)
+ config_parameters = Column(JSON, nullable=False, default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
From d1c7811543feecd4f351673a98940e507e1f9872 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 5 Jun 2024 20:39:11 +0200
Subject: [PATCH 022/268] move from integer to UUID
---
.../agenta_backend/models/db_models.py | 265 +++++++++++++-----
agenta-backend/poetry.lock | 175 +++++++++++-
agenta-backend/pyproject.toml | 2 +
3 files changed, 376 insertions(+), 66 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index b041249932..b81351744e 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -13,12 +13,20 @@
Float,
)
from sqlalchemy.orm import relationship, declarative_base
+import uuid_utils.compat as uuid
+from sqlalchemy.dialects.postgresql import UUID
Base = declarative_base()
class UserDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
uid = Column(String, unique=True, index=True, default="0")
username = Column(String, default="agenta")
email = Column(String, unique=True, default="demo@agenta.ai")
@@ -34,13 +42,19 @@ class UserDB(Base):
# TODO: Rename ImageDB to DockerImageDB ?
class ImageDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
type = Column(String, default="image")
template_uri = Column(String)
docker_id = Column(String, index=True)
tags = Column(String)
deletable = Column(Boolean, default=True)
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -53,9 +67,15 @@ class ImageDB(Base):
class AppDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
app_name = Column(String)
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -68,10 +88,16 @@ class AppDB(Base):
class DeploymentDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- app_id = Column(Integer, ForeignKey("app_db.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
container_name = Column(String)
container_id = Column(String)
@@ -88,13 +114,19 @@ class DeploymentDB(Base):
class VariantBaseDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- app_id = Column(Integer, ForeignKey("app_db.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
base_name = Column(String)
- image_id = Column(Integer, ForeignKey("docker_images.id"))
+ image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
image = relationship("ImageDB")
deployment_id = Column(Integer) # reference to deployment, can be nullable
created_at = Column(
@@ -108,19 +140,25 @@ class VariantBaseDB(Base):
class AppVariantDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- app_id = Column(Integer, ForeignKey("app_db.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
variant_name = Column(String)
revision = Column(Integer)
- image_id = Column(Integer, ForeignKey("docker_images.id"))
+ image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
image = relationship("ImageDB")
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB", foreign_keys=[user_id])
- modified_by_id = Column(Integer, ForeignKey("users.id"))
+ modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
base_name = Column(String)
- base_id = Column(Integer, ForeignKey("bases.id"))
+ base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
config_name = Column(String, nullable=False)
config_parameters = Column(JSON, nullable=False, default=dict)
@@ -135,13 +173,19 @@ class AppVariantDB(Base):
class AppVariantRevisionsDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- variant_id = Column(Integer, ForeignKey("app_variants.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
variant = relationship("AppVariantDB")
revision = Column(Integer)
- modified_by_id = Column(Integer, ForeignKey("users.id"))
+ modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
modified_by = relationship("UserDB")
- base_id = Column(Integer, ForeignKey("bases.id"))
+ base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
config_name = Column(String, nullable=False)
config_parameters = Column(JSON, nullable=False, default=dict)
@@ -156,19 +200,28 @@ class AppVariantRevisionsDB(Base):
class AppEnvironmentDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- app_id = Column(Integer, ForeignKey("app_db.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
name = Column(String)
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
revision = Column(Integer)
deployed_app_variant_id = Column(Integer) # reference to app_variant
+
deployed_app_variant_revision_id = Column(
- Integer, ForeignKey("app_variant_revisions.id")
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
)
deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
+
deployment_id = Column(Integer) # reference to deployment
+
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -177,11 +230,17 @@ class AppEnvironmentDB(Base):
class AppEnvironmentRevisionDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- environment_id = Column(Integer, ForeignKey("environments.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ environment_id = Column(UUID(as_uuid=True), ForeignKey("environments.id"))
environment = relationship("AppEnvironmentDB")
revision = Column(Integer)
- modified_by_id = Column(Integer, ForeignKey("users.id"))
+ modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
modified_by = relationship("UserDB")
deployed_app_variant_revision_id = Column(
Integer
@@ -195,7 +254,13 @@ class AppEnvironmentRevisionDB(Base):
class TemplateDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
type = Column(String, default="image")
template_uri = Column(String)
tag_id = Column(Integer)
@@ -213,12 +278,18 @@ class TemplateDB(Base):
class TestSetDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
name = Column(String)
- app_id = Column(Integer, ForeignKey("app_db.id"))
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
csvdata = Column(JSON) # List of dictionaries
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -231,16 +302,24 @@ class TestSetDB(Base):
class EvaluatorConfigDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- evaluation_id = Column(Integer, ForeignKey("evaluations.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
evaluation = relationship("EvaluationDB", back_populates="evaluator_configs")
- evaluation_scenario_id = Column(Integer, ForeignKey("evaluation_scenarios.id"))
+ evaluation_scenario_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id")
+ )
evaluation_scenario = relationship(
"EvaluationScenarioDB", back_populates="evaluator_configs"
)
- app_id = Column(Integer, ForeignKey("app_db.id"))
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
name = Column(String)
evaluator_key = Column(String)
@@ -256,18 +335,26 @@ class EvaluatorConfigDB(Base):
class HumanEvaluationDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- app_id = Column(Integer, ForeignKey("app_db.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
status = Column(String)
evaluation_type = Column(String)
- variant_id = Column(Integer, ForeignKey("app_variants.id"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
variant = relationship("AppVariantDB")
- variant_revision_id = Column(Integer, ForeignKey("app_variant_revisions.id"))
+ variant_revision_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ )
variant_revision = relationship("AppVariantRevisionsDB")
- testset_id = Column(Integer, ForeignKey("testsets.id"))
+ testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
testset = relationship("TestSetDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -280,10 +367,16 @@ class HumanEvaluationDB(Base):
class HumanEvaluationScenarioDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- user_id = Column(Integer, ForeignKey("users.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
- evaluation_id = Column(Integer, ForeignKey("human_evaluations.id"))
+ evaluation_id = Column(UUID(as_uuid=True), ForeignKey("human_evaluations.id"))
evaluation = relationship("HumanEvaluationDB")
inputs = relationship("HumanEvaluationScenarioInputsDB", backref="scenario")
outputs = relationship("HumanEvaluationScenarioOutputsDB", backref="scenario")
@@ -302,8 +395,16 @@ class HumanEvaluationScenarioDB(Base):
class HumanEvaluationScenarioInputsDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- scenario_id = Column(Integer, ForeignKey("human_evaluations_scenarios.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ scenario_id = Column(
+ UUID(as_uuid=True), ForeignKey("human_evaluations_scenarios.id")
+ )
input_name = Column(String)
input_value = Column(String)
@@ -311,8 +412,16 @@ class HumanEvaluationScenarioInputsDB(Base):
class HumanEvaluationScenarioOutputsDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- scenario_id = Column(Integer, ForeignKey("human_evaluations_scenarios.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ scenario_id = Column(
+ UUID(as_uuid=True), ForeignKey("human_evaluations_scenarios.id")
+ )
variant_id = Column(String)
variant_output = Column(String)
@@ -320,17 +429,25 @@ class HumanEvaluationScenarioOutputsDB(Base):
class EvaluationDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- app_id = Column(Integer, ForeignKey("app_db.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
- user_id = Column(Integer, ForeignKey("users.id"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
status = Column(JSON)
- testset_id = Column(Integer, ForeignKey("testsets.id"))
+ testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
testset = relationship("TestSetDB")
- variant_id = Column(Integer, ForeignKey("app_variants.id"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
variant = relationship("AppVariantDB")
- variant_revision_id = Column(Integer, ForeignKey("app_variant_revisions.id"))
+ variant_revision_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ )
variant_revision = relationship("AppVariantRevisionsDB")
evaluator_configs = relationship("EvaluatorConfigDB", back_populates="evaluation")
aggregated_results = Column(JSON) # List of AggregatedResult
@@ -348,12 +465,18 @@ class EvaluationDB(Base):
class EvaluationScenarioDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- user_id = Column(Integer, ForeignKey("users.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
- evaluation_id = Column(Integer, ForeignKey("evaluations.id"))
+ evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
evaluation = relationship("EvaluationDB")
- variant_id = Column(Integer, ForeignKey("app_variants.id"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
variant = relationship("AppVariantDB")
inputs = relationship("EvaluationScenarioInputDB", backref="scenario")
outputs = relationship("EvaluationScenarioOutputDB", backref="scenario")
@@ -377,8 +500,14 @@ class EvaluationScenarioDB(Base):
class EvaluationScenarioInputDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- scenario_id = Column(Integer, ForeignKey("evaluation_scenarios.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ scenario_id = Column(UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id"))
name = Column(String)
type = Column(String)
value = Column(String)
@@ -387,8 +516,14 @@ class EvaluationScenarioInputDB(Base):
class EvaluationScenarioOutputDB(Base):
- id = Column(Integer, primary_key=True, autoincrement=True)
- scenario_id = Column(Integer, ForeignKey("evaluation_scenarios.id"))
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ scenario_id = Column(UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id"))
result = Column(JSON)
cost = Column(Float)
latency = Column(Float)
diff --git a/agenta-backend/poetry.lock b/agenta-backend/poetry.lock
index ba2006d68f..bfa1888b20 100644
--- a/agenta-backend/poetry.lock
+++ b/agenta-backend/poetry.lock
@@ -2098,6 +2098,87 @@ files = [
[package.dependencies]
wcwidth = "*"
+[[package]]
+name = "psycopg2-binary"
+version = "2.9.9"
+description = "psycopg2 - Python-PostgreSQL Database Adapter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-win32.whl", hash = "sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682"},
+ {file = "psycopg2_binary-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"},
+ {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"},
+ {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692"},
+ {file = "psycopg2_binary-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-win32.whl", hash = "sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5"},
+ {file = "psycopg2_binary-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-win32.whl", hash = "sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90"},
+ {file = "psycopg2_binary-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957"},
+]
+
[[package]]
name = "pyarrow"
version = "16.1.0"
@@ -3241,6 +3322,98 @@ brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotl
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+[[package]]
+name = "uuid-utils"
+version = "0.7.0"
+description = "Drop-in replacement for Python UUID in Rust"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "uuid_utils-0.7.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:1813869ffbf82ebe5fbe749cf0d5e580c605b0fd65d5e738e44439578280f993"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:afb6d3cea6f8b1d9692a1c5d7a93aa6189f973509ea272f4c070399e88cea36b"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38af087e1804774f563ff5f9f043022274dfce110b721ca272f89c0de4ee44e1"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:183603176b65401492db51a16526360997c91e32bc1ffe20ee527337fc57f634"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cebc0e99853c6c12f42e509c27af6131ef36b29e6f381d53c6d81eb1bd21a5f4"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49e0a42bd9c3825f10d38dcc49bafe5b6543b6c107e4b614e96abf8a7cd58a6f"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a0f978aa8a51ca05142e4e81767d67de08b35ce7db28bc2e600d0c317472013"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3d2d02868c73334e84d80a7ad60e6c7506c72c059508e9a38db453e4110a652"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:03f710c032d903f273c720dfc080b68fead1ed543de8ad53c4c8dde64c6edd56"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b60c49becd9ff3844fe6e0e87319df9c84dd65bb86c36ad3514981f64e7a737a"},
+ {file = "uuid_utils-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c7ae618dbe27eb5c681a09bec4554d8da8264130a083657fcb80033bbf1c6114"},
+ {file = "uuid_utils-0.7.0-cp310-none-win32.whl", hash = "sha256:fb73e36a209c2b585e878748615c0410d2422908ad86fc12b5ae66fedd7e326d"},
+ {file = "uuid_utils-0.7.0-cp310-none-win_amd64.whl", hash = "sha256:8e30075e257184328356436a8a6b0e5a0c2b097c224a1e7f9d98a4c350ae5f21"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:ca41e673b807405c0c5aa97ff8959b80884734b1eb55428c7285de245aa3e101"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:cac7e2cf5b40ef297a998fc3ede146f171f99b18210e1237f01002c7e3fa6b0b"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bad486bcb3b1bd1f6a6e02d9627c51b993305bd2efd3eb4acd0aff529cd7d43"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd9d769f85bd24a558e8d1aee93400811e3f734199acc5410617f67b1041e0f4"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c99930f6d51efd15b6c2feb73b386bffccfc82c535eb7d8229e4fb6467f5c6c"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c68ba81b63e23032beda93eeab084f76f141017a26cb895c65777cf3c6c3474"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdaa67667584aba2096292607e2f2e4485df1d1fb2594b2390227cf18df057f0"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6506fedaacd814b50cb62745b058796612c0ddd818a35a70082ea76f8b484931"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:eaa55deae8fd4e7ff30a31f1661e953d70705efa3b09d0fc33576a8eaa589910"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0d4e7cd2f45e9a3dd371abb8532c6fcbb9befa1551522336095b02369e9144a9"},
+ {file = "uuid_utils-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d6a35a2205318cff201e76cbc6ad428c58e4d9d9ce9c83fd600c5295538be60e"},
+ {file = "uuid_utils-0.7.0-cp311-none-win32.whl", hash = "sha256:a7c82f88158f0693cfbc769536d7c09a7cd3c58b22a1b2a041374db1ba03e2d3"},
+ {file = "uuid_utils-0.7.0-cp311-none-win_amd64.whl", hash = "sha256:df8f82270295726d1f7d1e26026c29d33a2b40e6dcf8723cf7f5809909eaf6d6"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:53e5d6703f6a38aa1ba59cf8ac0486ac9a847e816e638cf9d6a2a4da4e9f6247"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c29183a8baedb39fc89e3d98ed2427d49e97ff3680f6832bffe73568d594970d"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:253fd6e8962008484e02fd4ff4a77ffbddd3867c0c3c24a6919eb4fefc3a2297"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de53537159212608eb15d4948d0e0098d2fa2b30d453f93d83fe737f0fd7188b"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:116c4b2ff774ce552324b196a3222302a2e78479a301fdb11c2aa1d294ab0f4d"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2eafb4fe02270e22a3bdb03c2107604cf68589a965667cabb71789beed318497"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ab7a012a1514e498f3f537852257ad2ec9402d1cc165865108dc6d9496bbd4"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:08d58f7de04f3c43a4da05eece58002f4028a7275775ad5013e010abd51d7238"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7e349d43a969f696dbc7acd002b64952b71674eaf948043a4c6dd1ab65d7c462"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:53f4c96e7fd1dab33dd56a885d9cffb5aaf21a9064115743e2cee1ff03cb359b"},
+ {file = "uuid_utils-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c145629c4e48cda275310955632a8231c031f5e9b2eb93b9ab8a081dc6ab6681"},
+ {file = "uuid_utils-0.7.0-cp312-none-win_amd64.whl", hash = "sha256:2ca368440148049475ff94f62d5011c34cd7954fe36247698fc05658d04ad9a1"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:48ed8e59c6fdcc8f825e9fa58afc7f98ba37f744a401ff28a47e7042a761b373"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:bb2777eb2837fc88aceb09addb45bfc7bc8dd0058d19627867b459dac3101a4b"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:070254d2435e9f187e0e8c0626fc6ed108d308cdec669c6d1493dd117bfbedd1"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:424abbbf7e8bdfe78ab552d838efeb9fd033cfe2208f00aadee2704169a1ebad"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:884a72b5f87f7534b685382221d872058bb743294cdb0f2215056b6cc85350fb"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab1509e21c74feb68b4a3e309bde8c64a8fce2e4552b79cb14058d6bc17a6129"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e6d70efc5e3449f0be3184a6925d0feb29fe40bdcd24ee2611a9021ee9b2580"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:411e29b3a2de713c4a3f3edc653599fb17ef3f38b6a788fecef62c3f229b7b0e"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3bf10bd5a898d72f50183718ca18bd61b8830c9134469b4d7b9f73f176f06c9f"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:247af7258004f497ec927fcf463914df5447eb691d7e9c23528280c471d6e830"},
+ {file = "uuid_utils-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:01f7c73860b3cef024f9f57515dae5d52a554c3d2480d8410174ec5b609e20f5"},
+ {file = "uuid_utils-0.7.0-cp38-none-win32.whl", hash = "sha256:d90d432c85bb2d9b3d67c8483b1134cf4363a39fa3273b8f05dcfde2bdddfc5d"},
+ {file = "uuid_utils-0.7.0-cp38-none-win_amd64.whl", hash = "sha256:d31ebe0e6d5d1210da259de4d04ee31dfd5407296302bc2dfcca941e3e8f7bee"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:076fe5f6e5295a5d47b240ece6047d25ce15e8a114f60acc51b4025c3b973ed9"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:997f4d4f505391b69373c852662b5fe0af8c17b71fe401fea7687261464b9aa5"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59fc7ce3dddb5694f6ecd427d557a342f44075cdaf836cd99033fd0cc500e592"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:463b98c24c5f6f4d0b46174c1068c19007fe6414c38fbd58d5cb6c8d29cdd1ef"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65c5d33fd056517d0ab1624168359371b012cc6e3a0fd6029d212d3973032e90"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da47c5c4348a5f88749ac8fd54715bdfa18c1317ebf709121721e9b5fb338c66"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04f39fd90656770422cc7ec46467c2eb758e19d70c5844770bd67834ebae40ea"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a5817e38d497ae643c68044c5c84153fa47557df1f8c1661c17bd1e26bda1058"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:407c15bbde425bc4df829771ef601260eda8617ac5adc6f1eb924d916674c34f"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d4ac00e7f3bbb578e20fadf81468f28b63d1b29930192d8285e9d01b2f75f270"},
+ {file = "uuid_utils-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6d937d37b696a2e3346171367a6ecf69519af4f2a5325e8e7f9a7cfb61597387"},
+ {file = "uuid_utils-0.7.0-cp39-none-win32.whl", hash = "sha256:a4fd826bc2c260716b53db90b2e4c8a0f752aae053fbfbd1860e6e450bcf6ae9"},
+ {file = "uuid_utils-0.7.0-cp39-none-win_amd64.whl", hash = "sha256:c1aa084a1b4842c49526ed1189122a96a8cdd73f66ef4219956279044bf6721f"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:15eb3621d24fb6aab7f8e7b315356171795ca0f226ba9c31490fb9c08712c201"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bd0dac47317dcdefafe493428237019582ba8adb91c3ec80e033ee631c173f6d"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7b555e485f17ab1ab0cb963ff48c6404b93dd491aef7f52a8ae8c52f7f51841"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e66bddd9a469645ede16f0abde5db4dd1a75bc9628ab0b68cad0b848de8494aa"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ad6957427be8f2e48d2f128b3382b3c8e33b4b26542d757e5957c9593773082"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4c753f5b690a481d31f13668a57610a4ee9805d0bd4515ab74a3766bea3b0e66"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e99615eb01550e1f883b5b251a04e8afe053dd30fb6c1af823bd14841bd9290"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cb241970c10cccd37ecac5b3759276ca499cb5b639b832167f91b0a98383e89d"},
+ {file = "uuid_utils-0.7.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:14b672b950e792545fde222cf08f9ba9e30ac69399c2ca34b91d4fa457ce1528"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:1229b9849a239714899040f8af9c7b3b7ad790483ac0bdf06982eb03383e7a93"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2b6c56101e5dedf06c81c5f3e3dc9d542feb4a5443b01a100c14eef6ae7e9ec4"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77c7f5e54fad8d761e019122080b14fae9568dd09cbb908f349284efa8f9a792"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b429a906f0dff1c35d55ca17c5f7fedf3149cb405808b43ba4f3a6d21732c31"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:06608c7d643149dee92ceebc73a84bb736d4394f200ecb794541a79e10bc482d"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:655e505c4e7c321e7f60572fdd594bdfdd96556a9699f697045e3d0b4699f30a"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1e59447b45d5988572e450f43de5546e1d2f6643d2e0137d83b5fdad204fd05"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f3010bdaff5c2a78980849aa6b082e7a0013949c8e4d317934f4aaacf14a2d22"},
+ {file = "uuid_utils-0.7.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ec25fadeeb34c41ef95a8b849a3e4dcc39e96eb39367323ba873bc1732d6516a"},
+ {file = "uuid_utils-0.7.0.tar.gz", hash = "sha256:015aa22711ffd57c5001c2477c6a40121db2794ae3be181a0bf79eef80e28943"},
+]
+
[[package]]
name = "uvicorn"
version = "0.22.0"
@@ -3641,4 +3814,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "598257da4f2afc86ccf88f0163dae694720ea7249af3ef88a880167bcb0a5b78"
+content-hash = "20b3e4e19a02e246d051de3e7853ef1e2b9f45ebafd9512c71bf1f9e3ddd8d24"
diff --git a/agenta-backend/pyproject.toml b/agenta-backend/pyproject.toml
index 040832fa10..3d071913b8 100644
--- a/agenta-backend/pyproject.toml
+++ b/agenta-backend/pyproject.toml
@@ -36,6 +36,8 @@ ragas = "^0.1.8"
openai = "^1.30.4"
sqlalchemy = "^2.0.30"
asyncpg = "^0.29.0"
+psycopg2-binary = "^2.9.9"
+uuid-utils = "^0.7.0"
[tool.poetry.group.dev.dependencies]
pytest = "^7.3.1"
From f3acf3dbff151a2ed02535121666e9e864613c26 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 5 Jun 2024 21:21:53 +0200
Subject: [PATCH 023/268] move to jsonb for better indexing and efficiency
---
.../agenta_backend/models/db_models.py | 42 +++++++++----------
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index b81351744e..c0a78cb4ce 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -8,13 +8,11 @@
DateTime,
Boolean,
ForeignKey,
- JSON,
- Text,
Float,
)
from sqlalchemy.orm import relationship, declarative_base
import uuid_utils.compat as uuid
-from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.dialects.postgresql import UUID, JSONB
Base = declarative_base()
@@ -128,7 +126,7 @@ class VariantBaseDB(Base):
base_name = Column(String)
image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
image = relationship("ImageDB")
- deployment_id = Column(Integer) # reference to deployment, can be nullable
+ deployment_id = Column(Integer) # # TODO: check missing relationship
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -161,7 +159,7 @@ class AppVariantDB(Base):
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
config_name = Column(String, nullable=False)
- config_parameters = Column(JSON, nullable=False, default=dict)
+ config_parameters = Column(JSONB, nullable=False, default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -188,7 +186,7 @@ class AppVariantRevisionsDB(Base):
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
base = relationship("VariantBaseDB")
config_name = Column(String, nullable=False)
- config_parameters = Column(JSON, nullable=False, default=dict)
+ config_parameters = Column(JSONB, nullable=False, default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -213,14 +211,14 @@ class AppEnvironmentDB(Base):
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
revision = Column(Integer)
- deployed_app_variant_id = Column(Integer) # reference to app_variant
+ deployed_app_variant_id = Column(Integer) # TODO: check missing relationship
deployed_app_variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
)
deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
- deployment_id = Column(Integer) # reference to deployment
+ deployment_id = Column(Integer) # TODO: check missing relationship
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -242,10 +240,8 @@ class AppEnvironmentRevisionDB(Base):
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
modified_by = relationship("UserDB")
- deployed_app_variant_revision_id = Column(
- Integer
- ) # reference to app_variant_revision
- deployment_id = Column(Integer) # reference to deployment
+ deployed_app_variant_revision_id = Column(Integer)
+ deployment_id = Column(Integer)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -288,7 +284,7 @@ class TestSetDB(Base):
name = Column(String)
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
- csvdata = Column(JSON) # List of dictionaries
+ csvdata = Column(JSONB)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
created_at = Column(
@@ -323,7 +319,7 @@ class EvaluatorConfigDB(Base):
user = relationship("UserDB")
name = Column(String)
evaluator_key = Column(String)
- settings_values = Column(JSON, default=dict)
+ settings_values = Column(JSONB, default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -381,7 +377,7 @@ class HumanEvaluationScenarioDB(Base):
inputs = relationship("HumanEvaluationScenarioInputsDB", backref="scenario")
outputs = relationship("HumanEvaluationScenarioOutputsDB", backref="scenario")
vote = Column(String)
- score = Column(JSON)
+ score = Column(JSONB)
correct_answer = Column(String)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -440,7 +436,7 @@ class EvaluationDB(Base):
app = relationship("AppDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
- status = Column(JSON)
+ status = Column(JSONB)
testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
testset = relationship("TestSetDB")
variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
@@ -450,10 +446,10 @@ class EvaluationDB(Base):
)
variant_revision = relationship("AppVariantRevisionsDB")
evaluator_configs = relationship("EvaluatorConfigDB", back_populates="evaluation")
- aggregated_results = Column(JSON) # List of AggregatedResult
- average_cost = Column(JSON)
- total_cost = Column(JSON)
- average_latency = Column(JSON)
+ aggregated_results = Column(JSONB) # List of AggregatedResult
+ average_cost = Column(JSONB)
+ total_cost = Column(JSONB)
+ average_latency = Column(JSONB)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -480,13 +476,13 @@ class EvaluationScenarioDB(Base):
variant = relationship("AppVariantDB")
inputs = relationship("EvaluationScenarioInputDB", backref="scenario")
outputs = relationship("EvaluationScenarioOutputDB", backref="scenario")
- correct_answers = Column(JSON) # List of CorrectAnswer
+ correct_answers = Column(JSONB) # List of CorrectAnswer
is_pinned = Column(Boolean)
note = Column(String)
evaluator_configs = relationship(
"EvaluatorConfigDB", back_populates="evaluation_scenario"
)
- results = Column(JSON) # List of EvaluationScenarioResult
+ results = Column(JSONB) # List of EvaluationScenarioResult
latency = Column(Integer)
cost = Column(Integer)
created_at = Column(
@@ -524,7 +520,7 @@ class EvaluationScenarioOutputDB(Base):
nullable=False,
)
scenario_id = Column(UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id"))
- result = Column(JSON)
+ result = Column(JSONB)
cost = Column(Float)
latency = Column(Float)
From 99f032b87480149ee3e0abe57227a5fbac9046d4 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Thu, 6 Jun 2024 07:57:29 +0200
Subject: [PATCH 024/268] fix deployment relationships
---
agenta-backend/agenta_backend/models/db_models.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index c0a78cb4ce..70e8b71cba 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -126,7 +126,10 @@ class VariantBaseDB(Base):
base_name = Column(String)
image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
image = relationship("ImageDB")
- deployment_id = Column(Integer) # # TODO: check missing relationship
+
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
+ deployment = relationship("DeploymentDB")
+
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -218,7 +221,8 @@ class AppEnvironmentDB(Base):
)
deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
- deployment_id = Column(Integer) # TODO: check missing relationship
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
+ deployment = relationship("DeploymentDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
From 256c249fc5c99b4f8478c8755253bc04e839f357 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Thu, 6 Jun 2024 08:28:04 +0200
Subject: [PATCH 025/268] make template type enum
---
agenta-backend/agenta_backend/models/db_models.py | 5 ++++-
agenta-backend/agenta_backend/models/shared_models.py | 6 ++++++
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 70e8b71cba..b69b5b9cbb 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -9,11 +9,14 @@
Boolean,
ForeignKey,
Float,
+ Enum,
)
from sqlalchemy.orm import relationship, declarative_base
import uuid_utils.compat as uuid
from sqlalchemy.dialects.postgresql import UUID, JSONB
+from agenta_backend.models.shared_models import TemplateType
+
Base = declarative_base()
@@ -261,7 +264,7 @@ class TemplateDB(Base):
unique=True,
nullable=False,
)
- type = Column(String, default="image")
+ type = Column(Enum(TemplateType), default=TemplateType.IMAGE, nullable=False)
template_uri = Column(String)
tag_id = Column(Integer)
name = Column(String, unique=True)
diff --git a/agenta-backend/agenta_backend/models/shared_models.py b/agenta-backend/agenta_backend/models/shared_models.py
index 876fa1702e..58fa12de45 100644
--- a/agenta-backend/agenta_backend/models/shared_models.py
+++ b/agenta-backend/agenta_backend/models/shared_models.py
@@ -1,5 +1,6 @@
from pydantic import BaseModel, Field
from typing import Any, Dict, List, Optional
+import enum
class ConfigDB(BaseModel):
@@ -47,3 +48,8 @@ class HumanEvaluationScenarioInput(BaseModel):
class HumanEvaluationScenarioOutput(BaseModel):
variant_id: str
variant_output: str
+
+
+class TemplateType(enum.Enum):
+ IMAGE = "image"
+ ZIP = "zip"
From c584f0c708d180f887fdb6447217b018aef2e75a Mon Sep 17 00:00:00 2001
From: aakrem
Date: Thu, 6 Jun 2024 12:14:00 +0200
Subject: [PATCH 026/268] opt for json inputs and outputs
---
.../agenta_backend/models/db_models.py | 74 +------------------
.../agenta_backend/models/shared_models.py | 12 +++
.../agenta_backend/services/db_manager.py | 8 +-
.../agenta_backend/tasks/evaluations.py | 8 +-
4 files changed, 24 insertions(+), 78 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index b69b5b9cbb..d8d3394474 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -381,8 +381,8 @@ class HumanEvaluationScenarioDB(Base):
user = relationship("UserDB")
evaluation_id = Column(UUID(as_uuid=True), ForeignKey("human_evaluations.id"))
evaluation = relationship("HumanEvaluationDB")
- inputs = relationship("HumanEvaluationScenarioInputsDB", backref="scenario")
- outputs = relationship("HumanEvaluationScenarioOutputsDB", backref="scenario")
+ inputs = Column(JSONB) # List of HumanEvaluationScenarioInput
+ outputs = Column(JSONB) # List of HumanEvaluationScenarioOutput
vote = Column(String)
score = Column(JSONB)
correct_answer = Column(String)
@@ -397,40 +397,6 @@ class HumanEvaluationScenarioDB(Base):
__tablename__ = "human_evaluations_scenarios"
-class HumanEvaluationScenarioInputsDB(Base):
- id = Column(
- UUID(as_uuid=True),
- primary_key=True,
- default=uuid.uuid7,
- unique=True,
- nullable=False,
- )
- scenario_id = Column(
- UUID(as_uuid=True), ForeignKey("human_evaluations_scenarios.id")
- )
- input_name = Column(String)
- input_value = Column(String)
-
- __tablename__ = "human_evaluation_scenario_inputs"
-
-
-class HumanEvaluationScenarioOutputsDB(Base):
- id = Column(
- UUID(as_uuid=True),
- primary_key=True,
- default=uuid.uuid7,
- unique=True,
- nullable=False,
- )
- scenario_id = Column(
- UUID(as_uuid=True), ForeignKey("human_evaluations_scenarios.id")
- )
- variant_id = Column(String)
- variant_output = Column(String)
-
- __tablename__ = "human_evaluation_scenario_outputs"
-
-
class EvaluationDB(Base):
id = Column(
UUID(as_uuid=True),
@@ -481,8 +447,8 @@ class EvaluationScenarioDB(Base):
evaluation = relationship("EvaluationDB")
variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
variant = relationship("AppVariantDB")
- inputs = relationship("EvaluationScenarioInputDB", backref="scenario")
- outputs = relationship("EvaluationScenarioOutputDB", backref="scenario")
+ inputs = Column(JSONB) # List of EvaluationScenarioInput
+ outputs = Column(JSONB) # List of EvaluationScenarioOutput
correct_answers = Column(JSONB) # List of CorrectAnswer
is_pinned = Column(Boolean)
note = Column(String)
@@ -500,35 +466,3 @@ class EvaluationScenarioDB(Base):
)
__tablename__ = "evaluation_scenarios"
-
-
-class EvaluationScenarioInputDB(Base):
- id = Column(
- UUID(as_uuid=True),
- primary_key=True,
- default=uuid.uuid7,
- unique=True,
- nullable=False,
- )
- scenario_id = Column(UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id"))
- name = Column(String)
- type = Column(String)
- value = Column(String)
-
- __tablename__ = "evaluation_scenario_inputs"
-
-
-class EvaluationScenarioOutputDB(Base):
- id = Column(
- UUID(as_uuid=True),
- primary_key=True,
- default=uuid.uuid7,
- unique=True,
- nullable=False,
- )
- scenario_id = Column(UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id"))
- result = Column(JSONB)
- cost = Column(Float)
- latency = Column(Float)
-
- __tablename__ = "evaluation_scenario_outputs"
diff --git a/agenta-backend/agenta_backend/models/shared_models.py b/agenta-backend/agenta_backend/models/shared_models.py
index 58fa12de45..a84d7768b5 100644
--- a/agenta-backend/agenta_backend/models/shared_models.py
+++ b/agenta-backend/agenta_backend/models/shared_models.py
@@ -40,6 +40,18 @@ class CorrectAnswer(BaseModel):
value: str
+class EvaluationScenarioInput(BaseModel):
+ name: str
+ type: str
+ value: str
+
+
+class EvaluationScenarioOutput(BaseModel):
+ result: Result
+ cost: Optional[float] = None
+ latency: Optional[float] = None
+
+
class HumanEvaluationScenarioInput(BaseModel):
input_name: str
input_value: str
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index f5069fda7f..cd9f823416 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -65,8 +65,6 @@
from agenta_backend.models.db_models import (
TemplateDB,
AppVariantRevisionsDB,
- EvaluationScenarioInputDB,
- EvaluationScenarioOutputDB,
)
from agenta_backend.models.shared_models import (
@@ -74,6 +72,8 @@
CorrectAnswer,
AggregatedResult,
EvaluationScenarioResult,
+ EvaluationScenarioInput,
+ EvaluationScenarioOutput,
)
from beanie.operators import In
@@ -1931,8 +1931,8 @@ async def create_new_evaluation_scenario(
user: UserDB,
evaluation: EvaluationDB,
variant_id: str,
- inputs: List[EvaluationScenarioInputDB],
- outputs: List[EvaluationScenarioOutputDB],
+ inputs: List[EvaluationScenarioInput],
+ outputs: List[EvaluationScenarioOutput],
correct_answers: Optional[List[CorrectAnswer]],
is_pinned: Optional[bool],
note: Optional[str],
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index a788ae4f1f..ed2d038b58 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -18,12 +18,12 @@
from agenta_backend.models.api.evaluation_model import EvaluationStatusEnum
from agenta_backend.models.db_models import (
AppDB,
- EvaluationScenarioInputDB,
- EvaluationScenarioOutputDB,
)
from agenta_backend.models.shared_models import (
AggregatedResult,
CorrectAnswer,
+ EvaluationScenarioInput,
+ EvaluationScenarioOutput,
EvaluationScenarioResult,
InvokationResult,
Error,
@@ -150,7 +150,7 @@ def evaluate(
logger.debug(f"List of inputs: {list_inputs}")
inputs = [
- EvaluationScenarioInputDB(
+ EvaluationScenarioInput(
name=input_item["name"],
type="text",
value=data_point[
@@ -196,7 +196,7 @@ def evaluate(
note="",
correct_answers=None,
outputs=[
- EvaluationScenarioOutputDB(
+ EvaluationScenarioOutput(
result=Result(
type="error",
value=None,
From 35c2be98360c357b44fffff4930402ca12fdb076 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Thu, 6 Jun 2024 12:25:27 +0200
Subject: [PATCH 027/268] rename and refactor db uri
---
agenta-backend/agenta_backend/models/db_engine.py | 2 +-
agenta-cli/agenta/config.py | 2 --
agenta-cli/agenta/config.toml | 1 -
docker-compose.yml | 4 ++--
4 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index 564d8e0ddc..e59f78ccec 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -83,7 +83,7 @@ class DBEngine:
def __init__(self) -> None:
self.mode = os.environ.get("DATABASE_MODE", "v2")
- self.db_url = f"{os.environ.get('DATABASE_URL')}/agenta_{self.mode}"
+ self.db_url = f"{os.environ.get('POSTGRES_URI')}/agenta_{self.mode}"
self.engine = create_async_engine(self.db_url, echo=True)
self.async_session = sessionmaker(
self.engine, expire_on_commit=False, class_=AsyncSession
diff --git a/agenta-cli/agenta/config.py b/agenta-cli/agenta/config.py
index 167ccc86d0..4034d7cd86 100644
--- a/agenta-cli/agenta/config.py
+++ b/agenta-cli/agenta/config.py
@@ -11,14 +11,12 @@
toml_config = toml.load(f"{Path(__file__).parent}/config.toml")
# Set the environment variables from the TOML configurations
-os.environ["DATABASE_URL"] = toml_config["database_url"]
os.environ["REGISTRY"] = toml_config["registry"]
os.environ["BACKEND_URL_SUFFIX"] = toml_config["backend_url_suffix"]
os.environ["ALLOW_ORIGINS"] = toml_config["allow_origins"]
class Settings(BaseSettings):
- database_url: str
registry: str
backend_url_suffix: str
allow_origins: str
diff --git a/agenta-cli/agenta/config.toml b/agenta-cli/agenta/config.toml
index bc5cd73754..a29287ccea 100644
--- a/agenta-cli/agenta/config.toml
+++ b/agenta-cli/agenta/config.toml
@@ -1,5 +1,4 @@
docker_registry_url="127.0.0.1:5001"
-database_url="localhost:5432"
registry="agenta-server"
backend_url_suffix="api"
allow_origins="http://localhost:3000,http://localhost:3001,http://cloud.agenta.ai,https://cloud.agenta.ai"
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index 9e28377c0b..f18adc22d7 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -15,7 +15,7 @@ services:
build: ./agenta-backend
environment:
- MONGODB_URI=mongodb://username:password@mongo:27017
- - DATABASE_URL=postgresql+asyncpg://username:password@postgres:5432
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=development
- DATABASE_MODE=v2
@@ -203,4 +203,4 @@ networks:
volumes:
mongodb_data:
redis_data:
- nextjs_cache:
\ No newline at end of file
+ nextjs_cache:
From dbbd21e7bcac0a180c6b5df9e32469171b6ebc0a Mon Sep 17 00:00:00 2001
From: aakrem
Date: Thu, 6 Jun 2024 12:31:08 +0200
Subject: [PATCH 028/268] fix relationship
---
agenta-backend/agenta_backend/models/db_models.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index d8d3394474..9f39abe74b 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -217,7 +217,9 @@ class AppEnvironmentDB(Base):
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
revision = Column(Integer)
- deployed_app_variant_id = Column(Integer) # TODO: check missing relationship
+
+ deployed_app_variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
+ deployed_app_variant = relationship("AppVariantDB")
deployed_app_variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
From 7b135c728c317edbf38526a4b841695957fea00d Mon Sep 17 00:00:00 2001
From: aakrem
Date: Thu, 6 Jun 2024 12:52:31 +0200
Subject: [PATCH 029/268] use schema definition standards
---
.../agenta_backend/models/db_models.py | 64 +++++++++----------
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 9f39abe74b..5ba45cc704 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -21,6 +21,8 @@
class UserDB(Base):
+ __tablename__ = "users"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -38,11 +40,11 @@ class UserDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "users"
-
# TODO: Rename ImageDB to DockerImageDB ?
class ImageDB(Base):
+ __tablename__ = "docker_images"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -64,10 +66,10 @@ class ImageDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "docker_images"
-
class AppDB(Base):
+ __tablename__ = "app_db"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -85,10 +87,10 @@ class AppDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "app_db"
-
class DeploymentDB(Base):
+ __tablename__ = "deployments"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -111,10 +113,10 @@ class DeploymentDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "deployments"
-
class VariantBaseDB(Base):
+ __tablename__ = "bases"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -140,10 +142,10 @@ class VariantBaseDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "bases"
-
class AppVariantDB(Base):
+ __tablename__ = "app_variants"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -173,10 +175,10 @@ class AppVariantDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "app_variants"
-
class AppVariantRevisionsDB(Base):
+ __tablename__ = "app_variant_revisions"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -200,10 +202,10 @@ class AppVariantRevisionsDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "app_variant_revisions"
-
class AppEnvironmentDB(Base):
+ __tablename__ = "environments"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -228,15 +230,14 @@ class AppEnvironmentDB(Base):
deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
deployment = relationship("DeploymentDB")
-
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "environments"
-
class AppEnvironmentRevisionDB(Base):
+ __tablename__ = "environments_revisions"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -255,10 +256,10 @@ class AppEnvironmentRevisionDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "environments_revisions"
-
class TemplateDB(Base):
+ __tablename__ = "templates"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -279,10 +280,10 @@ class TemplateDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "templates"
-
class TestSetDB(Base):
+ __tablename__ = "testsets"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -303,10 +304,10 @@ class TestSetDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "testsets"
-
class EvaluatorConfigDB(Base):
+ __tablename__ = "evaluators_configs"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -336,10 +337,10 @@ class EvaluatorConfigDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "evaluators_configs"
-
class HumanEvaluationDB(Base):
+ __tablename__ = "human_evaluations"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -368,10 +369,10 @@ class HumanEvaluationDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "human_evaluations"
-
class HumanEvaluationScenarioDB(Base):
+ __tablename__ = "human_evaluations_scenarios"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -396,10 +397,11 @@ class HumanEvaluationScenarioDB(Base):
)
is_pinned = Column(Boolean)
note = Column(String)
- __tablename__ = "human_evaluations_scenarios"
class EvaluationDB(Base):
+ __tablename__ = "evaluations"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -432,10 +434,10 @@ class EvaluationDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- __tablename__ = "evaluations"
-
class EvaluationScenarioDB(Base):
+ __tablename__ = "evaluation_scenarios"
+
id = Column(
UUID(as_uuid=True),
primary_key=True,
@@ -466,5 +468,3 @@ class EvaluationScenarioDB(Base):
updated_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
-
- __tablename__ = "evaluation_scenarios"
From 276d1dcce5f71f8a61dfd6fe313309e72eafbdc3 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Thu, 6 Jun 2024 17:38:20 +0200
Subject: [PATCH 030/268] add types as comments
---
agenta-backend/agenta_backend/models/db_models.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 5ba45cc704..c0dec4d9a7 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -413,7 +413,7 @@ class EvaluationDB(Base):
app = relationship("AppDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
- status = Column(JSONB)
+ status = Column(JSONB) # Result
testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
testset = relationship("TestSetDB")
variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
@@ -424,9 +424,9 @@ class EvaluationDB(Base):
variant_revision = relationship("AppVariantRevisionsDB")
evaluator_configs = relationship("EvaluatorConfigDB", back_populates="evaluation")
aggregated_results = Column(JSONB) # List of AggregatedResult
- average_cost = Column(JSONB)
- total_cost = Column(JSONB)
- average_latency = Column(JSONB)
+ average_cost = Column(JSONB) # Result
+ total_cost = Column(JSONB) # Result
+ average_latency = Column(JSONB) # Result
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
From 58ce2242f20617213681c12dd3148c9ea1a8f5a8 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Fri, 7 Jun 2024 06:48:45 +0100
Subject: [PATCH 031/268] feat(frontend): improved error handling in evaluation
---
.../evaluationCompare/EvaluationCompare.tsx | 40 ++++++++++---------
.../EvaluationScenarios.tsx | 19 +++++----
2 files changed, 33 insertions(+), 26 deletions(-)
diff --git a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
index 1781a782ec..cc87980f2c 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
@@ -188,38 +188,40 @@ const EvaluationCompareMode: React.FC = () => {
),
headerName: "Output",
- minWidth: 280,
+ minWidth: 300,
flex: 1,
field: `variants.${vi}.output` as any,
...getFilterParams("text"),
hide: hiddenVariants.includes("Output"),
cellRenderer: (params: any) => {
+ const result = params.data?.variants.find(
+ (item: any) => item.evaluationId === variant.evaluationId,
+ )?.output?.result
+
+ if (result && result.error && result.type == "error") {
+ setModalErrorMsg({
+ message: result.error.message,
+ stackTrace: result.error.stacktrace,
+ })
+ return (
+
+ )
+ }
+
return (
<>
{selectedCorrectAnswer[0] !== "noDiffColumnIsSelected"
? LongTextCellRenderer(
params,
- item.evaluationId === variant.evaluationId,
- )?.output?.result,
- )}
- expectedOutput={
- params.data[selectedCorrectAnswer[0]] || ""
- }
+ variantOutput={getTypedValue(result)}
+ expectedOutput={params.data?.correctAnswer}
/>,
)
- : LongTextCellRenderer(
- params,
- getTypedValue(
- params.data?.variants.find(
- (item: any) =>
- item.evaluationId === variant.evaluationId,
- )?.output?.result,
- ),
- )}
+ : LongTextCellRenderer(params, getTypedValue(result))}
>
)
},
diff --git a/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx b/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
index 7b16910b4d..f6ed441833 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
@@ -23,8 +23,8 @@ import {useAtom} from "jotai"
import {evaluatorsAtom} from "@/lib/atoms/evaluation"
import CompareOutputDiff from "@/components/CompareOutputDiff/CompareOutputDiff"
import {formatCurrency, formatLatency} from "@/lib/helpers/formatters"
-import {useLocalStorage} from "usehooks-ts"
import EvaluationErrorModal from "../EvaluationErrorProps/EvaluationErrorModal"
+import EvaluationErrorText from "../EvaluationErrorProps/EvaluationErrorText"
import _ from "lodash"
import FilterColumns, {generateFilterItems} from "../FilterColumns/FilterColumns"
import {variantNameWithRev} from "@/lib/helpers/variantHelper"
@@ -82,7 +82,6 @@ const EvaluationScenarios: React.FC = () => {
scenarios[0]?.correct_answers || [],
"key",
)
- const [showDiff, setShowDiff] = useLocalStorage("showDiff", "show")
const [modalErrorMsg, setModalErrorMsg] = useState({message: "", stackTrace: ""})
const [isErrorModalOpen, setIsErrorModalOpen] = useState(false)
@@ -148,12 +147,18 @@ const EvaluationScenarios: React.FC = () => {
const correctAnswer = params?.data?.correct_answers?.find(
(item: any) => item.key === selectedCorrectAnswer[0],
)
-
const result = params.data?.outputs[index].result
- if (result && result.type == "error") {
- return LongTextCellRenderer(
- params,
- `${result?.error?.message}\n${result?.error?.stacktrace}`,
+
+ if (result && result.error && result.type == "error") {
+ setModalErrorMsg({
+ message: result.error.message,
+ stackTrace: result.error.stacktrace,
+ })
+ return (
+
)
}
return selectedCorrectAnswer[0] !== "noDiffColumnIsSelected"
From 1ba13c15d228a7861bda84fb9c9e386f6c859fe3 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Fri, 7 Jun 2024 07:01:49 +0100
Subject: [PATCH 032/268] minor refactor
---
.../pages/evaluations/evaluationCompare/EvaluationCompare.tsx | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
index cc87980f2c..14dcba0b7b 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
@@ -218,7 +218,9 @@ const EvaluationCompareMode: React.FC = () => {
params,
,
)
: LongTextCellRenderer(params, getTypedValue(result))}
From 400c6e003006a6ff9a09aee10d20b80f70f2fdf6 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 7 Jun 2024 15:37:45 +0200
Subject: [PATCH 033/268] fixes to missing schema relationships
---
.../agenta_backend/models/db_models.py | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index c0dec4d9a7..b4f25bf925 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -250,8 +250,15 @@ class AppEnvironmentRevisionDB(Base):
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
modified_by = relationship("UserDB")
- deployed_app_variant_revision_id = Column(Integer)
- deployment_id = Column(Integer)
+
+ deployed_app_variant_revision_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ )
+ deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
+
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
+ deployment = relationship("DeploymentDB")
+
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -468,3 +475,11 @@ class EvaluationScenarioDB(Base):
updated_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+
+
+class IDsMappingDB(Base):
+ __tablename__ = "ids_mapping"
+
+ table_name = Column(String, nullable=False)
+ objectid = Column(String, primary_key=True)
+ uuid = Column(UUID(as_uuid=True), nullable=False)
From f737511ad93a912abe582ff89e47db378980b825 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 7 Jun 2024 15:38:58 +0200
Subject: [PATCH 034/268] initial migration
---
.../migrations/mongo_to_postgres/migration.py | 436 ++++++++++++++++++
.../migrations/mongo_to_postgres/utils.py | 140 ++++++
2 files changed, 576 insertions(+)
create mode 100644 agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
create mode 100644 agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
new file mode 100644
index 0000000000..ce55e1b5ea
--- /dev/null
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -0,0 +1,436 @@
+import os
+import asyncio
+from datetime import datetime, timezone
+
+from pymongo import MongoClient
+from bson import DBRef
+from sqlalchemy import text
+from sqlalchemy.dialects.postgresql import UUID
+import uuid_utils.compat as uuid
+
+# SQLAlchemy models defined in agenta_backend.models.db_models
+from agenta_backend.models.db_models import (
+ Base,
+ UserDB,
+ ImageDB,
+ AppDB,
+ DeploymentDB,
+ VariantBaseDB,
+ AppVariantDB,
+ AppVariantRevisionsDB,
+ AppEnvironmentDB,
+ AppEnvironmentRevisionDB,
+ TemplateDB,
+ TestSetDB,
+ EvaluatorConfigDB,
+ HumanEvaluationDB,
+ HumanEvaluationScenarioDB,
+ EvaluationDB,
+ EvaluationScenarioDB,
+ IDsMappingDB,
+)
+
+from agenta_backend.migrations.mongo_to_postgres.utils import (
+ drop_all_tables,
+ create_all_tables,
+ print_migration_report,
+ store_mapping,
+ get_mapped_uuid,
+ generate_uuid,
+ get_datetime,
+ migrate_collection,
+)
+
+from agenta_backend.models.shared_models import TemplateType
+
+tables = [
+ UserDB,
+ ImageDB,
+ AppDB,
+ DeploymentDB,
+ VariantBaseDB,
+ AppVariantDB,
+ AppVariantRevisionsDB,
+ AppEnvironmentDB,
+ AppEnvironmentRevisionDB,
+ TemplateDB,
+ TestSetDB,
+ EvaluatorConfigDB,
+ HumanEvaluationDB,
+ HumanEvaluationScenarioDB,
+ EvaluationDB,
+ EvaluationScenarioDB,
+ IDsMappingDB,
+]
+
+
+async def transform_user(user):
+ user_uuid = generate_uuid()
+ await store_mapping("users", user["_id"], user_uuid)
+ return {
+ "id": user_uuid,
+ "uid": user["uid"],
+ "username": user["username"],
+ "email": user["email"],
+ "created_at": get_datetime(user.get("created_at")),
+ "updated_at": get_datetime(user.get("updated_at")),
+ }
+
+
+async def transform_image(image):
+ user_uuid = await get_mapped_uuid(
+ image["user"].id if isinstance(image["user"], DBRef) else image["user"]
+ )
+ image_uuid = generate_uuid()
+ await store_mapping("docker_images", image["_id"], image_uuid)
+ return {
+ "id": image_uuid,
+ "type": image["type"],
+ "template_uri": image.get("template_uri"),
+ "docker_id": image.get("docker_id"),
+ "tags": image.get("tags"),
+ "deletable": image.get("deletable", True),
+ "user_id": user_uuid,
+ "created_at": get_datetime(image.get("created_at")),
+ "updated_at": get_datetime(image.get("updated_at")),
+ }
+
+
+async def transform_app(app):
+ user_uuid = await get_mapped_uuid(app["user"].id)
+ app_uuid = generate_uuid()
+ await store_mapping("app_db", app["_id"], app_uuid)
+ return {
+ "id": app_uuid,
+ "app_name": app["app_name"],
+ "user_id": user_uuid,
+ "created_at": get_datetime(app.get("created_at")),
+ "updated_at": get_datetime(app.get("updated_at")),
+ }
+
+
+async def transform_deployment(deployment):
+ app_uuid = await get_mapped_uuid(deployment["app"].id)
+ user_uuid = await get_mapped_uuid(deployment["user"].id)
+ deployment_uuid = generate_uuid()
+ await store_mapping("deployments", deployment["_id"], deployment_uuid)
+ return {
+ "id": deployment_uuid,
+ "app_id": app_uuid,
+ "user_id": user_uuid,
+ "container_name": deployment.get("container_name"),
+ "container_id": deployment.get("container_id"),
+ "uri": deployment.get("uri"),
+ "status": deployment["status"],
+ "created_at": get_datetime(deployment.get("created_at")),
+ "updated_at": get_datetime(deployment.get("updated_at")),
+ }
+
+
+async def transform_variant_base(base):
+ app_uuid = await get_mapped_uuid(base["app"].id)
+ user_uuid = await get_mapped_uuid(base["user"].id)
+ image_uuid = await get_mapped_uuid(base["image"].id)
+ deployment_uuid = base["deployment"] and await get_mapped_uuid(base["deployment"])
+ base_uuid = generate_uuid()
+ await store_mapping("bases", base["_id"], base_uuid)
+ return {
+ "id": base_uuid,
+ "app_id": app_uuid,
+ "user_id": user_uuid,
+ "base_name": base["base_name"],
+ "image_id": image_uuid,
+ "deployment_id": deployment_uuid,
+ "created_at": get_datetime(base.get("created_at")),
+ "updated_at": get_datetime(base.get("updated_at")),
+ }
+
+
+async def transform_app_variant(variant):
+ app_uuid = await get_mapped_uuid(variant["app"].id)
+ image_uuid = await get_mapped_uuid(variant["image"].id)
+ user_uuid = await get_mapped_uuid(variant["user"].id)
+ modified_by_uuid = await get_mapped_uuid(variant["modified_by"].id)
+ base_uuid = await get_mapped_uuid(variant["base"].id)
+ variant_uuid = generate_uuid()
+ await store_mapping("app_variants", variant["_id"], variant_uuid)
+ return {
+ "id": variant_uuid,
+ "app_id": app_uuid,
+ "variant_name": variant["variant_name"],
+ "revision": variant["revision"],
+ "image_id": image_uuid,
+ "user_id": user_uuid,
+ "modified_by_id": modified_by_uuid,
+ "base_name": variant.get("base_name"),
+ "base_id": base_uuid,
+ "config_name": variant["config_name"],
+ "config_parameters": variant["config"],
+ "created_at": get_datetime(variant.get("created_at")),
+ "updated_at": get_datetime(variant.get("updated_at")),
+ }
+
+
+async def transform_app_variant_revision(revision):
+ variant_uuid = await get_mapped_uuid(revision["variant"].id)
+ modified_by_uuid = await get_mapped_uuid(revision["modified_by"].id)
+ base_uuid = await get_mapped_uuid(revision["base"].id)
+ revision_uuid = generate_uuid()
+ await store_mapping("app_variant_revisions", revision["_id"], revision_uuid)
+ return {
+ "id": revision_uuid,
+ "variant_id": variant_uuid,
+ "revision": revision["revision"],
+ "modified_by_id": modified_by_uuid,
+ "base_id": base_uuid,
+ "config_name": revision["config"]["config_name"],
+ "config_parameters": revision["config"]["parameters"],
+ "created_at": get_datetime(revision["created_at"]),
+ "updated_at": get_datetime(revision["updated_at"]),
+ }
+
+
+async def transform_app_environment(environment):
+ app_uuid = await get_mapped_uuid(environment["app"].id)
+ user_uuid = await get_mapped_uuid(environment["user"].id)
+ variant_uuid = await get_mapped_uuid(environment["deployed_app_variant"])
+ revision_uuid = await get_mapped_uuid(environment["deployed_app_variant_revision"])
+ deployment_uuid = await get_mapped_uuid(environment["deployment"])
+ environment_uuid = generate_uuid()
+ await store_mapping("environments", environment["_id"], environment_uuid)
+ return {
+ "id": environment_uuid,
+ "app_id": app_uuid,
+ "name": environment["name"],
+ "user_id": user_uuid,
+ "revision": environment["revision"],
+ "deployed_app_variant_id": variant_uuid,
+ "deployed_app_variant_revision_id": revision_uuid,
+ "deployment_id": deployment_uuid,
+ "created_at": get_datetime(environment.get("created_at")),
+ }
+
+
+async def transform_app_environment_revision(revision):
+ environment_uuid = await get_mapped_uuid(revision["environment"].id)
+ modified_by_uuid = await get_mapped_uuid(revision["modified_by"].id)
+ variant_revision_uuid = await get_mapped_uuid(
+ revision["deployed_app_variant_revision"]
+ )
+ deployment_uuid = await get_mapped_uuid(revision["deployment"])
+ revision_uuid = generate_uuid()
+ await store_mapping("environments_revisions", revision["_id"], revision_uuid)
+ return {
+ "id": revision_uuid,
+ "environment_id": environment_uuid,
+ "revision": revision["revision"],
+ "modified_by_id": modified_by_uuid,
+ "deployed_app_variant_revision_id": variant_revision_uuid,
+ "deployment_id": deployment_uuid,
+ "created_at": get_datetime(revision["created_at"]),
+ }
+
+
+async def transform_template(template):
+ template_uuid = generate_uuid()
+ await store_mapping("templates", template["_id"], template_uuid)
+
+ # Ensure type is correctly mapped to TemplateType enum
+ template_type = (
+ TemplateType(template["type"]) if "type" in template else TemplateType.IMAGE
+ )
+
+ return {
+ "id": template_uuid,
+ "type": template_type,
+ "template_uri": template.get("template_uri"),
+ "tag_id": template.get("tag_id"),
+ "name": template["name"],
+ "repo_name": template.get("repo_name"),
+ "title": template["title"],
+ "description": template["description"],
+ "size": template.get("size"),
+ "digest": template.get("digest"),
+ "last_pushed": get_datetime(template.get("last_pushed")),
+ }
+
+
+async def transform_test_set(test_set):
+ app_uuid = await get_mapped_uuid(test_set["app"].id)
+ user_uuid = await get_mapped_uuid(test_set["user"].id)
+ test_set_uuid = generate_uuid()
+ await store_mapping("testsets", test_set["_id"], test_set_uuid)
+ return {
+ "id": test_set_uuid,
+ "name": test_set["name"],
+ "app_id": app_uuid,
+ "csvdata": test_set["csvdata"],
+ "user_id": user_uuid,
+ "created_at": get_datetime(test_set.get("created_at")),
+ "updated_at": get_datetime(test_set.get("updated_at")),
+ }
+
+
+async def transform_evaluator_config(config):
+ evaluation_uuid = await get_mapped_uuid(config["evaluation"].id)
+ scenario_uuid = await get_mapped_uuid(config["evaluation_scenario"].id)
+ app_uuid = await get_mapped_uuid(config["app"].id)
+ user_uuid = await get_mapped_uuid(config["user"].id)
+ config_uuid = generate_uuid()
+ await store_mapping("evaluators_configs", config["_id"], config_uuid)
+ return {
+ "id": config_uuid,
+ "evaluation_id": evaluation_uuid,
+ "evaluation_scenario_id": scenario_uuid,
+ "app_id": app_uuid,
+ "user_id": user_uuid,
+ "name": config["name"],
+ "evaluator_key": config["evaluator_key"],
+ "settings_values": config["settings_values"],
+ "created_at": get_datetime(config.get("created_at")),
+ "updated_at": get_datetime(config.get("updated_at")),
+ }
+
+
+async def transform_human_evaluation(evaluation):
+ app_uuid = await get_mapped_uuid(evaluation["app"].id)
+ user_uuid = await get_mapped_uuid(evaluation["user"].id)
+ test_set_uuid = await get_mapped_uuid(evaluation["testset"].id)
+ variant_uuid = await get_mapped_uuid(evaluation["variants"][0])
+ revision_uuid = await get_mapped_uuid(evaluation["variants_revisions"][0])
+ evaluation_uuid = generate_uuid()
+ await store_mapping("human_evaluations", evaluation["_id"], evaluation_uuid)
+ return {
+ "id": evaluation_uuid,
+ "app_id": app_uuid,
+ "user_id": user_uuid,
+ "status": evaluation["status"],
+ "evaluation_type": evaluation["evaluation_type"],
+ "variant_id": variant_uuid,
+ "variant_revision_id": revision_uuid,
+ "testset_id": test_set_uuid,
+ "created_at": get_datetime(evaluation.get("created_at")),
+ "updated_at": get_datetime(evaluation.get("updated_at")),
+ }
+
+
+async def transform_human_evaluation_scenario(scenario):
+ user_uuid = await get_mapped_uuid(scenario["user"].id)
+ evaluation_uuid = await get_mapped_uuid(scenario["evaluation"].id)
+ scenario_uuid = generate_uuid()
+ await store_mapping("human_evaluations_scenarios", scenario["_id"], scenario_uuid)
+ return {
+ "id": scenario_uuid,
+ "user_id": user_uuid,
+ "evaluation_id": evaluation_uuid,
+ "inputs": scenario["inputs"],
+ "outputs": scenario["outputs"],
+ "vote": scenario.get("vote"),
+ "score": scenario.get("score"),
+ "correct_answer": scenario.get("correct_answer"),
+ "created_at": get_datetime(scenario.get("created_at")),
+ "updated_at": get_datetime(scenario.get("updated_at")),
+ "is_pinned": scenario.get("is_pinned"),
+ "note": scenario.get("note"),
+ }
+
+
+async def transform_evaluation(evaluation):
+ app_uuid = await get_mapped_uuid(evaluation["app"].id)
+ user_uuid = await get_mapped_uuid(evaluation["user"].id)
+ test_set_uuid = await get_mapped_uuid(evaluation["testset"].id)
+ variant_uuid = await get_mapped_uuid(evaluation["variant"])
+ revision_uuid = await get_mapped_uuid(evaluation["variant_revision"])
+ evaluation_uuid = generate_uuid()
+ await store_mapping("evaluations", evaluation["_id"], evaluation_uuid)
+ return {
+ "id": evaluation_uuid,
+ "app_id": app_uuid,
+ "user_id": user_uuid,
+ "status": evaluation["status"],
+ "testset_id": test_set_uuid,
+ "variant_id": variant_uuid,
+ "variant_revision_id": revision_uuid,
+ "aggregated_results": evaluation["aggregated_results"],
+ "average_cost": evaluation["average_cost"],
+ "total_cost": evaluation["total_cost"],
+ "average_latency": evaluation["average_latency"],
+ "created_at": get_datetime(evaluation.get("created_at")),
+ "updated_at": get_datetime(evaluation.get("updated_at")),
+ }
+
+
+async def transform_evaluation_scenario(scenario):
+ user_uuid = await get_mapped_uuid(scenario["user"].id)
+ evaluation_uuid = await get_mapped_uuid(scenario["evaluation"].id)
+ variant_uuid = await get_mapped_uuid(scenario["variant_id"])
+ scenario_uuid = generate_uuid()
+ await store_mapping("evaluation_scenarios", scenario["_id"], scenario_uuid)
+ return {
+ "id": scenario_uuid,
+ "user_id": user_uuid,
+ "evaluation_id": evaluation_uuid,
+ "variant_id": variant_uuid,
+ "inputs": scenario["inputs"],
+ "outputs": scenario["outputs"],
+ "correct_answers": scenario.get("correct_answers"),
+ "is_pinned": scenario.get("is_pinned"),
+ "note": scenario.get("note"),
+ "results": scenario["results"],
+ "latency": scenario.get("latency"),
+ "cost": scenario.get("cost"),
+ "created_at": get_datetime(scenario.get("created_at")),
+ "updated_at": get_datetime(scenario.get("updated_at")),
+ }
+
+
+async def main():
+ try:
+ await drop_all_tables()
+ await create_all_tables(tables=tables)
+ await migrate_collection("users", UserDB, transform_user)
+ await migrate_collection("docker_images", ImageDB, transform_image)
+ await migrate_collection("app_db", AppDB, transform_app)
+ await migrate_collection("deployments", DeploymentDB, transform_deployment)
+ await migrate_collection("bases", VariantBaseDB, transform_variant_base)
+ await migrate_collection("app_variants", AppVariantDB, transform_app_variant)
+ await migrate_collection(
+ "app_variant_revisions",
+ AppVariantRevisionsDB,
+ transform_app_variant_revision,
+ )
+ await migrate_collection(
+ "environments", AppEnvironmentDB, transform_app_environment
+ )
+ await migrate_collection(
+ "environments_revisions",
+ AppEnvironmentRevisionDB,
+ transform_app_environment_revision,
+ )
+ await migrate_collection("templates", TemplateDB, transform_template)
+ await migrate_collection("testsets", TestSetDB, transform_test_set)
+ await migrate_collection(
+ "evaluators_configs", EvaluatorConfigDB, transform_evaluator_config
+ )
+ await migrate_collection(
+ "human_evaluations", HumanEvaluationDB, transform_human_evaluation
+ )
+ await migrate_collection(
+ "human_evaluations_scenarios",
+ HumanEvaluationScenarioDB,
+ transform_human_evaluation_scenario,
+ )
+ await migrate_collection("evaluations", EvaluationDB, transform_evaluation)
+ await migrate_collection(
+ "evaluation_scenarios", EvaluationScenarioDB, transform_evaluation_scenario
+ )
+ print("Migration completed successfully.")
+ except Exception as e:
+ print(f"\n====================== Error ======================\n")
+ print(f"Error occurred: {e}")
+ finally:
+ print_migration_report()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
new file mode 100644
index 0000000000..5951651a82
--- /dev/null
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -0,0 +1,140 @@
+import os
+import asyncio
+from datetime import datetime, timezone
+
+from pymongo import MongoClient
+from bson import ObjectId, DBRef
+from sqlalchemy import MetaData, Column, String, DateTime, text, create_engine
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+import uuid_utils.compat as uuid
+from sqlalchemy.future import select
+
+
+from agenta_backend.models.db_engine import db_engine
+
+from agenta_backend.models.db_models import (
+ IDsMappingDB,
+ Base,
+)
+
+BATCH_SIZE = 1000
+
+# MongoDB connection
+MONGO_URI = os.environ.get("MONGODB_URI")
+DATABASE_MODE = os.environ.get("DATABASE_MODE")
+mongo_client = MongoClient(MONGO_URI)
+mongo_db_name = f"agenta_{DATABASE_MODE}"
+mongo_db = mongo_client[mongo_db_name]
+
+migration_report = {}
+
+
+async def drop_all_tables():
+ """Drop all tables in the database."""
+ async with db_engine.engine.begin() as conn:
+ await conn.run_sync(Base.metadata.reflect)
+ await conn.run_sync(Base.metadata.drop_all)
+
+
+async def create_all_tables(tables):
+ """Create all tables in the database."""
+ async with db_engine.engine.begin() as conn:
+ for table in tables:
+ print(f"====================== Creating table for {table.__name__}")
+ await conn.run_sync(table.metadata.create_all)
+ print("All tables dropped and created.")
+
+
+async def store_mapping(table_name, mongo_id, uuid):
+ """Store the mapping of MongoDB ObjectId to UUID in the mapping table."""
+ async with db_engine.get_session() as session:
+ async with session.begin():
+ mapping = IDsMappingDB(
+ table_name=table_name, objectid=str(mongo_id), uuid=uuid
+ )
+ session.add(mapping)
+
+
+async def get_mapped_uuid(mongo_id):
+ """Retrieve the mapped UUID for a given MongoDB ObjectId."""
+ async with db_engine.get_session() as session:
+ async with session.begin():
+ stmt = select(IDsMappingDB.uuid).filter(
+ IDsMappingDB.objectid == str(mongo_id)
+ )
+ result = await session.execute(stmt)
+ row = result.first()
+ return row[0] if row else None
+
+
+def get_datetime(value):
+ """Helper function to handle datetime fields."""
+ if isinstance(value, str):
+ return datetime.fromisoformat(value.replace("Z", "+00:00"))
+ return value if value else datetime.now(timezone.utc)
+
+
+def generate_uuid():
+ """Generate a new UUID."""
+ return uuid.uuid7()
+
+
+def update_migration_report(collection_name, total_docs, migrated_docs):
+ migration_report[collection_name] = {"total": total_docs, "migrated": migrated_docs}
+
+
+def print_migration_report():
+ print("\n====================== Migration Report ======================")
+
+ # Headers
+ headers = ["Table", "Total in MongoDB", "Migrated to PostgreSQL"]
+
+ # Determine the maximum lengths for each column including headers
+ max_table_length = max(
+ len(headers[0]), max(len(table) for table in migration_report.keys())
+ )
+ max_total_length = max(
+ len(headers[1]),
+ max(len(str(counts["total"])) for counts in migration_report.values()),
+ )
+ max_migrated_length = max(
+ len(headers[2]),
+ max(len(str(counts["migrated"])) for counts in migration_report.values()),
+ )
+
+ # Set the header and divider with appropriate padding
+ table_header = f"| {headers[0].ljust(max_table_length)} | {headers[1].ljust(max_total_length)} | {headers[2].ljust(max_migrated_length)} |"
+ table_divider = f"|{'-' * (max_table_length + 2)}|{'-' * (max_total_length + 2)}|{'-' * (max_migrated_length + 2)}|"
+
+ print(table_header)
+ print(table_divider)
+
+ for table, counts in migration_report.items():
+ table_row = f"| {table.ljust(max_table_length)} | {str(counts['total']).ljust(max_total_length)} | {str(counts['migrated']).ljust(max_migrated_length)} |"
+ print(table_row)
+
+
+async def migrate_collection(collection_name, model_class, transformation_func):
+ """General function to migrate a collection to a SQL table."""
+ print(
+ f"\n====================== Migrating {collection_name}... ======================\n"
+ )
+ total_docs = mongo_db[collection_name].count_documents({})
+ migrated_docs = 0
+ async with db_engine.get_session() as session:
+ async with session.begin():
+ for skip in range(0, total_docs, BATCH_SIZE):
+ batch = await asyncio.get_event_loop().run_in_executor(
+ None,
+ lambda: list(
+ mongo_db[collection_name].find().skip(skip).limit(BATCH_SIZE)
+ ),
+ )
+ for document in batch:
+ transformed_document = await transformation_func(document)
+ session.add(model_class(**transformed_document))
+ migrated_docs += 1
+ await session.commit()
+ update_migration_report(collection_name, total_docs, migrated_docs)
From 73e6677067c2abe82daeba05f64d30b99cc42261 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 7 Jun 2024 22:08:58 +0200
Subject: [PATCH 035/268] fix evaluations
---
.../migrations/mongo_to_postgres/migration.py | 81 ++++++++++++++++---
.../migrations/mongo_to_postgres/utils.py | 17 +++-
2 files changed, 85 insertions(+), 13 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index ce55e1b5ea..cd56d3dab4 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -1,3 +1,4 @@
+import json
import os
import asyncio
from datetime import datetime, timezone
@@ -10,6 +11,7 @@
# Assuming agenta_backend.models.db_models contains your SQLAlchemy models
from agenta_backend.models.db_models import (
+ EvaluationAggregatedResultDB,
Base,
UserDB,
ImageDB,
@@ -28,6 +30,8 @@
EvaluationDB,
EvaluationScenarioDB,
IDsMappingDB,
+ EvaluationEvaluatorConfigDB,
+ EvaluationScenarioResultDB,
)
from agenta_backend.migrations.mongo_to_postgres.utils import (
@@ -61,6 +65,8 @@
EvaluationDB,
EvaluationScenarioDB,
IDsMappingDB,
+ EvaluationEvaluatorConfigDB,
+ EvaluationScenarioResultDB,
]
@@ -272,16 +278,12 @@ async def transform_test_set(test_set):
async def transform_evaluator_config(config):
- evaluation_uuid = await get_mapped_uuid(config["evaluation"].id)
- scenario_uuid = await get_mapped_uuid(config["evaluation_scenario"].id)
app_uuid = await get_mapped_uuid(config["app"].id)
user_uuid = await get_mapped_uuid(config["user"].id)
config_uuid = generate_uuid()
await store_mapping("evaluators_configs", config["_id"], config_uuid)
return {
"id": config_uuid,
- "evaluation_id": evaluation_uuid,
- "evaluation_scenario_id": scenario_uuid,
"app_id": app_uuid,
"user_id": user_uuid,
"name": config["name"],
@@ -335,6 +337,40 @@ async def transform_human_evaluation_scenario(scenario):
}
+async def convert_aggregated_results(results, evaluation_id):
+ """Convert evaluator_config ObjectIds in aggregated_results to UUIDs and structure them."""
+ aggregated_results = []
+ for result in results:
+ evaluator_config_uuid = await get_mapped_uuid(result["evaluator_config"])
+ result_uuid = generate_uuid()
+ aggregated_results.append(
+ {
+ "id": result_uuid,
+ "evaluation_id": evaluation_id,
+ "evaluator_config_id": evaluator_config_uuid,
+ "result": result["result"],
+ }
+ )
+ return aggregated_results
+
+
+async def convert_scenario_aggregated_results(results, scenario_id):
+ """Convert evaluator_config ObjectIds in scenario aggregated_results to UUIDs and structure them."""
+ scenario_aggregated_results = []
+ for result in results:
+ evaluator_config_uuid = await get_mapped_uuid(result["evaluator_config"])
+ result_uuid = generate_uuid()
+ scenario_aggregated_results.append(
+ {
+ "id": result_uuid,
+ "evaluation_scenario_id": scenario_id,
+ "evaluator_config_id": evaluator_config_uuid,
+ "result": result["result"],
+ }
+ )
+ return scenario_aggregated_results
+
+
async def transform_evaluation(evaluation):
app_uuid = await get_mapped_uuid(evaluation["app"].id)
user_uuid = await get_mapped_uuid(evaluation["user"].id)
@@ -342,8 +378,10 @@ async def transform_evaluation(evaluation):
variant_uuid = await get_mapped_uuid(evaluation["variant"])
revision_uuid = await get_mapped_uuid(evaluation["variant_revision"])
evaluation_uuid = generate_uuid()
+
await store_mapping("evaluations", evaluation["_id"], evaluation_uuid)
- return {
+
+ transformed_evaluation = {
"id": evaluation_uuid,
"app_id": app_uuid,
"user_id": user_uuid,
@@ -351,7 +389,6 @@ async def transform_evaluation(evaluation):
"testset_id": test_set_uuid,
"variant_id": variant_uuid,
"variant_revision_id": revision_uuid,
- "aggregated_results": evaluation["aggregated_results"],
"average_cost": evaluation["average_cost"],
"total_cost": evaluation["total_cost"],
"average_latency": evaluation["average_latency"],
@@ -359,14 +396,22 @@ async def transform_evaluation(evaluation):
"updated_at": get_datetime(evaluation.get("updated_at")),
}
+ aggregated_results = await convert_aggregated_results(
+ evaluation["aggregated_results"], evaluation_uuid
+ )
+
+ return transformed_evaluation, aggregated_results
+
async def transform_evaluation_scenario(scenario):
user_uuid = await get_mapped_uuid(scenario["user"].id)
evaluation_uuid = await get_mapped_uuid(scenario["evaluation"].id)
variant_uuid = await get_mapped_uuid(scenario["variant_id"])
scenario_uuid = generate_uuid()
+
await store_mapping("evaluation_scenarios", scenario["_id"], scenario_uuid)
- return {
+
+ transformed_scenario = {
"id": scenario_uuid,
"user_id": user_uuid,
"evaluation_id": evaluation_uuid,
@@ -376,13 +421,20 @@ async def transform_evaluation_scenario(scenario):
"correct_answers": scenario.get("correct_answers"),
"is_pinned": scenario.get("is_pinned"),
"note": scenario.get("note"),
- "results": scenario["results"],
"latency": scenario.get("latency"),
"cost": scenario.get("cost"),
"created_at": get_datetime(scenario.get("created_at")),
"updated_at": get_datetime(scenario.get("updated_at")),
}
+ aggregated_results = []
+ if "results" in scenario:
+ aggregated_results = await convert_scenario_aggregated_results(
+ scenario["results"], scenario_uuid
+ )
+
+ return transformed_scenario, aggregated_results
+
async def main():
try:
@@ -420,10 +472,19 @@ async def main():
HumanEvaluationScenarioDB,
transform_human_evaluation_scenario,
)
- await migrate_collection("evaluations", EvaluationDB, transform_evaluation)
await migrate_collection(
- "evaluation_scenarios", EvaluationScenarioDB, transform_evaluation_scenario
+ "new_evaluations",
+ EvaluationDB,
+ transform_evaluation,
+ EvaluationAggregatedResultDB,
)
+ await migrate_collection(
+ "new_evaluation_scenarios",
+ EvaluationScenarioDB,
+ transform_evaluation_scenario,
+ EvaluationScenarioResultDB,
+ )
+
print("Migration completed successfully.")
except Exception as e:
print(f"\n====================== Error ======================\n")
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index 5951651a82..bc41d81dd9 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -116,7 +116,9 @@ def print_migration_report():
print(table_row)
-async def migrate_collection(collection_name, model_class, transformation_func):
+async def migrate_collection(
+ collection_name, model_class, transformation_func, association_model=None
+):
"""General function to migrate a collection to a SQL table."""
print(
f"\n====================== Migrating {collection_name}... ======================\n"
@@ -133,8 +135,17 @@ async def migrate_collection(collection_name, model_class, transformation_func):
),
)
for document in batch:
- transformed_document = await transformation_func(document)
- session.add(model_class(**transformed_document))
+ if association_model:
+ (
+ transformed_document,
+ associated_entities,
+ ) = await transformation_func(document)
+ session.add(model_class(**transformed_document))
+ for assoc_entity in associated_entities:
+ session.add(association_model(**assoc_entity))
+ else:
+ transformed_document = await transformation_func(document)
+ session.add(model_class(**transformed_document))
migrated_docs += 1
await session.commit()
update_migration_report(collection_name, total_docs, migrated_docs)
From e35dcc7ee55c70b74665a4157cd67a70d0587738 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 7 Jun 2024 22:10:38 +0200
Subject: [PATCH 036/268] evaluation schema fixes
---
.../agenta_backend/models/db_models.py | 70 +++++++++++++++----
1 file changed, 57 insertions(+), 13 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index b4f25bf925..e60fc0a33b 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -322,14 +322,7 @@ class EvaluatorConfigDB(Base):
unique=True,
nullable=False,
)
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
- evaluation = relationship("EvaluationDB", back_populates="evaluator_configs")
- evaluation_scenario_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id")
- )
- evaluation_scenario = relationship(
- "EvaluationScenarioDB", back_populates="evaluator_configs"
- )
+
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
app = relationship("AppDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
@@ -406,6 +399,46 @@ class HumanEvaluationScenarioDB(Base):
note = Column(String)
+class EvaluationAggregatedResultDB(Base):
+ __tablename__ = "evaluation_aggregated_results"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
+ evaluation = relationship("EvaluationDB", back_populates="aggregated_results")
+ evaluator_config_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
+ )
+ evaluator_config = relationship("EvaluatorConfigDB")
+ result = Column(JSONB) # Result
+
+
+class EvaluationScenarioResultDB(Base):
+ __tablename__ = "evaluation_scenario_results"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ evaluation_scenario_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id")
+ )
+ evaluation_scenario = relationship("EvaluationScenarioDB", back_populates="results")
+ evaluator_config_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
+ )
+ evaluator_config = relationship("EvaluatorConfigDB")
+ result = Column(JSONB) # Result
+
+
class EvaluationDB(Base):
__tablename__ = "evaluations"
@@ -429,8 +462,9 @@ class EvaluationDB(Base):
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
)
variant_revision = relationship("AppVariantRevisionsDB")
- evaluator_configs = relationship("EvaluatorConfigDB", back_populates="evaluation")
- aggregated_results = Column(JSONB) # List of AggregatedResult
+ aggregated_results = relationship(
+ "EvaluationAggregatedResultDB", back_populates="evaluation"
+ )
average_cost = Column(JSONB) # Result
total_cost = Column(JSONB) # Result
average_latency = Column(JSONB) # Result
@@ -442,6 +476,17 @@ class EvaluationDB(Base):
)
+class EvaluationEvaluatorConfigDB(Base):
+ __tablename__ = "evaluation_evaluator_configs"
+
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluations.id"), primary_key=True
+ )
+ evaluator_config_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluators_configs.id"), primary_key=True
+ )
+
+
class EvaluationScenarioDB(Base):
__tablename__ = "evaluation_scenarios"
@@ -463,10 +508,9 @@ class EvaluationScenarioDB(Base):
correct_answers = Column(JSONB) # List of CorrectAnswer
is_pinned = Column(Boolean)
note = Column(String)
- evaluator_configs = relationship(
- "EvaluatorConfigDB", back_populates="evaluation_scenario"
+ results = relationship(
+ "EvaluationScenarioResultDB", back_populates="evaluation_scenario"
)
- results = Column(JSONB) # List of EvaluationScenarioResult
latency = Column(Integer)
cost = Column(Integer)
created_at = Column(
From bbd249a937cb06121d6a66e68482ad45d7bc86e5 Mon Sep 17 00:00:00 2001
From: ashrafchowdury
Date: Sat, 8 Jun 2024 22:26:11 +0600
Subject: [PATCH 037/268] enhance: enhanced the settings llm keys buttons and
container spacing
---
.../components/pages/settings/Secrets/Secrets.tsx | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/agenta-web/src/components/pages/settings/Secrets/Secrets.tsx b/agenta-web/src/components/pages/settings/Secrets/Secrets.tsx
index ab90134f2d..3a4a25e7cb 100644
--- a/agenta-web/src/components/pages/settings/Secrets/Secrets.tsx
+++ b/agenta-web/src/components/pages/settings/Secrets/Secrets.tsx
@@ -17,17 +17,15 @@ const useStyles = createUseStyles({
marginTop: 0,
},
container: {
- marginLeft: 0,
+ margin: "0px 0",
},
apiContainer: {
- margin: "0px 0",
+ marginBottom: 10,
},
input: {
display: "flex",
alignItems: "center",
width: 420,
- marginBottom: 8,
- marginLeft: 8,
},
})
@@ -48,12 +46,12 @@ export default function Secrets() {
servers!
-
+
Available Providers
-
+
{llmProviderKeys.map(({title, key}: LlmProvider, i: number) => (
-
+
{
saveLlmProviderKey(title, key)
@@ -78,6 +77,7 @@ export default function Secrets() {
Save
{
removeSingleLlmProviderKey(title)
From 5297e91fa7a94bfd55c1e917f90b1e68581dc381 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Sat, 8 Jun 2024 19:19:05 +0200
Subject: [PATCH 038/268] move shared models to a separate file
---
agenta-backend/agenta_backend/routers/app_router.py | 2 +-
agenta-backend/agenta_backend/routers/bases_router.py | 2 +-
agenta-backend/agenta_backend/routers/configs_router.py | 2 +-
agenta-backend/agenta_backend/routers/container_router.py | 2 +-
agenta-backend/agenta_backend/routers/environment_router.py | 2 +-
agenta-backend/agenta_backend/routers/evaluation_router.py | 2 +-
agenta-backend/agenta_backend/routers/evaluators_router.py | 2 +-
.../agenta_backend/routers/human_evaluation_router.py | 2 +-
agenta-backend/agenta_backend/routers/testset_router.py | 4 +++-
agenta-backend/agenta_backend/routers/variants_router.py | 2 +-
agenta-backend/agenta_backend/services/db_manager.py | 4 +++-
11 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/app_router.py b/agenta-backend/agenta_backend/routers/app_router.py
index 9fd85fc0da..e5987d28e3 100644
--- a/agenta-backend/agenta_backend/routers/app_router.py
+++ b/agenta-backend/agenta_backend/routers/app_router.py
@@ -57,7 +57,7 @@
check_rbac_permission,
check_apikey_action_access,
)
- from agenta_backend.commons.models.db_models import Permission
+ from agenta_backend.commons.models.shared_models import Permission
if isCloudProd():
diff --git a/agenta-backend/agenta_backend/routers/bases_router.py b/agenta-backend/agenta_backend/routers/bases_router.py
index cf1eaf7e98..149571e451 100644
--- a/agenta-backend/agenta_backend/routers/bases_router.py
+++ b/agenta-backend/agenta_backend/routers/bases_router.py
@@ -10,7 +10,7 @@
from agenta_backend.models.api.api_models import BaseOutput
if isCloudEE():
- from agenta_backend.commons.models.db_models import Permission
+ from agenta_backend.commons.models.shared_models import Permission
from agenta_backend.commons.utils.permissions import check_action_access
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index f05531d828..78fb041e52 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -15,7 +15,7 @@
)
if isCloudEE():
- from agenta_backend.commons.models.db_models import Permission
+ from agenta_backend.commons.models.shared_models import Permission
from agenta_backend.commons.utils.permissions import check_action_access
diff --git a/agenta-backend/agenta_backend/routers/container_router.py b/agenta-backend/agenta_backend/routers/container_router.py
index c7c06da8d3..7da10414b0 100644
--- a/agenta-backend/agenta_backend/routers/container_router.py
+++ b/agenta-backend/agenta_backend/routers/container_router.py
@@ -14,7 +14,7 @@
)
if isCloudEE():
- from agenta_backend.commons.models.db_models import Permission
+ from agenta_backend.commons.models.shared_models import Permission
from agenta_backend.commons.utils.permissions import check_action_access
from agenta_backend.commons.models.api.api_models import Image_ as Image
else:
diff --git a/agenta-backend/agenta_backend/routers/environment_router.py b/agenta-backend/agenta_backend/routers/environment_router.py
index a21efaa581..50af65bf01 100644
--- a/agenta-backend/agenta_backend/routers/environment_router.py
+++ b/agenta-backend/agenta_backend/routers/environment_router.py
@@ -8,7 +8,7 @@
from agenta_backend.models.api.api_models import DeployToEnvironmentPayload
if isCloudEE():
- from agenta_backend.commons.models.db_models import Permission
+ from agenta_backend.commons.models.shared_models import Permission
from agenta_backend.commons.utils.permissions import check_action_access
router = APIRouter()
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index cb71942398..2ed596f00c 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -21,7 +21,7 @@
)
if isCloudEE():
- from agenta_backend.commons.models.db_models import Permission
+ from agenta_backend.commons.models.shared_models import Permission
from agenta_backend.commons.utils.permissions import check_action_access
from beanie import PydanticObjectId as ObjectId
diff --git a/agenta-backend/agenta_backend/routers/evaluators_router.py b/agenta-backend/agenta_backend/routers/evaluators_router.py
index 3c1ddfc870..b0fce13c8d 100644
--- a/agenta-backend/agenta_backend/routers/evaluators_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluators_router.py
@@ -15,7 +15,7 @@
)
if isCloudEE():
- from agenta_backend.commons.models.db_models import Permission
+ from agenta_backend.commons.models.shared_models import Permission
from agenta_backend.commons.utils.permissions import check_action_access
router = APIRouter()
diff --git a/agenta-backend/agenta_backend/routers/human_evaluation_router.py b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
index 6cfadb4dc2..e32d313bb5 100644
--- a/agenta-backend/agenta_backend/routers/human_evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
@@ -27,7 +27,7 @@
)
if isCloudEE():
- from agenta_backend.commons.models.db_models import (
+ from agenta_backend.commons.models.shared_models import (
Permission,
) # noqa pylint: disable-all
from agenta_backend.commons.utils.permissions import (
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 94de6699a0..9050b8bd8d 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -28,9 +28,11 @@
check_action_access,
) # noqa pylint: disable-all
from agenta_backend.commons.models.db_models import (
- Permission,
TestSetDB_ as TestSetDB,
) # noqa pylint: disable-all
+ from agenta_backend.commons.models.shared_models import (
+ Permission,
+ ) # noqa pylint: disable-all
else:
from agenta_backend.models.db_models import TestSetDB
diff --git a/agenta-backend/agenta_backend/routers/variants_router.py b/agenta-backend/agenta_backend/routers/variants_router.py
index c47bafb91d..90e6fd89b3 100644
--- a/agenta-backend/agenta_backend/routers/variants_router.py
+++ b/agenta-backend/agenta_backend/routers/variants_router.py
@@ -18,7 +18,7 @@
from agenta_backend.commons.utils.permissions import (
check_action_access,
) # noqa pylint: disable-all
- from agenta_backend.commons.models.db_models import (
+ from agenta_backend.commons.models.shared_models import (
Permission,
) # noqa pylint: disable-all
from agenta_backend.commons.models.api.api_models import (
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index cd9f823416..065ae1f83c 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -28,7 +28,6 @@
from agenta_backend.commons.services.selectors import get_user_org_and_workspace_id
from agenta_backend.commons.models.db_models import (
- Permission,
AppDB_ as AppDB,
UserDB_ as UserDB,
ImageDB_ as ImageDB,
@@ -44,6 +43,9 @@
EvaluationScenarioDB_ as EvaluationScenarioDB,
HumanEvaluationScenarioDB_ as HumanEvaluationScenarioDB,
)
+ from agenta_backend.commons.models.shared_models import (
+ Permission,
+ )
else:
from agenta_backend.models.db_models import (
From 10a9fc7e60abf8265dac69c0f21adb39b43650b0 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 10 Jun 2024 15:34:32 +0200
Subject: [PATCH 039/268] fix issue of order of table creation
---
agenta-backend/agenta_backend/models/base.py | 3 +
.../agenta_backend/models/db_engine.py | 2 +-
.../agenta_backend/models/db_models.py | 3 +-
.../agenta_backend/services/db_manager.py | 66 +++++++++++--------
4 files changed, 42 insertions(+), 32 deletions(-)
create mode 100644 agenta-backend/agenta_backend/models/base.py
diff --git a/agenta-backend/agenta_backend/models/base.py b/agenta-backend/agenta_backend/models/base.py
new file mode 100644
index 0000000000..59be70308c
--- /dev/null
+++ b/agenta-backend/agenta_backend/models/base.py
@@ -0,0 +1,3 @@
+from sqlalchemy.orm import declarative_base
+
+Base = declarative_base()
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index e59f78ccec..ae5f8a9a76 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -69,7 +69,7 @@
]
if isCloudEE():
- models.extend([SpanDB, OrganizationDB, WorkspaceDB, APIKeyDB])
+ models.extend([OrganizationDB, WorkspaceDB, APIKeyDB])
# Configure and set logging level
logger = logging.getLogger(__name__)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index e60fc0a33b..af2bea84a2 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -16,8 +16,7 @@
from sqlalchemy.dialects.postgresql import UUID, JSONB
from agenta_backend.models.shared_models import TemplateType
-
-Base = declarative_base()
+from agenta_backend.models.base import Base
class UserDB(Base):
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 065ae1f83c..02722c2d57 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -76,6 +76,7 @@
EvaluationScenarioResult,
EvaluationScenarioInput,
EvaluationScenarioOutput,
+ TemplateType,
)
from beanie.operators import In
@@ -1727,41 +1728,48 @@ async def add_zip_template(key, value):
Adds a new s3 zip template to the database
Args:
+ session: SQLAlchemy async session
key: key of the json file
value (dict): dictionary value of a key
Returns:
template_id (Str): The Id of the created template.
"""
- existing_template = await TemplateDB.find_one(TemplateDB.name == key)
-
- if existing_template:
- # Compare existing values with new values
- if (
- existing_template.title == value.get("name")
- and existing_template.description == value.get("description")
- and existing_template.template_uri == value.get("template_uri")
- ):
- # Values are unchanged, return existing template id
- return str(existing_template.id)
- else:
- # Values are changed, delete existing template
- await existing_template.delete()
-
- # Create a new template
- template_name = key
- title = value.get("name")
- description = value.get("description")
- template_uri = value.get("template_uri")
-
- template_db_instance = TemplateDB(
- type="zip",
- name=template_name,
- title=title,
- description=description,
- template_uri=template_uri,
- )
- await template_db_instance.create()
+ # Find existing template
+ async with db_engine.get_session() as session:
+ stmt = select(TemplateDB).where(TemplateDB.name == key)
+ result = await session.execute(stmt)
+ existing_template = result.scalars().first()
+
+ if existing_template:
+ # Compare existing values with new values
+ if (
+ existing_template.title == value.get("name")
+ and existing_template.description == value.get("description")
+ and existing_template.template_uri == value.get("template_uri")
+ ):
+ # Values are unchanged, return existing template id
+ return str(existing_template.id)
+ else:
+ # Values are changed, delete existing template
+ await session.delete(existing_template)
+ await session.commit()
+
+ # Create a new template
+ template_name = key
+ title = value.get("name")
+ description = value.get("description")
+ template_uri = value.get("template_uri")
+
+ template_db_instance = TemplateDB(
+ type=TemplateType.ZIP,
+ name=template_name,
+ title=title,
+ description=description,
+ template_uri=template_uri,
+ )
+ session.add(template_db_instance)
+ await session.commit()
return str(template_db_instance.id)
From e6f28f6244d48ec5c19a1ab42a6faa8e41c7067c Mon Sep 17 00:00:00 2001
From: ashrafchowdury
Date: Mon, 10 Jun 2024 21:25:09 +0600
Subject: [PATCH 040/268] refactor: improved the testset new api page imports
---
.../pages/apps/[app_id]/testsets/new/api/index.tsx | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/agenta-web/src/pages/apps/[app_id]/testsets/new/api/index.tsx b/agenta-web/src/pages/apps/[app_id]/testsets/new/api/index.tsx
index 43c03465b1..829788c3e1 100644
--- a/agenta-web/src/pages/apps/[app_id]/testsets/new/api/index.tsx
+++ b/agenta-web/src/pages/apps/[app_id]/testsets/new/api/index.tsx
@@ -1,12 +1,12 @@
import DynamicCodeBlock from "@/components/DynamicCodeBlock/DynamicCodeBlock"
-import pythonCode from "../../../../../../code_snippets/testsets/create_with_json/python"
-import cURLCode from "../../../../../../code_snippets/testsets/create_with_json/curl"
-import tsCode from "../../../../../../code_snippets/testsets/create_with_json/typescript"
+import pythonCode from "@/code_snippets/testsets/create_with_json/python"
+import cURLCode from "@/code_snippets/testsets/create_with_json/curl"
+import tsCode from "@/code_snippets/testsets/create_with_json/typescript"
-import pythonCodeUpload from "../../../../../../code_snippets/testsets/create_with_upload/python"
-import cURLCodeUpload from "../../../../../../code_snippets/testsets/create_with_upload/curl"
-import tsCodeUpload from "../../../../../../code_snippets/testsets/create_with_upload/typescript"
+import pythonCodeUpload from "@/code_snippets/testsets/create_with_upload/python"
+import cURLCodeUpload from "@/code_snippets/testsets/create_with_upload/curl"
+import tsCodeUpload from "@/code_snippets/testsets/create_with_upload/typescript"
import {Typography} from "antd"
import {useRouter} from "next/router"
import {createUseStyles} from "react-jss"
From 2f755f01beeb4f5d7bf70a8abdcc9eb60d51480c Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:29:08 +0100
Subject: [PATCH 041/268] refactor (backend): improved session life-cycle and
ensured atomicity in the case an exception occurred
---
.../agenta_backend/models/db_engine.py | 26 +++++++++++++------
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index e59f78ccec..731b8a5a93 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -1,8 +1,11 @@
import os
import logging
+from asyncio import current_task
+from typing import AsyncGenerator
from contextlib import asynccontextmanager
-from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
-from sqlalchemy.orm import sessionmaker
+
+from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker, async_scoped_session
+
from agenta_backend.utils.common import isCloudEE
if isCloudEE():
@@ -69,7 +72,7 @@
]
if isCloudEE():
- models.extend([SpanDB, OrganizationDB, WorkspaceDB, APIKeyDB])
+ models.extend([SpanDB, OrganizationDB, WorkspaceDB, APIKeyDB]) # type: ignore
# Configure and set logging level
logger = logging.getLogger(__name__)
@@ -83,10 +86,14 @@ class DBEngine:
def __init__(self) -> None:
self.mode = os.environ.get("DATABASE_MODE", "v2")
- self.db_url = f"{os.environ.get('POSTGRES_URI')}/agenta_{self.mode}"
- self.engine = create_async_engine(self.db_url, echo=True)
- self.async_session = sessionmaker(
- self.engine, expire_on_commit=False, class_=AsyncSession
+ self.db_url = f"{os.environ.get('POSTGRES_URI')}"
+ self.engine = create_async_engine(url=self.db_url)
+ self.async_session_maker = async_sessionmaker(
+ bind=self.engine, class_=AsyncSession, expire_on_commit=False
+ )
+ self.async_session = async_scoped_session(
+ session_factory=self.async_session_maker,
+ scopefunc=current_task
)
async def init_db(self):
@@ -110,10 +117,13 @@ async def remove_db(self) -> None:
await conn.run_sync(model.metadata.drop_all)
@asynccontextmanager
- async def get_session(self):
+ async def get_session(self) -> AsyncGenerator[AsyncSession, None]:
session = self.async_session()
try:
yield session
+ except Exception as e:
+ await session.rollback()
+ raise e
finally:
await session.close()
From 4d31e5de3b2c7f0a7d8a5c74681bb01396386b43 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:30:16 +0100
Subject: [PATCH 042/268] refactor (backend): set template_uri, docker_uri and
tags column to be nullable
---
agenta-backend/agenta_backend/models/db_models.py | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index e60fc0a33b..1ed8d624cd 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -1,6 +1,8 @@
from datetime import datetime, timezone
-from pydantic import BaseModel, Field
from typing import Any, Dict, List, Optional
+
+import uuid_utils.compat as uuid
+from pydantic import BaseModel, Field
from sqlalchemy import (
Column,
String,
@@ -12,11 +14,11 @@
Enum,
)
from sqlalchemy.orm import relationship, declarative_base
-import uuid_utils.compat as uuid
from sqlalchemy.dialects.postgresql import UUID, JSONB
from agenta_backend.models.shared_models import TemplateType
+
Base = declarative_base()
@@ -53,9 +55,9 @@ class ImageDB(Base):
nullable=False,
)
type = Column(String, default="image")
- template_uri = Column(String)
- docker_id = Column(String, index=True)
- tags = Column(String)
+ template_uri = Column(String, nullable=True)
+ docker_id = Column(String, nullable=True, index=True)
+ tags = Column(String, nullable=True)
deletable = Column(Boolean, default=True)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
@@ -526,4 +528,4 @@ class IDsMappingDB(Base):
table_name = Column(String, nullable=False)
objectid = Column(String, primary_key=True)
- uuid = Column(UUID(as_uuid=True), nullable=False)
+ uuid = Column(UUID(as_uuid=True), nullable=False)
\ No newline at end of file
From 110e339f363983fa6d3c55fbd8744ea37dc3b3d5 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:31:43 +0100
Subject: [PATCH 043/268] refactor (backend): cleanup api models and update
ground_truth_keys_dict in tasks/evaluations.py
---
agenta-backend/agenta_backend/models/api/api_models.py | 1 -
agenta-backend/agenta_backend/models/api/testset_model.py | 2 +-
agenta-backend/agenta_backend/models/shared_models.py | 4 ++--
agenta-backend/agenta_backend/tasks/evaluations.py | 4 ++--
4 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/api_models.py b/agenta-backend/agenta_backend/models/api/api_models.py
index e01568001c..656a2fc554 100644
--- a/agenta-backend/agenta_backend/models/api/api_models.py
+++ b/agenta-backend/agenta_backend/models/api/api_models.py
@@ -93,7 +93,6 @@ class AppVariantResponse(BaseModel):
variant_id: str
variant_name: str
parameters: Optional[Dict[str, Any]]
- previous_variant_name: Optional[str]
user_id: str
base_name: str
base_id: str
diff --git a/agenta-backend/agenta_backend/models/api/testset_model.py b/agenta-backend/agenta_backend/models/api/testset_model.py
index 272fd15215..48621ddd1f 100644
--- a/agenta-backend/agenta_backend/models/api/testset_model.py
+++ b/agenta-backend/agenta_backend/models/api/testset_model.py
@@ -47,7 +47,7 @@ class NewTestset(BaseModel):
class TestSetOutputResponse(BaseModel):
id: str = Field(..., alias="_id")
name: str
- created_at: datetime
+ created_at: str
class Config:
allow_population_by_field_name = True
diff --git a/agenta-backend/agenta_backend/models/shared_models.py b/agenta-backend/agenta_backend/models/shared_models.py
index a84d7768b5..56affd84f8 100644
--- a/agenta-backend/agenta_backend/models/shared_models.py
+++ b/agenta-backend/agenta_backend/models/shared_models.py
@@ -26,12 +26,12 @@ class InvokationResult(BaseModel):
class EvaluationScenarioResult(BaseModel):
- evaluator_config: int
+ evaluator_config: str
result: Result
class AggregatedResult(BaseModel):
- evaluator_config: int
+ evaluator_config: str
result: Result
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index ed2d038b58..cd49dfe9c8 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -56,9 +56,9 @@
# Fetch all evaluators and precompute ground truth keys
all_evaluators = get_evaluators()
ground_truth_keys_dict = {
- evaluator["key"]: [
+ evaluator.key: [
key
- for key, value in evaluator.get("settings_template", {}).items()
+ for key, value in evaluator.settings_template.items()
if value.get("ground_truth_key") is True
]
for evaluator in all_evaluators
From c10dd88cd5f48614b431c7fd6210c829ca7890b0 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:33:23 +0100
Subject: [PATCH 044/268] refactor (backend): migrate beanie odm query to
sqlalchemy
---
.../agenta_backend/services/db_manager.py | 1955 ++++++++++-------
1 file changed, 1178 insertions(+), 777 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 065ae1f83c..03e81c5af6 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1,4 +1,5 @@
import os
+import uuid
import logging
from pathlib import Path
from urllib.parse import urlparse
@@ -12,9 +13,11 @@
from agenta_backend.utils.common import isCloudEE
from agenta_backend.services.json_importer_helper import get_json
-from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy import func
from sqlalchemy.future import select
from sqlalchemy.exc import NoResultFound
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm import selectinload, joinedload
from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.api.api_models import (
@@ -67,9 +70,12 @@
from agenta_backend.models.db_models import (
TemplateDB,
AppVariantRevisionsDB,
+ EvaluationScenarioResultDB,
+ EvaluationAggregatedResultDB,
)
from agenta_backend.models.shared_models import (
+ Result,
ConfigDB,
CorrectAnswer,
AggregatedResult,
@@ -78,8 +84,6 @@
EvaluationScenarioOutput,
)
-from beanie.operators import In
-from beanie import PydanticObjectId as ObjectId
# Define logger
logger = logging.getLogger(__name__)
@@ -94,8 +98,8 @@ async def add_testset_to_app_variant(
template_name: str,
app_name: str,
user_uid: str,
- org_id: str = None,
- workspace_id: str = None,
+ org_id: Optional[str] = None,
+ workspace_id: Optional[str] = None,
):
"""Add testset to app variant.
Args:
@@ -106,47 +110,48 @@ async def add_testset_to_app_variant(
user_uid (str): The uid of the user
"""
- try:
- app_db = await get_app_instance_by_id(app_id)
- user_db = await get_user(user_uid)
-
- json_path = os.path.join(
- PARENT_DIRECTORY,
- "resources",
- "default_testsets",
- f"{template_name}_testset.json",
- )
-
- if os.path.exists(json_path):
- csvdata = get_json(json_path)
- testset = {
- "name": f"{app_name}_testset",
- "app_name": app_name,
- "created_at": datetime.now(timezone.utc).isoformat(),
- "csvdata": csvdata,
- }
- testset_db = TestSetDB(
- **testset,
- app=app_db,
- user=user_db,
+ async with db_engine.get_session() as session:
+ try:
+ app_db = await get_app_instance_by_id(app_id)
+ user_db = await get_user(user_uid)
+
+ json_path = os.path.join(
+ PARENT_DIRECTORY,
+ "resources",
+ "default_testsets",
+ f"{template_name}_testset.json",
)
- if isCloudEE():
- # assert that if organization is provided, workspace_id is also provided, and vice versa
- assert (
- org_id is not None and workspace_id is not None
- ), "organization and workspace must be provided together"
+ if os.path.exists(json_path):
+ csvdata = get_json(json_path)
+ testset = {
+ "name": f"{app_name}_testset",
+ "csvdata": csvdata,
+ }
+ testset_db = TestSetDB(
+ **testset,
+ app_id=app_db.id,
+ user_id=user_db.id,
+ )
+
+ if isCloudEE():
+ # assert that if org_id is provided, workspace_id is also provided, and vice versa
+ assert (
+ org_id is not None and workspace_id is not None
+ ), "organization and workspace must be provided together"
- organization_db = await db_manager_ee.get_organization(org_id)
- workspace_db = await db_manager_ee.get_workspace(workspace_id)
+ organization_db = await db_manager_ee.get_organization(org_id) # type: ignore
+ workspace_db = await db_manager_ee.get_workspace(workspace_id) # type: ignore
- testset_db.organization = organization_db
- testset_db.workspace = workspace_db
+ testset_db.organization_id = organization_db.id
+ testset_db.workspace_id = workspace_db.id
- await testset_db.create()
+ session.add(testset_db)
+ await session.commit()
+ await session.refresh(testset_db)
- except Exception as e:
- print(f"An error occurred in adding the default testset: {e}")
+ except Exception as e:
+ print(f"An error occurred in adding the default testset: {e}")
async def get_image_by_id(image_id: str) -> ImageDB:
@@ -159,8 +164,12 @@ async def get_image_by_id(image_id: str) -> ImageDB:
ImageDB: instance of image object
"""
- image = await ImageDB.find_one(ImageDB.id == ObjectId(image_id))
- return image
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(ImageDB).filter_by(id=uuid.UUID(image_id))
+ )
+ image = result.scalars().one_or_none()
+ return image
async def fetch_app_by_id(app_id: str) -> AppDB:
@@ -169,9 +178,12 @@ async def fetch_app_by_id(app_id: str) -> AppDB:
Args:
app_id: _description_
"""
+
assert app_id is not None, "app_id cannot be None"
- app = await AppDB.find_one(AppDB.id == ObjectId(app_id), fetch_links=True)
- return app
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(id=uuid.UUID(app_id)))
+ app = result.scalars().one_or_none()
+ return app
async def fetch_app_variant_by_id(
@@ -186,11 +198,19 @@ async def fetch_app_variant_by_id(
Returns:
AppVariantDB: The fetched app variant, or None if no app variant was found.
"""
+
assert app_variant_id is not None, "app_variant_id cannot be None"
- app_variant = await AppVariantDB.find_one(
- AppVariantDB.id == ObjectId(app_variant_id), fetch_links=True
- )
- return app_variant
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB)
+ .options(
+ joinedload(AppVariantDB.base),
+ joinedload(AppVariantDB.app)
+ )
+ .filter_by(id=uuid.UUID(app_variant_id))
+ )
+ app_variant = result.scalars().one_or_none()
+ return app_variant
async def fetch_app_variant_by_base_id(base_id: str) -> Optional[AppVariantDB]:
@@ -203,12 +223,14 @@ async def fetch_app_variant_by_base_id(base_id: str) -> Optional[AppVariantDB]:
Returns:
AppVariantDB: The fetched app variant, or None if no app variant was found.
"""
+
assert base_id is not None, "base_id cannot be None"
- app_variant = await AppVariantDB.find_one(
- AppVariantDB.base.id == ObjectId(base_id),
- fetch_links=True,
- )
- return app_variant
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB).filter_by(base_id=uuid.UUID(base_id))
+ )
+ app_variant = result.scalars().one_or_none()
+ return app_variant
async def fetch_app_variant_by_base_id_and_config_name(
@@ -224,14 +246,17 @@ async def fetch_app_variant_by_base_id_and_config_name(
Returns:
AppVariantDB: The fetched app variant, or None if no app variant was found.
"""
+
assert base_id is not None, "base_id cannot be None"
assert config_name is not None, "config_name cannot be None"
- app_variant = await AppVariantDB.find_one(
- AppVariantDB.base.id == ObjectId(base_id),
- AppVariantDB.config_name == config_name,
- fetch_links=True,
- )
- return app_variant
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB).filter_by(
+ base_id=uuid.UUID(base_id), config_name=config_name
+ )
+ )
+ app_variant = result.scalars().one_or_none()
+ return app_variant
async def fetch_app_variant_revision_by_variant(
@@ -246,18 +271,22 @@ async def fetch_app_variant_revision_by_variant(
Returns:
AppVariantRevisionDB
"""
+
assert app_variant_id is not None, "app_variant_id cannot be None"
assert revision is not None, "revision cannot be None"
- app_variant_revision = await AppVariantRevisionsDB.find_one(
- AppVariantRevisionsDB.variant.id == ObjectId(app_variant_id),
- AppVariantRevisionsDB.revision == revision,
- )
- if app_variant_revision is None:
- raise Exception(
- f"app variant revision for app_variant {app_variant_id} and revision {revision} not found"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantRevisionsDB).filter_by(
+ variant_id=uuid.UUID(app_variant_id), revision=revision
+ )
)
- return app_variant_revision
+ app_variant_revision = result.scalars().one_or_none()
+ if app_variant_revision is None:
+ raise Exception(
+ f"app variant revision for app_variant {app_variant_id} and revision {revision} not found"
+ )
+ return app_variant_revision
async def fetch_base_by_id(base_id: str) -> Optional[VariantBaseDB]:
@@ -269,15 +298,18 @@ async def fetch_base_by_id(base_id: str) -> Optional[VariantBaseDB]:
VariantBaseDB: The fetched base, or None if no base was found.
"""
- if base_id is None:
- raise Exception("No base_id provided")
-
- base = await VariantBaseDB.find_one(VariantBaseDB.id == ObjectId(base_id))
- if not base:
- logger.error("Base not found")
- return None
-
- return base
+ assert base_id is not None, "no base_id provided"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(VariantBaseDB)
+ .options(
+ joinedload(VariantBaseDB.image),
+ joinedload(VariantBaseDB.deployment)
+ )
+ .filter_by(id=uuid.UUID(base_id))
+ )
+ base = result.scalars().one_or_none()
+ return base
async def fetch_app_variant_by_name_and_appid(
@@ -293,12 +325,14 @@ async def fetch_app_variant_by_name_and_appid(
AppVariantDB: the instance of the app variant
"""
- query_expressions = (
- AppVariantDB.variant_name == variant_name,
- AppVariantDB.app.id == ObjectId(app_id),
- )
- app_variant_db = await AppVariantDB.find_one(query_expressions)
- return app_variant_db
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB).filter_by(
+ variant_name=variant_name, app_id=uuid.UUID(app_id)
+ )
+ )
+ app_variant = result.scalars().one_or_none()
+ return app_variant
async def create_new_variant_base(
@@ -320,25 +354,30 @@ async def create_new_variant_base(
Returns:
VariantBaseDB: The created base.
"""
+
logger.debug(f"Creating new base: {base_name} with image: {image} for app: {app}")
- base = VariantBaseDB(
- app=app,
- user=user,
- base_name=base_name,
- image=image,
- )
+ async with db_engine.get_session() as session:
+ base = VariantBaseDB(
+ app_id=app.id,
+ user_id=user.id,
+ base_name=base_name,
+ image_id=image.id,
+ )
- if isCloudEE():
- # assert that if organization is provided, workspace_id is also provided, and vice versa
- assert (
- organization is not None and workspace is not None
- ), "organization and workspace must be provided together"
+ if isCloudEE():
+ # assert that if organization is provided, workspace is also provided, and vice versa
+ assert (
+ organization is not None and workspace is not None
+ ), "organization and workspace must be provided together"
+
+ base.organization_id = uuid.UUID(organization)
+ base.workspace_id = uuid.UUID(workspace)
- base.organization = organization
- base.workspace = workspace
+ session.add(base)
+ await session.commit()
+ await session.refresh(base)
- await base.create()
- return base
+ return base
async def create_new_config(
@@ -346,17 +385,19 @@ async def create_new_config(
parameters: Dict,
) -> ConfigDB:
"""Create a new config.
+
Args:
config_name (str): The name of the config.
parameters (Dict): The parameters of the config.
+
Returns:
ConfigDB: The created config.
"""
- config_db = ConfigDB(
+
+ return ConfigDB(
config_name=config_name,
parameters=parameters,
)
- return config_db
async def create_new_app_variant(
@@ -367,8 +408,6 @@ async def create_new_app_variant(
base: VariantBaseDB,
config: ConfigDB,
base_name: str,
- config_name: str,
- parameters: Dict,
organization=None,
workspace=None,
) -> AppVariantDB:
@@ -381,45 +420,52 @@ async def create_new_app_variant(
Returns:
AppVariantDB: The created variant.
"""
+
assert (
- parameters == {}
+ config.parameters == {}
), "Parameters should be empty when calling create_new_app_variant (otherwise revision should not be set to 0)"
- variant = AppVariantDB(
- app=app,
- user=user,
- modified_by=user,
- revision=0,
- variant_name=variant_name,
- image=image,
- base=base,
- config=config,
- base_name=base_name,
- config_name=config_name,
- parameters=parameters,
- )
- if isCloudEE():
- # assert that if organization is provided, workspace_id is also provided, and vice versa
- assert (
- organization is not None and workspace is not None
- ), "organization and workspace must be provided together"
+ async with db_engine.get_session() as session:
+ variant = AppVariantDB(
+ app_id=app.id,
+ user_id=user.id,
+ modified_by_id=user.id,
+ revision=0,
+ variant_name=variant_name,
+ image_id=image.id,
+ base_id=base.id,
+ base_name=base_name,
+ config_name=config.config_name,
+ config_parameters=config.parameters,
+ )
- variant.organization = organization
- variant.workspace = workspace
+ if isCloudEE():
+ # assert that if organization is provided, workspace is also provided, and vice versa
+ assert (
+ organization is not None and workspace is not None
+ ), "organization and workspace must be provided together"
- await variant.create()
+ variant.organization_id = uuid.UUID(organization)
+ variant.workspace_id = uuid.UUID(workspace)
- variant_revision = AppVariantRevisionsDB(
- variant=variant,
- revision=0,
- modified_by=user,
- base=base,
- config=config,
- created_at=datetime.now(timezone.utc),
- )
- await variant_revision.create()
+ session.add(variant)
+ await session.commit()
+ await session.refresh(variant, attribute_names=["app", "image", "user", "base", ]) # Ensures the app, image, user, and base relationships are loaded
+
+ variant_revision = AppVariantRevisionsDB(
+ variant_id=variant.id,
+ revision=0,
+ modified_by_id=user.id,
+ base_id=base.id,
+ config_name=config.config_name,
+ config_parameters=config.parameters,
+ )
+
+ session.add(variant_revision)
+ await session.commit()
+ await session.refresh(variant_revision)
- return variant
+ return variant
async def create_image(
@@ -428,9 +474,9 @@ async def create_image(
deletable: bool,
organization=None,
workspace=None,
- template_uri: str = None,
- docker_id: str = None,
- tags: str = None,
+ template_uri: Optional[str] = None,
+ docker_id: Optional[str] = None,
+ tags: Optional[str] = None,
) -> ImageDB:
"""Create a new image.
Args:
@@ -459,30 +505,34 @@ async def create_image(
elif image_type == "zip" and template_uri is None:
raise Exception("template_uri must be provided for type zip")
- image = ImageDB(
- deletable=deletable,
- user=user,
- )
+ async with db_engine.get_session() as session:
+ image = ImageDB(
+ deletable=deletable,
+ user_id=user.id,
+ )
- if image_type == "zip":
- image.type = "zip"
- image.template_uri = template_uri
- elif image_type == "image":
- image.type = "image"
- image.tags = tags
- image.docker_id = docker_id
+ if image_type == "zip":
+ image.type = "zip" # type: ignore
+ image.template_uri = template_uri # type: ignore
+ elif image_type == "image":
+ image.type = "image" # type: ignore
+ image.tags = tags # type: ignore
+ image.docker_id = docker_id # type: ignore
- if isCloudEE():
- # assert that if organization is provided, workspace_id is also provided, and vice versa
- assert (
- organization is not None and workspace is not None
- ), "organization and workspace must be provided together"
+ if isCloudEE():
+ # assert that if organization is provided, workspace is also provided, and vice versa
+ assert (
+ organization is not None and workspace is not None
+ ), "organization and workspace must be provided together"
- image.organization = organization
- image.workspace = workspace
+ image.organization_id = uuid.UUID(organization)
+ image.workspace_id = uuid.UUID(workspace)
+
+ session.add(image)
+ await session.commit()
+ await session.refresh(image)
- await image.create()
- return image
+ return image
async def create_deployment(
@@ -508,31 +558,36 @@ async def create_deployment(
Returns:
DeploymentDB: The created deployment.
"""
- try:
- deployment = DeploymentDB(
- app=app,
- user=user,
- container_name=container_name,
- container_id=container_id,
- uri=uri,
- status=status,
- )
- if isCloudEE():
- deployment.organization = organization
- deployment.workspace = workspace
+ async with db_engine.get_session() as session:
+ try:
+ deployment = DeploymentDB(
+ app=app,
+ user=user,
+ container_name=container_name,
+ container_id=container_id,
+ uri=uri,
+ status=status,
+ )
- await deployment.create()
- return deployment
- except Exception as e:
- raise Exception(f"Error while creating deployment: {e}")
+ if isCloudEE():
+ deployment.organization_id = uuid.UUID(organization)
+ deployment.workspace_id = uuid.UUID(workspace)
+
+ session.add(deployment)
+ await session.commit()
+ await session.refresh(deployment)
+
+ return deployment
+ except Exception as e:
+ raise Exception(f"Error while creating deployment: {e}")
async def create_app_and_envs(
app_name: str,
user_uid: str,
- organization_id: str = None,
- workspace_id: str = None,
+ organization_id: Optional[str] = None,
+ workspace_id: Optional[str] = None,
) -> AppDB:
"""
Create a new app with the given name and organization ID.
@@ -550,7 +605,7 @@ async def create_app_and_envs(
ValueError: If an app with the same name already exists.
"""
- user_instance = await get_user(user_uid)
+ user = await get_user(user_uid)
app = await fetch_app_by_name_and_parameters(
app_name,
user_uid,
@@ -560,23 +615,27 @@ async def create_app_and_envs(
if app is not None:
raise ValueError("App with the same name already exists")
- app = AppDB(app_name=app_name, user=user_instance)
+ async with db_engine.get_session() as session:
+ app = AppDB(app_name=app_name, user_id=user.id)
- if isCloudEE():
- # assert that if organization_id is provided, workspace_id is also provided, and vice versa
- assert (
- organization_id is not None and workspace_id is not None
- ), "org_id and workspace_id must be provided together"
+ if isCloudEE():
+ # assert that if organization_id is provided, workspace_id is also provided, and vice versa
+ assert (
+ organization_id is not None and workspace_id is not None
+ ), "org_id and workspace_id must be provided together"
+
+ organization_db = await db_manager_ee.get_organization(organization_id) # type: ignore
+ workspace_db = await db_manager_ee.get_workspace(workspace_id) # type: ignore
- organization_db = await db_manager_ee.get_organization(organization_id)
- workspace_db = await db_manager_ee.get_workspace(workspace_id)
+ app.organization_id = organization_db.id
+ app.workspace_id = workspace_db.id
- app.organization = organization_db
- app.workspace = workspace_db
+ session.add(app)
+ await session.commit()
+ await session.refresh(app)
- await app.create()
- await initialize_environments(app)
- return app
+ await initialize_environments(session=session, app_db=app)
+ return app
async def get_deployment_by_objectid(
@@ -585,17 +644,19 @@ async def get_deployment_by_objectid(
"""Get the deployment object from the database with the provided id.
Arguments:
- deployment_id (ObjectId): The deployment id
+ deployment_id (str): The deployment id
Returns:
DeploymentDB: instance of deployment object
"""
- deployment = await DeploymentDB.find_one(
- DeploymentDB.id == ObjectId(deployment_id), fetch_links=True
- )
- logger.debug(f"deployment: {deployment}")
- return deployment
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(DeploymentDB).filter_by(id=uuid.UUID(deployment_id))
+ )
+ deployment = result.scalars().one_or_none()
+ logger.debug(f"deployment: {deployment}")
+ return deployment
async def get_deployment_by_appid(app_id: str) -> DeploymentDB:
@@ -608,16 +669,18 @@ async def get_deployment_by_appid(app_id: str) -> DeploymentDB:
DeploymentDB: instance of deployment object
"""
- deployment = await DeploymentDB.find_one(
- DeploymentDB.app.id == ObjectId(app_id), fetch_links=True
- )
- logger.debug(f"deployment: {deployment}")
- return deployment
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(DeploymentDB).filter_by(app_id=uuid.UUID(app_id))
+ )
+ deployment = result.scalars().one_or_none()
+ logger.debug(f"deployment: {deployment}")
+ return deployment
async def list_app_variants_for_app_id(
app_id: str,
-) -> List[AppVariantDB]:
+):
"""
Lists all the app variants from the db
Args:
@@ -625,16 +688,17 @@ async def list_app_variants_for_app_id(
Returns:
List[AppVariant]: List of AppVariant objects
"""
+
assert app_id is not None, "app_id cannot be None"
- app_variants_db = await AppVariantDB.find(
- AppVariantDB.app.id == ObjectId(app_id), fetch_links=True
- ).to_list()
- return app_variants_db
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB).filter_by(app_id=uuid.UUID(app_id))
+ )
+ app_variants = result.scalars().all()
+ return app_variants
-async def list_bases_for_app_id(
- app_id: str, base_name: Optional[str] = None
-) -> List[VariantBaseDB]:
+async def list_bases_for_app_id(app_id: str, base_name: Optional[str] = None):
"""List all the bases for the specified app_id
Args:
@@ -646,14 +710,17 @@ async def list_bases_for_app_id(
"""
assert app_id is not None, "app_id cannot be None"
- base_query = VariantBaseDB.find(VariantBaseDB.app.id == ObjectId(app_id))
- if base_name:
- base_query = base_query.find(VariantBaseDB.base_name == base_name)
- bases_db = await base_query.sort("base_name").to_list()
- return bases_db
+ async with db_engine.get_session() as session:
+ query = select(VariantBaseDB).filter_by(app_id=uuid.UUID(app_id))
+ if base_name:
+ query = query.filter_by(base_name=base_name)
+
+ result = await session.execute(query.order_by(VariantBaseDB.base_name.asc()))
+ bases = result.scalars().all()
+ return bases
-async def list_variants_for_base(base: VariantBaseDB) -> List[AppVariantDB]:
+async def list_variants_for_base(base: VariantBaseDB):
"""
Lists all the app variants from the db for a base
Args:
@@ -661,13 +728,16 @@ async def list_variants_for_base(base: VariantBaseDB) -> List[AppVariantDB]:
Returns:
List[AppVariant]: List of AppVariant objects
"""
+
assert base is not None, "base cannot be None"
- app_variants_db = (
- await AppVariantDB.find(AppVariantDB.base.id == ObjectId(base.id))
- .sort("variant_name")
- .to_list()
- )
- return app_variants_db
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB)
+ .filter_by(base_id=base.id)
+ .order_by(AppVariantDB.variant_name.asc())
+ )
+ app_variants = result.scalars().all()
+ return app_variants
async def get_user(user_uid: str) -> UserDB:
@@ -680,24 +750,31 @@ async def get_user(user_uid: str) -> UserDB:
UserDB: instance of user
"""
- user = await UserDB.find_one(UserDB.uid == user_uid)
- if user is None:
- if not isCloudEE():
- # create user
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(UserDB).filter_by(uid=user_uid))
+ user = result.scalars().one_or_none()
+
+ if user is None and isCloudEE():
+ raise Exception("Please login or signup")
+
+ if user is None and not isCloudEE():
user_db = UserDB(uid="0")
- user = await user_db.create()
- return user
- raise Exception("Please login or signup")
- return user
+ session.add(user_db)
+ await session.commit()
+ await session.refresh(user_db)
+
+ return user_db
+
+ return user
-async def get_user_with_id(user_id: ObjectId):
+async def get_user_with_id(user_id: str):
"""
Retrieves a user from a database based on their ID.
Args:
- user_id (ObjectId): The ID of the user to retrieve from the database.
+ user_id (str): The ID of the user to retrieve from the database.
Returns:
user: The user object retrieved from the database.
@@ -705,12 +782,14 @@ async def get_user_with_id(user_id: ObjectId):
Raises:
Exception: If an error occurs while getting the user from the database.
"""
- try:
- user = await UserDB.find_one(UserDB.id == user_id)
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(UserDB).filter_by(id=uuid.UUID(user_id)))
+ user = result.scalars().one_or_none()
+ if user is None:
+ logger.error("Failed to get user with id")
+ raise Exception("Error while getting user")
return user
- except Exception as e:
- logger.error(f"Failed to get user with id: {e}")
- raise Exception(f"Error while getting user: {e}")
async def get_user_with_email(email: str):
@@ -730,18 +809,20 @@ async def get_user_with_email(email: str):
Example Usage:
user = await get_user_with_email('example@example.com')
"""
+
if "@" not in email:
raise Exception("Please provide a valid email address")
- try:
- user = await UserDB.find_one(UserDB.email == email)
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(UserDB).filter_by(email=email))
+ user = result.scalars().one_or_none()
+ if user is None:
+ logger.error("Failed to get user with email address")
+ raise Exception("Error while getting user")
return user
- except Exception as e:
- logger.error(f"Failed to get user with email address: {e}")
- raise Exception(f"Error while getting user: {e}")
-async def get_users_by_ids(user_ids: List) -> List:
+async def get_users_by_ids(user_ids: List):
"""
Retrieve users from the database by their IDs.
@@ -752,76 +833,83 @@ async def get_users_by_ids(user_ids: List) -> List:
List: A list of dictionaries representing the retrieved users.
"""
- users_db = await UserDB.find(In(UserDB.id, user_ids)).to_list()
- return users_db
+ async with db_engine.get_session() as session:
+ user_uids = [uuid.UUID(user_id) for user_id in user_ids]
+ result = await session.execute(select(UserDB).where(UserDB.id.in_(user_uids)))
+ users = result.scalars().all()
+ return users
async def get_orga_image_instance_by_docker_id(
- docker_id: str, organization_id: str = None, workspace_id: str = None
+ docker_id: str,
+ organization_id: Optional[str] = None,
+ workspace_id: Optional[str] = None,
) -> ImageDB:
"""Get the image object from the database with the provided id.
Arguments:
- organization_id (str): The orga unique identifier
+ organization_id (str): The organization unique identifier
docker_id (str): The image id
Returns:
ImageDB: instance of image object
"""
- query_expression = {"docker_id": docker_id}
+ async with db_engine.get_session() as session:
+ query = select(ImageDB).filter_by(docker_id=docker_id)
- if isCloudEE():
- # assert that if organization is provided, workspace_id is also provided, and vice versa
- assert (
- organization_id is not None and workspace_id is not None
- ), "organization and workspace must be provided together"
-
- query_expression.update(
- {
- "organization.id": ObjectId(organization_id),
- "workspace.id": ObjectId(workspace_id),
- }
- )
+ if isCloudEE():
+ # assert that if organization is provided, workspace_id is also provided, and vice versa
+ assert (
+ organization_id is not None and workspace_id is not None
+ ), "organization and workspace must be provided together"
+
+ query = query.filter_by(
+ organization_id=uuid.UUID(organization_id),
+ workspace_id=uuid.UUID(workspace_id),
+ )
- image = await ImageDB.find_one(query_expression)
- return image
+ result = await session.execute(query)
+ image = result.scalars().one_or_none()
+ return image
async def get_orga_image_instance_by_uri(
- template_uri: str, organization_id: str = None, workspace_id: str = None
+ template_uri: str,
+ organization_id: Optional[str] = None,
+ workspace_id: Optional[str] = None,
) -> ImageDB:
"""Get the image object from the database with the provided id.
Arguments:
- organization_id (str): The orga unique identifier
+ organization_id (str): The organization unique identifier
template_uri (url): The image template url
Returns:
ImageDB: instance of image object
"""
- parsed_url = urlparse(template_uri)
+ parsed_url = urlparse(template_uri)
if not parsed_url.scheme and not parsed_url.netloc:
raise ValueError(f"Invalid URL: {template_uri}")
- query_expression = {"template_uri": template_uri}
+ async with db_engine.get_session() as session:
+ query = select(ImageDB).filter_by(template_uri=template_uri)
- if isCloudEE():
- # assert that if organization is provided, workspace_id is also provided, and vice versa
- assert (
- organization_id is not None and workspace_id is not None
- ), "organization and workspace must be provided together"
-
- query_expression.update(
- {
- "organization.id": ObjectId(organization_id),
- "workspace.id": ObjectId(workspace_id),
- }
- )
+ if isCloudEE():
+ # assert that if organization is provided, workspace_id is also provided, and vice versa
+ assert (
+ organization_id is not None and workspace_id is not None
+ ), "organization and workspace must be provided together"
+
+ query = query.filter_by(
+ organization_id=uuid.UUID(organization_id),
+ workspace_id=uuid.UUID(workspace_id),
+ )
- image = await ImageDB.find_one(query_expression)
- return image
+ result = await session.execute(query)
+ image = result.scalars().one_or_none()
+ return image
async def get_app_instance_by_id(app_id: str) -> AppDB:
@@ -834,8 +922,10 @@ async def get_app_instance_by_id(app_id: str) -> AppDB:
AppDB: instance of app object
"""
- app = await AppDB.find_one(AppDB.id == ObjectId(app_id))
- return app
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(id=uuid.UUID(app_id)))
+ app = result.scalars().one_or_none()
+ return app
async def add_variant_from_base_and_config(
@@ -843,7 +933,7 @@ async def add_variant_from_base_and_config(
new_config_name: str,
parameters: Dict[str, Any],
user_uid: str,
-):
+) -> AppVariantDB:
"""
Add a new variant to the database based on an existing base and a new configuration.
@@ -856,68 +946,67 @@ async def add_variant_from_base_and_config(
Returns:
AppVariantDB: The newly created app variant.
"""
+
new_variant_name = f"{base_db.base_name}.{new_config_name}"
previous_app_variant_db = await find_previous_variant_from_base_id(str(base_db.id))
if previous_app_variant_db is None:
logger.error("Failed to find the previous app variant in the database.")
- raise HTTPException(status_code=500, detail="Previous app variant not found")
+ raise HTTPException(status_code=404, detail="Previous app variant not found")
+
logger.debug(f"Located previous variant: {previous_app_variant_db}")
app_variant_for_base = await list_variants_for_base(base_db)
already_exists = any(
- av for av in app_variant_for_base if av.config_name == new_config_name
+ av for av in app_variant_for_base if av.config_name == new_config_name # type: ignore
)
if already_exists:
raise ValueError("App variant with the same name already exists")
+
user_db = await get_user(user_uid)
- config_db = ConfigDB(
- config_name=new_config_name,
- parameters=parameters,
- )
+ async with db_engine.get_session() as session:
+ db_app_variant = AppVariantDB(
+ app_id=previous_app_variant_db.app_id,
+ variant_name=new_variant_name,
+ image_id=base_db.image_id,
+ user_id=user_db.id,
+ modified_by_id=user_db.id,
+ revision=1,
+ base_name=base_db.base_name,
+ base_id=base_db.id,
+ config_name=new_config_name,
+ config_parameters=parameters,
+ )
- # Prefetch image in base_db
- await base_db.fetch_link(VariantBaseDB.image)
+ if isCloudEE():
+ db_app_variant.organization_id = previous_app_variant_db.organization_id
+ db_app_variant.workspace_id = previous_app_variant_db.workspace_id
- db_app_variant = AppVariantDB(
- app=previous_app_variant_db.app,
- variant_name=new_variant_name,
- image=base_db.image,
- user=user_db,
- modified_by=user_db,
- revision=1,
- parameters=parameters,
- previous_variant_name=previous_app_variant_db.variant_name, # TODO: Remove in future
- base_name=base_db.base_name,
- base=base_db,
- config_name=new_config_name,
- config=config_db,
- is_deleted=False,
- )
+ session.add(db_app_variant)
+ await session.commit()
+ await session.refresh(db_app_variant)
- if isCloudEE():
- db_app_variant.organization = previous_app_variant_db.organization
- db_app_variant.workspace = previous_app_variant_db.workspace
-
- await db_app_variant.create()
- variant_revision = AppVariantRevisionsDB(
- variant=db_app_variant,
- revision=1,
- modified_by=user_db,
- base=base_db,
- config=config_db,
- created_at=datetime.now(timezone.utc),
- )
- await variant_revision.create()
+ variant_revision = AppVariantRevisionsDB(
+ variant_id=db_app_variant.id,
+ revision=1,
+ modified_by_id=user_db.id,
+ base_id=base_db.id,
+ config_name=new_config_name,
+ config_parameters=parameters,
+ )
- return db_app_variant
+ session.add(variant_revision)
+ await session.commit()
+ await session.refresh(variant_revision)
+
+ return db_app_variant
async def list_apps(
user_uid: str,
- app_name: str = None,
- org_id: str = None,
- workspace_id: str = None,
-) -> List[App]:
+ app_name: Optional[str] = None,
+ org_id: Optional[str] = None,
+ workspace_id: Optional[str] = None,
+):
"""
Lists all the unique app names and their IDs from the database
@@ -940,47 +1029,52 @@ async def list_apps(
)
return [converters.app_db_to_pydantic(app_db)]
- elif (org_id is not None) or (workspace_id is not None):
+ elif org_id is not None or workspace_id is not None:
if not isCloudEE():
- return JSONResponse(
- {
+ raise HTTPException(
+ status_code=400,
+ detail={
"error": "organization and/or workspace is only available in Cloud and EE"
},
- status_code=400,
)
# assert that if org_id is provided, workspace_id is also provided, and vice versa
assert (
org_id is not None and workspace_id is not None
), "org_id and workspace_id must be provided together"
-
- user_org_workspace_data = await get_user_org_and_workspace_id(user_uid)
- has_permission = await check_rbac_permission(
- user_org_workspace_data=user_org_workspace_data,
- workspace_id=ObjectId(workspace_id),
- organization_id=ObjectId(org_id),
- permission=Permission.VIEW_APPLICATION,
- )
- logger.debug(f"User has Permission to list apps: {has_permission}")
- if not has_permission:
- error_msg = f"You do not have access to perform this action. Please contact your organization admin."
- return JSONResponse(
- {"detail": error_msg},
- status_code=403,
+ if isCloudEE():
+ user_org_workspace_data = await get_user_org_and_workspace_id(user_uid) # type: ignore
+ has_permission = await check_rbac_permission( # type: ignore
+ user_org_workspace_data=user_org_workspace_data,
+ workspace_id=uuid.UUID(workspace_id),
+ organization_id=uuid.UUID(org_id),
+ permission=Permission.VIEW_APPLICATION, # type: ignore
)
-
- apps: List[AppDB] = await AppDB.find(
- AppDB.organization.id == ObjectId(org_id),
- AppDB.workspace.id == ObjectId(workspace_id),
- ).to_list()
- return [converters.app_db_to_pydantic(app) for app in apps]
+ logger.debug(f"User has Permission to list apps: {has_permission}")
+ if not has_permission:
+ raise HTTPException(
+ status_code=403,
+ detail="You do not have access to perform this action. Please contact your organization admin.",
+ )
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(
+ organization_id=uuid.UUID(org_id),
+ workspace_id=uuid.UUID(workspace_id),
+ )
+ )
+ apps = result.scalars().all()
+ return [converters.app_db_to_pydantic(app) for app in apps]
else:
- apps = await AppDB.find(AppDB.user.id == user.id).to_list()
- return [converters.app_db_to_pydantic(app) for app in apps]
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(user_id=user.id))
+ apps = result.scalars().all()
+ return [converters.app_db_to_pydantic(app) for app in apps]
-async def list_app_variants(app_id: str) -> List[AppVariantDB]:
+async def list_app_variants(app_id: str):
"""
Lists all the app variants from the db
Args:
@@ -989,35 +1083,42 @@ async def list_app_variants(app_id: str) -> List[AppVariantDB]:
List[AppVariant]: List of AppVariant objects
"""
- # Construct query expressions
- app_variants_db = await AppVariantDB.find(
- AppVariantDB.app.id == ObjectId(app_id), fetch_links=True
- ).to_list()
- return app_variants_db
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB)
+ .options(joinedload(AppVariantDB.app), joinedload(AppVariantDB.base))
+ .filter_by(app_id=uuid.UUID(app_id))
+ )
+ app_variants = result.scalars().all()
+ return app_variants
async def check_is_last_variant_for_image(db_app_variant: AppVariantDB) -> bool:
- """Checks whether the input variant is the sole variant that uses its linked image
- This is a helpful function to determine whether to delete the image when removing a variant
- Usually many variants will use the same image (these variants would have been created using the UI)
- We only delete the image and shutdown the container if the variant is the last one using the image
+ """Checks whether the input variant is the sole variant that uses its linked image.
+
+ NOTE: This is a helpful function to determine whether to delete the image when removing a variant. Usually many variants will use the same image (these variants would have been created using the UI). We only delete the image and shutdown the container if the variant is the last one using the image
Arguments:
app_variant -- AppVariant to check
+
Returns:
true if it's the last variant, false otherwise
"""
- query = AppVariantDB.find(AppVariantDB.base.id == ObjectId(db_app_variant.base.id))
+ async with db_engine.get_session() as session:
+ query = select(AppVariantDB).filter_by(base_id=db_app_variant.base_id)
- if isCloudEE():
- query = query.find(
- AppVariantDB.organization.id == db_app_variant.organization.id,
- AppVariantDB.workspace.id == db_app_variant.workspace.id,
- )
+ if isCloudEE():
+ query = query.filter(
+ AppVariantDB.organization_id == db_app_variant.organization_id,
+ AppVariantDB.workspace_id == db_app_variant.workspace_id,
+ )
- count_variants = await query.count()
- return count_variants == 1
+ count_result = await session.execute(
+ query.with_only_columns(func.count()) # type: ignore
+ )
+ count_variants = count_result.scalar()
+ return count_variants == 1
async def remove_deployment(deployment_db: DeploymentDB):
@@ -1026,13 +1127,37 @@ async def remove_deployment(deployment_db: DeploymentDB):
Arguments:
deployment -- Deployment to remove
"""
+
logger.debug("Removing deployment")
assert deployment_db is not None, "deployment_db is missing"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(DeploymentDB).filter_by(id=deployment_db.id)
+ )
+ deployment = result.scalars().one_or_none()
+ if not deployment:
+ raise NoResultFound(f"Deployment with {str(deployment_db.id)} not found")
- deployment = await DeploymentDB.find_one(
- DeploymentDB.id == ObjectId(deployment_db.id)
- )
- await deployment.delete()
+ await session.delete(deployment)
+ await session.commit()
+
+
+async def list_deployments(app_id: str):
+ """Lists all the deployments that belongs to an app.
+
+ Args:
+ app_id (str): The ID of the app
+
+ Returns:
+ a list/sequence of all the deployments that were retrieved
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(DeploymentDB).filter_by(app_id=uuid.UUID(app_id))
+ )
+ environments = result.scalars().all()
+ return environments
async def remove_app_variant_from_db(app_variant_db: AppVariantDB):
@@ -1042,28 +1167,33 @@ async def remove_app_variant_from_db(app_variant_db: AppVariantDB):
Arguments:
app_variant -- AppVariant to remove
"""
+
logger.debug("Removing app variant")
assert app_variant_db is not None, "app_variant_db is missing"
- # Remove the variant from the associated environments
- logger.debug("list_environments_by_variant")
- environments = await list_environments_by_variant(app_variant_db)
- for environment in environments:
- environment.deployed_app_variant = None
- await environment.save()
-
+ logger.debug("list_app_variants_revisions_by_variant")
app_variant_revisions = await list_app_variant_revisions_by_variant(app_variant_db)
- for app_variant_revision in app_variant_revisions:
- await app_variant_revision.delete()
- app_variant = await AppVariantDB.find_one(
- AppVariantDB.id == ObjectId(app_variant_db.id)
- )
- await app_variant.delete()
+ async with db_engine.get_session() as session:
+ logger.debug("list_environments_by_variant")
+ environments = await list_environments_by_variant(session, app_variant_db)
+
+ # Remove the variant from the associated environments
+ for environment in environments:
+            environment.deployed_app_variant_id = None
+ await session.commit()
+
+ # Delete all the revisions associated with the variant
+ for app_variant_revision in app_variant_revisions:
+ await session.delete(app_variant_revision)
+
+ # Delete app variant and commit action to database
+ await session.delete(app_variant_db)
+ await session.commit()
async def deploy_to_environment(
- environment_name: str, variant_id: str, **user_org_data: dict
+ environment_name: str, variant_id: str, **user_org_data
):
"""
Deploys an app variant to a specified environment.
@@ -1081,43 +1211,51 @@ async def deploy_to_environment(
app_variant_db = await fetch_app_variant_by_id(variant_id)
app_variant_revision_db = await fetch_app_variant_revision_by_variant(
- app_variant_id=variant_id, revision=app_variant_db.revision
+ app_variant_id=variant_id, revision=app_variant_db.revision # type: ignore
)
if app_variant_db is None:
raise ValueError("App variant not found")
- # Find the environment for the given app name and user
- environment_db = await AppEnvironmentDB.find_one(
- AppEnvironmentDB.app.id == app_variant_db.app.id,
- AppEnvironmentDB.name == environment_name,
- )
-
- if environment_db is None:
- raise ValueError(f"Environment {environment_name} not found")
- # TODO: Modify below to add logic to disable redployment of the same variant revision here and in frontend
- # if environment_db.deployed_app_variant_ == app_variant_db.id:
- # raise ValueError(
- # f"Variant {app_variant_db.app.app_name}/{app_variant_db.variant_name} is already deployed to the environment {environment_name}"
- # )
-
# Retrieve app deployment
- deployment = await get_deployment_by_appid(str(app_variant_db.app.id))
-
- # Update the environment with the new variant name
- environment_db.revision += 1
- environment_db.deployed_app_variant = app_variant_db.id
- environment_db.deployed_app_variant_revision = app_variant_revision_db
- environment_db.deployment = deployment.id
+ deployment = await get_deployment_by_appid(str(app_variant_db.app_id))
- # Create revision for app environment
+ # Retrieve user
+ assert "user_uid" in user_org_data, "User uid is required"
user = await get_user(user_uid=user_org_data["user_uid"])
- await create_environment_revision(
- environment_db,
- user,
- deployed_app_variant_revision=app_variant_revision_db.id,
- deployment=deployment.id,
- )
- await environment_db.save()
+
+ async with db_engine.get_session() as session:
+ # Find the environment for the given app name and user
+ result = await session.execute(
+ select(AppEnvironmentDB).filter_by(
+ app_id=app_variant_db.app_id, name=environment_name
+ )
+ )
+ environment_db = result.scalars().one_or_none()
+ if environment_db is None:
+ raise ValueError(f"Environment {environment_name} not found")
+
+ # TODO: Modify below to add logic to disable redeployment of the same variant revision here and in front-end
+ # if environment_db.deployed_app_variant_ == app_variant_db.id:
+ # raise ValueError(
+ # f"Variant {app_variant_db.app.app_name}/{app_variant_db.variant_name} is already deployed to the environment {environment_name}"
+ # )
+
+ # Update the environment with the new variant name
+ environment_db.revision += 1 # type: ignore
+ environment_db.deployed_app_variant_id = app_variant_db.id
+ environment_db.deployed_app_variant_revision_id = app_variant_revision_db.id
+ environment_db.deployment_id = deployment.id
+
+ # Create revision for app environment
+ await create_environment_revision(
+ session,
+ environment_db,
+ user,
+ deployed_app_variant_revision=app_variant_revision_db,
+ deployment=deployment,
+ )
+
+ await session.commit()
async def fetch_app_environment_by_name_and_appid(
@@ -1133,12 +1271,14 @@ async def fetch_app_environment_by_name_and_appid(
AppEnvironmentDB: app environment object
"""
- app_environment = await AppEnvironmentDB.find_one(
- AppEnvironmentDB.app.id == ObjectId(app_id),
- AppEnvironmentDB.name == environment_name,
- fetch_links=True,
- )
- return app_environment
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppEnvironmentDB).filter_by(
+ app_id=uuid.UUID(app_id), name=environment_name
+ )
+ )
+ app_environment = result.scalars().one_or_none()
+ return app_environment
async def fetch_app_variant_revision_by_id(
@@ -1153,10 +1293,13 @@ async def fetch_app_variant_revision_by_id(
AppVariantRevisionsDB: app variant revision object
"""
- app_revision = await AppVariantRevisionsDB.find_one(
- AppVariantRevisionsDB.id == ObjectId(variant_revision_id),
- )
- return app_revision
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantRevisionsDB)
+ .filter_by(id=uuid.UUID(variant_revision_id))
+ )
+ app_revision = result.scalars().one_or_none()
+ return app_revision
async def fetch_environment_revisions_for_environment(
@@ -1172,10 +1315,12 @@ async def fetch_environment_revisions_for_environment(
List[AppEnvironmentRevisionDB]: A list of AppEnvironmentRevisionDB objects.
"""
- environment_revisions = await AppEnvironmentRevisionDB.find(
- AppEnvironmentRevisionDB.environment.id == environment.id, fetch_links=True
- ).to_list()
- return environment_revisions
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppEnvironmentRevisionDB).filter_by(environment_id=environment.id)
+ )
+ environment_revisions = result.scalars().all()
+ return environment_revisions
async def fetch_app_environment_revision(revision_id: str) -> AppEnvironmentRevisionDB:
@@ -1185,10 +1330,12 @@ async def fetch_app_environment_revision(revision_id: str) -> AppEnvironmentRevi
revision_id (str): The ID of the revision
"""
- environment_revision = await AppEnvironmentRevisionDB.find_one(
- AppEnvironmentRevisionDB.id == ObjectId(revision_id), fetch_links=True
- )
- return environment_revision
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppEnvironmentRevisionDB).filter_by(id=uuid.UUID(revision_id))
+ )
+        environment_revision = result.scalars().one_or_none()
+ return environment_revision
async def update_app_environment(
@@ -1201,7 +1348,13 @@ async def update_app_environment(
values_to_update (dict): the values to update with
"""
- await app_environment.update({"$set": values_to_update})
+ async with db_engine.get_session() as session:
+ for key, value in values_to_update.items():
+ if hasattr(app_environment, key):
+ setattr(app_environment, key, value)
+
+ await session.commit()
+ await session.refresh(app_environment)
async def update_app_environment_deployed_variant_revision(
@@ -1214,17 +1367,25 @@ async def update_app_environment_deployed_variant_revision(
deployed_variant_revision (str): the ID of the deployed variant revision
"""
- app_variant_revision = await AppVariantRevisionsDB.find_one(
- AppVariantRevisionsDB.id == ObjectId(deployed_variant_revision)
- )
- if app_variant_revision is None:
- raise Exception(f"App variant revision {deployed_variant_revision} not found")
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantRevisionsDB).filter_by(
+ id=uuid.UUID(deployed_variant_revision)
+ )
+ )
+ app_variant_revision = result.scalars().one_or_none()
+ if app_variant_revision is None:
+ raise Exception(
+ f"App variant revision {deployed_variant_revision} not found"
+ )
- app_environment.deployed_app_variant_revision = app_variant_revision
- await app_environment.save()
+ app_environment.deployed_app_variant_revision = app_variant_revision
+
+ await session.commit()
+ await session.refresh(app_environment)
-async def list_environments(app_id: str, **kwargs: dict) -> List[AppEnvironmentDB]:
+async def list_environments(app_id: str, **kwargs: dict):
"""
List all environments for a given app ID.
@@ -1234,19 +1395,24 @@ async def list_environments(app_id: str, **kwargs: dict) -> List[AppEnvironmentD
Returns:
List[AppEnvironmentDB]: A list of AppEnvironmentDB objects representing the environments for the given app ID.
"""
+
logging.debug("Listing environments for app %s", app_id)
app_instance = await fetch_app_by_id(app_id=app_id)
if app_instance is None:
logging.error(f"App with id {app_id} not found")
raise ValueError("App not found")
- environments_db = await AppEnvironmentDB.find(
- AppEnvironmentDB.app.id == ObjectId(app_id), fetch_links=True
- ).to_list()
- return environments_db
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppEnvironmentDB).filter_by(app_id=uuid.UUID(app_id))
+ )
+ environments_db = result.scalars().all()
+ return environments_db
-async def initialize_environments(app_db: AppDB) -> List[AppEnvironmentDB]:
+async def initialize_environments(
+ session: AsyncSession, app_db: AppDB
+) -> List[AppEnvironmentDB]:
"""
Initializes the environments for the app with the given database.
@@ -1256,14 +1422,17 @@ async def initialize_environments(app_db: AppDB) -> List[AppEnvironmentDB]:
Returns:
List[AppEnvironmentDB]: A list of the initialized environments.
"""
+
environments = []
for env_name in ["development", "staging", "production"]:
- env = await create_environment(name=env_name, app_db=app_db)
+ env = await create_environment(session=session, name=env_name, app_db=app_db)
environments.append(env)
return environments
-async def create_environment(name: str, app_db: AppDB) -> AppEnvironmentDB:
+async def create_environment(
+ session: AsyncSession, name: str, app_db: AppDB
+) -> AppEnvironmentDB:
"""
Creates a new environment in the database.
@@ -1274,20 +1443,24 @@ async def create_environment(name: str, app_db: AppDB) -> AppEnvironmentDB:
Returns:
AppEnvironmentDB: The newly created AppEnvironmentDB object.
"""
+
environment_db = AppEnvironmentDB(
- app=app_db, name=name, user=app_db.user, revision=0
+ app_id=app_db.id, name=name, user_id=app_db.user_id, revision=0
)
if isCloudEE():
- environment_db.organization = app_db.organization
- environment_db.workspace = app_db.workspace
+ environment_db.organization_id = app_db.organization_id
+ environment_db.workspace_id = app_db.workspace_id
+
+ session.add(environment_db)
+ await session.commit()
+ await session.refresh(environment_db)
- await environment_db.create()
return environment_db
async def create_environment_revision(
- environment: AppEnvironmentDB, user: UserDB, **kwargs: dict
+ session: AsyncSession, environment: AppEnvironmentDB, user: UserDB, **kwargs: dict
):
"""Creates a new environment revision.
@@ -1300,33 +1473,45 @@ async def create_environment_revision(
assert user is not None, "user cannot be None"
environment_revision = AppEnvironmentRevisionDB(
- environment=environment,
+ environment_id=environment.id,
revision=environment.revision,
- modified_by=user,
- created_at=datetime.now(timezone.utc),
+ modified_by_id=user.id,
)
if kwargs:
+ assert (
+ "deployed_app_variant_revision" in kwargs
+ ), "Deployed app variant revision is required"
+ assert (
+            isinstance(
+                kwargs.get("deployed_app_variant_revision"),
+                AppVariantRevisionsDB,
+            )
+ ), "Type of deployed_app_variant_revision in kwargs is not correct"
deployed_app_variant_revision = kwargs.get("deployed_app_variant_revision")
+
if deployed_app_variant_revision is not None:
- environment_revision.deployed_app_variant_revision = (
- deployed_app_variant_revision
+ environment_revision.deployed_app_variant_revision_id = ( # type: ignore
+ deployed_app_variant_revision.id # type: ignore
)
deployment = kwargs.get("deployment")
+ assert (
+            isinstance(deployment, DeploymentDB)
+ ), "Type of deployment in kwargs is not correct"
if deployment is not None:
- environment_revision.deployment = deployment
+ environment_revision.deployment_id = deployment.id # type: ignore
if isCloudEE():
- environment_revision.organization = environment.organization
- environment_revision.workspace = environment.workspace
+ environment_revision.organization_id = environment.organization_id
+ environment_revision.workspace_id = environment.workspace_id
- await environment_revision.create()
+ session.add(environment_revision)
async def list_app_variant_revisions_by_variant(
app_variant: AppVariantDB,
-) -> List[AppVariantRevisionsDB]:
+):
"""Returns list of app variant revision for the given app variant
Args:
@@ -1335,15 +1520,16 @@ async def list_app_variant_revisions_by_variant(
Returns:
List[AppVariantRevisionsDB]: A list of AppVariantRevisionsDB objects.
"""
- app_variant_revision = await AppVariantRevisionsDB.find(
- AppVariantRevisionsDB.variant.id == app_variant.id, fetch_links=True
- ).to_list()
- return app_variant_revision
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantRevisionsDB).filter_by(variant_id=app_variant.id)
+ )
+ app_variant_revisions = result.scalars().all()
+ return app_variant_revisions
-async def fetch_app_variant_revision(
- app_variant: str, revision_number: int
-) -> List[AppVariantRevisionsDB]:
+async def fetch_app_variant_revision(app_variant: str, revision_number: int):
"""Returns list of app variant revision for the given app variant
Args:
@@ -1352,30 +1538,35 @@ async def fetch_app_variant_revision(
Returns:
List[AppVariantRevisionsDB]: A list of AppVariantRevisionsDB objects.
"""
- app_variant_revision = await AppVariantRevisionsDB.find_one(
- AppVariantRevisionsDB.variant.id == ObjectId(app_variant),
- AppVariantRevisionsDB.revision == revision_number,
- fetch_links=True,
- )
- return app_variant_revision
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantRevisionsDB).filter_by(
+ variant_id=uuid.UUID(app_variant), revision=revision_number
+ )
+ )
+ app_variant_revisions = result.scalars().all()
+ return app_variant_revisions
async def list_environments_by_variant(
- app_variant: AppVariantDB,
-) -> List[AppEnvironmentDB]:
+ session: AsyncSession, app_variant: AppVariantDB,
+):
"""
Returns a list of environments for a given app variant.
Args:
+ session (AsyncSession): the current ongoing session
app_variant (AppVariantDB): The app variant to retrieve environments for.
Returns:
List[AppEnvironmentDB]: A list of AppEnvironmentDB objects.
"""
- environments_db = await AppEnvironmentDB.find(
- AppEnvironmentDB.app.id == app_variant.app.id, fetch_links=True
- ).to_list()
+ result = await session.execute(
+        select(AppEnvironmentDB).filter_by(app_id=app_variant.app_id)
+ )
+ environments_db = result.scalars().all()
return environments_db
@@ -1392,11 +1583,16 @@ async def remove_image(image: ImageDB):
Returns:
None
"""
+
if image is None:
raise ValueError("Image is None")
- image = await ImageDB.find_one(ImageDB.id == ObjectId(image.id))
- await image.delete()
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(ImageDB).filter_by(id=image.id))
+ image = result.scalars().one_or_none()
+
+ await session.delete(image)
+ await session.commit()
async def remove_environment(environment_db: AppEnvironmentDB):
@@ -1412,8 +1608,11 @@ async def remove_environment(environment_db: AppEnvironmentDB):
Returns:
None
"""
+
assert environment_db is not None, "environment_db is missing"
- await environment_db.delete()
+ async with db_engine.get_session() as session:
+ await session.delete(environment_db)
+ await session.commit()
async def remove_app_testsets(app_id: str):
@@ -1426,25 +1625,26 @@ async def remove_app_testsets(app_id: str):
int: The number of testsets deleted
"""
- # Get user object
# Find testsets owned by the app
deleted_count: int = 0
- # Build query expression
- testsets = await TestSetDB.find(
- TestSetDB.app.id == ObjectId(app_id), fetch_links=True
- ).to_list()
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(TestSetDB).filter_by(app_id=uuid.UUID(app_id))
+ )
+ testsets = result.scalars().all()
+
+ if len(testsets) == 0:
+ logger.info(f"No testsets found for app {app_id}")
+ return 0
- # Perform deletion if there are testsets to delete
- if testsets is not None:
for testset in testsets:
- await testset.delete()
+ await session.delete(testset)
deleted_count += 1
logger.info(f"{deleted_count} testset(s) deleted for app {app_id}")
- return deleted_count
- logger.info(f"No testsets found for app {app_id}")
- return 0
+ await session.commit()
+ return deleted_count
async def remove_base_from_db(base: VariantBaseDB):
@@ -1460,9 +1660,13 @@ async def remove_base_from_db(base: VariantBaseDB):
Returns:
None
"""
+
if base is None:
raise ValueError("Base is None")
- await base.delete()
+
+ async with db_engine.get_session() as session:
+ await session.delete(base)
+ await session.commit()
async def remove_app_by_id(app_id: str):
@@ -1478,10 +1682,14 @@ async def remove_app_by_id(app_id: str):
Returns:
None
"""
+
assert app_id is not None, "app_id cannot be None"
- app_instance = await fetch_app_by_id(app_id=app_id)
- assert app_instance is not None, f"app instance for {app_id} could not be found"
- await app_instance.delete()
+ app_db = await fetch_app_by_id(app_id=app_id)
+ assert app_db is not None, f"app instance for {app_id} could not be found"
+
+ async with db_engine.get_session() as session:
+ await session.delete(app_db)
+ await session.commit()
async def update_variant_parameters(
@@ -1498,34 +1706,42 @@ async def update_variant_parameters(
Raises:
ValueError: If there is an issue updating the variant parameters.
"""
+
assert app_variant_db is not None, "app_variant is missing"
assert parameters is not None, "parameters is missing"
- try:
- logging.debug("Updating variant parameters")
- user = await get_user(user_uid)
- # Update associated ConfigDB parameters and versioning
- config_db = app_variant_db.config
- config_db.parameters = parameters
- app_variant_db.revision = app_variant_db.revision + 1
- app_variant_db.modified_by = user
+ logging.debug("Updating variant parameters")
+ user = await get_user(user_uid)
- # Save updated ConfigDB
- await app_variant_db.save()
+ async with db_engine.get_session() as session:
+ try:
+ # Update associated ConfigDB parameters
+ for key, value in parameters.items():
+                if key in app_variant_db.config_parameters:
+                    app_variant_db.config_parameters[key] = value
- variant_revision = AppVariantRevisionsDB(
- variant=app_variant_db,
- revision=app_variant_db.revision,
- modified_by=user,
- base=app_variant_db.base,
- config=config_db,
- created_at=datetime.now(timezone.utc),
- )
- await variant_revision.save()
+ # ...and variant versioning
+ app_variant_db.revision += 1 # type: ignore
+ app_variant_db.modified_by_id = user.id
+
+ # Save updated ConfigDB
+ await session.commit()
+
+ variant_revision = AppVariantRevisionsDB(
+ variant_id=app_variant_db.id,
+ revision=app_variant_db.revision,
+ modified_by_id=user.id,
+ base_id=app_variant_db.base.id,
+ config_name=app_variant_db.config_name,
+ config_parameters=app_variant_db.config_parameters,
+ )
+
+ session.add(variant_revision)
+ await session.commit()
- except Exception as e:
- logging.error(f"Issue updating variant parameters: {e}")
- raise ValueError("Issue updating variant parameters")
+ except Exception as e:
+ logging.error(f"Issue updating variant parameters: {e}")
+ raise ValueError("Issue updating variant parameters")
async def get_app_variant_instance_by_id(variant_id: str) -> AppVariantDB:
@@ -1538,10 +1754,17 @@ async def get_app_variant_instance_by_id(variant_id: str) -> AppVariantDB:
AppVariantDB: instance of app variant object
"""
- app_variant_db = await AppVariantDB.find_one(
- AppVariantDB.id == ObjectId(variant_id), fetch_links=True
- )
- return app_variant_db
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB)
+ .options(
+ joinedload(AppVariantDB.base),
+ joinedload(AppVariantDB.app)
+ )
+ .filter_by(id=uuid.UUID(variant_id))
+ )
+ app_variant_db = result.scalars().one_or_none()
+ return app_variant_db
async def get_app_variant_revision_by_id(
@@ -1556,11 +1779,12 @@ async def get_app_variant_revision_by_id(
AppVariantDB: instance of app variant object
"""
- variant_revision_db = await AppVariantRevisionsDB.find_one(
- AppVariantRevisionsDB.id == ObjectId(variant_revision_id),
- fetch_links=fetch_links,
- )
- return variant_revision_db
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantRevisionsDB).filter_by(id=uuid.UUID(variant_revision_id))
+ )
+ variant_revision_db = result.scalars().one_or_none()
+ return variant_revision_db
async def fetch_testset_by_id(testset_id: str) -> Optional[TestSetDB]:
@@ -1570,23 +1794,88 @@ async def fetch_testset_by_id(testset_id: str) -> Optional[TestSetDB]:
Returns:
TestSetDB: The fetched testset, or None if no testset was found.
"""
+
assert testset_id is not None, "testset_id cannot be None"
- testset = await TestSetDB.find_one(
- TestSetDB.id == ObjectId(testset_id), fetch_links=True
- )
- return testset
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(TestSetDB).filter_by(id=uuid.UUID(testset_id))
+ )
+ testset = result.scalars().one_or_none()
+ return testset
+
+
+async def create_testset(app: AppDB, user_uid: str, testset_data: Dict[str, Any]):
+ """
+ Creates a testset.
+
+ Args:
+ app (AppDB): The app object
+        user_uid (str): The user uid
+ testset_data (dict): The data of the testset to create with
+
+ Returns:
+ returns the newly created TestsetDB
+ """
+
+ user = await get_user(user_uid=user_uid)
+ async with db_engine.get_session() as session:
+ testset_db = TestSetDB(
+ **testset_data,
+ app_id=app.id,
+ user_id=user.id
+ )
+ if isCloudEE():
+ testset_db.organization_id = app.organization_id
+ testset_db.workspace_id = app.workspace_id
+
+ session.add(testset_db)
+ await session.commit()
+ await session.refresh(testset_db)
+
+ return testset_db
+
+async def update_testset(testset_id: str, values_to_update: dict) -> None:
+ """Update a testset.
+
+ Args:
+        testset_id (str): the ID of the testset to update
+ values_to_update (dict): The values to update
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(TestSetDB).filter_by(id=uuid.UUID(testset_id))
+ )
+ testset = result.scalars().one_or_none()
+
+ # Validate keys in values_to_update and update attributes
+ valid_keys = [
+ key
+ for key in values_to_update.keys()
+ if hasattr(testset, key)
+ ]
+ for key in valid_keys:
+ setattr(testset, key, values_to_update[key])
+
+ await session.commit()
+ await session.refresh(testset)
-async def fetch_testsets_by_app_id(app_id: str) -> List[TestSetDB]:
+async def fetch_testsets_by_app_id(app_id: str):
"""Fetches all testsets for a given app.
Args:
app_id (str): The ID of the app to fetch testsets for.
Returns:
List[TestSetDB]: The fetched testsets.
"""
+
assert app_id is not None, "app_id cannot be None"
- testsets = await TestSetDB.find(TestSetDB.app.id == ObjectId(app_id)).to_list()
- return testsets
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(TestSetDB).filter_by(app_id=uuid.UUID(app_id))
+ )
+ testsets = result.scalars().all()
+ return testsets
async def fetch_evaluation_by_id(evaluation_id: str) -> Optional[EvaluationDB]:
@@ -1596,11 +1885,14 @@ async def fetch_evaluation_by_id(evaluation_id: str) -> Optional[EvaluationDB]:
Returns:
EvaluationDB: The fetched evaluation, or None if no evaluation was found.
"""
+
assert evaluation_id is not None, "evaluation_id cannot be None"
- evaluation = await EvaluationDB.find_one(
- EvaluationDB.id == ObjectId(evaluation_id), fetch_links=True
- )
- return evaluation
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ )
+ evaluation = result.scalars().one_or_none()
+ return evaluation
async def fetch_human_evaluation_by_id(
@@ -1612,11 +1904,14 @@ async def fetch_human_evaluation_by_id(
Returns:
EvaluationDB: The fetched evaluation, or None if no evaluation was found.
"""
+
assert evaluation_id is not None, "evaluation_id cannot be None"
- evaluation = await HumanEvaluationDB.find_one(
- HumanEvaluationDB.id == ObjectId(evaluation_id), fetch_links=True
- )
- return evaluation
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ )
+ evaluation = result.scalars().one_or_none()
+ return evaluation
async def fetch_evaluation_scenario_by_id(
@@ -1628,11 +1923,14 @@ async def fetch_evaluation_scenario_by_id(
Returns:
EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found.
"""
+
assert evaluation_scenario_id is not None, "evaluation_scenario_id cannot be None"
- evaluation_scenario = await EvaluationScenarioDB.find_one(
- EvaluationScenarioDB.id == ObjectId(evaluation_scenario_id, fetch_links=True)
- )
- return evaluation_scenario
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluationScenarioDB).filter_by(id=uuid.UUID(evaluation_scenario_id))
+ )
+ evaluation_scenario = result.scalars().one_or_none()
+ return evaluation_scenario
async def fetch_human_evaluation_scenario_by_id(
@@ -1644,12 +1942,16 @@ async def fetch_human_evaluation_scenario_by_id(
Returns:
EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found.
"""
+
assert evaluation_scenario_id is not None, "evaluation_scenario_id cannot be None"
- evaluation_scenario = await HumanEvaluationScenarioDB.find_one(
- HumanEvaluationScenarioDB.id == ObjectId(evaluation_scenario_id),
- fetch_links=True,
- )
- return evaluation_scenario
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationScenarioDB).filter_by(
+ id=uuid.UUID(evaluation_scenario_id)
+ )
+ )
+ evaluation_scenario = result.scalars().one_or_none()
+ return evaluation_scenario
async def fetch_human_evaluation_scenario_by_evaluation_id(
@@ -1661,12 +1963,16 @@ async def fetch_human_evaluation_scenario_by_evaluation_id(
Returns:
EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found.
"""
+
evaluation = await fetch_human_evaluation_by_id(evaluation_id)
- human_eval_scenario = await HumanEvaluationScenarioDB.find_one(
- HumanEvaluationScenarioDB.evaluation.id == ObjectId(evaluation.id),
- fetch_links=True,
- )
- return human_eval_scenario
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationScenarioDB).filter_by(
+ evaluation_id=evaluation.id # type: ignore
+ )
+ )
+ human_eval_scenario = result.scalars().one_or_none()
+ return human_eval_scenario
async def find_previous_variant_from_base_id(
@@ -1680,19 +1986,18 @@ async def find_previous_variant_from_base_id(
Returns:
Optional[AppVariantDB]: The previous variant, or None if no previous variant was found.
"""
+
assert base_id is not None, "base_id cannot be None"
- previous_variants = await AppVariantDB.find(
- AppVariantDB.base.id == ObjectId(base_id)
- ).to_list()
- logger.debug("previous_variants: %s", previous_variants)
- if len(list(previous_variants)) == 0:
- return None
- # select the variant for which previous_variant_name is None
- for previous_variant in previous_variants:
- # if previous_variant.previous_variant_name is None:
- # logger.debug("previous_variant: %s", previous_variant)
- return previous_variant # we don't care which variant do we return
- assert False, "None of the previous variants has previous_variant_name=None"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB)
+ .filter_by(base_id=uuid.UUID(base_id))
+ .order_by(AppVariantDB.created_at.desc())
+ )
+ last_variant = result.scalars().first()
+ if not last_variant:
+ return None
+ return last_variant
async def add_template(**kwargs: dict) -> str:
@@ -1714,12 +2019,14 @@ async def add_template(**kwargs: dict) -> str:
if existing_template is None:
db_template = TemplateDB(**kwargs)
+
session.add(db_template)
await session.commit()
await session.refresh(db_template)
+
return str(db_template.id)
- else:
- return str(existing_template.id)
+
+ return str(existing_template.id)
async def add_zip_template(key, value):
@@ -1733,36 +2040,42 @@ async def add_zip_template(key, value):
Returns:
template_id (Str): The Id of the created template.
"""
- existing_template = await TemplateDB.find_one(TemplateDB.name == key)
-
- if existing_template:
- # Compare existing values with new values
- if (
- existing_template.title == value.get("name")
- and existing_template.description == value.get("description")
- and existing_template.template_uri == value.get("template_uri")
- ):
- # Values are unchanged, return existing template id
- return str(existing_template.id)
- else:
- # Values are changed, delete existing template
- await existing_template.delete()
-
- # Create a new template
- template_name = key
- title = value.get("name")
- description = value.get("description")
- template_uri = value.get("template_uri")
-
- template_db_instance = TemplateDB(
- type="zip",
- name=template_name,
- title=title,
- description=description,
- template_uri=template_uri,
- )
- await template_db_instance.create()
- return str(template_db_instance.id)
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(TemplateDB).filter_by(name=key))
+ existing_template = result.scalars().one_or_none()
+ if existing_template:
+ # Compare existing values with new values
+ if (
+ existing_template.title == value.get("name")
+ and existing_template.description == value.get("description")
+ and existing_template.template_uri == value.get("template_uri")
+ ):
+ # Values are unchanged, return existing template id
+ return str(existing_template.id)
+ else:
+ # Values are changed, delete existing template
+ await session.delete(existing_template)
+
+ # Create a new template
+ template_name = key
+ title = value.get("name")
+ description = value.get("description")
+ template_uri = value.get("template_uri")
+
+ template_db_instance = TemplateDB(
+ type="zip",
+ name=template_name,
+ title=title,
+ description=description,
+ template_uri=template_uri,
+ )
+
+ session.add(template_db_instance)
+ await session.commit()
+ await session.refresh(template_db_instance)
+
+ return str(template_db_instance.id)
async def get_template(template_id: str) -> TemplateDB:
@@ -1777,8 +2090,12 @@ async def get_template(template_id: str) -> TemplateDB:
"""
assert template_id is not None, "template_id cannot be None"
- template_db = await TemplateDB.find_one(TemplateDB.id == ObjectId(template_id))
- return template_db
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(TemplateDB).filter_by(id=uuid.UUID(template_id))
+ )
+ template_db = result.scalars().one_or_none()
+ return template_db
async def remove_old_template_from_db(tag_ids: list) -> None:
@@ -1806,13 +2123,22 @@ async def remove_old_template_from_db(tag_ids: list) -> None:
await session.commit()
-async def get_templates() -> List[Template]:
- templates = await TemplateDB.find().to_list()
- return converters.templates_db_to_pydantic(templates)
+async def get_templates():
+ """
+ Gets the templates.
+
+ Returns:
+ The docker templates to create an LLM app from the UI.
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(TemplateDB))
+ templates = result.scalars().all()
+ return converters.templates_db_to_pydantic(templates) # type: ignore
async def update_base(
- base: VariantBaseDB,
+ base_id: str,
**kwargs: dict,
) -> VariantBaseDB:
"""Update the base object in the database with the provided id.
@@ -1821,12 +2147,31 @@ async def update_base(
base (VariantBaseDB): The base object to update.
"""
- for key, value in kwargs.items():
- if hasattr(base, key):
- setattr(base, key, value)
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(VariantBaseDB).filter_by(id=uuid.UUID(base_id))
+ )
+ base = result.scalars().one_or_none()
+ for key, value in kwargs.items():
+ if hasattr(base, key):
+ setattr(base, key, value)
+
+ await session.commit()
+ await session.refresh(base)
+
+ return base
- await base.save()
- return base
+
+async def remove_base(base_db: VariantBaseDB):
+ """Delete the base object in the database with the provided id.
+
+ Arguments:
+ base (VariantBaseDB): The base object to update.
+ """
+
+ async with db_engine.get_session() as session:
+ await session.delete(base_db)
+ await session.commit()
async def update_app_variant(
@@ -1838,21 +2183,24 @@ async def update_app_variant(
Arguments:
app_variant (AppVariantDB): The app variant object to update.
"""
- for key, value in kwargs.items():
- if hasattr(app_variant, key):
- setattr(app_variant, key, value)
- await app_variant.save()
- return app_variant
+ async with db_engine.get_session() as session:
+ for key, value in kwargs.items():
+ if hasattr(app_variant, key):
+ setattr(app_variant, key, value)
+
+ await session.commit()
+ await session.refresh(app_variant)
+ return app_variant
async def fetch_app_by_name_and_parameters(
app_name: str,
user_uid: str,
- organization_id: str = None,
- workspace_id: str = None,
+ organization_id: Optional[str] = None,
+ workspace_id: Optional[str] = None,
):
- """Fetch an app by its name, organization id, and workspace id.
+ """Fetch an app by its name, organization id, and workspace_id.
Args:
app_name (str): The name of the app
@@ -1863,39 +2211,38 @@ async def fetch_app_by_name_and_parameters(
AppDB: the instance of the app
"""
- query_expression = {"app_name": app_name}
+ async with db_engine.get_session() as session:
+ base_query = select(AppDB).filter_by(app_name=app_name)
- if isCloudEE():
- # assert that if organization is provided, workspace_id is also provided, and vice versa
- assert (
- organization_id is not None and workspace_id is not None
- ), "organization_id and workspace_id must be provided together"
-
- query_expression.update(
- {
- "organization.id": ObjectId(organization_id),
- "workspace.id": ObjectId(workspace_id),
- }
- )
- else:
- query_expression.update(
- {
- "user.uid": user_uid,
- }
- )
+ if isCloudEE():
+ # assert that if organization is provided, workspace_id is also provided, and vice versa
+ assert (
+ organization_id is not None and workspace_id is not None
+ ), "organization_id and workspace_id must be provided together"
+
+ query = base_query.filter_by(
+ organization_id=uuid.UUID(organization_id),
+ workspace_id=uuid.UUID(workspace_id),
+ )
+ else:
+ query = (
+ base_query.join(UserDB)
+ .filter(UserDB.uid == user_uid)
+ .options(selectinload(AppDB.user))
+ )
- app_db = await AppDB.find_one(query_expression, fetch_links=True)
- return app_db
+ result = await session.execute(query)
+ app_db = result.scalars().one_or_none()
+ return app_db
async def create_new_evaluation(
app: AppDB,
user: UserDB,
testset: TestSetDB,
- status: str,
+ status: Result,
variant: str,
variant_revision: str,
- evaluators_configs: List[str],
organization=None,
workspace=None,
) -> EvaluationDB:
@@ -1903,30 +2250,31 @@ async def create_new_evaluation(
Returns:
EvaluationScenarioDB: The created evaluation scenario.
"""
- evaluation = EvaluationDB(
- app=app,
- user=user,
- testset=testset,
- status=status,
- variant=variant,
- variant_revision=variant_revision,
- evaluators_configs=evaluators_configs,
- aggregated_results=[],
- created_at=datetime.now(timezone.utc).isoformat(),
- updated_at=datetime.now(timezone.utc).isoformat(),
- )
- if isCloudEE():
- # assert that if organization is provided, workspace is also provided, and vice versa
- assert (
- organization is not None and workspace is not None
- ), "organization and workspace must be provided together"
+ async with db_engine.get_session() as session:
+ evaluation = EvaluationDB(
+ app_id=app.id,
+ user_id=user.id,
+ testset_id=testset.id,
+ status=status.dict(),
+ variant_id=uuid.UUID(variant),
+ variant_revision_id=uuid.UUID(variant_revision),
+ )
- evaluation.organization = organization
- evaluation.workspace = workspace
+ if isCloudEE():
+ # assert that if organization is provided, workspace is also provided, and vice versa
+ assert (
+ organization is not None and workspace is not None
+ ), "organization and workspace must be provided together"
+
+ evaluation.organization_id = organization_id
+ evaluation.workspace_id = workspace_id
+
+ session.add(evaluation)
+ await session.commit()
+ await session.refresh(evaluation)
- await evaluation.create()
- return evaluation
+ return evaluation
async def create_new_evaluation_scenario(
@@ -1947,45 +2295,71 @@ async def create_new_evaluation_scenario(
Returns:
EvaluationScenarioDB: The created evaluation scenario.
"""
- evaluation_scenario = EvaluationScenarioDB(
- user=user,
- evaluation=evaluation,
- variant_id=ObjectId(variant_id),
- inputs=inputs,
- outputs=outputs,
- correct_answers=correct_answers,
- is_pinned=is_pinned,
- note=note,
- evaluators_configs=evaluators_configs,
- results=results,
- )
- if isCloudEE():
- # assert that if organization is provided, workspace is also provided, and vice versa
- assert (
- organization is not None and workspace is not None
- ), "organization and workspace must be provided together"
+ async with db_engine.get_session() as session:
+ evaluation_scenario = EvaluationScenarioDB(
+ user_id=user.id,
+ evaluation_id=evaluation.id,
+ variant_id=uuid.UUID(variant_id),
+ inputs=inputs,
+ outputs=outputs,
+ correct_answers=correct_answers,
+ is_pinned=is_pinned,
+ note=note,
+ )
- evaluation_scenario.organization = organization
- evaluation_scenario.workspace = workspace
+ if isCloudEE():
+ # assert that if organization is provided, workspace is also provided, and vice versa
+ assert (
+ organization is not None and workspace is not None
+ ), "organization and workspace must be provided together"
- await evaluation_scenario.create()
- return evaluation_scenario
+ evaluation_scenario.organization_id = organization_id
+ evaluation_scenario.workspace_id = workspace_id
+ session.add(evaluation_scenario)
+ await session.commit()
+ await session.refresh(evaluation_scenario)
+
+ # create evaluation scenario result
+ for result in results:
+ evaluation_scenario_result = EvaluationScenarioResultDB(
+ evaluation_scenario_id=evaluation_scenario.id,
+ evaluator_config_id=uuid.UUID(result.evaluator_config),
+ result=result.result.dict(),
+ )
-async def update_evaluation_with_aggregated_results(
- evaluation_id: ObjectId, aggregated_results: List[AggregatedResult]
-) -> EvaluationDB:
- evaluation = await EvaluationDB.find_one(EvaluationDB.id == ObjectId(evaluation_id))
+ session.add(evaluation_scenario_result)
- if not evaluation:
- raise ValueError("Evaluation not found")
+ await session.commit() # ensures that scenario results insertion is committed
+ await session.refresh(evaluation_scenario)
- evaluation.aggregated_results = aggregated_results
- evaluation.updated_at = datetime.now(timezone.utc).isoformat()
+ return evaluation_scenario
- await evaluation.save()
- return evaluation
+
+async def update_evaluation_with_aggregated_results(
+ evaluation_id: str, aggregated_results: List[AggregatedResult]
+):
+ async with db_engine.get_session() as session:
+ base_query = select(EvaluationAggregatedResultDB).filter_by(
+ evaluation_id=uuid.UUID(evaluation_id)
+ )
+ for result in aggregated_results:
+ query = base_query.filter_by(
+ evaluator_config_id=uuid.UUID(result.evaluator_config)
+ )
+ db_result = await session.execute(query)
+ evaluation_aggregated_result = db_result.scalars().one_or_none()
+ if not evaluation_aggregated_result:
+ raise NoResultFound(
+ f"Aggregated result with id {result.evaluator_config} not found for the evaluation"
+ )
+
+ for key, value in result.result.dict(exclude_unset=True):
+ if hasattr(evaluation_aggregated_result.result, key):
+ setattr(evaluation_aggregated_result.result, key, value)
+
+ await session.commit()
async def fetch_evaluators_configs(app_id: str):
@@ -1994,15 +2368,14 @@ async def fetch_evaluators_configs(app_id: str):
Returns:
List[EvaluatorConfigDB]: A list of evaluator configuration objects.
"""
- assert app_id is not None, "evaluation_id cannot be None"
- try:
- evaluators_configs = await EvaluatorConfigDB.find(
- EvaluatorConfigDB.app.id == ObjectId(app_id)
- ).to_list()
+ assert app_id is not None, "evaluation_id cannot be None"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluatorConfigDB).filter_by(app_id=uuid.UUID(app_id))
+ )
+ evaluators_configs = result.scalars().all()
return evaluators_configs
- except Exception as e:
- raise e
async def fetch_evaluator_config(evaluator_config_id: str):
@@ -2012,13 +2385,12 @@ async def fetch_evaluator_config(evaluator_config_id: str):
EvaluatorConfigDB: the evaluator configuration object.
"""
- try:
- evaluator_config: EvaluatorConfigDB = await EvaluatorConfigDB.find_one(
- EvaluatorConfigDB.id == ObjectId(evaluator_config_id), fetch_links=True
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluatorConfigDB).filter_by(id=uuid.UUID(evaluator_config_id))
)
+ evaluator_config = result.scalars().one_or_none()
return evaluator_config
- except Exception as e:
- raise e
async def check_if_ai_critique_exists_in_list_of_evaluators_configs(
@@ -2030,21 +2402,21 @@ async def check_if_ai_critique_exists_in_list_of_evaluators_configs(
EvaluatorConfigDB: the evaluator configuration object.
"""
- try:
- evaluator_configs_object_ids = [
- ObjectId(evaluator_config_id)
+ async with db_engine.get_session() as session:
+ evaluator_config_uuids = [
+ uuid.UUID(evaluator_config_id)
for evaluator_config_id in evaluators_configs_ids
]
- evaluators_configs: List[EvaluatorConfigDB] = await EvaluatorConfigDB.find(
- {
- "_id": {"$in": evaluator_configs_object_ids},
- "evaluator_key": "auto_ai_critique",
- }
- ).to_list()
+
+ query = select(EvaluatorConfigDB).where(
+ EvaluatorConfigDB.id.in_(evaluator_config_uuids),
+ EvaluatorConfigDB.evaluator_key == "auto_ai_critique",
+ )
+
+ result = await session.execute(query)
+ evaluators_configs = result.scalars().all()
return bool(evaluators_configs)
- except Exception as e:
- raise e
async def fetch_evaluator_config_by_appId(
@@ -2060,19 +2432,19 @@ async def fetch_evaluator_config_by_appId(
EvaluatorConfigDB: the evaluator configuration object.
"""
- try:
- evaluator_config = await EvaluatorConfigDB.find_one(
- EvaluatorConfigDB.app.id == ObjectId(app_id),
- EvaluatorConfigDB.evaluator_key == evaluator_name,
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluatorConfigDB).filter_by(
+ app_id=uuid.UUID(app_id), evaluator_key=evaluator_name
+ )
)
+ evaluator_config = result.scalars().one_or_none()
return evaluator_config
- except Exception as e:
- raise e
async def create_evaluator_config(
app: AppDB,
- user: UserDB,
+ user_id: str,
name: str,
evaluator_key: str,
organization=None,
@@ -2081,28 +2453,32 @@ async def create_evaluator_config(
) -> EvaluatorConfigDB:
"""Create a new evaluator configuration in the database."""
- new_evaluator_config = EvaluatorConfigDB(
- app=app,
- user=user,
- name=name,
- evaluator_key=evaluator_key,
- settings_values=settings_values,
- )
+ async with db_engine.get_session() as session:
+ new_evaluator_config = EvaluatorConfigDB(
+ app_id=app.id,
+ user_id=uuid.UUID(user_id),
+ name=name,
+ evaluator_key=evaluator_key,
+ settings_values=settings_values,
+ )
- if isCloudEE():
- # assert that if organization is provided, workspace is also provided, and vice versa
- assert (
- organization is not None and workspace is not None
- ), "organization and workspace must be provided together"
+ if isCloudEE():
+ # assert that if organization is provided, workspace is also provided, and vice versa
+ assert (
+ organization is not None and workspace is not None
+ ), "organization and workspace must be provided together"
- new_evaluator_config.organization = organization
- new_evaluator_config.workspace = workspace
+ new_evaluator_config.organization_id = uuid.UUID(organization)
+ new_evaluator_config.workspace_id = uuid.UUID(workspace)
+
+ try:
+ session.add(new_evaluator_config)
+ await session.commit()
+ await session.refresh(new_evaluator_config)
- try:
- await new_evaluator_config.create()
- return new_evaluator_config
- except Exception as e:
- raise e
+ return new_evaluator_config
+ except Exception as e:
+ raise e
async def update_evaluator_config(
@@ -2119,30 +2495,44 @@ async def update_evaluator_config(
EvaluatorConfigDB: The updated evaluator configuration object.
"""
- evaluator_config = await EvaluatorConfigDB.find_one(
- EvaluatorConfigDB.id == ObjectId(evaluator_config_id)
- )
- updates_dict = updates.dict(exclude_unset=True)
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluatorConfigDB).filter_by(id=uuid.UUID(evaluator_config_id))
+ )
+ evaluator_config = result.scalars().one_or_none()
+ if not evaluator_config:
+ raise NoResultFound(
+ f"Evaluator config with id {evaluator_config_id} not found"
+ )
+
+ for key, value in updates.items():
+ if hasattr(evaluator_config.settings_values, key):
+ setattr(evaluator_config.settings_values, key, value)
+
+ await session.commit()
+ await session.refresh(evaluator_config)
- for key, value in updates_dict.items():
- if hasattr(evaluator_config, key):
- setattr(evaluator_config, key, value)
- await evaluator_config.save()
- return evaluator_config
+ return evaluator_config
async def delete_evaluator_config(evaluator_config_id: str) -> bool:
"""Delete an evaluator configuration from the database."""
- assert evaluator_config_id is not None, "Evaluator Config ID cannot be None"
- try:
- evaluator_config = await EvaluatorConfigDB.find_one(
- EvaluatorConfigDB.id == ObjectId(evaluator_config_id)
+ assert evaluator_config_id is not None, "Evaluator Config ID cannot be None"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluatorConfigDB).filter_by(id=uuid.UUID(evaluator_config_id))
)
- delete_result = await evaluator_config.delete()
- return delete_result.acknowledged
- except Exception as e:
- raise e
+ evaluator_config = result.scalars().one_or_none()
+ if evaluator_config is None:
+ raise NoResultFound(
+ f"Evaluator config with id {evaluator_config_id} not found"
+ )
+
+ await session.delete(evaluator_config)
+ await session.commit()
+
+ return True
async def update_evaluation(
@@ -2158,24 +2548,35 @@ async def update_evaluation(
Returns:
EvaluatorConfigDB: The updated evaluator configuration object.
"""
- evaluation = await EvaluationDB.get(ObjectId(evaluation_id))
- for key, value in updates.items():
- if hasattr(evaluation, key):
- setattr(evaluation, key, value)
- await evaluation.save()
- return evaluation
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ )
+ evaluation = result.scalars().one_or_none()
+ for key, value in updates.items():
+ if hasattr(evaluation, key):
+ setattr(evaluation, key, value)
+
+ await session.commit()
+ await session.refresh(evaluation)
+
+ return evaluation
async def check_if_evaluation_contains_failed_evaluation_scenarios(
evaluation_id: str,
) -> bool:
- query = EvaluationScenarioDB.find(
- EvaluationScenarioDB.evaluation.id == ObjectId(evaluation_id),
- {"results": {"$elemMatch": {"result.type": "error"}}},
- )
+ async with db_engine.get_session() as session:
+ query = select(func.count(EvaluationScenarioDB.id)).where(
+ EvaluationScenarioDB.evaluation_id == uuid.UUID(evaluation_id),
+ EvaluationScenarioDB.results.any(
+ EvaluationScenarioDB.result.has(type="error")
+ ),
+ )
- count = await query.count()
- if count > 0:
- return True
- return False
+ result = await session.execute(query)
+ count = result.scalar()
+ if not count:
+ return False
+ return count > 0
From 4c447bcadce500ab62187d351dcd977151001e0d Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:38:03 +0100
Subject: [PATCH 045/268] refactor (backend): improve clarity of code in app
and evaluator managers
---
.../agenta_backend/services/app_manager.py | 159 ++++++++++--------
.../services/evaluator_manager.py | 25 +--
2 files changed, 106 insertions(+), 78 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index dfd9f0c021..a96790a821 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -2,9 +2,10 @@
"""
import os
+import uuid
import logging
from urllib.parse import urlparse
-from typing import List, Any, Dict
+from typing import List, Any, Dict, Optional
from agenta_backend.models.api.api_models import (
URI,
@@ -54,7 +55,7 @@
async def start_variant(
db_app_variant: AppVariantDB,
- env_vars: DockerEnvVars = None,
+ env_vars: Optional[DockerEnvVars] = None,
) -> URI:
"""
Starts a Docker container for a given app variant.
@@ -92,11 +93,11 @@ async def start_variant(
"http://host.docker.internal" # unclear why this stopped working
)
# domain_name = "http://localhost"
- env_vars = {} if env_vars is None else env_vars
+ env_vars = {} if env_vars is None else env_vars # type: ignore
env_vars.update(
{
- "AGENTA_BASE_ID": str(db_app_variant.base.id),
- "AGENTA_APP_ID": str(db_app_variant.app.id),
+ "AGENTA_BASE_ID": str(db_app_variant.base_id),
+ "AGENTA_APP_ID": str(db_app_variant.app_id),
"AGENTA_HOST": domain_name,
}
)
@@ -113,8 +114,8 @@ async def start_variant(
)
await db_manager.update_base(
- db_app_variant.base,
- deployment=deployment.id,
+ str(db_app_variant.base_id),
+ deployment_id=deployment.id, # type: ignore
)
except Exception as e:
import traceback
@@ -127,7 +128,7 @@ async def start_variant(
f"Failed to start Docker container for app variant {db_app_variant.app.app_name}/{db_app_variant.variant_name} \n {str(e)}"
) from e
- return URI(uri=deployment.uri)
+ return URI(uri=deployment.uri) # type: ignore
async def update_variant_image(
@@ -165,7 +166,7 @@ async def update_variant_image(
workspace=app_variant_db.workspace if isCloudEE() else None, # noqa
)
# Update base with new image
- await db_manager.update_base(app_variant_db.base, image=db_image)
+ await db_manager.update_base(str(app_variant_db.base_id), image=db_image)
# Update variant to remove configuration
await db_manager.update_variant_parameters(
app_variant_db=app_variant_db, parameters={}, user_uid=user_uid
@@ -178,7 +179,7 @@ async def update_variant_image(
async def terminate_and_remove_app_variant(
- app_variant_id: str = None, app_variant_db=None
+ app_variant_id: Optional[str] = None, app_variant_db: Optional[AppVariantDB] =None
) -> None:
"""
Removes app variant from the database. If it's the last one using an image, performs additional operations:
@@ -193,6 +194,7 @@ async def terminate_and_remove_app_variant(
ValueError: If the app variant is not found in the database.
Exception: Any other exception raised during the operation.
"""
+
assert (
app_variant_id or app_variant_db
), "Either app_variant_id or app_variant_db must be provided"
@@ -205,7 +207,7 @@ async def terminate_and_remove_app_variant(
app_variant_db = await db_manager.fetch_app_variant_by_id(app_variant_id)
logger.debug(f"Fetched app variant {app_variant_db}")
- app_id = app_variant_db.app.id
+ app_id = str(app_variant_db.app_id) # type: ignore
if app_variant_db is None:
error_msg = f"Failed to delete app variant {app_variant_id}: Not found in DB."
logger.error(error_msg)
@@ -217,59 +219,63 @@ async def terminate_and_remove_app_variant(
)
if is_last_variant_for_image:
# remove variant + terminate and rm containers + remove base
+ base_db = await db_manager.fetch_base_by_id(base_id=str(app_variant_db.base_id))
+ if not base_db:
+ raise
- image = app_variant_db.base.image
+ image = base_db.image
logger.debug("is_last_variant_for_image {image}")
- if image:
- logger.debug("_stop_and_delete_app_container")
- try:
- deployment = await db_manager.get_deployment_by_objectid(
- app_variant_db.base.deployment
- )
- except Exception as e:
- logger.error(f"Failed to get deployment {e}")
- deployment = None
- if deployment:
- try:
- await deployment_manager.stop_and_delete_service(deployment)
- except RuntimeError as e:
- logger.error(
- f"Failed to stop and delete service {deployment} {e}"
- )
-
- # If image deletable is True, remove docker image and image db
- if image.deletable:
- try:
- if isCloudEE():
- await deployment_manager.remove_repository(image.tags)
- else:
- await deployment_manager.remove_image(image)
- except RuntimeError as e:
- logger.error(f"Failed to remove image {image} {e}")
- await db_manager.remove_image(image)
-
- logger.debug("remove base")
- await db_manager.remove_app_variant_from_db(app_variant_db)
- logger.debug("Remove image object from db")
- if deployment:
- await db_manager.remove_deployment(deployment)
- await db_manager.remove_base_from_db(app_variant_db.base)
- logger.debug("remove_app_variant_from_db")
-
- # Only delete the docker image for users that are running the oss version
-
- else:
+
+ if not isinstance(base_db.image_id, uuid.UUID): # type: ignore
logger.debug(
f"Image associated with app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name} not found. Skipping deletion."
)
+
+ logger.debug("_stop_and_delete_app_container")
+ try:
+ deployment = await db_manager.get_deployment_by_objectid(
+ str(base_db.deployment_id)
+ )
+ except Exception as e:
+ logger.error(f"Failed to get deployment {e}")
+ deployment = None
+
+ if deployment:
+ try:
+ await deployment_manager.stop_and_delete_service(deployment)
+ except RuntimeError as e:
+ logger.error(
+ f"Failed to stop and delete service {deployment} {e}"
+ )
+
+ # If image deletable is True, remove docker image and image db
+ if image.deletable:
+ try:
+ if isCloudEE():
+ await deployment_manager.remove_repository(image.tags) # type: ignore
+ else:
+ await deployment_manager.remove_image(image)
+ except RuntimeError as e:
+ logger.error(f"Failed to remove image {image} {e}")
+ await db_manager.remove_image(image)
+
+ logger.debug("remove base")
+ await db_manager.remove_app_variant_from_db(app_variant_db)
+
+ logger.debug("Remove image object from db")
+ if deployment:
+ await db_manager.remove_deployment(deployment)
+
+ await db_manager.remove_base_from_db(base_db)
+ logger.debug("remove_app_variant_from_db")
else:
# remove variant + config
logger.debug("remove_app_variant_from_db")
await db_manager.remove_app_variant_from_db(app_variant_db)
- logger.debug("list_app_variants")
+
app_variants = await db_manager.list_app_variants(app_id)
- logger.debug(f"{app_variants}")
- if len(app_variants) == 0: # this was the last variant for an app
+ logger.debug(f"Count of app variants available: {len(app_variants)}")
+ if len(app_variants) == 0: # remove app related resources if the length of the app variants hit 0
logger.debug("remove_app_related_resources")
await remove_app_related_resources(app_id)
except Exception as e:
@@ -290,16 +296,35 @@ async def remove_app_related_resources(app_id: str):
"""
try:
# Delete associated environments
- environments: List[AppEnvironmentDB] = await db_manager.list_environments(
+ environments = await db_manager.list_environments(
app_id
)
for environment_db in environments:
await db_manager.remove_environment(environment_db)
logger.info(f"Successfully deleted environment {environment_db.name}.")
+
# Delete associated testsets
await db_manager.remove_app_testsets(app_id)
logger.info(f"Successfully deleted test sets associated with app {app_id}.")
+ # Delete associated bases
+ bases = await db_manager.list_bases_for_app_id(app_id)
+ for base_db in bases:
+ await db_manager.remove_base(base_db)
+ logger.info(f"Successfully deleted base {base_db.base_name}")
+
+ # Delete associated deployments
+ deployments = await db_manager.list_deployments(app_id)
+ for deployment_db in deployments:
+ await db_manager.remove_deployment(deployment_db)
+ logger.info(f"Successfully deleted deployment {str(deployment_db.id)}")
+
+ # Deleted associated evaluators_configs
+ evaluators_configs = await db_manager.fetch_evaluators_configs(app_id)
+ for evaluator_config_db in evaluators_configs:
+ await db_manager.delete_evaluator_config(str(evaluator_config_db.id))
+ logger.info(f"Successfully deleted evaluator config {str(evaluator_config_db.id)}")
+
await db_manager.remove_app_by_id(app_id)
logger.info(f"Successfully remove app object {app_id}.")
except Exception as e:
@@ -323,7 +348,7 @@ async def remove_app(app: AppDB):
logger.error(error_msg)
raise ValueError(error_msg)
try:
- app_variants = await db_manager.list_app_variants(app.id)
+ app_variants = await db_manager.list_app_variants(str(app.id))
for app_variant_db in app_variants:
await terminate_and_remove_app_variant(app_variant_db=app_variant_db)
logger.info(
@@ -351,13 +376,16 @@ async def update_variant_parameters(
parameters -- the parameters to update
user_uid -- the user uid
"""
+
assert app_variant_id is not None, "app_variant_id must be provided"
assert parameters is not None, "parameters must be provided"
+
app_variant_db = await db_manager.fetch_app_variant_by_id(app_variant_id)
if app_variant_db is None:
error_msg = f"Failed to update app variant {app_variant_id}: Not found in DB."
logger.error(error_msg)
raise ValueError(error_msg)
+
try:
await db_manager.update_variant_parameters(
app_variant_db=app_variant_db, parameters=parameters, user_uid=user_uid
@@ -374,8 +402,8 @@ async def add_variant_based_on_image(
variant_name: str,
docker_id_or_template_uri: str,
user_uid: str,
- tags: str = None,
- base_name: str = None,
+ tags: Optional[str] = None,
+ base_name: Optional[str] = None,
config_name: str = "default",
is_template_image: bool = False,
) -> AppVariantDB:
@@ -399,9 +427,8 @@ async def add_variant_based_on_image(
ValueError: If the app variant or image is None, or if an app variant with the same name already exists.
HTTPException: If an error occurs while creating the app variant.
"""
- logger.debug("Start: Creating app variant based on image")
- # Validate input parameters
+ logger.debug("Start: Creating app variant based on image")
logger.debug("Step 1: Validating input parameters")
if (
app in [None, ""]
@@ -421,7 +448,7 @@ async def add_variant_based_on_image(
# Check if app variant already exists
logger.debug("Step 2: Checking if app variant already exists")
variants = await db_manager.list_app_variants_for_app_id(app_id=str(app.id))
- already_exists = any(av for av in variants if av.variant_name == variant_name)
+ already_exists = any(av for av in variants if av.variant_name == variant_name) # type: ignore
if already_exists:
logger.error("App variant with the same name already exists")
raise ValueError("App variant with the same name already exists")
@@ -432,14 +459,14 @@ async def add_variant_based_on_image(
if parsed_url.scheme and parsed_url.netloc:
db_image = await db_manager.get_orga_image_instance_by_uri(
template_uri=docker_id_or_template_uri,
- organization_id=str(app.organization.id) if isCloudEE() else None, # noqa
- workspace_id=str(app.workspace.id) if isCloudEE() else None, # noqa
+ organization_id=str(app.organization.id) if isCloudEE() else None, # type: ignore
+ workspace_id=str(app.workspace.id) if isCloudEE() else None, # type: ignore
)
else:
db_image = await db_manager.get_orga_image_instance_by_docker_id(
docker_id=docker_id_or_template_uri,
- organization_id=str(app.organization.id) if isCloudEE() else None, # noqa
- workspace_id=str(app.workspace.id) if isCloudEE() else None, # noqa
+ organization_id=str(app.organization.id) if isCloudEE() else None, # type: ignore
+ workspace_id=str(app.workspace.id) if isCloudEE() else None, # type: ignore
)
# Create new image if not exists
@@ -496,9 +523,7 @@ async def add_variant_based_on_image(
user=user_instance,
organization=app.organization if isCloudEE() else None, # noqa
workspace=app.workspace if isCloudEE() else None, # noqa
- parameters={},
base_name=base_name,
- config_name=config_name,
base=db_base,
config=config_db,
)
diff --git a/agenta-backend/agenta_backend/services/evaluator_manager.py b/agenta-backend/agenta_backend/services/evaluator_manager.py
index c4cc49ace3..5de263c4f5 100644
--- a/agenta-backend/agenta_backend/services/evaluator_manager.py
+++ b/agenta-backend/agenta_backend/services/evaluator_manager.py
@@ -20,15 +20,16 @@
from agenta_backend.resources.evaluators import evaluators
-def get_evaluators() -> Optional[List[Evaluator]]:
+def get_evaluators() -> List[Evaluator]:
"""
Fetches a list of evaluators from a JSON file.
Returns:
- Optional[List[Evaluator]]: A list of evaluator objects or None if an error occurs.
+ List[Evaluator]: A list of evaluator objects.
"""
- return get_all_evaluators()
+ evaluators_as_dict = get_all_evaluators()
+ return [Evaluator(**evaluator_dict) for evaluator_dict in evaluators_as_dict]
async def get_evaluators_configs(app_id: str) -> List[EvaluatorConfig]:
@@ -83,11 +84,11 @@ async def create_evaluator_config(
evaluator_config = await db_manager.create_evaluator_config(
app=app,
- organization=app.organization if isCloudEE() else None, # noqa,
- workspace=app.workspace if isCloudEE() else None, # noqa,
- user=app.user,
+ user_id=str(app.user_id),
name=name,
evaluator_key=evaluator_key,
+ organization=app.organization if isCloudEE() else None, # noqa,
+ workspace=app.workspace if isCloudEE() else None, # noqa,
settings_values=settings_values,
)
return evaluator_config_db_to_pydantic(evaluator_config=evaluator_config)
@@ -141,14 +142,15 @@ async def create_ready_to_use_evaluators(app: AppDB):
Returns:
Nothing. The function works by side effect, modifying the database.
"""
+
direct_use_evaluators = [
- evaluator for evaluator in get_evaluators() if evaluator.get("direct_use")
+ evaluator for evaluator in get_evaluators() if evaluator.direct_use
]
for evaluator in direct_use_evaluators:
settings_values = {
setting_name: setting.get("default")
- for setting_name, setting in evaluator.get("settings_template", {}).items()
+ for setting_name, setting in evaluator.settings_template.items()
if setting.get("ground_truth_key") is True and setting.get("default", "")
}
@@ -157,13 +159,14 @@ async def create_ready_to_use_evaluators(app: AppDB):
default_value != ""
), f"Default value for ground truth key '{setting_name}' in Evaluator is empty"
+ assert hasattr(evaluator, "name") and hasattr(evaluator, "key"), f"'name' and 'key' does not exist in the evaluator: {evaluator}"
await db_manager.create_evaluator_config(
app=app,
+ user_id=str(app.user_id),
+ name=evaluator.name,
+ evaluator_key=evaluator.key,
organization=app.organization if isCloudEE() else None, # noqa,
workspace=app.workspace if isCloudEE() else None, # noqa,
- user=app.user,
- name=evaluator["name"],
- evaluator_key=evaluator["key"],
settings_values=settings_values,
)
From 24ab8c44b0993a842d437f848ba542c3032b3137 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:38:38 +0100
Subject: [PATCH 046/268] refactor (backend): migrate beanie odm query to
sqlalchemy
---
.../services/evaluation_service.py | 41 +++++++++++--------
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index 96b7f904c1..f70b745e26 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -1,12 +1,18 @@
+import uuid
import logging
from typing import Dict, List
from datetime import datetime, timezone
from fastapi import HTTPException
+from sqlalchemy.orm import Session
+from sqlalchemy.future import select
+from sqlalchemy.ext.asyncio import AsyncSession
+
from agenta_backend.models import converters
from agenta_backend.services import db_manager
from agenta_backend.utils.common import isCloudEE
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.api.evaluation_model import (
Evaluation,
@@ -549,7 +555,6 @@ async def create_new_evaluation(
),
variant=variant_id,
variant_revision=str(variant_revision.id),
- evaluators_configs=evaluator_config_ids,
organization=app.organization if isCloudEE() else None,
workspace=app.workspace if isCloudEE() else None,
)
@@ -655,21 +660,21 @@ def remove_duplicates(csvdata):
async def fetch_evaluations_by_resource(resource_type: str, resource_ids: List[str]):
- ids = list(map(lambda x: ObjectId(x), resource_ids))
- if resource_type == "variant":
- res = await EvaluationDB.find(In(EvaluationDB.variant, ids)).to_list()
- elif resource_type == "testset":
- res = await EvaluationDB.find(In(EvaluationDB.testset.id, ids)).to_list()
- elif resource_type == "evaluator_config":
- res = await EvaluationDB.find(
- In(
- EvaluationDB.evaluators_configs,
- ids,
+ ids = list(map(uuid.UUID, resource_ids))
+
+ async with db_engine.get_session() as session:
+ if resource_type == "variant":
+ query = select(EvaluationDB).filter(EvaluationDB.variant_id.in_(ids))
+ elif resource_type == "testset":
+ query = select(EvaluationDB).filter(EvaluationDB.testset_id.in_(ids))
+ # elif resource_type == "evaluator_config":
+ # query = select(EvaluationDB).filter(EvaluationDB.evaluators_configs_id.in_(ids))
+ else:
+ raise HTTPException(
+ status_code=400,
+ detail=f"resource_type {resource_type} is not supported",
)
- ).to_list()
- else:
- raise HTTPException(
- status_code=400,
- detail=f"resource_type {resource_type} is not supported",
- )
- return res
+
+ result = await session.execute(query)
+ res = result.scalars().all()
+ return res
From 30500dfb16fecbc8b07f6a947b2594e464c87cb4 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:40:52 +0100
Subject: [PATCH 047/268] refactor (backend): update routers to work with
sqlalchemy migration
---
.../agenta_backend/routers/app_router.py | 26 ++++---
.../agenta_backend/routers/bases_router.py | 4 +-
.../agenta_backend/routers/configs_router.py | 5 +-
.../routers/container_router.py | 8 +-
.../agenta_backend/routers/testset_router.py | 73 +++++++++----------
.../agenta_backend/routers/variants_router.py | 2 +
6 files changed, 60 insertions(+), 58 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/app_router.py b/agenta-backend/agenta_backend/routers/app_router.py
index e5987d28e3..c0f6f02031 100644
--- a/agenta-backend/agenta_backend/routers/app_router.py
+++ b/agenta-backend/agenta_backend/routers/app_router.py
@@ -3,9 +3,9 @@
from typing import List, Optional
from docker.errors import DockerException
+
from fastapi.responses import JSONResponse
from fastapi import HTTPException, Request
-from beanie import PydanticObjectId as ObjectId
from agenta_backend.models import converters
from agenta_backend.utils.common import (
@@ -122,6 +122,8 @@ async def list_app_variants(
]
except Exception as e:
+ import traceback
+ traceback.print_exc()
raise HTTPException(status_code=500, detail=str(e))
@@ -491,8 +493,8 @@ async def create_app_and_variant_from_template(
app = await db_manager.fetch_app_by_name_and_parameters(
app_name,
request.state.user_id,
- payload.organization_id if isCloudEE() else None,
- payload.workspace_id if isCloudEE() else None,
+ payload.organization_id if isCloudEE() else None, # type: ignore
+ payload.workspace_id if isCloudEE() else None, # type: ignore
)
if app is not None:
raise Exception(
@@ -508,8 +510,8 @@ async def create_app_and_variant_from_template(
app = await db_manager.create_app_and_envs(
app_name,
request.state.user_id,
- payload.organization_id if isCloudEE() else None,
- payload.workspace_id if isCloudEE() else None,
+ payload.organization_id if isCloudEE() else None, # type: ignore
+ payload.workspace_id if isCloudEE() else None, # type: ignore
)
logger.debug(
@@ -529,10 +531,10 @@ async def create_app_and_variant_from_template(
app_variant_db = await app_manager.add_variant_based_on_image(
app=app,
variant_name="app.default",
- docker_id_or_template_uri=(
+ docker_id_or_template_uri=( # type: ignore
template_db.template_uri if isCloudEE() else template_db.digest
),
- tags=f"{image_name}" if not isCloudEE() else None,
+ tags=f"{image_name}" if not isCloudEE() else None, # type: ignore
base_name="app",
config_name="default",
is_template_image=True,
@@ -546,10 +548,10 @@ async def create_app_and_variant_from_template(
)
await db_manager.add_testset_to_app_variant(
app_id=str(app.id),
- org_id=payload.organization_id if isCloudEE() else None,
- workspace_id=payload.workspace_id if isCloudEE() else None,
- template_name=template_db.name,
- app_name=app.app_name,
+ org_id=payload.organization_id if isCloudEE() else None, # type: ignore
+ workspace_id=payload.workspace_id if isCloudEE() else None, # type: ignore
+ template_name=template_db.name, # type: ignore
+ app_name=app.app_name, # type: ignore
user_uid=request.state.user_id,
)
@@ -608,6 +610,8 @@ async def create_app_and_variant_from_template(
return await converters.app_variant_db_to_output(app_variant_db)
except Exception as e:
+ import traceback
+ traceback.print_exc()
logger.exception(f"Error: Exception caught - {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
diff --git a/agenta-backend/agenta_backend/routers/bases_router.py b/agenta-backend/agenta_backend/routers/bases_router.py
index 149571e451..6e98a06656 100644
--- a/agenta-backend/agenta_backend/routers/bases_router.py
+++ b/agenta-backend/agenta_backend/routers/bases_router.py
@@ -22,7 +22,7 @@
@router.get("/", response_model=List[BaseOutput], operation_id="list_bases")
async def list_bases(
request: Request,
- app_id: Optional[str] = None,
+ app_id: str,
base_name: Optional[str] = None,
) -> List[BaseOutput]:
"""
@@ -30,7 +30,7 @@ async def list_bases(
Args:
request (Request): The incoming request.
- app_id (Optional[str], optional): The ID of the app to filter by. Defaults to None.
+ app_id (str): The ID of the app to filter by.
base_name (Optional[str], optional): The name of the base to filter by. Defaults to None.
Returns:
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index 78fb041e52..f0cf1b8394 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -52,8 +52,9 @@ async def save_config(
if variant_db.config_name == payload.config_name:
variant_to_overwrite = variant_db
break
+
if variant_to_overwrite is not None:
- if payload.overwrite or variant_to_overwrite.config.parameters == {}:
+ if payload.overwrite or variant_to_overwrite.config_parameters == {}:
print(f"update_variant_parameters ===> {payload.overwrite}")
await app_manager.update_variant_parameters(
app_variant_id=str(variant_to_overwrite.id),
@@ -79,6 +80,8 @@ async def save_config(
logger.error(f"save_config http exception ===> {e.detail}")
raise
except Exception as e:
+ import traceback
+ traceback.print_exc()
logger.error(f"save_config exception ===> {e}")
raise HTTPException(status_code=500, detail=str(e)) from e
diff --git a/agenta-backend/agenta_backend/routers/container_router.py b/agenta-backend/agenta_backend/routers/container_router.py
index 7da10414b0..648d81619e 100644
--- a/agenta-backend/agenta_backend/routers/container_router.py
+++ b/agenta-backend/agenta_backend/routers/container_router.py
@@ -179,13 +179,13 @@ async def construct_app_container_url(
raise HTTPException(status_code=403, detail=error_msg)
try:
- if getattr(object_db, "deployment", None): # this is a base
+ if getattr(object_db, "deployment_id", None): # this is a base
deployment = await db_manager.get_deployment_by_objectid(
- object_db.deployment
+ str(object_db.deployment_id) # type: ignore
)
- elif getattr(object_db.base, "deployment", None): # this is a variant
+ elif getattr(object_db, "base_id", None): # this is a variant
deployment = await db_manager.get_deployment_by_objectid(
- object_db.base.deployment
+ str(object_db.base.deployment_id) # type: ignore
)
else:
raise HTTPException(
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 9050b8bd8d..86615bb8be 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -1,5 +1,6 @@
import io
import csv
+import uuid
import json
import logging
import requests
@@ -254,27 +255,20 @@ async def create_testset(
status_code=403,
)
- user = await get_user(request.state.user_id)
- testset = {
- "created_at": datetime.now(timezone.utc).isoformat(),
- "name": csvdata.name,
- "app": app,
- "csvdata": csvdata.csvdata,
- "user": user,
- }
-
- if isCloudEE():
- testset["organization"] = app.organization
- testset["workspace"] = app.workspace
-
try:
- testset_instance = TestSetDB(**testset)
- await testset_instance.create()
-
+ testset_data = {
+ "name": csvdata.name,
+ "csvdata": csvdata.csvdata,
+ }
+ testset_instance = await db_manager.create_testset(
+ app=app,
+ user_uid=request.state.user_id,
+ testset_data=testset_data
+ )
if testset_instance is not None:
return TestSetSimpleResponse(
id=str(testset_instance.id),
- name=testset_instance.name,
+ name=testset_instance.name, # type: ignore
created_at=str(testset_instance.created_at),
)
except Exception as e:
@@ -298,11 +292,15 @@ async def update_testset(
Returns:
str: The id of the test set updated.
"""
- test_set = await db_manager.fetch_testset_by_id(testset_id=testset_id)
+
+ testset = await db_manager.fetch_testset_by_id(testset_id=testset_id)
+ if testset is None:
+ raise HTTPException(status_code=404, detail="testset not found")
+
if isCloudEE():
has_permission = await check_action_access(
user_uid=request.state.user_id,
- object=test_set,
+ object=testset,
permission=Permission.EDIT_TESTSET,
)
logger.debug(f"User has Permission to update Testset: {has_permission}")
@@ -314,25 +312,20 @@ async def update_testset(
status_code=403,
)
- testset_update = {
- "name": csvdata.name,
- "csvdata": csvdata.csvdata,
- "updated_at": datetime.now(timezone.utc).isoformat(),
- }
-
- if test_set is None:
- raise HTTPException(status_code=404, detail="testset not found")
-
try:
- await test_set.update({"$set": testset_update})
- if isinstance(test_set.id, ObjectId):
- return {
- "status": "success",
- "message": "testset updated successfully",
- "_id": testset_id,
- }
- else:
- raise HTTPException(status_code=404, detail="testset not found")
+ testset_update = {
+ "name": csvdata.name,
+ "csvdata": csvdata.csvdata,
+ }
+ await db_manager.update_testset(
+ testset_id=str(testset.id),
+ values_to_update=testset_update
+ )
+ return {
+ "status": "success",
+ "message": "testset updated successfully",
+ "_id": testset_id,
+ }
except Exception as e:
print(str(e))
raise HTTPException(status_code=500, detail=str(e))
@@ -371,12 +364,12 @@ async def get_testsets(
if app is None:
raise HTTPException(status_code=404, detail="App not found")
- testsets: List[TestSetDB] = await db_manager.fetch_testsets_by_app_id(app_id=app_id)
+ testsets = await db_manager.fetch_testsets_by_app_id(app_id=app_id)
return [
TestSetOutputResponse(
- id=str(testset.id),
+ id=str(testset.id), # type: ignore
name=testset.name,
- created_at=testset.created_at,
+ created_at=str(testset.created_at),
)
for testset in testsets
]
diff --git a/agenta-backend/agenta_backend/routers/variants_router.py b/agenta-backend/agenta_backend/routers/variants_router.py
index 90e6fd89b3..e6e8298c9f 100644
--- a/agenta-backend/agenta_backend/routers/variants_router.py
+++ b/agenta-backend/agenta_backend/routers/variants_router.py
@@ -147,6 +147,8 @@ async def remove_variant(
detail = f"Docker error while trying to remove the app variant: {str(e)}"
raise HTTPException(status_code=500, detail=detail)
except Exception as e:
+ import traceback
+ traceback.print_exc()
detail = f"Unexpected error while trying to remove the app variant: {str(e)}"
raise HTTPException(status_code=500, detail=detail)
From 44aefa89d31c487e16369ad9efa94bbd60b6201b Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:41:39 +0100
Subject: [PATCH 048/268] refactor (backend): update converters that translate db
 models into pydantic (api) models
---
.../agenta_backend/models/converters.py | 42 +++++++++----------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index 7fdbb55c72..b9ddbd0c19 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -1,6 +1,7 @@
"""Converts db models to pydantic models
"""
+import uuid
import json
import logging
from typing import List, Tuple, Any
@@ -290,33 +291,33 @@ def app_variant_db_to_pydantic(
async def app_variant_db_to_output(app_variant_db: AppVariantDB) -> AppVariantResponse:
- if app_variant_db.base.deployment:
+ if type(app_variant_db.base_id) == uuid.UUID and type(app_variant_db.base.deployment_id) == uuid.UUID:
deployment = await db_manager.get_deployment_by_objectid(
- app_variant_db.base.deployment
+ str(app_variant_db.base.deployment_id)
)
uri = deployment.uri
else:
deployment = None
uri = None
- logger.info(f"uri: {uri} deployment: {app_variant_db.base.deployment} {deployment}")
+
+ logger.info(f"uri: {uri} deployment: {str(app_variant_db.base.deployment_id)} {deployment}")
variant_response = AppVariantResponse(
- app_id=str(app_variant_db.app.id),
+ app_id=str(app_variant_db.app_id),
app_name=str(app_variant_db.app.app_name),
- variant_name=app_variant_db.variant_name,
+ variant_name=app_variant_db.variant_name, # type: ignore
variant_id=str(app_variant_db.id),
- user_id=str(app_variant_db.user.id),
- parameters=app_variant_db.config.parameters,
- previous_variant_name=app_variant_db.previous_variant_name,
- base_name=app_variant_db.base_name,
- base_id=str(app_variant_db.base.id),
- config_name=app_variant_db.config_name,
+ user_id=str(app_variant_db.user_id),
+ parameters=app_variant_db.config_parameters, # type: ignore
+ base_name=app_variant_db.base_name, # type: ignore
+ base_id=str(app_variant_db.base_id),
+ config_name=app_variant_db.config_name, # type: ignore
uri=uri,
- revision=app_variant_db.revision,
+ revision=app_variant_db.revision, # type: ignore
)
if isCloudEE():
- variant_response.organization_id = str(app_variant_db.organization.id)
- variant_response.workspace_id = str(app_variant_db.workspace.id)
+ variant_response.organization_id = str(app_variant_db.organization_id)
+ variant_response.workspace_id = str(app_variant_db.workspace_id)
return variant_response
@@ -352,8 +353,8 @@ async def environment_db_to_output(
environment_db: AppEnvironmentDB,
) -> EnvironmentOutput:
deployed_app_variant_id = (
- str(environment_db.deployed_app_variant)
- if environment_db.deployed_app_variant
+ str(environment_db.deployed_app_variant_id)
+ if environment_db.deployed_app_variant_id and isinstance(environment_db.deployed_app_variant_id, uuid.UUID) # type: ignore
else None
)
if deployed_app_variant_id:
@@ -366,21 +367,20 @@ async def environment_db_to_output(
deployed_variant_name = None
revision = None
- await environment_db.fetch_link(AppEnvironmentDB.deployed_app_variant_revision)
environment_output = EnvironmentOutput(
name=environment_db.name,
- app_id=str(environment_db.app.id),
+ app_id=str(environment_db.app_id),
deployed_app_variant_id=deployed_app_variant_id,
deployed_variant_name=deployed_variant_name,
deployed_app_variant_revision_id=str(
- environment_db.deployed_app_variant_revision
+ environment_db.deployed_app_variant_revision_id
),
revision=revision,
)
if isCloudEE():
- environment_output.organization_id = str(environment_db.organization.id)
- environment_output.workspace_id = str(environment_db.workspace.id)
+ environment_output.organization_id = str(environment_db.organization_id)
+ environment_output.workspace_id = str(environment_db.workspace_id)
return environment_output
From 355891f25e8410d2a6a373d01256c9e076d939a5 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:49:16 +0100
Subject: [PATCH 049/268] feat (backend): implement close method in db_engine
to ensure that all connections are closed on server crash/disconnect
---
agenta-backend/agenta_backend/main.py | 3 ++-
.../agenta_backend/models/db_engine.py | 16 ++++++++++++++++
2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/main.py b/agenta-backend/agenta_backend/main.py
index d23ce2ec63..21352d5414 100644
--- a/agenta-backend/agenta_backend/main.py
+++ b/agenta-backend/agenta_backend/main.py
@@ -27,7 +27,6 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
-from sqlalchemy.ext.asyncio import AsyncSession
from celery import Celery
@@ -53,9 +52,11 @@ async def lifespan(application: FastAPI, cache=True):
application: FastAPI application.
cache: A boolean value that indicates whether to use the cached data or not.
"""
+
await db_engine.init_db()
await templates_manager.update_and_sync_templates(cache=cache)
yield
+ await db_engine.close()
app = FastAPI(lifespan=lifespan, openapi_tags=open_api_tags_metadata)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index 731b8a5a93..74c7613dc0 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -127,5 +127,21 @@ async def get_session(self) -> AsyncGenerator[AsyncSession, None]:
finally:
await session.close()
+ async def close(self):
+ """
+ Closes and disposes of all connections held by the engine.
+
+ :raises Exception: if the engine is not initialized
+ """
+
+ if self.engine is None:
+ raise Exception("DBEngine is not initialized")
+
+ await self.engine.dispose()
+
+ self.engine = None
+ self.async_session_maker = None
+ self.async_session = None
+
db_engine = DBEngine()
From 17c1fac36ca30d3ff7f492d30d1e7c99d3a4f4c5 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 11 Jun 2024 07:51:25 +0100
Subject: [PATCH 050/268] chore (backend): format codebase with black@23.12.0
---
.../agenta_backend/models/converters.py | 21 ++++----
.../agenta_backend/models/db_engine.py | 14 ++++--
.../agenta_backend/models/db_models.py | 2 +-
.../agenta_backend/routers/app_router.py | 22 +++++----
.../agenta_backend/routers/configs_router.py | 1 +
.../routers/container_router.py | 4 +-
.../agenta_backend/routers/testset_router.py | 11 ++---
.../agenta_backend/routers/variants_router.py | 1 +
.../agenta_backend/services/app_manager.py | 38 ++++++++-------
.../agenta_backend/services/db_manager.py | 48 ++++++++-----------
.../services/evaluator_manager.py | 4 +-
11 files changed, 87 insertions(+), 79 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index b9ddbd0c19..9e9bf90cbb 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -291,7 +291,10 @@ def app_variant_db_to_pydantic(
async def app_variant_db_to_output(app_variant_db: AppVariantDB) -> AppVariantResponse:
- if type(app_variant_db.base_id) == uuid.UUID and type(app_variant_db.base.deployment_id) == uuid.UUID:
+ if (
+ type(app_variant_db.base_id) == uuid.UUID
+ and type(app_variant_db.base.deployment_id) == uuid.UUID
+ ):
deployment = await db_manager.get_deployment_by_objectid(
str(app_variant_db.base.deployment_id)
)
@@ -300,19 +303,21 @@ async def app_variant_db_to_output(app_variant_db: AppVariantDB) -> AppVariantRe
deployment = None
uri = None
- logger.info(f"uri: {uri} deployment: {str(app_variant_db.base.deployment_id)} {deployment}")
+ logger.info(
+ f"uri: {uri} deployment: {str(app_variant_db.base.deployment_id)} {deployment}"
+ )
variant_response = AppVariantResponse(
app_id=str(app_variant_db.app_id),
app_name=str(app_variant_db.app.app_name),
- variant_name=app_variant_db.variant_name, # type: ignore
+ variant_name=app_variant_db.variant_name, # type: ignore
variant_id=str(app_variant_db.id),
user_id=str(app_variant_db.user_id),
- parameters=app_variant_db.config_parameters, # type: ignore
- base_name=app_variant_db.base_name, # type: ignore
+ parameters=app_variant_db.config_parameters, # type: ignore
+ base_name=app_variant_db.base_name, # type: ignore
base_id=str(app_variant_db.base_id),
- config_name=app_variant_db.config_name, # type: ignore
+ config_name=app_variant_db.config_name, # type: ignore
uri=uri,
- revision=app_variant_db.revision, # type: ignore
+ revision=app_variant_db.revision, # type: ignore
)
if isCloudEE():
@@ -354,7 +359,7 @@ async def environment_db_to_output(
) -> EnvironmentOutput:
deployed_app_variant_id = (
str(environment_db.deployed_app_variant_id)
- if environment_db.deployed_app_variant_id and isinstance(environment_db.deployed_app_variant_id, uuid.UUID) # type: ignore
+ if environment_db.deployed_app_variant_id and isinstance(environment_db.deployed_app_variant_id, uuid.UUID) # type: ignore
else None
)
if deployed_app_variant_id:
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index 74c7613dc0..669f3ed3fa 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -4,7 +4,12 @@
from typing import AsyncGenerator
from contextlib import asynccontextmanager
-from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker, async_scoped_session
+from sqlalchemy.ext.asyncio import (
+ AsyncSession,
+ create_async_engine,
+ async_sessionmaker,
+ async_scoped_session,
+)
from agenta_backend.utils.common import isCloudEE
@@ -72,7 +77,7 @@
]
if isCloudEE():
- models.extend([SpanDB, OrganizationDB, WorkspaceDB, APIKeyDB]) # type: ignore
+ models.extend([SpanDB, OrganizationDB, WorkspaceDB, APIKeyDB]) # type: ignore
# Configure and set logging level
logger = logging.getLogger(__name__)
@@ -92,8 +97,7 @@ def __init__(self) -> None:
bind=self.engine, class_=AsyncSession, expire_on_commit=False
)
self.async_session = async_scoped_session(
- session_factory=self.async_session_maker,
- scopefunc=current_task
+ session_factory=self.async_session_maker, scopefunc=current_task
)
async def init_db(self):
@@ -130,7 +134,7 @@ async def get_session(self) -> AsyncGenerator[AsyncSession, None]:
async def close(self):
"""
Closes and dispose all the connections using the engine.
-
+
:raises Exception: if engine is initialized
"""
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 1ed8d624cd..2532cc6bf7 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -528,4 +528,4 @@ class IDsMappingDB(Base):
table_name = Column(String, nullable=False)
objectid = Column(String, primary_key=True)
- uuid = Column(UUID(as_uuid=True), nullable=False)
\ No newline at end of file
+ uuid = Column(UUID(as_uuid=True), nullable=False)
diff --git a/agenta-backend/agenta_backend/routers/app_router.py b/agenta-backend/agenta_backend/routers/app_router.py
index c0f6f02031..bed5fbf315 100644
--- a/agenta-backend/agenta_backend/routers/app_router.py
+++ b/agenta-backend/agenta_backend/routers/app_router.py
@@ -123,6 +123,7 @@ async def list_app_variants(
except Exception as e:
import traceback
+
traceback.print_exc()
raise HTTPException(status_code=500, detail=str(e))
@@ -493,8 +494,8 @@ async def create_app_and_variant_from_template(
app = await db_manager.fetch_app_by_name_and_parameters(
app_name,
request.state.user_id,
- payload.organization_id if isCloudEE() else None, # type: ignore
- payload.workspace_id if isCloudEE() else None, # type: ignore
+ payload.organization_id if isCloudEE() else None, # type: ignore
+ payload.workspace_id if isCloudEE() else None, # type: ignore
)
if app is not None:
raise Exception(
@@ -510,8 +511,8 @@ async def create_app_and_variant_from_template(
app = await db_manager.create_app_and_envs(
app_name,
request.state.user_id,
- payload.organization_id if isCloudEE() else None, # type: ignore
- payload.workspace_id if isCloudEE() else None, # type: ignore
+ payload.organization_id if isCloudEE() else None, # type: ignore
+ payload.workspace_id if isCloudEE() else None, # type: ignore
)
logger.debug(
@@ -531,10 +532,10 @@ async def create_app_and_variant_from_template(
app_variant_db = await app_manager.add_variant_based_on_image(
app=app,
variant_name="app.default",
- docker_id_or_template_uri=( # type: ignore
+ docker_id_or_template_uri=( # type: ignore
template_db.template_uri if isCloudEE() else template_db.digest
),
- tags=f"{image_name}" if not isCloudEE() else None, # type: ignore
+ tags=f"{image_name}" if not isCloudEE() else None, # type: ignore
base_name="app",
config_name="default",
is_template_image=True,
@@ -548,10 +549,10 @@ async def create_app_and_variant_from_template(
)
await db_manager.add_testset_to_app_variant(
app_id=str(app.id),
- org_id=payload.organization_id if isCloudEE() else None, # type: ignore
- workspace_id=payload.workspace_id if isCloudEE() else None, # type: ignore
- template_name=template_db.name, # type: ignore
- app_name=app.app_name, # type: ignore
+ org_id=payload.organization_id if isCloudEE() else None, # type: ignore
+ workspace_id=payload.workspace_id if isCloudEE() else None, # type: ignore
+ template_name=template_db.name, # type: ignore
+ app_name=app.app_name, # type: ignore
user_uid=request.state.user_id,
)
@@ -611,6 +612,7 @@ async def create_app_and_variant_from_template(
except Exception as e:
import traceback
+
traceback.print_exc()
logger.exception(f"Error: Exception caught - {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index f0cf1b8394..05e367f674 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -81,6 +81,7 @@ async def save_config(
raise
except Exception as e:
import traceback
+
traceback.print_exc()
logger.error(f"save_config exception ===> {e}")
raise HTTPException(status_code=500, detail=str(e)) from e
diff --git a/agenta-backend/agenta_backend/routers/container_router.py b/agenta-backend/agenta_backend/routers/container_router.py
index 648d81619e..b2415522e9 100644
--- a/agenta-backend/agenta_backend/routers/container_router.py
+++ b/agenta-backend/agenta_backend/routers/container_router.py
@@ -181,11 +181,11 @@ async def construct_app_container_url(
try:
if getattr(object_db, "deployment_id", None): # this is a base
deployment = await db_manager.get_deployment_by_objectid(
- str(object_db.deployment_id) # type: ignore
+ str(object_db.deployment_id) # type: ignore
)
elif getattr(object_db, "base_id", None): # this is a variant
deployment = await db_manager.get_deployment_by_objectid(
- str(object_db.base.deployment_id) # type: ignore
+ str(object_db.base.deployment_id) # type: ignore
)
else:
raise HTTPException(
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 86615bb8be..102d4183e1 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -261,14 +261,12 @@ async def create_testset(
"csvdata": csvdata.csvdata,
}
testset_instance = await db_manager.create_testset(
- app=app,
- user_uid=request.state.user_id,
- testset_data=testset_data
+ app=app, user_uid=request.state.user_id, testset_data=testset_data
)
if testset_instance is not None:
return TestSetSimpleResponse(
id=str(testset_instance.id),
- name=testset_instance.name, # type: ignore
+ name=testset_instance.name, # type: ignore
created_at=str(testset_instance.created_at),
)
except Exception as e:
@@ -318,8 +316,7 @@ async def update_testset(
"csvdata": csvdata.csvdata,
}
await db_manager.update_testset(
- testset_id=str(testset.id),
- values_to_update=testset_update
+ testset_id=str(testset.id), values_to_update=testset_update
)
return {
"status": "success",
@@ -367,7 +364,7 @@ async def get_testsets(
testsets = await db_manager.fetch_testsets_by_app_id(app_id=app_id)
return [
TestSetOutputResponse(
- id=str(testset.id), # type: ignore
+ id=str(testset.id), # type: ignore
name=testset.name,
created_at=str(testset.created_at),
)
diff --git a/agenta-backend/agenta_backend/routers/variants_router.py b/agenta-backend/agenta_backend/routers/variants_router.py
index e6e8298c9f..5d825ab4f7 100644
--- a/agenta-backend/agenta_backend/routers/variants_router.py
+++ b/agenta-backend/agenta_backend/routers/variants_router.py
@@ -148,6 +148,7 @@ async def remove_variant(
raise HTTPException(status_code=500, detail=detail)
except Exception as e:
import traceback
+
traceback.print_exc()
detail = f"Unexpected error while trying to remove the app variant: {str(e)}"
raise HTTPException(status_code=500, detail=detail)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index a96790a821..a5066e2c6e 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -93,7 +93,7 @@ async def start_variant(
"http://host.docker.internal" # unclear why this stopped working
)
# domain_name = "http://localhost"
- env_vars = {} if env_vars is None else env_vars # type: ignore
+ env_vars = {} if env_vars is None else env_vars # type: ignore
env_vars.update(
{
"AGENTA_BASE_ID": str(db_app_variant.base_id),
@@ -115,7 +115,7 @@ async def start_variant(
await db_manager.update_base(
str(db_app_variant.base_id),
- deployment_id=deployment.id, # type: ignore
+ deployment_id=deployment.id, # type: ignore
)
except Exception as e:
import traceback
@@ -128,7 +128,7 @@ async def start_variant(
f"Failed to start Docker container for app variant {db_app_variant.app.app_name}/{db_app_variant.variant_name} \n {str(e)}"
) from e
- return URI(uri=deployment.uri) # type: ignore
+ return URI(uri=deployment.uri) # type: ignore
async def update_variant_image(
@@ -179,7 +179,7 @@ async def update_variant_image(
async def terminate_and_remove_app_variant(
- app_variant_id: Optional[str] = None, app_variant_db: Optional[AppVariantDB] =None
+ app_variant_id: Optional[str] = None, app_variant_db: Optional[AppVariantDB] = None
) -> None:
"""
Removes app variant from the database. If it's the last one using an image, performs additional operations:
@@ -207,7 +207,7 @@ async def terminate_and_remove_app_variant(
app_variant_db = await db_manager.fetch_app_variant_by_id(app_variant_id)
logger.debug(f"Fetched app variant {app_variant_db}")
- app_id = str(app_variant_db.app_id) # type: ignore
+ app_id = str(app_variant_db.app_id) # type: ignore
if app_variant_db is None:
error_msg = f"Failed to delete app variant {app_variant_id}: Not found in DB."
logger.error(error_msg)
@@ -219,14 +219,16 @@ async def terminate_and_remove_app_variant(
)
if is_last_variant_for_image:
# remove variant + terminate and rm containers + remove base
- base_db = await db_manager.fetch_base_by_id(base_id=str(app_variant_db.base_id))
+ base_db = await db_manager.fetch_base_by_id(
+ base_id=str(app_variant_db.base_id)
+ )
if not base_db:
- raise
+ raise
image = base_db.image
logger.debug("is_last_variant_for_image {image}")
- if not isinstance(base_db.image_id, uuid.UUID): # type: ignore
+ if not isinstance(base_db.image_id, uuid.UUID): # type: ignore
logger.debug(
f"Image associated with app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name} not found. Skipping deletion."
)
@@ -244,15 +246,13 @@ async def terminate_and_remove_app_variant(
try:
await deployment_manager.stop_and_delete_service(deployment)
except RuntimeError as e:
- logger.error(
- f"Failed to stop and delete service {deployment} {e}"
- )
+ logger.error(f"Failed to stop and delete service {deployment} {e}")
# If image deletable is True, remove docker image and image db
if image.deletable:
try:
if isCloudEE():
- await deployment_manager.remove_repository(image.tags) # type: ignore
+ await deployment_manager.remove_repository(image.tags) # type: ignore
else:
await deployment_manager.remove_image(image)
except RuntimeError as e:
@@ -275,7 +275,9 @@ async def terminate_and_remove_app_variant(
app_variants = await db_manager.list_app_variants(app_id)
logger.debug(f"Count of app variants available: {len(app_variants)}")
- if len(app_variants) == 0: # remove app related resources if the length of the app variants hit 0
+ if (
+ len(app_variants) == 0
+ ): # remove app related resources if the length of the app variants hit 0
logger.debug("remove_app_related_resources")
await remove_app_related_resources(app_id)
except Exception as e:
@@ -296,9 +298,7 @@ async def remove_app_related_resources(app_id: str):
"""
try:
# Delete associated environments
- environments = await db_manager.list_environments(
- app_id
- )
+ environments = await db_manager.list_environments(app_id)
for environment_db in environments:
await db_manager.remove_environment(environment_db)
logger.info(f"Successfully deleted environment {environment_db.name}.")
@@ -323,7 +323,9 @@ async def remove_app_related_resources(app_id: str):
evaluators_configs = await db_manager.fetch_evaluators_configs(app_id)
for evaluator_config_db in evaluators_configs:
await db_manager.delete_evaluator_config(str(evaluator_config_db.id))
- logger.info(f"Successfully deleted evaluator config {str(evaluator_config_db.id)}")
+ logger.info(
+ f"Successfully deleted evaluator config {str(evaluator_config_db.id)}"
+ )
await db_manager.remove_app_by_id(app_id)
logger.info(f"Successfully remove app object {app_id}.")
@@ -448,7 +450,7 @@ async def add_variant_based_on_image(
# Check if app variant already exists
logger.debug("Step 2: Checking if app variant already exists")
variants = await db_manager.list_app_variants_for_app_id(app_id=str(app.id))
- already_exists = any(av for av in variants if av.variant_name == variant_name) # type: ignore
+ already_exists = any(av for av in variants if av.variant_name == variant_name) # type: ignore
if already_exists:
logger.error("App variant with the same name already exists")
raise ValueError("App variant with the same name already exists")
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 03e81c5af6..79b6d826c0 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -203,12 +203,9 @@ async def fetch_app_variant_by_id(
async with db_engine.get_session() as session:
result = await session.execute(
select(AppVariantDB)
- .options(
- joinedload(AppVariantDB.base),
- joinedload(AppVariantDB.app)
- )
+ .options(joinedload(AppVariantDB.base), joinedload(AppVariantDB.app))
.filter_by(id=uuid.UUID(app_variant_id))
- )
+ )
app_variant = result.scalars().one_or_none()
return app_variant
@@ -303,8 +300,7 @@ async def fetch_base_by_id(base_id: str) -> Optional[VariantBaseDB]:
result = await session.execute(
select(VariantBaseDB)
.options(
- joinedload(VariantBaseDB.image),
- joinedload(VariantBaseDB.deployment)
+ joinedload(VariantBaseDB.image), joinedload(VariantBaseDB.deployment)
)
.filter_by(id=uuid.UUID(base_id))
)
@@ -450,7 +446,15 @@ async def create_new_app_variant(
session.add(variant)
await session.commit()
- await session.refresh(variant, attribute_names=["app", "image", "user", "base", ]) # Ensures the app, image, user and base relationship are loaded
+ await session.refresh(
+ variant,
+ attribute_names=[
+ "app",
+ "image",
+ "user",
+ "base",
+ ],
+ ) # Ensures the app, image, user and base relationship are loaded
variant_revision = AppVariantRevisionsDB(
variant_id=variant.id,
@@ -1144,7 +1148,7 @@ async def remove_deployment(deployment_db: DeploymentDB):
async def list_deployments(app_id: str):
"""Lists all the deployments that belongs to an app.
-
+
Args:
app_id (str): The ID of the app
@@ -1295,8 +1299,7 @@ async def fetch_app_variant_revision_by_id(
async with db_engine.get_session() as session:
result = await session.execute(
- select(AppVariantRevisionsDB)
- .filter_by(id=uuid.UUID(variant_revision_id))
+ select(AppVariantRevisionsDB).filter_by(id=uuid.UUID(variant_revision_id))
)
app_revision = result.scalars().one_or_none()
return app_revision
@@ -1550,7 +1553,8 @@ async def fetch_app_variant_revision(app_variant: str, revision_number: int):
async def list_environments_by_variant(
- session: AsyncSession, app_variant: AppVariantDB,
+ session: AsyncSession,
+ app_variant: AppVariantDB,
):
"""
Returns a list of environments for a given app variant.
@@ -1757,10 +1761,7 @@ async def get_app_variant_instance_by_id(variant_id: str) -> AppVariantDB:
async with db_engine.get_session() as session:
result = await session.execute(
select(AppVariantDB)
- .options(
- joinedload(AppVariantDB.base),
- joinedload(AppVariantDB.app)
- )
+ .options(joinedload(AppVariantDB.base), joinedload(AppVariantDB.app))
.filter_by(id=uuid.UUID(variant_id))
)
app_variant_db = result.scalars().one_or_none()
@@ -1812,18 +1813,14 @@ async def create_testset(app: AppDB, user_uid: str, testset_data: Dict[str, Any]
app (AppDB): The app object
user_uid (str): The user uID
testset_data (dict): The data of the testset to create with
-
+
Returns:
returns the newly created TestsetDB
"""
user = await get_user(user_uid=user_uid)
async with db_engine.get_session() as session:
- testset_db = TestSetDB(
- **testset_data,
- app_id=app.id,
- user_id=user.id
- )
+ testset_db = TestSetDB(**testset_data, app_id=app.id, user_id=user.id)
if isCloudEE():
testset_db.organization_id = app.organization_id
testset_db.workspace_id = app.workspace_id
@@ -1834,6 +1831,7 @@ async def create_testset(app: AppDB, user_uid: str, testset_data: Dict[str, Any]
return testset_db
+
async def update_testset(testset_id: str, values_to_update: dict) -> None:
"""Update a testset.
@@ -1849,11 +1847,7 @@ async def update_testset(testset_id: str, values_to_update: dict) -> None:
testset = result.scalars().one_or_none()
# Validate keys in values_to_update and update attributes
- valid_keys = [
- key
- for key in values_to_update.keys()
- if hasattr(testset, key)
- ]
+ valid_keys = [key for key in values_to_update.keys() if hasattr(testset, key)]
for key in valid_keys:
setattr(testset, key, values_to_update[key])
diff --git a/agenta-backend/agenta_backend/services/evaluator_manager.py b/agenta-backend/agenta_backend/services/evaluator_manager.py
index 5de263c4f5..472c8c4a1b 100644
--- a/agenta-backend/agenta_backend/services/evaluator_manager.py
+++ b/agenta-backend/agenta_backend/services/evaluator_manager.py
@@ -159,7 +159,9 @@ async def create_ready_to_use_evaluators(app: AppDB):
default_value != ""
), f"Default value for ground truth key '{setting_name}' in Evaluator is empty"
- assert hasattr(evaluator, "name") and hasattr(evaluator, "key"), f"'name' and 'key' does not exist in the evaluator: {evaluator}"
+ assert hasattr(evaluator, "name") and hasattr(
+ evaluator, "key"
+ ), f"'name' and 'key' does not exist in the evaluator: {evaluator}"
await db_manager.create_evaluator_config(
app=app,
user_id=str(app.user_id),
From 469754c1e0b3d402021bc021868cf3633962559c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 13 Jun 2024 03:15:33 +0000
Subject: [PATCH 051/268] build(deps): bump pydantic from 2.7.1 to 2.7.4 in
/agenta-cli
Bumps [pydantic](https://github.com/pydantic/pydantic) from 2.7.1 to 2.7.4.
- [Release notes](https://github.com/pydantic/pydantic/releases)
- [Changelog](https://github.com/pydantic/pydantic/blob/main/HISTORY.md)
- [Commits](https://github.com/pydantic/pydantic/compare/v2.7.1...v2.7.4)
---
updated-dependencies:
- dependency-name: pydantic
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
agenta-cli/poetry.lock | 253 +++++++++++++++--------------------------
1 file changed, 93 insertions(+), 160 deletions(-)
diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock
index 599be327fb..4b3675c6f7 100644
--- a/agenta-cli/poetry.lock
+++ b/agenta-cli/poetry.lock
@@ -15,7 +15,6 @@ files = [
name = "anyio"
version = "4.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -38,7 +37,6 @@ trio = ["trio (>=0.23)"]
name = "asttokens"
version = "2.4.1"
description = "Annotate AST trees with source code positions"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -57,7 +55,6 @@ test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"]
name = "backoff"
version = "2.2.1"
description = "Function decoration for backoff and retry"
-category = "main"
optional = false
python-versions = ">=3.7,<4.0"
files = [
@@ -69,7 +66,6 @@ files = [
name = "cachetools"
version = "5.3.3"
description = "Extensible memoizing collections and decorators"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -81,7 +77,6 @@ files = [
name = "certifi"
version = "2024.2.2"
description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -93,7 +88,6 @@ files = [
name = "charset-normalizer"
version = "3.3.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -193,7 +187,6 @@ files = [
name = "click"
version = "8.1.7"
description = "Composable command line interface toolkit"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -208,7 +201,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""}
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
@@ -220,7 +212,6 @@ files = [
name = "decorator"
version = "5.1.1"
description = "Decorators for Humans"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -232,7 +223,6 @@ files = [
name = "dnspython"
version = "2.6.1"
description = "DNS toolkit"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -253,7 +243,6 @@ wmi = ["wmi (>=1.5.1)"]
name = "docker"
version = "7.1.0"
description = "A Python library for the Docker Engine API."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -276,7 +265,6 @@ websockets = ["websocket-client (>=1.3.0)"]
name = "email-validator"
version = "2.1.1"
description = "A robust email address syntax and deliverability validation library."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -292,7 +280,6 @@ idna = ">=2.0.0"
name = "exceptiongroup"
version = "1.2.1"
description = "Backport of PEP 654 (exception groups)"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -307,7 +294,6 @@ test = ["pytest (>=6)"]
name = "executing"
version = "2.0.1"
description = "Get the currently executing AST node of a frame, and other information"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -322,7 +308,6 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth
name = "fastapi"
version = "0.111.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -350,7 +335,6 @@ all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)"
name = "fastapi-cli"
version = "0.0.4"
description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 🚀"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -368,7 +352,6 @@ standard = ["fastapi", "uvicorn[standard] (>=0.15.0)"]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -380,7 +363,6 @@ files = [
name = "httpcore"
version = "1.0.5"
description = "A minimal low-level HTTP client."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -395,14 +377,13 @@ h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (>=1.0.0,<2.0.0)"]
+socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.26.0)"]
[[package]]
name = "httptools"
version = "0.6.1"
description = "A collection of framework independent HTTP protocol utils."
-category = "main"
optional = false
python-versions = ">=3.8.0"
files = [
@@ -451,7 +432,6 @@ test = ["Cython (>=0.29.24,<0.30.0)"]
name = "httpx"
version = "0.27.0"
description = "The next generation HTTP client."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -462,21 +442,20 @@ files = [
[package.dependencies]
anyio = "*"
certifi = "*"
-httpcore = ">=1.0.0,<2.0.0"
+httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
-cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (>=1.0.0,<2.0.0)"]
+socks = ["socksio (==1.*)"]
[[package]]
name = "idna"
version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -488,7 +467,6 @@ files = [
name = "importlib-metadata"
version = "7.1.0"
description = "Read metadata from Python packages"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -508,7 +486,6 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)",
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -520,7 +497,6 @@ files = [
name = "ipdb"
version = "0.13.13"
description = "IPython-enabled pdb"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
@@ -537,7 +513,6 @@ tomli = {version = "*", markers = "python_version > \"3.6\" and python_version <
name = "ipython"
version = "8.18.0"
description = "IPython: Productive Interactive Computing"
-category = "main"
optional = false
python-versions = ">=3.9"
files = [
@@ -575,7 +550,6 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pa
name = "jedi"
version = "0.19.1"
description = "An autocompletion tool for Python that can be used for text editors."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -595,7 +569,6 @@ testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
name = "jinja2"
version = "3.1.4"
description = "A very fast and expressive template engine."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -613,7 +586,6 @@ i18n = ["Babel (>=2.7)"]
name = "markdown-it-py"
version = "3.0.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -638,7 +610,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
name = "markupsafe"
version = "2.1.5"
description = "Safely add untrusted strings to HTML/XML markup."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -708,7 +679,6 @@ files = [
name = "matplotlib-inline"
version = "0.1.7"
description = "Inline Matplotlib backend for Jupyter"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -723,7 +693,6 @@ traitlets = "*"
name = "mdurl"
version = "0.1.2"
description = "Markdown URL utilities"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -735,7 +704,6 @@ files = [
name = "monotonic"
version = "1.6"
description = "An implementation of time.monotonic() for Python 2 & < 3.3"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -747,7 +715,6 @@ files = [
name = "orjson"
version = "3.10.3"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -803,7 +770,6 @@ files = [
name = "packaging"
version = "24.0"
description = "Core utilities for Python packages"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -815,7 +781,6 @@ files = [
name = "parso"
version = "0.8.4"
description = "A Python Parser"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -831,7 +796,6 @@ testing = ["docopt", "pytest"]
name = "pexpect"
version = "4.9.0"
description = "Pexpect allows easy control of interactive console applications."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -846,7 +810,6 @@ ptyprocess = ">=0.5"
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -862,7 +825,6 @@ testing = ["pytest", "pytest-benchmark"]
name = "posthog"
version = "3.5.0"
description = "Integrate PostHog into any python application."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -886,7 +848,6 @@ test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint"
name = "prompt-toolkit"
version = "3.0.36"
description = "Library for building powerful interactive command lines in Python"
-category = "main"
optional = false
python-versions = ">=3.6.2"
files = [
@@ -901,7 +862,6 @@ wcwidth = "*"
name = "ptyprocess"
version = "0.7.0"
description = "Run a subprocess in a pseudo terminal"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -913,7 +873,6 @@ files = [
name = "pure-eval"
version = "0.2.2"
description = "Safely evaluate AST nodes without side effects"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -926,18 +885,18 @@ tests = ["pytest"]
[[package]]
name = "pydantic"
-version = "2.7.1"
+version = "2.7.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"},
- {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"},
+ {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
+ {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
]
[package.dependencies]
annotated-types = ">=0.4.0"
-pydantic-core = "2.18.2"
+pydantic-core = "2.18.4"
typing-extensions = ">=4.6.1"
[package.extras]
@@ -945,90 +904,90 @@ email = ["email-validator (>=2.0.0)"]
[[package]]
name = "pydantic-core"
-version = "2.18.2"
+version = "2.18.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"},
- {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"},
- {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"},
- {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"},
- {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"},
- {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"},
- {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"},
- {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"},
- {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"},
- {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"},
- {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"},
- {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"},
- {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"},
- {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"},
- {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"},
- {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"},
- {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"},
- {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"},
- {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"},
- {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"},
- {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"},
- {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"},
- {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"},
- {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"},
- {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"},
- {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"},
- {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"},
- {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"},
- {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"},
- {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"},
- {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"},
- {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"},
- {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"},
- {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"},
- {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"},
- {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"},
- {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"},
- {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"},
- {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"},
- {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"},
- {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"},
- {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"},
- {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"},
- {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"},
- {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"},
- {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"},
- {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"},
- {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"},
- {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"},
- {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"},
- {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"},
- {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"},
- {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"},
- {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"},
- {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"},
- {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"},
- {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"},
- {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"},
- {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"},
- {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"},
- {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"},
- {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"},
- {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"},
- {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"},
- {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"},
- {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"},
- {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"},
- {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"},
- {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"},
- {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"},
- {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"},
- {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"},
- {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"},
- {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"},
- {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"},
- {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"},
- {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"},
- {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"},
- {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
+ {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
+ {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
+ {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
+ {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
+ {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
+ {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
+ {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
+ {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
+ {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
+ {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
+ {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
+ {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
+ {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
]
[package.dependencies]
@@ -1038,7 +997,6 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
name = "pygments"
version = "2.18.0"
description = "Pygments is a syntax highlighting package written in Python."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1053,7 +1011,6 @@ windows-terminal = ["colorama (>=0.4.6)"]
name = "pymongo"
version = "4.7.3"
description = "Python driver for MongoDB "
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1135,7 +1092,6 @@ zstd = ["zstandard"]
name = "pytest"
version = "8.2.2"
description = "pytest: simple powerful testing with Python"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -1158,7 +1114,6 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
@@ -1173,7 +1128,6 @@ six = ">=1.5"
name = "python-dotenv"
version = "1.0.1"
description = "Read key-value pairs from a .env file and set them as environment variables"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1188,7 +1142,6 @@ cli = ["click (>=5.0)"]
name = "python-multipart"
version = "0.0.9"
description = "A streaming multipart parser for Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1203,7 +1156,6 @@ dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatc
name = "pywin32"
version = "306"
description = "Python for Window Extensions"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1227,7 +1179,6 @@ files = [
name = "pyyaml"
version = "6.0.1"
description = "YAML parser and emitter for Python"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -1236,6 +1187,7 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -1243,6 +1195,7 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
@@ -1268,6 +1221,7 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -1275,6 +1229,7 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@@ -1284,7 +1239,6 @@ files = [
name = "questionary"
version = "2.0.1"
description = "Python library to build pretty command line user prompts ⭐️"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1299,7 +1253,6 @@ prompt_toolkit = ">=2.0,<=3.0.36"
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1321,7 +1274,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
name = "rich"
version = "13.7.1"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
-category = "main"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -1340,7 +1292,6 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
name = "setuptools"
version = "70.0.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -1356,7 +1307,6 @@ testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metad
name = "shellingham"
version = "1.5.4"
description = "Tool to Detect Surrounding Shell"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1368,7 +1318,6 @@ files = [
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -1380,7 +1329,6 @@ files = [
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1392,7 +1340,6 @@ files = [
name = "stack-data"
version = "0.6.3"
description = "Extract data from python stack frames and tracebacks for informative displays"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1412,7 +1359,6 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
name = "starlette"
version = "0.37.2"
description = "The little ASGI library that shines."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1431,7 +1377,6 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7
name = "toml"
version = "0.10.2"
description = "Python Library for Tom's Obvious, Minimal Language"
-category = "main"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -1443,7 +1388,6 @@ files = [
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1455,7 +1399,6 @@ files = [
name = "traitlets"
version = "5.14.3"
description = "Traitlets Python configuration system"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1471,7 +1414,6 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,
name = "typer"
version = "0.12.3"
description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1489,7 +1431,6 @@ typing-extensions = ">=3.7.4.3"
name = "typing-extensions"
version = "4.12.0"
description = "Backported and Experimental Type Hints for Python 3.8+"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1501,7 +1442,6 @@ files = [
name = "ujson"
version = "5.10.0"
description = "Ultra fast JSON encoder and decoder for Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1589,7 +1529,6 @@ files = [
name = "urllib3"
version = "2.2.1"
description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1607,7 +1546,6 @@ zstd = ["zstandard (>=0.18.0)"]
name = "uvicorn"
version = "0.30.0"
description = "The lightning-fast ASGI server."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1623,7 +1561,7 @@ httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standar
python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""}
typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
-uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
+uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""}
@@ -1634,7 +1572,6 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)",
name = "uvloop"
version = "0.19.0"
description = "Fast implementation of asyncio event loop on top of libuv"
-category = "main"
optional = false
python-versions = ">=3.8.0"
files = [
@@ -1679,7 +1616,6 @@ test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)"
name = "watchfiles"
version = "0.22.0"
description = "Simple, modern and high performance file watching and code reload in python."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1767,7 +1703,6 @@ anyio = ">=3.0.0"
name = "wcwidth"
version = "0.2.13"
description = "Measures the displayed width of unicode strings in a terminal"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1779,7 +1714,6 @@ files = [
name = "websockets"
version = "12.0"
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1861,7 +1795,6 @@ files = [
name = "zipp"
version = "3.19.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
From d223acdc124a0860e049043d1ebcfcc3ab4ca2e2 Mon Sep 17 00:00:00 2001
From: Abram
Date: Thu, 13 Jun 2024 19:36:46 +0100
Subject: [PATCH 052/268] refactor (backend): added cascade to foreignkey and
improved relationships
---
.../agenta_backend/models/db_models.py | 163 +++++++++---------
1 file changed, 85 insertions(+), 78 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index af2bea84a2..956da67f94 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -1,6 +1,8 @@
from datetime import datetime, timezone
-from pydantic import BaseModel, Field
from typing import Any, Dict, List, Optional
+
+import uuid_utils.compat as uuid
+from pydantic import BaseModel, Field
from sqlalchemy import (
Column,
String,
@@ -12,11 +14,12 @@
Enum,
)
from sqlalchemy.orm import relationship, declarative_base
-import uuid_utils.compat as uuid
from sqlalchemy.dialects.postgresql import UUID, JSONB
from agenta_backend.models.shared_models import TemplateType
-from agenta_backend.models.base import Base
+
+
+Base = declarative_base()
class UserDB(Base):
@@ -52,9 +55,9 @@ class ImageDB(Base):
nullable=False,
)
type = Column(String, default="image")
- template_uri = Column(String)
- docker_id = Column(String, index=True)
- tags = Column(String)
+ template_uri = Column(String, nullable=True)
+ docker_id = Column(String, nullable=True, index=True)
+ tags = Column(String, nullable=True)
deletable = Column(Boolean, default=True)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
user = relationship("UserDB")
@@ -78,7 +81,6 @@ class AppDB(Base):
)
app_name = Column(String)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -86,6 +88,14 @@ class AppDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ variant = relationship("AppVariantDB", cascade="all, delete-orphan", backref="app")
+ evaluator_config = relationship("EvaluatorConfigDB", cascade="all, delete-orphan", backref="app")
+ testset = relationship("TestSetDB", cascade="all, delete-orphan", backref="app")
+ base = relationship("DeploymentDB", cascade="all, delete-orphan", backref="app")
+ deployment = relationship("VariantBaseDB", cascade="all, delete-orphan", backref="app")
+ evaluation = relationship("EvaluationDB", cascade="all, delete-orphan", backref="app")
+
class DeploymentDB(Base):
__tablename__ = "deployments"
@@ -98,9 +108,7 @@ class DeploymentDB(Base):
nullable=False,
)
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
container_name = Column(String)
container_id = Column(String)
uri = Column(String)
@@ -112,6 +120,8 @@ class DeploymentDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+
class VariantBaseDB(Base):
__tablename__ = "bases"
@@ -124,16 +134,10 @@ class VariantBaseDB(Base):
nullable=False,
)
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
base_name = Column(String)
image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
- image = relationship("ImageDB")
-
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
- deployment = relationship("DeploymentDB")
-
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -141,6 +145,11 @@ class VariantBaseDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ # app = relationship("AppDB", back_populates="base")
+ user = relationship("UserDB")
+ image = relationship("ImageDB")
+ deployment = relationship("DeploymentDB")
+
class AppVariantDB(Base):
__tablename__ = "app_variants"
@@ -152,19 +161,14 @@ class AppVariantDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
variant_name = Column(String)
revision = Column(Integer)
image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
- image = relationship("ImageDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB", foreign_keys=[user_id])
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
base_name = Column(String)
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
- base = relationship("VariantBaseDB")
config_name = Column(String, nullable=False)
config_parameters = Column(JSONB, nullable=False, default=dict)
created_at = Column(
@@ -174,6 +178,12 @@ class AppVariantDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ image = relationship("ImageDB")
+ user = relationship("UserDB", foreign_keys=[user_id])
+ modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
+ base = relationship("VariantBaseDB")
+ revisions = relationship("AppVariantRevisionsDB", cascade="all, delete-orphan", backref="variant")
+
class AppVariantRevisionsDB(Base):
__tablename__ = "app_variant_revisions"
@@ -185,13 +195,10 @@ class AppVariantRevisionsDB(Base):
unique=True,
nullable=False,
)
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
- variant = relationship("AppVariantDB")
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="CASCADE"))
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- modified_by = relationship("UserDB")
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
- base = relationship("VariantBaseDB")
config_name = Column(String, nullable=False)
config_parameters = Column(JSONB, nullable=False, default=dict)
created_at = Column(
@@ -201,6 +208,9 @@ class AppVariantRevisionsDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ modified_by = relationship("UserDB")
+ base = relationship("VariantBaseDB")
+
class AppEnvironmentDB(Base):
__tablename__ = "environments"
@@ -212,27 +222,25 @@ class AppEnvironmentDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
name = Column(String)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
revision = Column(Integer)
-
- deployed_app_variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
- deployed_app_variant = relationship("AppVariantDB")
-
+ deployed_app_variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
deployed_app_variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ UUID(as_uuid=True),
+ ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
- deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
-
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
- deployment = relationship("DeploymentDB")
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ environment_revisions = relationship("AppEnvironmentRevisionDB", cascade="all, delete-orphan", backref="environment")
+ deployed_app_variant = relationship("AppVariantDB")
+ deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
+
class AppEnvironmentRevisionDB(Base):
__tablename__ = "environments_revisions"
@@ -244,24 +252,19 @@ class AppEnvironmentRevisionDB(Base):
unique=True,
nullable=False,
)
- environment_id = Column(UUID(as_uuid=True), ForeignKey("environments.id"))
- environment = relationship("AppEnvironmentDB")
+ environment_id = Column(UUID(as_uuid=True), ForeignKey("environments.id", ondelete="CASCADE"))
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- modified_by = relationship("UserDB")
-
deployed_app_variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
- deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
-
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
- deployment = relationship("DeploymentDB")
-
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ modified_by = relationship("UserDB")
+
class TemplateDB(Base):
__tablename__ = "templates"
@@ -298,11 +301,9 @@ class TestSetDB(Base):
nullable=False,
)
name = Column(String)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
csvdata = Column(JSONB)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -310,6 +311,8 @@ class TestSetDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+
class EvaluatorConfigDB(Base):
__tablename__ = "evaluators_configs"
@@ -322,10 +325,8 @@ class EvaluatorConfigDB(Base):
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
name = Column(String)
evaluator_key = Column(String)
settings_values = Column(JSONB, default=dict)
@@ -336,6 +337,8 @@ class EvaluatorConfigDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+
class HumanEvaluationDB(Base):
__tablename__ = "human_evaluations"
@@ -353,10 +356,10 @@ class HumanEvaluationDB(Base):
user = relationship("UserDB")
status = Column(String)
evaluation_type = Column(String)
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
variant = relationship("AppVariantDB")
variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
variant_revision = relationship("AppVariantRevisionsDB")
testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
@@ -409,13 +412,14 @@ class EvaluationAggregatedResultDB(Base):
nullable=False,
)
evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
- evaluation = relationship("EvaluationDB", back_populates="aggregated_results")
evaluator_config_id = Column(
UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
)
- evaluator_config = relationship("EvaluatorConfigDB")
result = Column(JSONB) # Result
+ evaluation = relationship("EvaluationDB", back_populates="aggregated_results")
+ evaluator_config = relationship("EvaluatorConfigDB")
+
class EvaluationScenarioResultDB(Base):
__tablename__ = "evaluation_scenario_results"
@@ -448,21 +452,13 @@ class EvaluationDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
status = Column(JSONB) # Result
- testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
- testset = relationship("TestSetDB")
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
- variant = relationship("AppVariantDB")
+ testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
- )
- variant_revision = relationship("AppVariantRevisionsDB")
- aggregated_results = relationship(
- "EvaluationAggregatedResultDB", back_populates="evaluation"
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
average_cost = Column(JSONB) # Result
total_cost = Column(JSONB) # Result
@@ -474,6 +470,17 @@ class EvaluationDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ testset = relationship("TestSetDB")
+ variant = relationship("AppVariantDB")
+ variant_revision = relationship("AppVariantRevisionsDB")
+ aggregated_results = relationship(
+ "EvaluationAggregatedResultDB", back_populates="evaluation"
+ )
+ evaluation_scenarios = relationship(
+ "EvaluationScenarioDB", cascade="all, delete-orphan", backref="evaluation"
+ )
+
class EvaluationEvaluatorConfigDB(Base):
__tablename__ = "evaluation_evaluator_configs"
@@ -497,19 +504,13 @@ class EvaluationScenarioDB(Base):
nullable=False,
)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
- evaluation = relationship("EvaluationDB")
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
- variant = relationship("AppVariantDB")
+ evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
inputs = Column(JSONB) # List of EvaluationScenarioInput
outputs = Column(JSONB) # List of EvaluationScenarioOutput
correct_answers = Column(JSONB) # List of CorrectAnswer
is_pinned = Column(Boolean)
note = Column(String)
- results = relationship(
- "EvaluationScenarioResultDB", back_populates="evaluation_scenario"
- )
latency = Column(Integer)
cost = Column(Integer)
created_at = Column(
@@ -519,6 +520,12 @@ class EvaluationScenarioDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ variant = relationship("AppVariantDB")
+ results = relationship(
+ "EvaluationScenarioResultDB", back_populates="evaluation_scenario"
+ )
+
class IDsMappingDB(Base):
__tablename__ = "ids_mapping"
From 5dacbfd79443d3366422dc70a241ee99cab9bb2e Mon Sep 17 00:00:00 2001
From: Abram
Date: Thu, 13 Jun 2024 19:42:05 +0100
Subject: [PATCH 053/268] minor refactor (backend): remove redundant imports
---
agenta-backend/agenta_backend/models/db_models.py | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 956da67f94..cf820be808 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -2,7 +2,6 @@
from typing import Any, Dict, List, Optional
import uuid_utils.compat as uuid
-from pydantic import BaseModel, Field
from sqlalchemy import (
Column,
String,
@@ -13,15 +12,13 @@
Float,
Enum,
)
-from sqlalchemy.orm import relationship, declarative_base
+from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import UUID, JSONB
+from agenta_backend.models.base import Base
from agenta_backend.models.shared_models import TemplateType
-Base = declarative_base()
-
-
class UserDB(Base):
__tablename__ = "users"
From 457ad5e3f59023f2bc91c3734f5124fa94f6a018 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 00:46:04 +0100
Subject: [PATCH 054/268] refactor (backend): added cascade to fk relationships
---
.../agenta_backend/models/db_models.py | 148 +++++++++---------
1 file changed, 76 insertions(+), 72 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 2532cc6bf7..eb9954cdc3 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -81,7 +81,6 @@ class AppDB(Base):
)
app_name = Column(String)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -89,6 +88,14 @@ class AppDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ variant = relationship("AppVariantDB", cascade="all, delete-orphan", backref="app")
+ evaluator_config = relationship("EvaluatorConfigDB", cascade="all, delete-orphan", backref="app")
+ testset = relationship("TestSetDB", cascade="all, delete-orphan", backref="app")
+ base = relationship("DeploymentDB", cascade="all, delete-orphan", backref="app")
+ deployment = relationship("VariantBaseDB", cascade="all, delete-orphan", backref="app")
+ evaluation = relationship("EvaluationDB", cascade="all, delete-orphan", backref="app")
+
class DeploymentDB(Base):
__tablename__ = "deployments"
@@ -101,9 +108,7 @@ class DeploymentDB(Base):
nullable=False,
)
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
container_name = Column(String)
container_id = Column(String)
uri = Column(String)
@@ -115,6 +120,8 @@ class DeploymentDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+
class VariantBaseDB(Base):
__tablename__ = "bases"
@@ -127,16 +134,10 @@ class VariantBaseDB(Base):
nullable=False,
)
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
base_name = Column(String)
image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
- image = relationship("ImageDB")
-
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
- deployment = relationship("DeploymentDB")
-
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -144,6 +145,11 @@ class VariantBaseDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ # app = relationship("AppDB", back_populates="base")
+ user = relationship("UserDB")
+ image = relationship("ImageDB")
+ deployment = relationship("DeploymentDB")
+
class AppVariantDB(Base):
__tablename__ = "app_variants"
@@ -155,19 +161,14 @@ class AppVariantDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
variant_name = Column(String)
revision = Column(Integer)
image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
- image = relationship("ImageDB")
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB", foreign_keys=[user_id])
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
base_name = Column(String)
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
- base = relationship("VariantBaseDB")
config_name = Column(String, nullable=False)
config_parameters = Column(JSONB, nullable=False, default=dict)
created_at = Column(
@@ -177,6 +178,12 @@ class AppVariantDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ image = relationship("ImageDB")
+ user = relationship("UserDB", foreign_keys=[user_id])
+ modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
+ base = relationship("VariantBaseDB")
+ revisions = relationship("AppVariantRevisionsDB", cascade="all, delete-orphan", backref="variant")
+
class AppVariantRevisionsDB(Base):
__tablename__ = "app_variant_revisions"
@@ -188,13 +195,10 @@ class AppVariantRevisionsDB(Base):
unique=True,
nullable=False,
)
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
- variant = relationship("AppVariantDB")
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="CASCADE"))
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- modified_by = relationship("UserDB")
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
- base = relationship("VariantBaseDB")
config_name = Column(String, nullable=False)
config_parameters = Column(JSONB, nullable=False, default=dict)
created_at = Column(
@@ -204,6 +208,9 @@ class AppVariantRevisionsDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ modified_by = relationship("UserDB")
+ base = relationship("VariantBaseDB")
+
class AppEnvironmentDB(Base):
__tablename__ = "environments"
@@ -215,27 +222,25 @@ class AppEnvironmentDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
name = Column(String)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
revision = Column(Integer)
-
- deployed_app_variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
- deployed_app_variant = relationship("AppVariantDB")
-
+ deployed_app_variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
deployed_app_variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ UUID(as_uuid=True),
+ ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
- deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
-
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
- deployment = relationship("DeploymentDB")
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ environment_revisions = relationship("AppEnvironmentRevisionDB", cascade="all, delete-orphan", backref="environment")
+ deployed_app_variant = relationship("AppVariantDB")
+ deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
+
class AppEnvironmentRevisionDB(Base):
__tablename__ = "environments_revisions"
@@ -247,24 +252,19 @@ class AppEnvironmentRevisionDB(Base):
unique=True,
nullable=False,
)
- environment_id = Column(UUID(as_uuid=True), ForeignKey("environments.id"))
- environment = relationship("AppEnvironmentDB")
+ environment_id = Column(UUID(as_uuid=True), ForeignKey("environments.id", ondelete="CASCADE"))
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- modified_by = relationship("UserDB")
-
deployed_app_variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
- deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
-
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id"))
- deployment = relationship("DeploymentDB")
-
+ deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ modified_by = relationship("UserDB")
+
class TemplateDB(Base):
__tablename__ = "templates"
@@ -301,11 +301,9 @@ class TestSetDB(Base):
nullable=False,
)
name = Column(String)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
csvdata = Column(JSONB)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -313,6 +311,8 @@ class TestSetDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+
class EvaluatorConfigDB(Base):
__tablename__ = "evaluators_configs"
@@ -325,10 +325,8 @@ class EvaluatorConfigDB(Base):
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
name = Column(String)
evaluator_key = Column(String)
settings_values = Column(JSONB, default=dict)
@@ -339,6 +337,8 @@ class EvaluatorConfigDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+
class HumanEvaluationDB(Base):
__tablename__ = "human_evaluations"
@@ -356,10 +356,10 @@ class HumanEvaluationDB(Base):
user = relationship("UserDB")
status = Column(String)
evaluation_type = Column(String)
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
variant = relationship("AppVariantDB")
variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
variant_revision = relationship("AppVariantRevisionsDB")
testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
@@ -412,13 +412,14 @@ class EvaluationAggregatedResultDB(Base):
nullable=False,
)
evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
- evaluation = relationship("EvaluationDB", back_populates="aggregated_results")
evaluator_config_id = Column(
UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
)
- evaluator_config = relationship("EvaluatorConfigDB")
result = Column(JSONB) # Result
+ evaluation = relationship("EvaluationDB", back_populates="aggregated_results")
+ evaluator_config = relationship("EvaluatorConfigDB", backref="evaluator_config")
+
class EvaluationScenarioResultDB(Base):
__tablename__ = "evaluation_scenario_results"
@@ -451,21 +452,13 @@ class EvaluationDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
status = Column(JSONB) # Result
- testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
- testset = relationship("TestSetDB")
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
- variant = relationship("AppVariantDB")
+ testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id")
- )
- variant_revision = relationship("AppVariantRevisionsDB")
- aggregated_results = relationship(
- "EvaluationAggregatedResultDB", back_populates="evaluation"
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
average_cost = Column(JSONB) # Result
total_cost = Column(JSONB) # Result
@@ -477,6 +470,17 @@ class EvaluationDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ testset = relationship("TestSetDB")
+ variant = relationship("AppVariantDB")
+ variant_revision = relationship("AppVariantRevisionsDB")
+ aggregated_results = relationship(
+ "EvaluationAggregatedResultDB", back_populates="evaluation"
+ )
+ evaluation_scenarios = relationship(
+ "EvaluationScenarioDB", cascade="all, delete-orphan", backref="evaluation"
+ )
+
class EvaluationEvaluatorConfigDB(Base):
__tablename__ = "evaluation_evaluator_configs"
@@ -500,19 +504,13 @@ class EvaluationScenarioDB(Base):
nullable=False,
)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
- evaluation = relationship("EvaluationDB")
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id"))
- variant = relationship("AppVariantDB")
+ evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE"))
+ variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
inputs = Column(JSONB) # List of EvaluationScenarioInput
outputs = Column(JSONB) # List of EvaluationScenarioOutput
correct_answers = Column(JSONB) # List of CorrectAnswer
is_pinned = Column(Boolean)
note = Column(String)
- results = relationship(
- "EvaluationScenarioResultDB", back_populates="evaluation_scenario"
- )
latency = Column(Integer)
cost = Column(Integer)
created_at = Column(
@@ -522,6 +520,12 @@ class EvaluationScenarioDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ variant = relationship("AppVariantDB")
+ results = relationship(
+ "EvaluationScenarioResultDB", back_populates="evaluation_scenario"
+ )
+
class IDsMappingDB(Base):
__tablename__ = "ids_mapping"
From fe2ef53147bd485591ee82ebdac19f9a33853a86 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 00:47:14 +0100
Subject: [PATCH 055/268] minor refactor (backend): update evaluator config
model
---
agenta-backend/agenta_backend/models/api/evaluation_model.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/evaluation_model.py b/agenta-backend/agenta_backend/models/api/evaluation_model.py
index 0313022409..8852e4a242 100644
--- a/agenta-backend/agenta_backend/models/api/evaluation_model.py
+++ b/agenta-backend/agenta_backend/models/api/evaluation_model.py
@@ -18,8 +18,8 @@ class EvaluatorConfig(BaseModel):
name: str
evaluator_key: str
settings_values: Optional[Dict[str, Any]]
- created_at: datetime
- updated_at: datetime
+ created_at: str
+ updated_at: str
class EvaluationType(str, Enum):
@@ -168,7 +168,6 @@ class EvaluationScenario(BaseModel):
evaluation_id: str
inputs: List[EvaluationScenarioInput]
outputs: List[EvaluationScenarioOutput]
- evaluation: Optional[str]
correct_answers: Optional[List[CorrectAnswer]]
is_pinned: Optional[bool]
note: Optional[str]
From 6a8e4fdeb846dfe14b0b810006f0d1bc8978b52a Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 00:49:21 +0100
Subject: [PATCH 056/268] refactor (backend): removed code redundancy and
simplified use of converters
---
.../agenta_backend/models/converters.py | 96 ++++++++++---------
1 file changed, 49 insertions(+), 47 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index 9e9bf90cbb..ce181f82cb 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -9,6 +9,7 @@
from agenta_backend.services import db_manager
from agenta_backend.utils.common import isCloudEE
from agenta_backend.models.api.user_models import User
+from agenta_backend.models.shared_models import ConfigDB
from agenta_backend.models.api.evaluation_model import (
CorrectAnswer,
Evaluation,
@@ -91,7 +92,6 @@
)
from fastapi import Depends
-from beanie import Link, PydanticObjectId as ObjectId
logger = logging.getLogger(__name__)
@@ -114,37 +114,32 @@ async def evaluation_db_to_pydantic(
evaluation_db: EvaluationDB,
) -> Evaluation:
variant = await db_manager.get_app_variant_instance_by_id(
- str(evaluation_db.variant)
+ str(evaluation_db.variant_id)
)
- variant_name = variant.variant_name if variant else str(evaluation_db.variant)
+ variant_name = variant.variant_name if variant else str(evaluation_db.variant_id)
variant_revision = await db_manager.get_app_variant_revision_by_id(
- str(evaluation_db.variant_revision)
+ str(evaluation_db.variant_revision_id)
)
revision = str(variant_revision.revision)
- aggregated_results = await aggregated_result_to_pydantic(
- evaluation_db.aggregated_results
+ aggregated_results = await aggregated_result_of_evaluation_to_pydantic(
+ str(evaluation_db.id)
)
+
return Evaluation(
id=str(evaluation_db.id),
- app_id=str(evaluation_db.app.id),
- user_id=str(evaluation_db.user.id),
+ app_id=str(evaluation_db.app_id),
+ user_id=str(evaluation_db.user_id),
user_username=evaluation_db.user.username or "",
status=evaluation_db.status,
- variant_ids=[str(evaluation_db.variant)],
- variant_revision_ids=[str(evaluation_db.variant_revision)],
+ variant_ids=[str(evaluation_db.variant_id)],
+ variant_revision_ids=[str(evaluation_db.variant_revision_id)],
revisions=[revision],
variant_names=[variant_name],
- testset_id=(
- "" if type(evaluation_db.testset) is Link else str(evaluation_db.testset.id)
- ),
- testset_name=(
- ""
- if type(evaluation_db.testset) is Link
- else str(evaluation_db.testset.name)
- ),
+ testset_id=str(evaluation_db.testset_id),
+ testset_name=evaluation_db.testset.name,
aggregated_results=aggregated_results,
- created_at=evaluation_db.created_at,
- updated_at=evaluation_db.updated_at,
+ created_at=str(evaluation_db.created_at),
+ updated_at=str(evaluation_db.updated_at),
average_cost=evaluation_db.average_cost,
total_cost=evaluation_db.total_cost,
average_latency=evaluation_db.average_latency,
@@ -210,63 +205,67 @@ def human_evaluation_scenario_db_to_pydantic(
)
-async def aggregated_result_to_pydantic(results: List[AggregatedResult]) -> List[dict]:
+async def aggregated_result_of_evaluation_to_pydantic(evaluation_id: str) -> List[dict]:
transformed_results = []
- for result in results:
- evaluator_config_db = await db_manager.fetch_evaluator_config(
- str(result.evaluator_config)
- )
+ aggregated_results = await db_manager.fetch_eval_aggregated_results(
+ evaluation_id=evaluation_id
+ )
+ for aggregated_result in aggregated_results:
evaluator_config_dict = (
- evaluator_config_db.json() if evaluator_config_db else None
+ aggregated_result.evaluator_config.__dict__
+ if isinstance(aggregated_result.evaluator_config_id, uuid.UUID)
+ else None
)
transformed_results.append(
{
"evaluator_config": (
{}
if evaluator_config_dict is None
- else json.loads(evaluator_config_dict)
+ else evaluator_config_dict
),
- "result": result.result.dict(),
+ "result": aggregated_result.result,
}
)
return transformed_results
-def evaluation_scenarios_results_to_pydantic(
- results: List[EvaluationScenarioResult],
+async def evaluation_scenarios_results_to_pydantic(
+ evaluation_scenario_id: str,
) -> List[dict]:
+ scenario_results = await db_manager.fetch_evaluation_scenario_results(evaluation_scenario_id)
return [
{
- "evaluator_config": str(result.evaluator_config),
- "result": result.result.dict(),
+ "evaluator_config": str(scenario_result.evaluator_config_id),
+ "result": scenario_result.result,
}
- for result in results
+ for scenario_result in scenario_results
]
-def evaluation_scenario_db_to_pydantic(
+async def evaluation_scenario_db_to_pydantic(
evaluation_scenario_db: EvaluationScenarioDB, evaluation_id: str
) -> EvaluationScenario:
+ scenario_results = await evaluation_scenarios_results_to_pydantic(
+ str(evaluation_scenario_db.id)
+ )
return EvaluationScenario(
id=str(evaluation_scenario_db.id),
evaluation_id=evaluation_id,
inputs=[
- EvaluationScenarioInput(**scenario_input.dict())
+ EvaluationScenarioInput(**scenario_input) # type: ignore
for scenario_input in evaluation_scenario_db.inputs
],
outputs=[
- EvaluationScenarioOutput(**scenario_output.dict())
+ EvaluationScenarioOutput(**scenario_output) # type: ignore
for scenario_output in evaluation_scenario_db.outputs
],
correct_answers=[
- CorrectAnswer(**correct_answer.dict())
+ CorrectAnswer(**correct_answer) # type: ignore
for correct_answer in evaluation_scenario_db.correct_answers
],
- is_pinned=evaluation_scenario_db.is_pinned or False,
- note=evaluation_scenario_db.note or "",
- results=evaluation_scenarios_results_to_pydantic(
- evaluation_scenario_db.results
- ),
+ is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore
+ note=evaluation_scenario_db.note or "", # type: ignore
+ results=scenario_results, # type: ignore
)
@@ -295,7 +294,7 @@ async def app_variant_db_to_output(app_variant_db: AppVariantDB) -> AppVariantRe
type(app_variant_db.base_id) == uuid.UUID
and type(app_variant_db.base.deployment_id) == uuid.UUID
):
- deployment = await db_manager.get_deployment_by_objectid(
+ deployment = await db_manager.get_deployment_by_id(
str(app_variant_db.base.deployment_id)
)
uri = deployment.uri
@@ -349,8 +348,11 @@ async def app_variant_db_revision_to_output(
return AppVariantRevision(
revision=app_variant_revision_db.revision,
modified_by=app_variant_revision_db.modified_by.username,
- config=app_variant_revision_db.config,
- created_at=app_variant_revision_db.created_at,
+ config=ConfigDB(**{
+ "config_name": app_variant_revision_db.config_name,
+ "parameters": app_variant_revision_db.config_parameters
+ }),
+ created_at=str(app_variant_revision_db.created_at),
)
@@ -515,8 +517,8 @@ def evaluator_config_db_to_pydantic(evaluator_config: EvaluatorConfigDB):
name=evaluator_config.name,
evaluator_key=evaluator_config.evaluator_key,
settings_values=evaluator_config.settings_values,
- created_at=evaluator_config.created_at,
- updated_at=evaluator_config.updated_at,
+ created_at=str(evaluator_config.created_at),
+ updated_at=str(evaluator_config.updated_at),
)
From 3a71718ec4c5ae7c6427b1c9a2b988a2f2583a44 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 00:50:52 +0100
Subject: [PATCH 057/268] refactor (backend): remove redundant code in app
manager, and improved code clarity
---
.../agenta_backend/services/app_manager.py | 75 ++++---------------
.../services/deployment_manager.py | 4 +-
2 files changed, 16 insertions(+), 63 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index a5066e2c6e..0a1b9ee30f 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -144,12 +144,12 @@ async def update_variant_image(
valid_image = await deployment_manager.validate_image(image)
if not valid_image:
raise ValueError("Image could not be found in registry.")
- deployment = await db_manager.get_deployment_by_objectid(
+ deployment = await db_manager.get_deployment_by_id(
app_variant_db.base.deployment
)
await deployment_manager.stop_and_delete_service(deployment)
- await db_manager.remove_deployment(deployment)
+ await db_manager.remove_deployment(str(deployment.id))
if isOssEE():
await deployment_manager.remove_image(app_variant_db.base.image)
@@ -202,11 +202,10 @@ async def terminate_and_remove_app_variant(
app_variant_id and app_variant_db
), "Only one of app_variant_id or app_variant_db must be provided"
- logger.debug(f"Removing app variant {app_variant_id}")
if app_variant_id:
app_variant_db = await db_manager.fetch_app_variant_by_id(app_variant_id)
+ logger.debug(f"Fetched app variant {app_variant_db}")
- logger.debug(f"Fetched app variant {app_variant_db}")
app_id = str(app_variant_db.app_id) # type: ignore
if app_variant_db is None:
error_msg = f"Failed to delete app variant {app_variant_id}: Not found in DB."
@@ -218,7 +217,6 @@ async def terminate_and_remove_app_variant(
app_variant_db
)
if is_last_variant_for_image:
- # remove variant + terminate and rm containers + remove base
base_db = await db_manager.fetch_base_by_id(
base_id=str(app_variant_db.base_id)
)
@@ -235,7 +233,7 @@ async def terminate_and_remove_app_variant(
logger.debug("_stop_and_delete_app_container")
try:
- deployment = await db_manager.get_deployment_by_objectid(
+ deployment = await db_manager.get_deployment_by_id(
str(base_db.deployment_id)
)
except Exception as e:
@@ -257,17 +255,8 @@ async def terminate_and_remove_app_variant(
await deployment_manager.remove_image(image)
except RuntimeError as e:
logger.error(f"Failed to remove image {image} {e}")
- await db_manager.remove_image(image)
-
- logger.debug("remove base")
- await db_manager.remove_app_variant_from_db(app_variant_db)
-
- logger.debug("Remove image object from db")
- if deployment:
- await db_manager.remove_deployment(deployment)
-
- await db_manager.remove_base_from_db(base_db)
- logger.debug("remove_app_variant_from_db")
+ finally:
+ await db_manager.remove_image(image)
else:
# remove variant + config
logger.debug("remove_app_variant_from_db")
@@ -276,7 +265,7 @@ async def terminate_and_remove_app_variant(
app_variants = await db_manager.list_app_variants(app_id)
logger.debug(f"Count of app variants available: {len(app_variants)}")
if (
- len(app_variants) == 0
+ len(app_variants) <= 1
): # remove app related resources if at most one app variant remains
logger.debug("remove_app_related_resources")
await remove_app_related_resources(app_id)
@@ -288,7 +277,7 @@ async def terminate_and_remove_app_variant(
async def remove_app_related_resources(app_id: str):
- """Removes environments and testsets associated with an app after its deletion.
+ """Removes tables associated with an app after its deletion.
When an app or its last variant is deleted, this function ensures that
all related resources such as environments and testsets are also deleted.
@@ -296,37 +285,8 @@ async def remove_app_related_resources(app_id: str):
Args:
app_id: The ID of the app whose associated resources are to be removed.
"""
- try:
- # Delete associated environments
- environments = await db_manager.list_environments(app_id)
- for environment_db in environments:
- await db_manager.remove_environment(environment_db)
- logger.info(f"Successfully deleted environment {environment_db.name}.")
-
- # Delete associated testsets
- await db_manager.remove_app_testsets(app_id)
- logger.info(f"Successfully deleted test sets associated with app {app_id}.")
-
- # Delete associated bases
- bases = await db_manager.list_bases_for_app_id(app_id)
- for base_db in bases:
- await db_manager.remove_base(base_db)
- logger.info(f"Successfully deleted base {base_db.base_name}")
-
- # Delete associated deployments
- deployments = await db_manager.list_deployments(app_id)
- for deployment_db in deployments:
- await db_manager.remove_deployment(deployment_db)
- logger.info(f"Successfully deleted deployment {str(deployment_db.id)}")
-
- # Deleted associated evaluators_configs
- evaluators_configs = await db_manager.fetch_evaluators_configs(app_id)
- for evaluator_config_db in evaluators_configs:
- await db_manager.delete_evaluator_config(str(evaluator_config_db.id))
- logger.info(
- f"Successfully deleted evaluator config {str(evaluator_config_db.id)}"
- )
+ try:
await db_manager.remove_app_by_id(app_id)
logger.info(f"Successfully remove app object {app_id}.")
except Exception as e:
@@ -344,11 +304,12 @@ async def remove_app(app: AppDB):
Arguments:
app -- the app object to remove
"""
- # checks if it is the last app variant using its image
+
if app is None:
error_msg = f"Failed to delete app {app.id}: Not found in DB."
logger.error(error_msg)
raise ValueError(error_msg)
+
try:
app_variants = await db_manager.list_app_variants(str(app.id))
for app_variant_db in app_variants:
@@ -357,7 +318,7 @@ async def remove_app(app: AppDB):
f"Successfully deleted app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name}."
)
- if len(app_variants) == 0: # Failsafe in case something went wrong before
+ if len(app_variants) <= 1: # Failsafe in case something went wrong before
logger.debug("remove_app_related_resources")
await remove_app_related_resources(str(app.id))
@@ -382,20 +343,12 @@ async def update_variant_parameters(
assert app_variant_id is not None, "app_variant_id must be provided"
assert parameters is not None, "parameters must be provided"
- app_variant_db = await db_manager.fetch_app_variant_by_id(app_variant_id)
- if app_variant_db is None:
- error_msg = f"Failed to update app variant {app_variant_id}: Not found in DB."
- logger.error(error_msg)
- raise ValueError(error_msg)
-
try:
await db_manager.update_variant_parameters(
- app_variant_db=app_variant_db, parameters=parameters, user_uid=user_uid
+ app_variant_id=app_variant_id, parameters=parameters, user_uid=user_uid
)
except Exception as e:
- logger.error(
- f"Error updating app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name}"
- )
+ logger.error(f"Error updating app variant {app_variant_id}")
raise e from None
diff --git a/agenta-backend/agenta_backend/services/deployment_manager.py b/agenta-backend/agenta_backend/services/deployment_manager.py
index 4c3f803ac6..8815b65c3b 100644
--- a/agenta-backend/agenta_backend/services/deployment_manager.py
+++ b/agenta-backend/agenta_backend/services/deployment_manager.py
@@ -146,5 +146,5 @@ async def validate_image(image: Image) -> bool:
return True
-def get_deployment_uri(deployment: DeploymentDB) -> str:
- return deployment.uri.replace("http://localhost", "http://host.docker.internal")
+def get_deployment_uri(uri: str) -> str:
+ return uri.replace("http://localhost", "http://host.docker.internal")
From 74285a2bc42fe157bac7d66abf4649b2e7c587ee Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 00:57:00 +0100
Subject: [PATCH 058/268] refactor (backend): migrate evaluation service from
Beanie to SQLAlchemy and fix QA evaluation bugs
---
.../routers/evaluation_router.py | 88 +++++++++---------
.../services/aggregation_service.py | 18 ++--
.../services/evaluation_service.py | 93 ++++---------------
.../services/evaluators_service.py | 16 +++-
.../services/llm_apps_service.py | 5 +-
.../agenta_backend/tasks/evaluations.py | 83 ++++++++++-------
6 files changed, 136 insertions(+), 167 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index 2ed596f00c..7d16f427e3 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -245,6 +245,12 @@ async def fetch_evaluation_scenarios(
try:
evaluation = await db_manager.fetch_evaluation_by_id(evaluation_id)
+ if not evaluation:
+ raise HTTPException(
+ status_code=404,
+ detail=f"Evaluation with id {evaluation_id} not found"
+ )
+
if isCloudEE():
has_permission = await check_action_access(
user_uid=request.state.user_id,
@@ -257,20 +263,24 @@ async def fetch_evaluation_scenarios(
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
logger.error(error_msg)
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
eval_scenarios = (
await evaluation_service.fetch_evaluation_scenarios_for_evaluation(
- evaluation=evaluation
+ evaluation_id=str(evaluation.id)
)
)
return eval_scenarios
except Exception as exc:
- raise HTTPException(status_code=500, detail=str(exc))
+ import traceback
+
+ traceback.print_exc()
+ status_code = exc.status_code if hasattr(exc, "status_code") else 500
+ raise HTTPException(status_code=status_code, detail=str(exc))
@router.get("/", response_model=List[Evaluation])
@@ -307,7 +317,11 @@ async def fetch_list_evaluations(
return await evaluation_service.fetch_list_evaluations(app)
except Exception as exc:
- raise HTTPException(status_code=500, detail=str(exc))
+ import traceback
+
+ traceback.print_exc()
+ status_code = exc.status_code if hasattr(exc, "status_code") else 500
+ raise HTTPException(status_code=status_code, detail=f"Could not retrieve evaluation results: {str(exc)}")
@router.get(
@@ -327,6 +341,9 @@ async def fetch_evaluation(
"""
try:
evaluation = await db_manager.fetch_evaluation_by_id(evaluation_id)
+ if not evaluation:
+ raise HTTPException(status_code=404, detail=f"Evaluation with id {evaluation_id} not found")
+
if isCloudEE():
has_permission = await check_action_access(
user_uid=request.state.user_id,
@@ -340,19 +357,20 @@ async def fetch_evaluation(
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
logger.error(error_msg)
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
return await converters.evaluation_db_to_pydantic(evaluation)
except Exception as exc:
- raise HTTPException(status_code=500, detail=str(exc))
+ status_code = exc.status_code if hasattr(exc, "status_code") else 500
+ raise HTTPException(status_code=status_code, detail=str(exc))
@router.delete("/", response_model=List[str], operation_id="delete_evaluations")
async def delete_evaluations(
- delete_evaluations: DeleteEvaluation,
+ payload: DeleteEvaluation,
request: Request,
):
"""
@@ -367,48 +385,30 @@ async def delete_evaluations(
try:
if isCloudEE():
- for evaluation_id in delete_evaluations.evaluations_ids:
- has_permission = await check_action_access(
- user_uid=request.state.user_id,
- object_id=evaluation_id,
- object_type="evaluation",
- permission=Permission.DELETE_EVALUATION,
- )
- logger.debug(
- f"User has permission to delete evaluation: {has_permission}"
+ # TODO (abram): improve rbac logic for evaluation permission
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ # object_id=evaluation_id,
+ object_type="evaluation",
+ permission=Permission.DELETE_EVALUATION,
+ )
+ logger.debug(
+ f"User has permission to delete evaluation: {has_permission}"
+ )
+ if not has_permission:
+ error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
+ logger.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
)
- if not has_permission:
- error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
- logger.error(error_msg)
- return JSONResponse(
- {"detail": error_msg},
- status_code=403,
- )
- await evaluation_service.delete_evaluations(delete_evaluations.evaluations_ids)
+ await evaluation_service.delete_evaluations(payload.evaluations_ids)
return Response(status_code=status.HTTP_204_NO_CONTENT)
except Exception as exc:
raise HTTPException(status_code=500, detail=str(exc))
-@router.post(
- "/webhook_example_fake/",
- response_model=EvaluationWebhook,
- operation_id="webhook_example_fake",
-)
-async def webhook_example_fake():
- """Returns a fake score response for example webhook evaluation
-
- Returns:
- _description_
- """
-
- # return a random score b/w 0 and 1
- random_generator = secrets.SystemRandom()
- random_number = random_generator.random()
- return {"score": random_number}
-
-
@router.get(
"/evaluation_scenarios/comparison-results/",
response_model=Any,
diff --git a/agenta-backend/agenta_backend/services/aggregation_service.py b/agenta-backend/agenta_backend/services/aggregation_service.py
index b55c2e1995..f5f616df82 100644
--- a/agenta-backend/agenta_backend/services/aggregation_service.py
+++ b/agenta-backend/agenta_backend/services/aggregation_service.py
@@ -17,16 +17,18 @@ def aggregate_ai_critique(results: List[Result]) -> Result:
numeric_scores = []
for result in results:
- # Extract the first number found in the result value
- match = re.search(r"\d+", result.value)
- if match:
- try:
- score = int(match.group())
- numeric_scores.append(score)
- except ValueError:
- # Ignore if the extracted value is not an integer
+ try:
+ # Extract the first number found in the result value
+ match = re.search(r"\d+", result.value) # type: ignore
+ if not match:
continue
+ score = int(match.group())
+ numeric_scores.append(score)
+ except (TypeError, ValueError):
+ # Ignore if the extracted value is not an integer or is None
+ continue
+
# Calculate the average of numeric scores if any are present
average_value = (
sum(numeric_scores) / len(numeric_scores) if numeric_scores else None
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index f70b745e26..d650cc876d 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -155,46 +155,6 @@ async def prepare_csvdata_and_create_evaluation_scenario(
await eval_scenario_instance.create()
-async def create_evaluation_scenario(
- evaluation_id: str, payload: EvaluationScenario
-) -> None:
- """
- Create a new evaluation scenario.
-
- Args:
- evaluation_id (str): The ID of the evaluation.
- payload (EvaluationScenario): Evaluation scenario data.
-
- Raises:
- HTTPException: If evaluation not found or access denied.
- """
- evaluation = await db_manager.fetch_evaluation_by_id(evaluation_id)
-
- scenario_inputs = [
- EvaluationScenarioInput(
- input_name=input_item.input_name,
- input_value=input_item.input_value,
- )
- for input_item in payload.inputs
- ]
-
- new_eval_scenario = EvaluationScenarioDB(
- user=evaluation.user,
- organization=evaluation.organization,
- workspace=evaluation.workspace,
- evaluation=evaluation,
- inputs=scenario_inputs,
- outputs=[],
- is_pinned=False,
- note="",
- **_extend_with_evaluation(evaluation.evaluation_type),
- created_at=datetime.now(timezone.utc),
- updated_at=datetime.now(timezone.utc),
- )
-
- await new_eval_scenario.create()
-
-
async def update_human_evaluation_service(
evaluation: EvaluationDB, update_payload: HumanEvaluationUpdate
) -> None:
@@ -219,36 +179,28 @@ async def update_human_evaluation_service(
async def fetch_evaluation_scenarios_for_evaluation(
- evaluation_id: str = None, evaluation: EvaluationDB = None
-) -> List[EvaluationScenario]:
+ evaluation_id: str
+):
"""
Fetch evaluation scenarios for a given evaluation ID.
Args:
evaluation_id (str): The ID of the evaluation.
- evaluation (EvaluationDB): The evaluation instance.
-
- Raises:
- HTTPException: If the evaluation is not found or access is denied.
Returns:
List[EvaluationScenario]: A list of evaluation scenarios.
"""
- assert (
- evaluation_id or evaluation
- ), "Please provide either evaluation_id or evaluation"
- if not evaluation:
- evaluation = await db_manager.fetch_evaluation_by_id(evaluation_id)
-
- scenarios = await EvaluationScenarioDB.find(
- EvaluationScenarioDB.evaluation.id == ObjectId(evaluation.id)
- ).to_list()
- eval_scenarios = [
- converters.evaluation_scenario_db_to_pydantic(scenario, str(evaluation.id))
- for scenario in scenarios
+ evaluation_scenarios = await db_manager.fetch_evaluation_scenarios(
+ evaluation_id=evaluation_id
+ )
+ return [
+ await converters.evaluation_scenario_db_to_pydantic(
+ evaluation_scenario_db=evaluation_scenario,
+ evaluation_id=evaluation_id
+ )
+ for evaluation_scenario in evaluation_scenarios
]
- return eval_scenarios
async def fetch_human_evaluation_scenarios_for_evaluation(
@@ -371,15 +323,12 @@ async def fetch_list_evaluations(
List[Evaluation]: A list of evaluations.
"""
- evaluations_db = await EvaluationDB.find(
- EvaluationDB.app.id == app.id, fetch_links=True
- ).to_list()
+ evaluations_db = await db_manager.list_evaluations(app_id=str(app.id))
return [
await converters.evaluation_db_to_pydantic(evaluation)
for evaluation in evaluations_db
]
-
async def fetch_list_human_evaluations(
app_id: str,
) -> List[HumanEvaluation]:
@@ -439,9 +388,8 @@ async def delete_evaluations(evaluation_ids: List[str]) -> None:
Raises:
HTTPException: If evaluation not found or access denied.
"""
- for evaluation_id in evaluation_ids:
- evaluation = await db_manager.fetch_evaluation_by_id(evaluation_id)
- await evaluation.delete()
+
+ await db_manager.delete_evaluations(evaluation_ids=evaluation_ids)
async def create_new_human_evaluation(
@@ -539,24 +487,23 @@ async def create_new_evaluation(
"""
app = await db_manager.fetch_app_by_id(app_id=app_id)
-
- testset = await db_manager.fetch_testset_by_id(testset_id)
- variant_db = await db_manager.get_app_variant_instance_by_id(variant_id)
+ testset = await db_manager.fetch_testset_by_id(testset_id=testset_id)
+ variant_db = await db_manager.get_app_variant_instance_by_id(variant_id=variant_id)
variant_revision = await db_manager.fetch_app_variant_revision_by_variant(
- variant_id, variant_db.revision
+ app_variant_id=variant_id, revision=variant_db.revision # type: ignore
)
evaluation_db = await db_manager.create_new_evaluation(
app=app,
- user=app.user,
+ user_id=str(app.user_id),
testset=testset,
status=Result(
value=EvaluationStatusEnum.EVALUATION_STARTED, type="status", error=None
),
variant=variant_id,
variant_revision=str(variant_revision.id),
- organization=app.organization if isCloudEE() else None,
- workspace=app.workspace if isCloudEE() else None,
+ organization=str(app.organization_id) if isCloudEE() else None,
+ workspace=str(app.workspace_id) if isCloudEE() else None,
)
return await converters.evaluation_db_to_pydantic(evaluation_db)
diff --git a/agenta-backend/agenta_backend/services/evaluators_service.py b/agenta-backend/agenta_backend/services/evaluators_service.py
index 710affb89e..129f565499 100644
--- a/agenta-backend/agenta_backend/services/evaluators_service.py
+++ b/agenta-backend/agenta_backend/services/evaluators_service.py
@@ -1,6 +1,7 @@
+import re
import json
import logging
-import re
+import traceback
from typing import Any, Dict, List, Tuple
import httpx
@@ -212,8 +213,11 @@ def auto_custom_code_run(
app_params=app_params,
inputs=inputs,
output=output,
- data_point=data_point,
+ correct_answer=data_point.get(
+ "correct_answer", None
+ ), # for backward compatibility
code=settings_values["code"],
+ datapoint=data_point,
)
return Result(type="number", value=result)
except Exception as e: # pylint: disable=broad-except
@@ -272,14 +276,16 @@ def auto_ai_critique(
model="gpt-3.5-turbo", messages=messages, temperature=0.8
)
- evaluation_output = response.choices[0].message["content"].strip()
-
+ evaluation_output = response.choices[0].message.content.strip()
return Result(type="text", value=evaluation_output)
except Exception as e: # pylint: disable=broad-except
return Result(
type="error",
value=None,
- error=Error(message="Error during Auto AI Critique", stacktrace=str(e)),
+ error=Error(
+ message="Error during Auto AI Critique",
+ stacktrace=traceback.format_exc()
+ ),
)
diff --git a/agenta-backend/agenta_backend/services/llm_apps_service.py b/agenta-backend/agenta_backend/services/llm_apps_service.py
index 93654b3729..a7d0a646b1 100644
--- a/agenta-backend/agenta_backend/services/llm_apps_service.py
+++ b/agenta-backend/agenta_backend/services/llm_apps_service.py
@@ -52,7 +52,8 @@ async def make_payload(
elif param["type"] == "file_url":
payload[param["name"]] = datapoint.get(param["name"], "")
else:
- payload[param["name"]] = parameters[param["name"]]
+ if param["name"] in parameters: # hotfix
+ payload[param["name"]] = parameters[param["name"]]
if inputs_dict:
payload["inputs"] = inputs_dict
@@ -80,7 +81,6 @@ async def invoke_app(
"""
url = f"{uri}/generate"
payload = await make_payload(datapoint, parameters, openapi_parameters)
-
async with aiohttp.ClientSession() as client:
try:
logger.debug(f"Invoking app {uri} with payload {payload}")
@@ -157,6 +157,7 @@ async def run_with_retry(
InvokationResult: The invokation result.
"""
+
retries = 0
last_exception = None
while retries < max_retry_count:
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index cd49dfe9c8..ceec3ba624 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -36,7 +36,7 @@
fetch_evaluation_by_id,
fetch_evaluator_config,
fetch_testset_by_id,
- get_deployment_by_objectid,
+ get_deployment_by_id,
update_evaluation,
update_evaluation_with_aggregated_results,
EvaluationScenarioResult,
@@ -103,7 +103,7 @@ def evaluate(
assert (
app_variant_db is not None
), f"App variant with id {variant_id} not found!"
- app_variant_parameters = app_variant_db.config.parameters
+ app_variant_parameters = app_variant_db.config_parameters
testset_db = loop.run_until_complete(fetch_testset_by_id(testset_id))
new_evaluation_db = loop.run_until_complete(
fetch_evaluation_by_id(evaluation_id)
@@ -115,9 +115,9 @@ def evaluate(
)
evaluator_config_dbs.append(evaluator_config)
deployment_db = loop.run_until_complete(
- get_deployment_by_objectid(app_variant_db.base.deployment)
+ get_deployment_by_id(str(app_variant_db.base.deployment_id))
)
- uri = deployment_manager.get_deployment_uri(deployment_db)
+ uri = deployment_manager.get_deployment_uri(uri=deployment_db.uri) # type: ignore
# 2. Initialize vars
evaluators_aggregated_data = {
@@ -132,8 +132,8 @@ def evaluate(
app_outputs: List[InvokationResult] = loop.run_until_complete(
llm_apps_service.batch_invoke(
uri,
- testset_db.csvdata,
- app_variant_parameters,
+ testset_db.csvdata, # type: ignore
+ app_variant_parameters, # type: ignore
rate_limit_config,
)
)
@@ -143,7 +143,7 @@ def evaluate(
llm_apps_service.get_parameters_from_openapi(uri + "/openapi.json")
)
- for data_point, app_output in zip(testset_db.csvdata, app_outputs):
+ for data_point, app_output in zip(testset_db.csvdata, app_outputs): # type: ignore
# 1. We prepare the inputs
logger.debug(f"Preparing inputs for data point: {data_point}")
list_inputs = get_app_inputs(app_variant_parameters, openapi_parameters)
@@ -170,7 +170,7 @@ def evaluate(
print("There is an error when invoking the llm app so we need to skip")
error_results = [
EvaluationScenarioResult(
- evaluator_config=evaluator_config_db.id,
+ evaluator_config=str(evaluator_config_db.id),
result=Result(
type=app_output.result.type,
value=None,
@@ -185,16 +185,10 @@ def evaluate(
loop.run_until_complete(
create_new_evaluation_scenario(
- user=app.user,
- organization=app.organization if isCloudEE() else None,
- workspace=app.workspace if isCloudEE() else None,
+ user_id=str(app.user_id),
evaluation=new_evaluation_db,
variant_id=variant_id,
- evaluators_configs=new_evaluation_db.evaluators_configs,
inputs=inputs,
- is_pinned=False,
- note="",
- correct_answers=None,
outputs=[
EvaluationScenarioOutput(
result=Result(
@@ -207,7 +201,12 @@ def evaluate(
)
)
],
+ correct_answers=None,
+ is_pinned=False,
+ note="",
results=error_results,
+ organization=app.organization if isCloudEE() else None,
+ workspace=app.workspace if isCloudEE() else None,
)
)
continue
@@ -232,7 +231,7 @@ def evaluate(
output=app_output.result.value,
data_point=data_point,
settings_values=evaluator_config_db.settings_values,
- app_params=app_variant_parameters,
+ app_params=app_variant_parameters, # type: ignore
inputs=data_point,
lm_providers_keys=lm_providers_keys,
)
@@ -244,7 +243,7 @@ def evaluate(
evaluator_results.append(result)
result_object = EvaluationScenarioResult(
- evaluator_config=evaluator_config_db.id,
+ evaluator_config=str(evaluator_config_db.id),
result=result,
)
logger.debug(f"Result: {result_object}")
@@ -259,25 +258,24 @@ def evaluate(
else CorrectAnswer(key=ground_truth_column_name, value="")
for ground_truth_column_name in ground_truth_column_names
]
- # 4. We save the result of the eval scenario in the db
+ # 4. We save the result of the eval scenario in the db
loop.run_until_complete(
create_new_evaluation_scenario(
- user=app.user,
+ user_id=str(app.user_id),
evaluation=new_evaluation_db,
variant_id=variant_id,
- evaluators_configs=new_evaluation_db.evaluators_configs,
inputs=inputs,
- is_pinned=False,
- note="",
- correct_answers=all_correct_answers,
outputs=[
- EvaluationScenarioOutputDB(
+ EvaluationScenarioOutput(
result=Result(type="text", value=app_output.result.value),
latency=app_output.latency,
cost=app_output.cost,
)
],
+ correct_answers=all_correct_answers,
+ is_pinned=False,
+ note="",
results=evaluators_results,
organization=app.organization if isCloudEE() else None,
workspace=app.workspace if isCloudEE() else None,
@@ -298,9 +296,9 @@ def evaluate(
update_evaluation(
evaluation_id,
{
- "average_latency": average_latency,
- "average_cost": average_cost,
- "total_cost": total_cost,
+ "average_latency": average_latency.dict(),
+ "average_cost": average_cost.dict(),
+ "total_cost": total_cost.dict(),
},
)
)
@@ -315,8 +313,11 @@ def evaluate(
"status": Result(
type="status",
value="EVALUATION_FAILED",
- error=Error(message="Evaluation Failed", stacktrace=str(e)),
- )
+ error=Error(
+ message="Evaluation Failed",
+ stacktrace=str(e)
+ )
+ ).dict()
},
)
)
@@ -324,16 +325,16 @@ def evaluate(
return
aggregated_results = loop.run_until_complete(
- aggregate_evaluator_results(app, evaluators_aggregated_data)
+ aggregate_evaluator_results(evaluators_aggregated_data)
)
loop.run_until_complete(
update_evaluation_with_aggregated_results(
- new_evaluation_db.id, aggregated_results
+ str(new_evaluation_db.id), aggregated_results
)
)
failed_evaluation_scenarios = loop.run_until_complete(
- check_if_evaluation_contains_failed_evaluation_scenarios(new_evaluation_db.id)
+ check_if_evaluation_contains_failed_evaluation_scenarios(str(new_evaluation_db.id))
)
evaluation_status = Result(
@@ -349,14 +350,25 @@ def evaluate(
loop.run_until_complete(
update_evaluation(
- evaluation_id=new_evaluation_db.id, updates={"status": evaluation_status}
+ evaluation_id=str(new_evaluation_db.id),
+ updates={"status": evaluation_status.dict()}
)
)
async def aggregate_evaluator_results(
- app: AppDB, evaluators_aggregated_data: dict
+ evaluators_aggregated_data: dict
) -> List[AggregatedResult]:
+ """
+    Aggregate the results of the evaluation evaluators.
+
+ Args:
+ evaluators_aggregated_data (dict): The evaluators aggregated data
+
+ Returns:
+        the aggregated results of the evaluation evaluators
+ """
+
aggregated_results = []
for config_id, val in evaluators_aggregated_data.items():
evaluator_key = val["evaluator_key"] or ""
@@ -395,7 +407,7 @@ async def aggregate_evaluator_results(
evaluator_config = await fetch_evaluator_config(config_id)
aggregated_result = AggregatedResult(
- evaluator_config=evaluator_config.id,
+ evaluator_config=str(evaluator_config.id), # type: ignore
result=result,
)
aggregated_results.append(aggregated_result)
@@ -413,6 +425,7 @@ def get_app_inputs(app_variant_parameters, openapi_parameters) -> List[Dict[str,
Returns:
list: A list of dictionaries representing the application inputs, where each dictionary contains the input name and type.
"""
+
list_inputs = []
for param in openapi_parameters:
if param["type"] == "input":
From 94bf6ce47b9970aba13b01b200867f9a36edb6ca Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 00:58:01 +0100
Subject: [PATCH 059/268] refactor (backend): migrate testsets from Beanie to
SQLAlchemy and fix QA bugs
---
.../agenta_backend/routers/testset_router.py | 103 +++++++-----------
1 file changed, 42 insertions(+), 61 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 102d4183e1..facbfe109c 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -85,16 +85,10 @@ async def upload_file(
# Create a document
document = {
- "created_at": datetime.now(timezone.utc).isoformat(),
"name": testset_name if testset_name else file.filename,
- "app": app,
"csvdata": [],
}
- if isCloudEE():
- document["organization"] = app.organization
- document["workspace"] = app.workspace
-
if upload_type == "JSON":
# Read and parse the JSON file
json_data = await file.read()
@@ -118,19 +112,19 @@ async def upload_file(
for row in csv_reader:
document["csvdata"].append(row)
- user = await get_user(request.state.user_id)
try:
- testset_instance = TestSetDB(**document, user=user)
- except ValidationError as e:
- raise HTTPException(status_code=403, detail=e.errors())
- result = await testset_instance.create()
-
- if isinstance(result.id, ObjectId):
+ testset = await db_manager.create_testset(
+ app=app,
+ user_uid=request.state.user_id,
+ testset_data=document
+ )
return TestSetSimpleResponse(
- id=str(result.id),
+ id=str(testset.id),
name=document["name"],
- created_at=document["created_at"],
+ created_at=str(testset.created_at),
)
+ except ValidationError as e:
+ raise HTTPException(status_code=403, detail=e.errors())
@router.post(
@@ -170,7 +164,6 @@ async def import_testset(
try:
response = requests.get(endpoint, timeout=10)
-
if response.status_code != 200:
raise HTTPException(
status_code=400, detail="Failed to fetch testset from endpoint"
@@ -178,31 +171,25 @@ async def import_testset(
# Create a document
document = {
- "created_at": datetime.now(timezone.utc).isoformat(),
"name": testset_name,
- "app": app,
"csvdata": [],
}
- if isCloudEE():
- document["organization"] = app.organization
- document["workspace"] = app.workspace
-
# Populate the document with column names and values
json_response = response.json()
for row in json_response:
document["csvdata"].append(row)
- user = await get_user(request.state.user_id)
- testset_instance = TestSetDB(**document, user=user)
- result = await testset_instance.create()
-
- if isinstance(result.id, ObjectId):
- return TestSetSimpleResponse(
- id=str(result.id),
- name=document["name"],
- created_at=document["created_at"],
- )
+ testset = await db_manager.create_testset(
+ app=app,
+ user_uid=request.state.user_id,
+ testset_data=document
+ )
+ return TestSetSimpleResponse(
+ id=str(testset.id),
+ name=document["name"],
+ created_at=str(testset.created_at),
+ )
except HTTPException as error:
print(error)
@@ -353,8 +340,8 @@ async def get_testsets(
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
logger.error(error_msg)
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
@@ -410,7 +397,7 @@ async def get_single_testset(
@router.delete("/", response_model=List[str], operation_id="delete_testsets")
async def delete_testsets(
- delete_testsets: DeleteTestsets,
+ payload: DeleteTestsets,
request: Request,
):
"""
@@ -422,29 +409,23 @@ async def delete_testsets(
Returns:
A list of the deleted testsets' IDs.
"""
- deleted_ids = []
- for testset_id in delete_testsets.testset_ids:
- test_set = await db_manager.fetch_testset_by_id(testset_id=testset_id)
- if test_set is None:
- raise HTTPException(status_code=404, detail="testset not found")
-
- if isCloudEE():
- for testset_id in delete_testsets.testset_ids:
- has_permission = await check_action_access(
- user_uid=request.state.user_id,
- object=test_set,
- permission=Permission.DELETE_TESTSET,
- )
- logger.debug(f"User has Permission to delete Testset: {has_permission}")
- if not has_permission:
- error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
- logger.error(error_msg)
- return JSONResponse(
- {"detail": error_msg},
- status_code=403,
- )
-
- await test_set.delete()
- deleted_ids.append(testset_id)
-
- return deleted_ids
+
+ if isCloudEE():
+ # TODO: improve rbac logic for testset permission
+ # has_permission = await check_action_access(
+ # user_uid=request.state.user_id,
+ # object=test_set,
+ # permission=Permission.DELETE_TESTSET,
+ # )
+ has_permission = False
+ logger.debug(f"User has Permission to delete Testset: {has_permission}")
+ if not has_permission:
+ error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
+ logger.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ await db_manager.remove_testsets(testset_ids=payload.testset_ids)
+ return payload.testset_ids
\ No newline at end of file
From 1e6d19bd690d4332a2fc9febf312b822feafa8ea Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 00:58:38 +0100
Subject: [PATCH 060/268] minor refactor (backend): rename
get_deployment_by_objectid to get_deployment_by_id
---
agenta-backend/agenta_backend/routers/container_router.py | 6 +++---
agenta-backend/agenta_backend/routers/variants_router.py | 3 +++
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/container_router.py b/agenta-backend/agenta_backend/routers/container_router.py
index b2415522e9..fb678ff9a7 100644
--- a/agenta-backend/agenta_backend/routers/container_router.py
+++ b/agenta-backend/agenta_backend/routers/container_router.py
@@ -105,7 +105,7 @@ async def restart_docker_container(
logger.debug(f"Restarting container for variant {payload.variant_id}")
app_variant_db = await db_manager.fetch_app_variant_by_id(payload.variant_id)
try:
- deployment = await db_manager.get_deployment_by_objectid(
+ deployment = await db_manager.get_deployment_by_id(
app_variant_db.base.deployment
)
container_id = deployment.container_id
@@ -180,11 +180,11 @@ async def construct_app_container_url(
try:
if getattr(object_db, "deployment_id", None): # this is a base
- deployment = await db_manager.get_deployment_by_objectid(
+ deployment = await db_manager.get_deployment_by_id(
str(object_db.deployment_id) # type: ignore
)
elif getattr(object_db, "base_id", None): # this is a variant
- deployment = await db_manager.get_deployment_by_objectid(
+ deployment = await db_manager.get_deployment_by_id(
str(object_db.base.deployment_id) # type: ignore
)
else:
diff --git a/agenta-backend/agenta_backend/routers/variants_router.py b/agenta-backend/agenta_backend/routers/variants_router.py
index 5d825ab4f7..77dc7971d9 100644
--- a/agenta-backend/agenta_backend/routers/variants_router.py
+++ b/agenta-backend/agenta_backend/routers/variants_router.py
@@ -202,6 +202,9 @@ async def update_variant_parameters(
detail = f"Error while trying to update the app variant: {str(e)}"
raise HTTPException(status_code=500, detail=detail)
except Exception as e:
+ import traceback
+
+ traceback.print_exc()
detail = f"Unexpected error while trying to update the app variant: {str(e)}"
raise HTTPException(status_code=500, detail=detail)
From 151cda835fad6e0c3affa3ad9d7ddc0b07a6f592 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 00:59:34 +0100
Subject: [PATCH 061/268] refactor (backend): improve queries by making use of
joinedload and load_only
---
.../agenta_backend/services/db_manager.py | 322 +++++++++++++-----
1 file changed, 229 insertions(+), 93 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 79b6d826c0..ff2f6d9739 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -17,8 +17,8 @@
from sqlalchemy.future import select
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
-from sqlalchemy.orm import selectinload, joinedload
from agenta_backend.models.db_engine import db_engine
+from sqlalchemy.orm import selectinload, joinedload, aliased, load_only
from agenta_backend.models.api.api_models import (
App,
@@ -69,6 +69,7 @@
)
from agenta_backend.models.db_models import (
TemplateDB,
+ EvaluatorConfigDB,
AppVariantRevisionsDB,
EvaluationScenarioResultDB,
EvaluationAggregatedResultDB,
@@ -642,7 +643,7 @@ async def create_app_and_envs(
return app
-async def get_deployment_by_objectid(
+async def get_deployment_by_id(
deployment_id: str,
) -> DeploymentDB:
"""Get the deployment object from the database with the provided id.
@@ -656,10 +657,10 @@ async def get_deployment_by_objectid(
async with db_engine.get_session() as session:
result = await session.execute(
- select(DeploymentDB).filter_by(id=uuid.UUID(deployment_id))
+ select(DeploymentDB)
+ .filter_by(id=uuid.UUID(deployment_id))
)
deployment = result.scalars().one_or_none()
- logger.debug(f"deployment: {deployment}")
return deployment
@@ -1063,18 +1064,22 @@ async def list_apps(
async with db_engine.get_session() as session:
result = await session.execute(
- select(AppDB).filter_by(
+ select(AppDB)
+ .filter_by(
organization_id=uuid.UUID(org_id),
workspace_id=uuid.UUID(workspace_id),
)
)
- apps = result.scalars().all()
+ apps = result.unique().scalars().all()
return [converters.app_db_to_pydantic(app) for app in apps]
else:
async with db_engine.get_session() as session:
- result = await session.execute(select(AppDB).filter_by(user_id=user.id))
- apps = result.scalars().all()
+ result = await session.execute(
+ select(AppDB)
+ .filter_by(user_id=user.id)
+ )
+ apps = result.unique().scalars().all()
return [converters.app_db_to_pydantic(app) for app in apps]
@@ -1125,7 +1130,7 @@ async def check_is_last_variant_for_image(db_app_variant: AppVariantDB) -> bool:
return count_variants == 1
-async def remove_deployment(deployment_db: DeploymentDB):
+async def remove_deployment(deployment_id: str):
"""Remove a deployment from the db
Arguments:
@@ -1133,18 +1138,19 @@ async def remove_deployment(deployment_db: DeploymentDB):
"""
logger.debug("Removing deployment")
- assert deployment_db is not None, "deployment_db is missing"
+ assert deployment_id is not None, "deployment_id is missing"
+
async with db_engine.get_session() as session:
result = await session.execute(
- select(DeploymentDB).filter_by(id=deployment_db.id)
+ select(DeploymentDB).filter_by(id=uuid.UUID(deployment_id))
)
deployment = result.scalars().one_or_none()
if not deployment:
- raise NoResultFound(f"Deployment with {str(deployment_db.id)} not found")
+ raise NoResultFound(f"Deployment with {deployment_id} not found")
await session.delete(deployment)
await session.commit()
-
+
async def list_deployments(app_id: str):
"""Lists all the deployments that belongs to an app.
@@ -1307,7 +1313,7 @@ async def fetch_app_variant_revision_by_id(
async def fetch_environment_revisions_for_environment(
environment: AppEnvironmentDB, **kwargs: dict
-) -> List[AppEnvironmentRevisionDB]:
+):
"""Returns list of app environment revision for the given environment.
Args:
@@ -1544,11 +1550,17 @@ async def fetch_app_variant_revision(app_variant: str, revision_number: int):
async with db_engine.get_session() as session:
result = await session.execute(
- select(AppVariantRevisionsDB).filter_by(
- variant_id=uuid.UUID(app_variant), revision=revision_number
+ select(AppVariantRevisionsDB)
+ .options(
+ joinedload(AppVariantRevisionsDB.modified_by)
+ .load_only(UserDB.username) # type: ignore
+ )
+ .filter_by(
+ variant_id=uuid.UUID(app_variant),
+ revision=revision_number
)
)
- app_variant_revisions = result.scalars().all()
+ app_variant_revisions = result.scalars().one_or_none()
return app_variant_revisions
@@ -1651,12 +1663,12 @@ async def remove_app_testsets(app_id: str):
return deleted_count
-async def remove_base_from_db(base: VariantBaseDB):
+async def remove_base_from_db(base_id: str):
"""
Remove a base from the database.
Args:
- base (VariantBaseDB): The base to be removed from the database.
+        base_id (str): The ID of the base to be removed from the database.
Raises:
ValueError: If the base is None.
@@ -1665,10 +1677,15 @@ async def remove_base_from_db(base: VariantBaseDB):
None
"""
- if base is None:
- raise ValueError("Base is None")
-
+ assert base_id is None, "base_id is required"
async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(VariantBaseDB).filter_by(id=uuid.UUID(base_id))
+ )
+ base = result.scalars().one_or_none()
+ if not base:
+ raise NoResultFound(f"Base with id {base_id} not found")
+
await session.delete(base)
await session.commit()
@@ -1688,64 +1705,63 @@ async def remove_app_by_id(app_id: str):
"""
assert app_id is not None, "app_id cannot be None"
- app_db = await fetch_app_by_id(app_id=app_id)
- assert app_db is not None, f"app instance for {app_id} could not be found"
-
async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(id=uuid.UUID(app_id)))
+ app_db = result.scalars().one_or_none()
+ if not app_db:
+ raise NoResultFound(f"App with id {app_id} not found")
+
await session.delete(app_db)
await session.commit()
async def update_variant_parameters(
- app_variant_db: AppVariantDB, parameters: Dict[str, Any], user_uid: str
+ app_variant_id: str, parameters: Dict[str, Any], user_uid: str
) -> None:
"""
Update the parameters of an app variant in the database.
Args:
- app_variant_db (AppVariantDB): The app variant to update.
+ app_variant_id (str): The app variant ID.
parameters (Dict[str, Any]): The new parameters to set for the app variant.
user_uid (str): The UID of the user that is updating the app variant.
Raises:
- ValueError: If there is an issue updating the variant parameters.
+ NoResultFound: If there is an issue updating the variant parameters.
"""
- assert app_variant_db is not None, "app_variant is missing"
- assert parameters is not None, "parameters is missing"
-
- logging.debug("Updating variant parameters")
user = await get_user(user_uid)
-
async with db_engine.get_session() as session:
- try:
- # Update associated ConfigDB parameters
- for key, value in parameters.items():
- if hasattr(app_variant_db.config_parameters, key):
- setattr(app_variant_db.config_parameters, key, value)
+ result = await session.execute(
+ select(AppVariantDB).filter_by(id=uuid.UUID(app_variant_id))
+ )
+ app_variant_db = result.scalars().one_or_none()
+ if not app_variant_db:
+ raise NoResultFound(f"App variant with id {app_variant_id} not found")
- # ...and variant versioning
- app_variant_db.revision += 1 # type: ignore
- app_variant_db.modified_by_id = user.id
+ # Update associated ConfigDB parameters
+ for key, value in parameters.items():
+ if hasattr(app_variant_db.config_parameters, key):
+ setattr(app_variant_db.config_parameters, key, value)
- # Save updated ConfigDB
- await session.commit()
+ # ...and variant versioning
+ app_variant_db.revision += 1 # type: ignore
+ app_variant_db.modified_by_id = user.id
- variant_revision = AppVariantRevisionsDB(
- variant_id=app_variant_db.id,
- revision=app_variant_db.revision,
- modified_by_id=user.id,
- base_id=app_variant_db.base.id,
- config_name=app_variant_db.config_name,
- config_parameters=app_variant_db.config_parameters,
- )
+ # Save updated ConfigDB
+ await session.commit()
- session.add(variant_revision)
- await session.commit()
+ variant_revision = AppVariantRevisionsDB(
+ variant_id=app_variant_db.id,
+ revision=app_variant_db.revision,
+ modified_by_id=user.id,
+ base_id=app_variant_db.base_id,
+ config_name=app_variant_db.config_name,
+ config_parameters=app_variant_db.config_parameters,
+ )
- except Exception as e:
- logging.error(f"Issue updating variant parameters: {e}")
- raise ValueError("Issue updating variant parameters")
+ session.add(variant_revision)
+ await session.commit()
async def get_app_variant_instance_by_id(variant_id: str) -> AppVariantDB:
@@ -1874,8 +1890,10 @@ async def fetch_testsets_by_app_id(app_id: str):
async def fetch_evaluation_by_id(evaluation_id: str) -> Optional[EvaluationDB]:
"""Fetches a evaluation by its ID.
+
Args:
evaluation_id (str): The ID of the evaluation to fetch.
+
Returns:
EvaluationDB: The fetched evaluation, or None if no evaluation was found.
"""
@@ -1883,7 +1901,12 @@ async def fetch_evaluation_by_id(evaluation_id: str) -> Optional[EvaluationDB]:
assert evaluation_id is not None, "evaluation_id cannot be None"
async with db_engine.get_session() as session:
result = await session.execute(
- select(EvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ select(EvaluationDB)
+ .options(
+ joinedload(EvaluationDB.user).load_only(UserDB.username), # type: ignore
+ joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name) # type: ignore
+ )
+ .filter_by(id=uuid.UUID(evaluation_id))
)
evaluation = result.scalars().one_or_none()
return evaluation
@@ -1908,6 +1931,52 @@ async def fetch_human_evaluation_by_id(
return evaluation
+async def fetch_evaluation_scenarios(evaluation_id: str):
+ """
+ Fetches evaluation scenarios.
+
+ Args:
+ evaluation_id (str): The evaluation identifier
+
+ Returns:
+ The evaluation scenarios.
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluationScenarioDB)
+ .filter_by(evaluation_id=uuid.UUID(evaluation_id))
+ )
+ evaluation_scenarios = result.scalars().all()
+ return evaluation_scenarios
+
+
+async def fetch_evaluation_scenario_results(evaluation_scenario_id: str):
+ """
+ Fetches evaluation scenario results.
+
+ Args:
+ evaluation_scenario_id (str): The evaluation scenario identifier
+
+ Returns:
+ The evaluation scenario results.
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluationScenarioResultDB)
+ .options(
+ load_only(
+ EvaluationScenarioResultDB.evaluator_config_id, # type: ignore
+ EvaluationScenarioResultDB.result # type: ignore
+ )
+ )
+ .filter_by(evaluation_scenario_id=uuid.UUID(evaluation_scenario_id))
+ )
+ scenario_results = result.scalars().all()
+ return scenario_results
+
+
async def fetch_evaluation_scenario_by_id(
evaluation_scenario_id: str,
) -> Optional[EvaluationScenarioDB]:
@@ -2220,19 +2289,19 @@ async def fetch_app_by_name_and_parameters(
)
else:
query = (
- base_query.join(UserDB)
+ base_query
+ .join(UserDB)
.filter(UserDB.uid == user_uid)
- .options(selectinload(AppDB.user))
)
result = await session.execute(query)
- app_db = result.scalars().one_or_none()
+ app_db = result.unique().scalars().one_or_none()
return app_db
async def create_new_evaluation(
app: AppDB,
- user: UserDB,
+ user_id: str,
testset: TestSetDB,
status: Result,
variant: str,
@@ -2248,7 +2317,7 @@ async def create_new_evaluation(
async with db_engine.get_session() as session:
evaluation = EvaluationDB(
app_id=app.id,
- user_id=user.id,
+ user_id=uuid.UUID(user_id),
testset_id=testset.id,
status=status.dict(),
variant_id=uuid.UUID(variant),
@@ -2261,18 +2330,57 @@ async def create_new_evaluation(
organization is not None and workspace is not None
), "organization and workspace must be provided together"
- evaluation.organization_id = organization_id
- evaluation.workspace_id = workspace_id
+ evaluation.organization_id = uuid.UUID(organization_id) # type: ignore
+ evaluation.workspace_id = uuid.UUID(workspace_id) # type: ignore
session.add(evaluation)
await session.commit()
- await session.refresh(evaluation)
+ await session.refresh(evaluation, attribute_names=["user", "testset", "aggregated_results"])
return evaluation
+async def list_evaluations(app_id: str):
+ """Retrieves evaluations of the specified app from the db.
+
+ Args:
+ app_id (str): The ID of the app
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluationDB)
+ .options(
+ joinedload(EvaluationDB.user).load_only(UserDB.id, UserDB.username), # type: ignore
+ joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ joinedload(EvaluationDB.aggregated_results)
+ )
+ .filter_by(app_id=uuid.UUID(app_id))
+ )
+ evaluations = result.unique().scalars().all()
+ return evaluations
+
+
+async def delete_evaluations(evaluation_ids: List[str]) -> None:
+ """Delete evaluations based on the ids provided from the db.
+
+ Args:
+        evaluation_ids (List[str]): The IDs of the evaluations to delete
+ """
+
+ async with db_engine.get_session() as session:
+ query = select(EvaluationDB).where(
+ EvaluationDB.id.in_(evaluation_ids)
+ )
+ result = await session.execute(query)
+ evaluations = result.scalars().all()
+ for evaluation in evaluations:
+ await session.delete(evaluation)
+ await session.commit()
+
+
async def create_new_evaluation_scenario(
- user: UserDB,
+ user_id: str,
evaluation: EvaluationDB,
variant_id: str,
inputs: List[EvaluationScenarioInput],
@@ -2280,24 +2388,28 @@ async def create_new_evaluation_scenario(
correct_answers: Optional[List[CorrectAnswer]],
is_pinned: Optional[bool],
note: Optional[str],
- evaluators_configs: List[EvaluatorConfigDB],
results: List[EvaluationScenarioResult],
organization=None,
workspace=None,
) -> EvaluationScenarioDB:
"""Create a new evaluation scenario.
+
Returns:
EvaluationScenarioDB: The created evaluation scenario.
"""
async with db_engine.get_session() as session:
evaluation_scenario = EvaluationScenarioDB(
- user_id=user.id,
+ user_id=uuid.UUID(user_id),
evaluation_id=evaluation.id,
variant_id=uuid.UUID(variant_id),
- inputs=inputs,
- outputs=outputs,
- correct_answers=correct_answers,
+ inputs=[input.dict() for input in inputs],
+ outputs=[output.dict() for output in outputs],
+ correct_answers=(
+ [correct_answer.dict() for correct_answer in correct_answers]
+ if correct_answers is not None
+ else []
+ ),
is_pinned=is_pinned,
note=note,
)
@@ -2308,8 +2420,8 @@ async def create_new_evaluation_scenario(
organization is not None and workspace is not None
), "organization and workspace must be provided together"
- evaluation_scenario.organization_id = organization_id
- evaluation_scenario.workspace_id = workspace_id
+ evaluation_scenario.organization_id = organization_id # type: ignore
+ evaluation_scenario.workspace_id = workspace_id # type: ignore
session.add(evaluation_scenario)
await session.commit()
@@ -2335,27 +2447,48 @@ async def update_evaluation_with_aggregated_results(
evaluation_id: str, aggregated_results: List[AggregatedResult]
):
async with db_engine.get_session() as session:
- base_query = select(EvaluationAggregatedResultDB).filter_by(
- evaluation_id=uuid.UUID(evaluation_id)
- )
for result in aggregated_results:
- query = base_query.filter_by(
- evaluator_config_id=uuid.UUID(result.evaluator_config)
+ aggregated_result = EvaluationAggregatedResultDB(
+ evaluation_id=uuid.UUID(evaluation_id),
+ evaluator_config_id=uuid.UUID(result.evaluator_config),
+ result=result.result.dict()
)
- db_result = await session.execute(query)
- evaluation_aggregated_result = db_result.scalars().one_or_none()
- if not evaluation_aggregated_result:
- raise NoResultFound(
- f"Aggregated result with id {result.evaluator_config} not found for the evaluation"
- )
-
- for key, value in result.result.dict(exclude_unset=True):
- if hasattr(evaluation_aggregated_result.result, key):
- setattr(evaluation_aggregated_result.result, key, value)
+ session.add(aggregated_result)
await session.commit()
+async def fetch_eval_aggregated_results(evaluation_id: str):
+ """
+ Fetches an evaluation aggregated results by evaluation identifier.
+
+ Args:
+ evaluation_id (str): The evaluation identifier
+
+ Returns:
+ The evaluation aggregated results by evaluation identifier.
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluationAggregatedResultDB)
+ .options(
+ joinedload(EvaluationAggregatedResultDB.evaluator_config)
+ .load_only(
+ EvaluatorConfigDB.id, # type: ignore
+ EvaluatorConfigDB.name, # type: ignore
+ EvaluatorConfigDB.evaluator_key, # type: ignore
+ EvaluatorConfigDB.settings_values, # type: ignore
+ EvaluatorConfigDB.created_at, # type: ignore
+ EvaluatorConfigDB.updated_at, # type: ignore
+ )
+ )
+ .filter_by(evaluation_id=uuid.UUID(evaluation_id))
+ )
+ aggregated_results = result.scalars().all()
+ return aggregated_results
+
+
async def fetch_evaluators_configs(app_id: str):
"""Fetches a list of evaluator configurations from the database.
@@ -2562,11 +2695,14 @@ async def check_if_evaluation_contains_failed_evaluation_scenarios(
evaluation_id: str,
) -> bool:
async with db_engine.get_session() as session:
- query = select(func.count(EvaluationScenarioDB.id)).where(
- EvaluationScenarioDB.evaluation_id == uuid.UUID(evaluation_id),
- EvaluationScenarioDB.results.any(
- EvaluationScenarioDB.result.has(type="error")
- ),
+ EvaluationResultAlias = aliased(EvaluationScenarioResultDB)
+ query = (
+ select(func.count(EvaluationScenarioDB.id))
+ .join(EvaluationResultAlias, EvaluationScenarioDB.results)
+ .where(
+ EvaluationScenarioDB.evaluation_id == uuid.UUID(evaluation_id),
+ EvaluationResultAlias.result["type"].astext == "error"
+ )
)
result = await session.execute(query)
From def4d75b3813edbc9e96a429f687c333714b8a87 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 01:00:14 +0100
Subject: [PATCH 062/268] minor refactor (tools): comment out redundant compose
services
---
docker-compose.yml | 70 ++++++++++++++++++++++++----------------------
1 file changed, 37 insertions(+), 33 deletions(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index f18adc22d7..d5ebd2676d 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -61,7 +61,9 @@ services:
"/api",
]
depends_on:
- mongo:
+ # mongo:
+ # condition: service_healthy
+ postgres:
condition: service_healthy
restart: always
@@ -85,38 +87,38 @@ services:
- NEXT_PUBLIC_POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7
restart: always
- mongo:
- image: mongo:5.0
- environment:
- MONGO_INITDB_ROOT_USERNAME: username
- MONGO_INITDB_ROOT_PASSWORD: password
- volumes:
- - mongodb_data:/data/db
- ports:
- - "27017:27017"
- networks:
- - agenta-network
- healthcheck:
- test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"]
- interval: 10s
- timeout: 10s
- retries: 20
- restart: always
+ # mongo:
+ # image: mongo:5.0
+ # environment:
+ # MONGO_INITDB_ROOT_USERNAME: username
+ # MONGO_INITDB_ROOT_PASSWORD: password
+ # volumes:
+ # - mongodb_data:/data/db
+ # ports:
+ # - "27017:27017"
+ # networks:
+ # - agenta-network
+ # healthcheck:
+ # test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"]
+ # interval: 10s
+ # timeout: 10s
+ # retries: 20
+ # restart: always
- mongo_express:
- image: mongo-express:0.54.0
- environment:
- ME_CONFIG_MONGODB_ADMINUSERNAME: username
- ME_CONFIG_MONGODB_ADMINPASSWORD: password
- ME_CONFIG_MONGODB_SERVER: mongo
- ports:
- - "8081:8081"
- networks:
- - agenta-network
- depends_on:
- mongo:
- condition: service_healthy
- restart: always
+ # mongo_express:
+ # image: mongo-express:0.54.0
+ # environment:
+ # ME_CONFIG_MONGODB_ADMINUSERNAME: username
+ # ME_CONFIG_MONGODB_ADMINPASSWORD: password
+ # ME_CONFIG_MONGODB_SERVER: mongo
+ # ports:
+ # - "8081:8081"
+ # networks:
+ # - agenta-network
+ # depends_on:
+ # mongo:
+ # condition: service_healthy
+ # restart: always
redis:
image: redis:latest
@@ -144,6 +146,7 @@ services:
command: >
watchmedo auto-restart --directory=./agenta_backend --pattern=*.py --recursive -- celery -A agenta_backend.main.celery_app worker --concurrency=1 --loglevel=INFO
environment:
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
- MONGODB_URI=mongodb://username:password@mongo:27017
- REDIS_URL=redis://redis:6379/0
- CELERY_BROKER_URL=amqp://guest@rabbitmq//
@@ -153,7 +156,8 @@ services:
- ./agenta-backend/agenta_backend:/app/agenta_backend
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- - mongo
+ # - mongo
+ - postgres
- rabbitmq
- redis
extra_hosts:
From 028507933bb8ccefbc4ec14738dd621a3913ce5e Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 01:01:34 +0100
Subject: [PATCH 063/268] chore (backend): format codebase with black@23.12.0
---
.../agenta_backend/models/converters.py | 30 ++++---
.../agenta_backend/models/db_models.py | 69 ++++++++++----
.../routers/evaluation_router.py | 16 ++--
.../agenta_backend/routers/testset_router.py | 10 +--
.../services/aggregation_service.py | 2 +-
.../agenta_backend/services/app_manager.py | 4 +-
.../agenta_backend/services/db_manager.py | 89 ++++++++-----------
.../services/evaluation_service.py | 10 +--
.../services/evaluators_service.py | 2 +-
.../agenta_backend/tasks/evaluations.py | 27 +++---
10 files changed, 137 insertions(+), 122 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index ce181f82cb..7495d18630 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -219,9 +219,7 @@ async def aggregated_result_of_evaluation_to_pydantic(evaluation_id: str) -> Lis
transformed_results.append(
{
"evaluator_config": (
- {}
- if evaluator_config_dict is None
- else evaluator_config_dict
+ {} if evaluator_config_dict is None else evaluator_config_dict
),
"result": aggregated_result.result,
}
@@ -232,7 +230,9 @@ async def aggregated_result_of_evaluation_to_pydantic(evaluation_id: str) -> Lis
async def evaluation_scenarios_results_to_pydantic(
evaluation_scenario_id: str,
) -> List[dict]:
- scenario_results = await db_manager.fetch_evaluation_scenario_results(evaluation_scenario_id)
+ scenario_results = await db_manager.fetch_evaluation_scenario_results(
+ evaluation_scenario_id
+ )
return [
{
"evaluator_config": str(scenario_result.evaluator_config_id),
@@ -252,20 +252,20 @@ async def evaluation_scenario_db_to_pydantic(
id=str(evaluation_scenario_db.id),
evaluation_id=evaluation_id,
inputs=[
- EvaluationScenarioInput(**scenario_input) # type: ignore
+ EvaluationScenarioInput(**scenario_input) # type: ignore
for scenario_input in evaluation_scenario_db.inputs
],
outputs=[
- EvaluationScenarioOutput(**scenario_output) # type: ignore
+ EvaluationScenarioOutput(**scenario_output) # type: ignore
for scenario_output in evaluation_scenario_db.outputs
],
correct_answers=[
- CorrectAnswer(**correct_answer) # type: ignore
+ CorrectAnswer(**correct_answer) # type: ignore
for correct_answer in evaluation_scenario_db.correct_answers
],
- is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore
- note=evaluation_scenario_db.note or "", # type: ignore
- results=scenario_results, # type: ignore
+ is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore
+ note=evaluation_scenario_db.note or "", # type: ignore
+ results=scenario_results, # type: ignore
)
@@ -348,10 +348,12 @@ async def app_variant_db_revision_to_output(
return AppVariantRevision(
revision=app_variant_revision_db.revision,
modified_by=app_variant_revision_db.modified_by.username,
- config=ConfigDB(**{
- "config_name": app_variant_revision_db.config_name,
- "parameters": app_variant_revision_db.config_parameters
- }),
+ config=ConfigDB(
+ **{
+ "config_name": app_variant_revision_db.config_name,
+ "parameters": app_variant_revision_db.config_parameters,
+ }
+ ),
created_at=str(app_variant_revision_db.created_at),
)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index eb9954cdc3..908ddc89ce 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -90,11 +90,17 @@ class AppDB(Base):
user = relationship("UserDB")
variant = relationship("AppVariantDB", cascade="all, delete-orphan", backref="app")
- evaluator_config = relationship("EvaluatorConfigDB", cascade="all, delete-orphan", backref="app")
+ evaluator_config = relationship(
+ "EvaluatorConfigDB", cascade="all, delete-orphan", backref="app"
+ )
testset = relationship("TestSetDB", cascade="all, delete-orphan", backref="app")
base = relationship("DeploymentDB", cascade="all, delete-orphan", backref="app")
- deployment = relationship("VariantBaseDB", cascade="all, delete-orphan", backref="app")
- evaluation = relationship("EvaluationDB", cascade="all, delete-orphan", backref="app")
+ deployment = relationship(
+ "VariantBaseDB", cascade="all, delete-orphan", backref="app"
+ )
+ evaluation = relationship(
+ "EvaluationDB", cascade="all, delete-orphan", backref="app"
+ )
class DeploymentDB(Base):
@@ -137,7 +143,9 @@ class VariantBaseDB(Base):
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_name = Column(String)
image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
+ deployment_id = Column(
+ UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL")
+ )
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -182,8 +190,10 @@ class AppVariantDB(Base):
user = relationship("UserDB", foreign_keys=[user_id])
modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
base = relationship("VariantBaseDB")
- revisions = relationship("AppVariantRevisionsDB", cascade="all, delete-orphan", backref="variant")
-
+ revisions = relationship(
+ "AppVariantRevisionsDB", cascade="all, delete-orphan", backref="variant"
+ )
+
class AppVariantRevisionsDB(Base):
__tablename__ = "app_variant_revisions"
@@ -195,7 +205,9 @@ class AppVariantRevisionsDB(Base):
unique=True,
nullable=False,
)
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="CASCADE"))
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="CASCADE")
+ )
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
@@ -226,18 +238,23 @@ class AppEnvironmentDB(Base):
name = Column(String)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
revision = Column(Integer)
- deployed_app_variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
+ deployed_app_variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
deployed_app_variant_revision_id = Column(
- UUID(as_uuid=True),
- ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
+ )
+ deployment_id = Column(
+ UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL")
)
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
user = relationship("UserDB")
- environment_revisions = relationship("AppEnvironmentRevisionDB", cascade="all, delete-orphan", backref="environment")
+ environment_revisions = relationship(
+ "AppEnvironmentRevisionDB", cascade="all, delete-orphan", backref="environment"
+ )
deployed_app_variant = relationship("AppVariantDB")
deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
@@ -252,13 +269,17 @@ class AppEnvironmentRevisionDB(Base):
unique=True,
nullable=False,
)
- environment_id = Column(UUID(as_uuid=True), ForeignKey("environments.id", ondelete="CASCADE"))
+ environment_id = Column(
+ UUID(as_uuid=True), ForeignKey("environments.id", ondelete="CASCADE")
+ )
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
deployed_app_variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
+ deployment_id = Column(
+ UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL")
+ )
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -356,7 +377,9 @@ class HumanEvaluationDB(Base):
user = relationship("UserDB")
status = Column(String)
evaluation_type = Column(String)
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
variant = relationship("AppVariantDB")
variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
@@ -455,8 +478,12 @@ class EvaluationDB(Base):
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
status = Column(JSONB) # Result
- testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL"))
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
+ testset_id = Column(
+ UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
@@ -504,8 +531,12 @@ class EvaluationScenarioDB(Base):
nullable=False,
)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE"))
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
inputs = Column(JSONB) # List of EvaluationScenarioInput
outputs = Column(JSONB) # List of EvaluationScenarioOutput
correct_answers = Column(JSONB) # List of CorrectAnswer
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index 7d16f427e3..7e21e05091 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -247,8 +247,7 @@ async def fetch_evaluation_scenarios(
evaluation = await db_manager.fetch_evaluation_by_id(evaluation_id)
if not evaluation:
raise HTTPException(
- status_code=404,
- detail=f"Evaluation with id {evaluation_id} not found"
+ status_code=404, detail=f"Evaluation with id {evaluation_id} not found"
)
if isCloudEE():
@@ -321,7 +320,10 @@ async def fetch_list_evaluations(
traceback.print_exc()
status_code = exc.status_code if hasattr(exc, "status_code") else 500
- raise HTTPException(status_code=status_code, detail=f"Could not retrieve evaluation results: {str(exc)}")
+ raise HTTPException(
+ status_code=status_code,
+ detail=f"Could not retrieve evaluation results: {str(exc)}",
+ )
@router.get(
@@ -342,7 +344,9 @@ async def fetch_evaluation(
try:
evaluation = await db_manager.fetch_evaluation_by_id(evaluation_id)
if not evaluation:
- raise HTTPException(status_code=404, detail=f"Evaluation with id {evaluation_id} not found")
+ raise HTTPException(
+ status_code=404, detail=f"Evaluation with id {evaluation_id} not found"
+ )
if isCloudEE():
has_permission = await check_action_access(
@@ -392,9 +396,7 @@ async def delete_evaluations(
object_type="evaluation",
permission=Permission.DELETE_EVALUATION,
)
- logger.debug(
- f"User has permission to delete evaluation: {has_permission}"
- )
+ logger.debug(f"User has permission to delete evaluation: {has_permission}")
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
logger.error(error_msg)
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index facbfe109c..5a28380b80 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -114,9 +114,7 @@ async def upload_file(
try:
testset = await db_manager.create_testset(
- app=app,
- user_uid=request.state.user_id,
- testset_data=document
+ app=app, user_uid=request.state.user_id, testset_data=document
)
return TestSetSimpleResponse(
id=str(testset.id),
@@ -181,9 +179,7 @@ async def import_testset(
document["csvdata"].append(row)
testset = await db_manager.create_testset(
- app=app,
- user_uid=request.state.user_id,
- testset_data=document
+ app=app, user_uid=request.state.user_id, testset_data=document
)
return TestSetSimpleResponse(
id=str(testset.id),
@@ -428,4 +424,4 @@ async def delete_testsets(
)
await db_manager.remove_testsets(testset_ids=payload.testset_ids)
- return payload.testset_ids
\ No newline at end of file
+ return payload.testset_ids
diff --git a/agenta-backend/agenta_backend/services/aggregation_service.py b/agenta-backend/agenta_backend/services/aggregation_service.py
index f5f616df82..fc2fa1db7e 100644
--- a/agenta-backend/agenta_backend/services/aggregation_service.py
+++ b/agenta-backend/agenta_backend/services/aggregation_service.py
@@ -19,7 +19,7 @@ def aggregate_ai_critique(results: List[Result]) -> Result:
for result in results:
try:
# Extract the first number found in the result value
- match = re.search(r"\d+", result.value) # type: ignore
+ match = re.search(r"\d+", result.value) # type: ignore
if not match:
continue
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index 0a1b9ee30f..86ae8c81d9 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -144,9 +144,7 @@ async def update_variant_image(
valid_image = await deployment_manager.validate_image(image)
if not valid_image:
raise ValueError("Image could not be found in registry.")
- deployment = await db_manager.get_deployment_by_id(
- app_variant_db.base.deployment
- )
+ deployment = await db_manager.get_deployment_by_id(app_variant_db.base.deployment)
await deployment_manager.stop_and_delete_service(deployment)
await db_manager.remove_deployment(str(deployment.id))
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index ff2f6d9739..1cdef3abff 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -657,8 +657,7 @@ async def get_deployment_by_id(
async with db_engine.get_session() as session:
result = await session.execute(
- select(DeploymentDB)
- .filter_by(id=uuid.UUID(deployment_id))
+ select(DeploymentDB).filter_by(id=uuid.UUID(deployment_id))
)
deployment = result.scalars().one_or_none()
return deployment
@@ -1064,8 +1063,7 @@ async def list_apps(
async with db_engine.get_session() as session:
result = await session.execute(
- select(AppDB)
- .filter_by(
+ select(AppDB).filter_by(
organization_id=uuid.UUID(org_id),
workspace_id=uuid.UUID(workspace_id),
)
@@ -1075,10 +1073,7 @@ async def list_apps(
else:
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppDB)
- .filter_by(user_id=user.id)
- )
+ result = await session.execute(select(AppDB).filter_by(user_id=user.id))
apps = result.unique().scalars().all()
return [converters.app_db_to_pydantic(app) for app in apps]
@@ -1150,7 +1145,7 @@ async def remove_deployment(deployment_id: str):
await session.delete(deployment)
await session.commit()
-
+
async def list_deployments(app_id: str):
"""Lists all the deployments that belongs to an app.
@@ -1552,13 +1547,11 @@ async def fetch_app_variant_revision(app_variant: str, revision_number: int):
result = await session.execute(
select(AppVariantRevisionsDB)
.options(
- joinedload(AppVariantRevisionsDB.modified_by)
- .load_only(UserDB.username) # type: ignore
- )
- .filter_by(
- variant_id=uuid.UUID(app_variant),
- revision=revision_number
+ joinedload(AppVariantRevisionsDB.modified_by).load_only(
+ UserDB.username
+ ) # type: ignore
)
+ .filter_by(variant_id=uuid.UUID(app_variant), revision=revision_number)
)
app_variant_revisions = result.scalars().one_or_none()
return app_variant_revisions
@@ -1903,8 +1896,8 @@ async def fetch_evaluation_by_id(evaluation_id: str) -> Optional[EvaluationDB]:
result = await session.execute(
select(EvaluationDB)
.options(
- joinedload(EvaluationDB.user).load_only(UserDB.username), # type: ignore
- joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name) # type: ignore
+ joinedload(EvaluationDB.user).load_only(UserDB.username), # type: ignore
+ joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
)
.filter_by(id=uuid.UUID(evaluation_id))
)
@@ -1937,15 +1930,16 @@ async def fetch_evaluation_scenarios(evaluation_id: str):
Args:
evaluation_id (str): The evaluation identifier
-
+
Returns:
The evaluation scenarios.
"""
async with db_engine.get_session() as session:
result = await session.execute(
- select(EvaluationScenarioDB)
- .filter_by(evaluation_id=uuid.UUID(evaluation_id))
+ select(EvaluationScenarioDB).filter_by(
+ evaluation_id=uuid.UUID(evaluation_id)
+ )
)
evaluation_scenarios = result.scalars().all()
return evaluation_scenarios
@@ -1967,8 +1961,8 @@ async def fetch_evaluation_scenario_results(evaluation_scenario_id: str):
select(EvaluationScenarioResultDB)
.options(
load_only(
- EvaluationScenarioResultDB.evaluator_config_id, # type: ignore
- EvaluationScenarioResultDB.result # type: ignore
+ EvaluationScenarioResultDB.evaluator_config_id, # type: ignore
+ EvaluationScenarioResultDB.result, # type: ignore
)
)
.filter_by(evaluation_scenario_id=uuid.UUID(evaluation_scenario_id))
@@ -2288,11 +2282,7 @@ async def fetch_app_by_name_and_parameters(
workspace_id=uuid.UUID(workspace_id),
)
else:
- query = (
- base_query
- .join(UserDB)
- .filter(UserDB.uid == user_uid)
- )
+ query = base_query.join(UserDB).filter(UserDB.uid == user_uid)
result = await session.execute(query)
app_db = result.unique().scalars().one_or_none()
@@ -2330,12 +2320,14 @@ async def create_new_evaluation(
organization is not None and workspace is not None
), "organization and workspace must be provided together"
- evaluation.organization_id = uuid.UUID(organization_id) # type: ignore
- evaluation.workspace_id = uuid.UUID(workspace_id) # type: ignore
+ evaluation.organization_id = uuid.UUID(organization_id) # type: ignore
+ evaluation.workspace_id = uuid.UUID(workspace_id) # type: ignore
session.add(evaluation)
await session.commit()
- await session.refresh(evaluation, attribute_names=["user", "testset", "aggregated_results"])
+ await session.refresh(
+ evaluation, attribute_names=["user", "testset", "aggregated_results"]
+ )
return evaluation
@@ -2351,9 +2343,9 @@ async def list_evaluations(app_id: str):
result = await session.execute(
select(EvaluationDB)
.options(
- joinedload(EvaluationDB.user).load_only(UserDB.id, UserDB.username), # type: ignore
- joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
- joinedload(EvaluationDB.aggregated_results)
+ joinedload(EvaluationDB.user).load_only(UserDB.id, UserDB.username), # type: ignore
+ joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ joinedload(EvaluationDB.aggregated_results),
)
.filter_by(app_id=uuid.UUID(app_id))
)
@@ -2369,9 +2361,7 @@ async def delete_evaluations(evaluation_ids: List[str]) -> None:
"""
async with db_engine.get_session() as session:
- query = select(EvaluationDB).where(
- EvaluationDB.id.in_(evaluation_ids)
- )
+ query = select(EvaluationDB).where(EvaluationDB.id.in_(evaluation_ids))
result = await session.execute(query)
evaluations = result.scalars().all()
for evaluation in evaluations:
@@ -2406,8 +2396,8 @@ async def create_new_evaluation_scenario(
inputs=[input.dict() for input in inputs],
outputs=[output.dict() for output in outputs],
correct_answers=(
- [correct_answer.dict() for correct_answer in correct_answers]
- if correct_answers is not None
+ [correct_answer.dict() for correct_answer in correct_answers]
+ if correct_answers is not None
else []
),
is_pinned=is_pinned,
@@ -2420,8 +2410,8 @@ async def create_new_evaluation_scenario(
organization is not None and workspace is not None
), "organization and workspace must be provided together"
- evaluation_scenario.organization_id = organization_id # type: ignore
- evaluation_scenario.workspace_id = workspace_id # type: ignore
+ evaluation_scenario.organization_id = organization_id # type: ignore
+ evaluation_scenario.workspace_id = workspace_id # type: ignore
session.add(evaluation_scenario)
await session.commit()
@@ -2451,7 +2441,7 @@ async def update_evaluation_with_aggregated_results(
aggregated_result = EvaluationAggregatedResultDB(
evaluation_id=uuid.UUID(evaluation_id),
evaluator_config_id=uuid.UUID(result.evaluator_config),
- result=result.result.dict()
+ result=result.result.dict(),
)
session.add(aggregated_result)
@@ -2473,14 +2463,13 @@ async def fetch_eval_aggregated_results(evaluation_id: str):
result = await session.execute(
select(EvaluationAggregatedResultDB)
.options(
- joinedload(EvaluationAggregatedResultDB.evaluator_config)
- .load_only(
- EvaluatorConfigDB.id, # type: ignore
- EvaluatorConfigDB.name, # type: ignore
- EvaluatorConfigDB.evaluator_key, # type: ignore
- EvaluatorConfigDB.settings_values, # type: ignore
- EvaluatorConfigDB.created_at, # type: ignore
- EvaluatorConfigDB.updated_at, # type: ignore
+ joinedload(EvaluationAggregatedResultDB.evaluator_config).load_only(
+ EvaluatorConfigDB.id, # type: ignore
+ EvaluatorConfigDB.name, # type: ignore
+ EvaluatorConfigDB.evaluator_key, # type: ignore
+ EvaluatorConfigDB.settings_values, # type: ignore
+ EvaluatorConfigDB.created_at, # type: ignore
+ EvaluatorConfigDB.updated_at, # type: ignore
)
)
.filter_by(evaluation_id=uuid.UUID(evaluation_id))
@@ -2701,7 +2690,7 @@ async def check_if_evaluation_contains_failed_evaluation_scenarios(
.join(EvaluationResultAlias, EvaluationScenarioDB.results)
.where(
EvaluationScenarioDB.evaluation_id == uuid.UUID(evaluation_id),
- EvaluationResultAlias.result["type"].astext == "error"
+ EvaluationResultAlias.result["type"].astext == "error",
)
)
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index d650cc876d..b7f540c2a3 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -178,9 +178,7 @@ async def update_human_evaluation_service(
await evaluation.update({"$set": updates})
-async def fetch_evaluation_scenarios_for_evaluation(
- evaluation_id: str
-):
+async def fetch_evaluation_scenarios_for_evaluation(evaluation_id: str):
"""
Fetch evaluation scenarios for a given evaluation ID.
@@ -196,8 +194,7 @@ async def fetch_evaluation_scenarios_for_evaluation(
)
return [
await converters.evaluation_scenario_db_to_pydantic(
- evaluation_scenario_db=evaluation_scenario,
- evaluation_id=evaluation_id
+ evaluation_scenario_db=evaluation_scenario, evaluation_id=evaluation_id
)
for evaluation_scenario in evaluation_scenarios
]
@@ -329,6 +326,7 @@ async def fetch_list_evaluations(
for evaluation in evaluations_db
]
+
async def fetch_list_human_evaluations(
app_id: str,
) -> List[HumanEvaluation]:
@@ -490,7 +488,7 @@ async def create_new_evaluation(
testset = await db_manager.fetch_testset_by_id(testset_id=testset_id)
variant_db = await db_manager.get_app_variant_instance_by_id(variant_id=variant_id)
variant_revision = await db_manager.fetch_app_variant_revision_by_variant(
- app_variant_id=variant_id, revision=variant_db.revision # type: ignore
+ app_variant_id=variant_id, revision=variant_db.revision # type: ignore
)
evaluation_db = await db_manager.create_new_evaluation(
diff --git a/agenta-backend/agenta_backend/services/evaluators_service.py b/agenta-backend/agenta_backend/services/evaluators_service.py
index 129f565499..3a7d0c6746 100644
--- a/agenta-backend/agenta_backend/services/evaluators_service.py
+++ b/agenta-backend/agenta_backend/services/evaluators_service.py
@@ -284,7 +284,7 @@ def auto_ai_critique(
value=None,
error=Error(
message="Error during Auto AI Critique",
- stacktrace=traceback.format_exc()
+ stacktrace=traceback.format_exc(),
),
)
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index ceec3ba624..400e0a8478 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -117,7 +117,7 @@ def evaluate(
deployment_db = loop.run_until_complete(
get_deployment_by_id(str(app_variant_db.base.deployment_id))
)
- uri = deployment_manager.get_deployment_uri(uri=deployment_db.uri) # type: ignore
+ uri = deployment_manager.get_deployment_uri(uri=deployment_db.uri) # type: ignore
# 2. Initialize vars
evaluators_aggregated_data = {
@@ -132,8 +132,8 @@ def evaluate(
app_outputs: List[InvokationResult] = loop.run_until_complete(
llm_apps_service.batch_invoke(
uri,
- testset_db.csvdata, # type: ignore
- app_variant_parameters, # type: ignore
+ testset_db.csvdata, # type: ignore
+ app_variant_parameters, # type: ignore
rate_limit_config,
)
)
@@ -143,7 +143,7 @@ def evaluate(
llm_apps_service.get_parameters_from_openapi(uri + "/openapi.json")
)
- for data_point, app_output in zip(testset_db.csvdata, app_outputs): # type: ignore
+ for data_point, app_output in zip(testset_db.csvdata, app_outputs): # type: ignore
# 1. We prepare the inputs
logger.debug(f"Preparing inputs for data point: {data_point}")
list_inputs = get_app_inputs(app_variant_parameters, openapi_parameters)
@@ -231,7 +231,7 @@ def evaluate(
output=app_output.result.value,
data_point=data_point,
settings_values=evaluator_config_db.settings_values,
- app_params=app_variant_parameters, # type: ignore
+ app_params=app_variant_parameters, # type: ignore
inputs=data_point,
lm_providers_keys=lm_providers_keys,
)
@@ -313,10 +313,7 @@ def evaluate(
"status": Result(
type="status",
value="EVALUATION_FAILED",
- error=Error(
- message="Evaluation Failed",
- stacktrace=str(e)
- )
+ error=Error(message="Evaluation Failed", stacktrace=str(e)),
).dict()
},
)
@@ -334,7 +331,9 @@ def evaluate(
)
failed_evaluation_scenarios = loop.run_until_complete(
- check_if_evaluation_contains_failed_evaluation_scenarios(str(new_evaluation_db.id))
+ check_if_evaluation_contains_failed_evaluation_scenarios(
+ str(new_evaluation_db.id)
+ )
)
evaluation_status = Result(
@@ -351,17 +350,17 @@ def evaluate(
loop.run_until_complete(
update_evaluation(
evaluation_id=str(new_evaluation_db.id),
- updates={"status": evaluation_status.dict()}
+ updates={"status": evaluation_status.dict()},
)
)
async def aggregate_evaluator_results(
- evaluators_aggregated_data: dict
+ evaluators_aggregated_data: dict,
) -> List[AggregatedResult]:
"""
Aggregate the results of the evaluation evaluator.
-
+
Args:
evaluators_aggregated_data (dict): The evaluators aggregated data
@@ -407,7 +406,7 @@ async def aggregate_evaluator_results(
evaluator_config = await fetch_evaluator_config(config_id)
aggregated_result = AggregatedResult(
- evaluator_config=str(evaluator_config.id), # type: ignore
+ evaluator_config=str(evaluator_config.id), # type: ignore
result=result,
)
aggregated_results.append(aggregated_result)
From b59bf538d5bb412f2ef06c1d98e9c085b0802009 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 01:12:43 +0100
Subject: [PATCH 064/268] chore (backend): format db_manager with black@23.12.0
---
agenta-backend/agenta_backend/services/db_manager.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 8553771cf0..063f809f3a 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -2133,7 +2133,7 @@ async def add_zip_template(key, value):
)
session.add(template_db_instance)
await session.commit()
-
+
return str(template_db_instance.id)
From 947731d0c2d94d52b40b43082a844f6cdff25321 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 01:29:01 +0100
Subject: [PATCH 065/268] refactor (tools): added volumes for persistent
storage
---
docker-compose.yml | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index d5ebd2676d..658696bed4 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -176,6 +176,8 @@ services:
- "5432:5432"
networks:
- agenta-network
+ volumes:
+ - postgresdb-data:/var/lib/postgresql/data/
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
@@ -196,6 +198,8 @@ services:
- "5050:80"
networks:
- agenta-network
+ volumes:
+ - pgadmin-data:/var/lib/pgadmin
depends_on:
postgres:
condition: service_healthy
@@ -205,6 +209,8 @@ networks:
name: agenta-network
volumes:
- mongodb_data:
+ # mongodb_data:
redis_data:
nextjs_cache:
+ postgresdb-data:
+ pgadmin-data:
\ No newline at end of file
From 99672cf7c55d055d6fbfe90a21cdb4cedf20ce5d Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 01:30:11 +0100
Subject: [PATCH 066/268] refactor (tools): removed redundant mongo services
and added postgres to test compose
---
docker-compose.test.yml | 65 +++++++++++++++++------------------------
1 file changed, 26 insertions(+), 39 deletions(-)
diff --git a/docker-compose.test.yml b/docker-compose.test.yml
index 061e09d174..47f90a6e77 100644
--- a/docker-compose.test.yml
+++ b/docker-compose.test.yml
@@ -1,7 +1,7 @@
-version: '3.8'
services:
reverse-proxy:
image: traefik:v2.10
+ container_name: agenta-reverse_proxy-test
command: --api.dashboard=true --api.insecure=true --providers.docker --entrypoints.web.address=:80
ports:
- "80:80"
@@ -15,14 +15,13 @@ services:
build: ./agenta-backend
container_name: agenta-backend-test
environment:
- - MONGODB_URI=mongodb://username:password@mongo:27017/
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=${ENVIRONMENT}
- BARE_DOMAIN_NAME=localhost
- DOMAIN_NAME=http://localhost
- CELERY_BROKER_URL=amqp://guest@rabbitmq//
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- - DATABASE_MODE=test
- FEATURE_FLAG=oss
- OPENAI_API_KEY=${OPENAI_API_KEY}
- AGENTA_TEMPLATE_REPO=agentaai/templates_v2
@@ -57,44 +56,13 @@ services:
- "traefik.http.services.backend.loadbalancer.server.port=8000"
- "traefik.http.routers.backend.service=backend"
depends_on:
- mongo:
+ postgres:
condition: service_healthy
extra_hosts:
- host.docker.internal:host-gateway
networks:
- agenta-network
- mongo_express:
- image: mongo-express:0.54.0
- environment:
- ME_CONFIG_MONGODB_ADMINUSERNAME: username
- ME_CONFIG_MONGODB_ADMINPASSWORD: password
- ME_CONFIG_MONGODB_SERVER: mongo
- ports:
- - "8081:8081"
- networks:
- - agenta-network
- depends_on:
- mongo:
- condition: service_healthy
- restart: always
-
- mongo:
- image: mongo:5.0
- container_name: agenta-mongo-test
- environment:
- MONGO_INITDB_ROOT_USERNAME: username
- MONGO_INITDB_ROOT_PASSWORD: password
- ports:
- - "27017:27017"
- healthcheck:
- test: [ "CMD", "mongo", "--eval", "db.adminCommand('ping')" ]
- interval: 10s
- timeout: 10s
- retries: 20
- networks:
- - agenta-network
-
redis:
image: redis:latest
container_name: agenta-redis-test
@@ -123,29 +91,48 @@ services:
command: >
watchmedo auto-restart --directory=./agenta_backend --pattern=*.py --recursive -- celery -A agenta_backend.main.celery_app worker --concurrency=1 --loglevel=INFO
environment:
- - MONGODB_URI=mongodb://username:password@mongo:27017
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=${ENVIRONMENT}
- CELERY_BROKER_URL=amqp://guest@rabbitmq//
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- FEATURE_FLAG=oss
- - DATABASE_MODE=test
volumes:
- ./agenta-backend/agenta_backend:/app/agenta_backend
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- rabbitmq
- redis
- - mongo
+ - postgres
extra_hosts:
- host.docker.internal:host-gateway
networks:
- agenta-network
+ postgres:
+ image: postgres:16.2
+ container_name: agenta-postgresdb-test
+ restart: always
+ environment:
+ POSTGRES_USER: username
+ POSTGRES_PASSWORD: password
+ ports:
+ - "5432:5432"
+ networks:
+ - agenta-network
+ volumes:
+ - postgresdb-data:/var/lib/postgresql/data/
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U postgres"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
networks:
agenta-network:
name: agenta-network
+ external: true
volumes:
- mongodb_data:
+ postgresdb-data:
redis_data:
From e3aba63bf2ec877a49d20a89a6a5e085c3a8bbc0 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 14 Jun 2024 08:03:31 +0200
Subject: [PATCH 067/268] small improvements
---
.../migrations/mongo_to_postgres/utils.py | 27 ++++++++++++-------
.../agenta_backend/models/db_models.py | 9 ++++++-
2 files changed, 25 insertions(+), 11 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index bc41d81dd9..2e64dedf25 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -49,12 +49,14 @@ async def create_all_tables(tables):
async def store_mapping(table_name, mongo_id, uuid):
"""Store the mapping of MongoDB ObjectId to UUID in the mapping table."""
+ id = generate_uuid()
async with db_engine.get_session() as session:
async with session.begin():
mapping = IDsMappingDB(
- table_name=table_name, objectid=str(mongo_id), uuid=uuid
+ id=id, table_name=table_name, objectid=str(mongo_id), uuid=uuid
)
session.add(mapping)
+ await session.commit()
async def get_mapped_uuid(mongo_id):
@@ -91,6 +93,10 @@ def print_migration_report():
# Headers
headers = ["Table", "Total in MongoDB", "Migrated to PostgreSQL"]
+ if not migration_report:
+ print("No data available in the migration report.")
+ return
+
# Determine the maximum lengths for each column including headers
max_table_length = max(
len(headers[0]), max(len(table) for table in migration_report.keys())
@@ -125,15 +131,16 @@ async def migrate_collection(
)
total_docs = mongo_db[collection_name].count_documents({})
migrated_docs = 0
+
async with db_engine.get_session() as session:
- async with session.begin():
- for skip in range(0, total_docs, BATCH_SIZE):
- batch = await asyncio.get_event_loop().run_in_executor(
- None,
- lambda: list(
- mongo_db[collection_name].find().skip(skip).limit(BATCH_SIZE)
- ),
- )
+ for skip in range(0, total_docs, BATCH_SIZE):
+ batch = await asyncio.get_event_loop().run_in_executor(
+ None,
+ lambda: list(
+ mongo_db[collection_name].find().skip(skip).limit(BATCH_SIZE)
+ ),
+ )
+ async with session.begin():
for document in batch:
if association_model:
(
@@ -147,5 +154,5 @@ async def migrate_collection(
transformed_document = await transformation_func(document)
session.add(model_class(**transformed_document))
migrated_docs += 1
- await session.commit()
+ await session.commit()
update_migration_report(collection_name, total_docs, migrated_docs)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index af2bea84a2..51a45b13cd 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -523,6 +523,13 @@ class EvaluationScenarioDB(Base):
class IDsMappingDB(Base):
__tablename__ = "ids_mapping"
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
table_name = Column(String, nullable=False)
- objectid = Column(String, primary_key=True)
+ objectid = Column(String, nullable=False)
uuid = Column(UUID(as_uuid=True), nullable=False)
From f0715d7c264e2875077525b91a4f0db6f1b9f872 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 09:51:51 +0100
Subject: [PATCH 068/268] minor refactor (tests): improve event_loop conftest
fixture and fix import
---
agenta-backend/agenta_backend/tests/conftest.py | 14 +++++++++-----
.../tests/variants_main_router/conftest.py | 2 +-
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/agenta-backend/agenta_backend/tests/conftest.py b/agenta-backend/agenta_backend/tests/conftest.py
index 0c289c9894..7656afd98f 100644
--- a/agenta-backend/agenta_backend/tests/conftest.py
+++ b/agenta-backend/agenta_backend/tests/conftest.py
@@ -6,16 +6,20 @@
@pytest.fixture(scope="session", autouse=True)
def event_loop():
- """Create an instance of the default event loop for each test case."""
+ """
+ Create an instance of the default event loop for each test case.
+ """
+
policy = asyncio.get_event_loop_policy()
res = policy.new_event_loop()
asyncio.set_event_loop(res)
- res._close = res.close
+ res._close = res.close # type: ignore
- # Initialize beanie
+ # Initialize database and create tables
res.run_until_complete(DBEngine().init_db())
yield res
- res._close() # close event loop
- DBEngine().remove_db() # drop database
+ res.run_until_complete(DBEngine().close()) # close connections to database
+ res.run_until_complete(DBEngine().remove_db()) # drop database
+ res._close() # close event loop # type: ignore
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py b/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
index a78884858e..bf4a1008aa 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
@@ -3,12 +3,12 @@
import logging
from datetime import datetime, timezone
+from agenta_backend.models.shared_models import ConfigDB
from agenta_backend.models.db_models import (
AppDB,
UserDB,
VariantBaseDB,
ImageDB,
- ConfigDB,
AppVariantDB,
)
From 38859c70d87551874745f86d5604ce5370bcb034 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 16:45:43 +0100
Subject: [PATCH 069/268] refactor (backend): move human evaluation db queries
from evaluation_service to db_manager
---
.../agenta_backend/services/db_manager.py | 276 +++++++++++++++++-
.../services/evaluation_service.py | 179 ++++--------
2 files changed, 333 insertions(+), 122 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 063f809f3a..f17d1086ac 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -71,11 +71,13 @@
TemplateDB,
EvaluatorConfigDB,
AppVariantRevisionsDB,
+ HumanEvaluationVariantDB,
EvaluationScenarioResultDB,
EvaluationAggregatedResultDB,
)
from agenta_backend.models.shared_models import (
+ HumanEvaluationScenarioInput,
Result,
ConfigDB,
CorrectAnswer,
@@ -205,7 +207,10 @@ async def fetch_app_variant_by_id(
async with db_engine.get_session() as session:
result = await session.execute(
select(AppVariantDB)
- .options(joinedload(AppVariantDB.base), joinedload(AppVariantDB.app))
+ .options(
+ joinedload(AppVariantDB.base),
+ joinedload(AppVariantDB.app),
+ )
.filter_by(id=uuid.UUID(app_variant_id))
)
app_variant = result.scalars().one_or_none()
@@ -1906,6 +1911,137 @@ async def fetch_evaluation_by_id(evaluation_id: str) -> Optional[EvaluationDB]:
return evaluation
+async def list_human_evaluations(app_id: str):
+ """
+ Fetches human evaluations belonging to an App.
+
+ Args:
+ app_id (str): The application identifier
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationDB)
+ .options(
+ joinedload(HumanEvaluationDB.user).load_only(UserDB.id, UserDB.username), # type: ignore
+ joinedload(HumanEvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+ .filter_by(app_id=uuid.UUID(app_id))
+ )
+ human_evaluations = result.scalars().all()
+ return human_evaluations
+
+
+async def create_human_evaluation(
+ app: AppDB,
+ user_id: str,
+ status: str,
+ evaluation_type: str,
+ testset_id: str,
+ variants_ids: List[str],
+):
+ """
+ Creates a human evaluation.
+
+ Args:
+ app (AppDB): The app object
+ user_id (id): The ID of the user
+ status (str): The status of the evaluation
+ evaluation_type (str): The evaluation type
+ testset_id (str): The ID of the evaluation testset
+ variants_ids (List[str]): The IDs of the variants for the evaluation
+ """
+
+ async with db_engine.get_session() as session:
+ human_evaluation = HumanEvaluationDB(
+ app_id=app.id,
+ user_id=uuid.UUID(user_id),
+ status=status,
+ evaluation_type=evaluation_type,
+ testset_id=testset_id,
+ )
+ if isCloudEE():
+ human_evaluation.organization_id = str(app.organization_id)
+ human_evaluation.workspace_id = str(app.workspace_id)
+
+ session.add(human_evaluation)
+ await session.commit()
+ await session.refresh(human_evaluation, attribute_names=["testset"])
+
+ # create variants for human evaluation
+ await create_human_evaluation_variants(
+ human_evaluation_id=str(human_evaluation.id),
+ variants_ids=variants_ids
+ )
+ return human_evaluation
+
+
+async def fetch_human_evaluation_variants(human_evaluation_id: str):
+ """
+ Fetches human evaluation variants.
+
+ Args:
+ human_evaluation_id (str): The human evaluation ID
+
+ Returns:
+ The human evaluation variants.
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationVariantDB)
+ .options(
+ joinedload(HumanEvaluationVariantDB.variant)
+ .load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant_revision)
+ .load_only(AppVariantRevisionsDB.revision, AppVariantRevisionsDB.id) # type: ignore
+ )
+ .filter_by(human_evaluation_id=uuid.UUID(human_evaluation_id))
+ )
+ evaluation_variants = result.scalars().all()
+ return evaluation_variants
+
+
+async def create_human_evaluation_variants(human_evaluation_id: str, variants_ids: List[str]):
+ """
+ Creates human evaluation variants.
+
+ Args:
+ human_evaluation_id (str): The human evaluation identifier
+ variants_ids (List[str]): The variants identifiers
+ """
+
+ variants_dict = {}
+ for variant_id in variants_ids:
+ variant = await fetch_app_variant_by_id(app_variant_id=variant_id)
+ if variant:
+ variants_dict[variant_id] = variant
+
+ variants_revisions_dict = {}
+ for variant_id, variant in variants_dict.items():
+ variant_revision = await fetch_app_variant_revision_by_variant(
+ app_variant_id=str(variant.id), revision=variant.revision # type: ignore
+ )
+ if variant_revision:
+ variants_revisions_dict[variant_id] = variant_revision
+
+ if set(variants_dict.keys()) != set(variants_revisions_dict.keys()):
+ raise ValueError("Mismatch between variants and their revisions")
+
+ async with db_engine.get_session() as session:
+ for variant_id in variants_ids:
+ variant = variants_dict[variant_id]
+ variant_revision = variants_revisions_dict[variant_id]
+ human_evaluation_variant = HumanEvaluationVariantDB(
+ human_evaluation_id=uuid.UUID(human_evaluation_id),
+ variant_id=variant.id, # type: ignore
+ variant_revision_id=variant_revision.id # type: ignore
+ )
+ session.add(human_evaluation_variant)
+
+ await session.commit()
+
+
async def fetch_human_evaluation_by_id(
evaluation_id: str,
) -> Optional[HumanEvaluationDB]:
@@ -1919,12 +2055,148 @@ async def fetch_human_evaluation_by_id(
assert evaluation_id is not None, "evaluation_id cannot be None"
async with db_engine.get_session() as session:
result = await session.execute(
- select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ select(HumanEvaluationDB)
+ .options(
+ joinedload(HumanEvaluationDB.user).load_only(UserDB.username), # type: ignore
+ joinedload(HumanEvaluationDB.testset).load_only(TestSetDB.name) # type: ignore
+ )
+ .filter_by(id=uuid.UUID(evaluation_id))
)
evaluation = result.scalars().one_or_none()
return evaluation
+async def update_human_evaluation(evaluation_id: str, values_to_update: dict):
+ """Updates human evaluation with the specified values.
+
+ Args:
+ evaluation_id (str): The evaluation ID
+ values_to_update (dict): The values to update
+
+ Exceptions:
+ NoResultFound: if human evaluation is not found
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ )
+ human_evaluation = result.scalars().one_or_none()
+ if not human_evaluation:
+ raise NoResultFound(f"Human evaluation with id {evaluation_id} not found")
+
+ for key, value in values_to_update.items():
+ if hasattr(human_evaluation, key):
+ setattr(human_evaluation, key, value)
+
+ await session.commit()
+ await session.refresh(human_evaluation)
+
+
+async def delete_human_evaluation(evaluation_id: str):
+ """Delete the evaluation by its ID.
+
+ Args:
+ evaluation_id (str): The ID of the evaluation to delete.
+ """
+
+ assert evaluation_id is not None, "evaluation_id cannot be None"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ )
+ evaluation = result.scalars().one_or_none()
+ if not evaluation:
+ raise NoResultFound(f"Human evaluation with id {evaluation_id} not found")
+
+ await session.delete(evaluation)
+ await session.commit()
+
+
+async def create_human_evaluation_scenario(
+ inputs: List[HumanEvaluationScenarioInput],
+ user_id: str,
+ app: AppDB,
+ evaluation_id: str,
+ evaluation_extend: Dict[str, Any]
+):
+ """
+ Creates a human evaluation scenario.
+
+ Args:
+ inputs (List[HumanEvaluationScenarioInput]): The inputs.
+ user_id (str): The user ID.
+ app (AppDB): The app object.
+ evaluation_id (str): The evaluation identifier.
+ evaluation_extend (Dict[str, Any]): An extended required payload for the evaluation scenario. Contains score, vote, and correct_answer.
+ """
+
+ async with db_engine.get_session() as session:
+ evaluation_scenario = HumanEvaluationScenarioDB(
+ **evaluation_extend,
+ user_id=uuid.UUID(user_id),
+ evaluation_id=uuid.UUID(evaluation_id),
+ inputs=[input.dict() for input in inputs],
+ outputs=[],
+ )
+
+ if isCloudEE():
+ evaluation_scenario.organization_id = str(app.organization_id)
+ evaluation_scenario.workspace_id = str(app.workspace_id)
+
+ session.add(evaluation_scenario)
+ await session.commit()
+
+
+async def update_human_evaluation_scenario(evaluation_scenario_id: str, values_to_update: dict):
+ """Updates human evaluation scenario with the specified values.
+
+ Args:
+ evaluation_scenario_id (str): The evaluation scenario ID
+ values_to_update (dict): The values to update
+
+ Exceptions:
+ NoResultFound: if human evaluation scenario is not found
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationScenarioDB)
+ .filter_by(id=uuid.UUID(evaluation_scenario_id))
+ )
+ human_evaluation_scenario = result.scalars().one_or_none()
+ if not human_evaluation_scenario:
+ raise NoResultFound(f"Human evaluation scenario with id {evaluation_scenario_id} not found")
+
+ for key, value in values_to_update.items():
+ if hasattr(human_evaluation_scenario, key):
+ setattr(human_evaluation_scenario, key, value)
+
+ await session.commit()
+ await session.refresh(human_evaluation_scenario)
+
+
+async def fetch_human_evaluation_scenarios(evaluation_id: str):
+ """
+ Fetches human evaluation scenarios.
+
+ Args:
+ evaluation_id (str): The evaluation identifier
+
+ Returns:
+ The evaluation scenarios.
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationScenarioDB).filter_by(
+ evaluation_id=uuid.UUID(evaluation_id)
+ )
+ )
+ evaluation_scenarios = result.scalars().all()
+ return evaluation_scenarios
+
+
async def fetch_evaluation_scenarios(evaluation_id: str):
"""
Fetches evaluation scenarios.
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index b7f540c2a3..63736f0f43 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -67,22 +67,6 @@ class UpdateEvaluationScenarioError(Exception):
pass
-async def _fetch_human_evaluation(evaluation_id: str) -> HumanEvaluationDB:
- # Fetch the evaluation by ID
- evaluation = await db_manager.fetch_human_evaluation_by_id(
- evaluation_id=evaluation_id
- )
-
- # Check if the evaluation exists
- if evaluation is None:
- raise HTTPException(
- status_code=404,
- detail=f"Evaluation with id {evaluation_id} not found",
- )
-
- return evaluation
-
-
async def prepare_csvdata_and_create_evaluation_scenario(
csvdata: List[Dict[str, str]],
payload_inputs: List[str],
@@ -112,7 +96,7 @@ async def prepare_csvdata_and_create_evaluation_scenario(
for name in payload_inputs
]
except KeyError:
- await new_evaluation.delete()
+ await db_manager.delete_human_evaluation(evaluation_id=str(new_evaluation.id))
msg = f"""
Columns in the test set should match the names of the inputs in the variant.
Inputs names in variant are: {[variant_input for variant_input in payload_inputs]} while
@@ -122,7 +106,8 @@ async def prepare_csvdata_and_create_evaluation_scenario(
status_code=400,
detail=msg,
)
- # Create evaluation scenarios
+
+ # Prepare scenario inputs
list_of_scenario_input = []
for scenario_input in inputs:
eval_scenario_input_instance = HumanEvaluationScenarioInput(
@@ -131,29 +116,18 @@ async def prepare_csvdata_and_create_evaluation_scenario(
)
list_of_scenario_input.append(eval_scenario_input_instance)
- evaluation_scenario_payload = {
- **{
- "created_at": datetime.now(timezone.utc),
- "updated_at": datetime.now(timezone.utc),
- },
+ evaluation_scenario_extend_payload = {
**_extend_with_evaluation(evaluation_type),
**_extend_with_correct_answer(evaluation_type, datum),
}
-
- eval_scenario_instance = HumanEvaluationScenarioDB(
- **evaluation_scenario_payload,
- user=user,
- evaluation=new_evaluation,
+ await db_manager.create_human_evaluation_scenario(
inputs=list_of_scenario_input,
- outputs=[],
+ user_id=str(user.id),
+ app=app,
+ evaluation_id=str(new_evaluation.id),
+ evaluation_extend=evaluation_scenario_extend_payload
)
- if isCloudEE():
- eval_scenario_instance.organization = app.organization
- eval_scenario_instance.workspace = app.workspace
-
- await eval_scenario_instance.create()
-
async def update_human_evaluation_service(
evaluation: EvaluationDB, update_payload: HumanEvaluationUpdate
@@ -164,18 +138,13 @@ async def update_human_evaluation_service(
Args:
evaluation (EvaluationDB): The evaluation instance.
update_payload (EvaluationUpdate): The payload for the update.
-
- Raises:
- HTTPException: If the evaluation is not found or access is denied.
"""
- # Prepare updates
- updates = {}
- if update_payload.status is not None:
- updates["status"] = update_payload.status
-
# Update the evaluation
- await evaluation.update({"$set": updates})
+ await db_manager.update_human_evaluation(
+ evaluation_id=str(evaluation.id),
+ values_to_update=update_payload.dict()
+ )
async def fetch_evaluation_scenarios_for_evaluation(evaluation_id: str):
@@ -215,14 +184,15 @@ async def fetch_human_evaluation_scenarios_for_evaluation(
Returns:
List[EvaluationScenario]: A list of evaluation scenarios.
"""
- scenarios = await HumanEvaluationScenarioDB.find(
- HumanEvaluationScenarioDB.evaluation.id == ObjectId(human_evaluation.id),
- ).to_list()
+ human_evaluation_scenarios = await db_manager.fetch_human_evaluation_scenarios(
+ evaluation_id=str(human_evaluation.id)
+ )
eval_scenarios = [
converters.human_evaluation_scenario_db_to_pydantic(
- scenario, str(human_evaluation.id)
+ evaluation_scenario_db=human_evaluation_scenario,
+ evaluation_id=str(human_evaluation.id)
)
- for scenario in scenarios
+ for human_evaluation_scenario in human_evaluation_scenarios
]
return eval_scenarios
@@ -244,50 +214,48 @@ async def update_human_evaluation_scenario(
HTTPException: If evaluation scenario not found or access denied.
"""
- updated_data = evaluation_scenario_data.dict()
- updated_data["updated_at"] = datetime.now(timezone.utc)
- new_eval_set = {}
-
- if updated_data["score"] is not None and evaluation_type in [
- EvaluationType.single_model_test,
- ]:
- new_eval_set["score"] = updated_data["score"]
- elif (
- updated_data["vote"] is not None
- and evaluation_type == EvaluationType.human_a_b_testing
- ):
- new_eval_set["vote"] = updated_data["vote"]
-
- if updated_data["outputs"] is not None:
+ values_to_update = {}
+ payload = evaluation_scenario_data.dict()
+
+ if payload["score"] is not None and evaluation_type == EvaluationType.single_model_test:
+ values_to_update["score"] = payload["score"]
+
+ if payload["vote"] is not None and evaluation_type == EvaluationType.human_a_b_testing:
+ values_to_update["vote"] = payload["vote"]
+
+ if payload["outputs"] is not None:
new_outputs = [
HumanEvaluationScenarioOutput(
variant_id=output["variant_id"],
variant_output=output["variant_output"],
).dict()
- for output in updated_data["outputs"]
+ for output in payload["outputs"]
]
- new_eval_set["outputs"] = new_outputs
+ values_to_update["outputs"] = new_outputs
- if updated_data["inputs"] is not None:
+ if payload["inputs"] is not None:
new_inputs = [
HumanEvaluationScenarioInput(
input_name=input_item["input_name"],
input_value=input_item["input_value"],
).dict()
- for input_item in updated_data["inputs"]
+ for input_item in payload["inputs"]
]
- new_eval_set["inputs"] = new_inputs
+ values_to_update["inputs"] = new_inputs
- if updated_data["is_pinned"] is not None:
- new_eval_set["is_pinned"] = updated_data["is_pinned"]
+ if payload["is_pinned"] is not None:
+ values_to_update["is_pinned"] = payload["is_pinned"]
- if updated_data["note"] is not None:
- new_eval_set["note"] = updated_data["note"]
+ if payload["note"] is not None:
+ values_to_update["note"] = payload["note"]
- if updated_data["correct_answer"] is not None:
- new_eval_set["correct_answer"] = updated_data["correct_answer"]
+ if payload["correct_answer"] is not None:
+ values_to_update["correct_answer"] = payload["correct_answer"]
- await evaluation_scenario_db.update({"$set": new_eval_set})
+ await db_manager.update_human_evaluation_scenario(
+ evaluation_scenario_id=str(evaluation_scenario_db.id),
+ values_to_update=values_to_update
+ )
def _extend_with_evaluation(evaluation_type: EvaluationType):
@@ -339,9 +307,8 @@ async def fetch_list_human_evaluations(
Returns:
List[Evaluation]: A list of evaluations.
"""
- evaluations_db = await HumanEvaluationDB.find(
- HumanEvaluationDB.app.id == ObjectId(app_id), fetch_links=True
- ).to_list()
+
+ evaluations_db = await db_manager.list_human_evaluations(app_id=app_id)
return [
await converters.human_evaluation_db_to_pydantic(evaluation)
for evaluation in evaluations_db
@@ -358,6 +325,7 @@ async def fetch_human_evaluation(human_evaluation_db) -> HumanEvaluation:
Returns:
Evaluation: The fetched evaluation.
"""
+
return await converters.human_evaluation_db_to_pydantic(human_evaluation_db)
@@ -369,11 +337,11 @@ async def delete_human_evaluations(evaluation_ids: List[str]) -> None:
evaluation_ids (List[str]): A list of evaluation IDs.
Raises:
- HTTPException: If evaluation not found or access denied.
+ NoResultFound: If evaluation not found or access denied.
"""
+
for evaluation_id in evaluation_ids:
- evaluation = await _fetch_human_evaluation(evaluation_id=evaluation_id)
- await evaluation.delete()
+ await db_manager.delete_human_evaluation(evaluation_id=evaluation_id)
async def delete_evaluations(evaluation_ids: List[str]) -> None:
@@ -403,11 +371,8 @@ async def create_new_human_evaluation(
Returns:
HumanEvaluationDB
"""
- user = await db_manager.get_user(user_uid)
- current_time = datetime.now(timezone.utc)
-
- # Fetch app
+ user = await db_manager.get_user(user_uid)
app = await db_manager.fetch_app_by_id(app_id=payload.app_id)
if app is None:
raise HTTPException(
@@ -415,54 +380,28 @@ async def create_new_human_evaluation(
detail=f"App with id {payload.app_id} does not exist",
)
- variants = [ObjectId(variant_id) for variant_id in payload.variant_ids]
- variant_dbs = [
- await db_manager.fetch_app_variant_by_id(variant_id)
- for variant_id in payload.variant_ids
- ]
-
- testset = await db_manager.fetch_testset_by_id(testset_id=payload.testset_id)
- # Initialize and save evaluation instance to database
- variants_revisions = [
- await db_manager.fetch_app_variant_revision_by_variant(
- str(variant_db.id), int(variant_db.revision)
- )
- for variant_db in variant_dbs
- ]
- eval_instance = HumanEvaluationDB(
+ human_evaluation = await db_manager.create_human_evaluation(
app=app,
- user=user,
+ user_id=str(user.id),
status=payload.status,
evaluation_type=payload.evaluation_type,
- variants=variants,
- variants_revisions=[
- ObjectId(str(variant_revision.id))
- for variant_revision in variants_revisions
- ],
- testset=testset,
- created_at=current_time,
- updated_at=current_time,
+ testset_id=payload.testset_id,
+ variants_ids=payload.variant_ids
)
-
- if isCloudEE():
- eval_instance.organization = app.organization
- eval_instance.workspace = app.workspace
-
- newEvaluation = await eval_instance.create()
- if newEvaluation is None:
+ if human_evaluation is None:
raise HTTPException(
status_code=500, detail="Failed to create evaluation_scenario"
)
await prepare_csvdata_and_create_evaluation_scenario(
- testset.csvdata,
+ human_evaluation.testset.csvdata,
payload.inputs,
payload.evaluation_type,
- newEvaluation,
+ human_evaluation,
user,
app,
)
- return newEvaluation
+ return human_evaluation
async def create_new_evaluation(
From 3cd5d29942a436046d47e9747b930da690a39cc0 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 16:46:40 +0100
Subject: [PATCH 070/268] refactor (backend): improve converters and
results_service for human evaluation and scenarios
---
.../agenta_backend/models/converters.py | 91 ++++++++++---------
.../services/results_service.py | 71 ++++++++-------
2 files changed, 86 insertions(+), 76 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index 7495d18630..92eab66aef 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -98,15 +98,21 @@
logger.setLevel(logging.DEBUG)
-def human_evaluation_db_to_simple_evaluation_output(
+async def human_evaluation_db_to_simple_evaluation_output(
human_evaluation_db: HumanEvaluationDB,
) -> SimpleEvaluationOutput:
+ evaluation_variants = await db_manager.fetch_human_evaluation_variants(
+ human_evaluation_id=str(human_evaluation_db.id)
+ )
return SimpleEvaluationOutput(
id=str(human_evaluation_db.id),
- app_id=str(human_evaluation_db.app.id),
- status=human_evaluation_db.status,
- evaluation_type=human_evaluation_db.evaluation_type,
- variant_ids=[str(variant) for variant in human_evaluation_db.variants],
+ app_id=str(human_evaluation_db.app_id),
+ status=human_evaluation_db.status, # type: ignore
+ evaluation_type=human_evaluation_db.evaluation_type, # type: ignore
+ variant_ids=[
+ str(evaluation_variant.variant_id)
+ for evaluation_variant in evaluation_variants
+ ],
)
@@ -149,43 +155,40 @@ async def evaluation_db_to_pydantic(
async def human_evaluation_db_to_pydantic(
evaluation_db: HumanEvaluationDB,
) -> HumanEvaluation:
- variant_names = []
- for variant_id in evaluation_db.variants:
- variant = await db_manager.get_app_variant_instance_by_id(str(variant_id))
- variant_name = variant.variant_name if variant else str(variant_id)
- variant_names.append(str(variant_name))
+ evaluation_variants = await db_manager.fetch_human_evaluation_variants(
+ human_evaluation_id=str(evaluation_db.id) # type: ignore
+ )
+
revisions = []
- for variant_revision_id in evaluation_db.variants_revisions:
- variant_revision = await db_manager.get_app_variant_revision_by_id(
- str(variant_revision_id)
+ variants_ids = []
+ variants_names = []
+ variants_revision_ids = []
+ for evaluation_variant in evaluation_variants:
+ variant_name = (
+ evaluation_variant.variant.variant_name
+ if isinstance(evaluation_variant.variant_id, uuid.UUID)
+ else str(evaluation_variant.variant.variant_id)
)
- revision = variant_revision.revision
- revisions.append(str(revision))
+ variants_names.append(str(variant_name))
+ variants_ids.append(str(evaluation_variant.variant.id))
+ revisions.append(str(evaluation_variant.variant_revision.revision))
+ variants_revision_ids.append(str(evaluation_variant.variant_revision.id))
return HumanEvaluation(
id=str(evaluation_db.id),
- app_id=str(evaluation_db.app.id),
- user_id=str(evaluation_db.user.id),
+ app_id=str(evaluation_db.app_id),
+ user_id=str(evaluation_db.user_id),
user_username=evaluation_db.user.username or "",
- status=evaluation_db.status,
- evaluation_type=evaluation_db.evaluation_type,
- variant_ids=[str(variant) for variant in evaluation_db.variants],
- variant_names=variant_names,
- testset_id=(
- "" if type(evaluation_db.testset) is Link else str(evaluation_db.testset.id)
- ),
- testset_name=(
- ""
- if type(evaluation_db.testset) is Link
- else str(evaluation_db.testset.name)
- ),
- variants_revision_ids=[
- str(variant_revision)
- for variant_revision in evaluation_db.variants_revisions
- ],
+ status=evaluation_db.status, # type: ignore
+ evaluation_type=evaluation_db.evaluation_type, # type: ignore
+ variant_ids=variants_ids,
+ variant_names=variants_names,
+ testset_id=str(evaluation_db.testset_id),
+ testset_name=evaluation_db.testset.name,
+ variants_revision_ids=variants_revision_ids,
revisions=revisions,
- created_at=evaluation_db.created_at,
- updated_at=evaluation_db.updated_at,
+ created_at=str(evaluation_db.created_at), # type: ignore
+ updated_at=str(evaluation_db.updated_at), # type: ignore
)
@@ -195,13 +198,13 @@ def human_evaluation_scenario_db_to_pydantic(
return HumanEvaluationScenario(
id=str(evaluation_scenario_db.id),
evaluation_id=evaluation_id,
- inputs=evaluation_scenario_db.inputs,
- outputs=evaluation_scenario_db.outputs,
- vote=evaluation_scenario_db.vote,
- score=evaluation_scenario_db.score,
- correct_answer=evaluation_scenario_db.correct_answer,
- is_pinned=evaluation_scenario_db.is_pinned or False,
- note=evaluation_scenario_db.note or "",
+ inputs=evaluation_scenario_db.inputs, # type: ignore
+ outputs=evaluation_scenario_db.outputs, # type: ignore
+ vote=evaluation_scenario_db.vote, # type: ignore
+ score=evaluation_scenario_db.score, # type: ignore
+ correct_answer=evaluation_scenario_db.correct_answer, # type: ignore
+ is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore
+ note=evaluation_scenario_db.note or "", # type: ignore
)
@@ -291,8 +294,8 @@ def app_variant_db_to_pydantic(
async def app_variant_db_to_output(app_variant_db: AppVariantDB) -> AppVariantResponse:
if (
- type(app_variant_db.base_id) == uuid.UUID
- and type(app_variant_db.base.deployment_id) == uuid.UUID
+ isinstance(app_variant_db.base_id, uuid.UUID)
+ and isinstance(app_variant_db.base.deployment_id, uuid.UUID)
):
deployment = await db_manager.get_deployment_by_id(
str(app_variant_db.base.deployment_id)
diff --git a/agenta-backend/agenta_backend/services/results_service.py b/agenta-backend/agenta_backend/services/results_service.py
index 86425f9fa5..96b0aa6f3e 100644
--- a/agenta-backend/agenta_backend/services/results_service.py
+++ b/agenta-backend/agenta_backend/services/results_service.py
@@ -1,4 +1,5 @@
-from beanie import PydanticObjectId as ObjectId
+import uuid
+from typing import Sequence
from agenta_backend.services import db_manager
from agenta_backend.utils.common import isCloudEE
@@ -19,23 +20,35 @@
async def fetch_results_for_evaluation(evaluation: HumanEvaluationDB):
- evaluation_scenarios = await HumanEvaluationScenarioDB.find(
- HumanEvaluationScenarioDB.evaluation.id == evaluation.id,
- ).to_list()
+ evaluation_scenarios = await db_manager.fetch_human_evaluation_scenarios(
+ evaluation_id=str(evaluation.id)
+ )
results = {}
if len(evaluation_scenarios) == 0:
return results
- results["variants"] = [str(variant) for variant in evaluation.variants]
- variant_names = []
- for variant_id in evaluation.variants:
- variant = await db_manager.get_app_variant_instance_by_id(str(variant_id))
- variant_name = variant.variant_name if variant else str(variant_id)
+ evaluation_variants = await db_manager.fetch_human_evaluation_variants(
+ human_evaluation_id=str(evaluation.id)
+ )
+ results["variants"] = [
+ str(evaluation_variant.variant_id)
+ for evaluation_variant in evaluation_variants
+ ]
+
+ variant_names: list[str] = []
+ for evaluation_variant in evaluation_variants:
+ variant_name = (
+ evaluation_variant.variant.variant_name
+ if isinstance(evaluation_variant.variant_id, uuid.UUID)
+ else str(evaluation_variant.variant_id)
+ )
variant_names.append(str(variant_name))
+
results["variant_names"] = variant_names
results["nb_of_rows"] = len(evaluation_scenarios)
- if evaluation.evaluation_type == EvaluationType.human_a_b_testing:
+
+ if evaluation.evaluation_type == EvaluationType.human_a_b_testing: # type: ignore
results.update(
await _compute_stats_for_human_a_b_testing_evaluation(evaluation_scenarios)
)
@@ -52,7 +65,9 @@ async def _compute_stats_for_evaluation(evaluation_scenarios: list, classes: lis
return results
-async def _compute_stats_for_human_a_b_testing_evaluation(evaluation_scenarios: list):
+async def _compute_stats_for_human_a_b_testing_evaluation(
+ evaluation_scenarios: Sequence[EvaluationScenarioDB]
+):
results = {}
results["variants_votes_data"] = {}
results["flag_votes"] = {}
@@ -79,12 +94,14 @@ async def _compute_stats_for_human_a_b_testing_evaluation(evaluation_scenarios:
if len(evaluation_scenarios)
else 0
)
+
for scenario in evaluation_scenarios:
if scenario.vote not in results["variants_votes_data"]:
results["variants_votes_data"][scenario.vote] = {}
results["variants_votes_data"][scenario.vote]["number_of_votes"] = 1
else:
results["variants_votes_data"][scenario.vote]["number_of_votes"] += 1
+
for key, value in results["variants_votes_data"].items():
value["percentage"] = round(
value["number_of_votes"] / len(evaluation_scenarios) * 100, 2
@@ -93,27 +110,17 @@ async def _compute_stats_for_human_a_b_testing_evaluation(evaluation_scenarios:
async def fetch_results_for_single_model_test(evaluation_id: str):
- results = await HumanEvaluationScenarioDB.find(
- HumanEvaluationScenarioDB.evaluation.id == ObjectId(evaluation_id)
- ).to_list()
+ evaluation_scenarios = await db_manager.fetch_human_evaluation_scenarios(
+ evaluation_id=str(evaluation_id)
+ )
scores_and_counts = {}
- for result in results:
- score = result.score
+ for evaluation_scenario in evaluation_scenarios:
+ score = evaluation_scenario.score
+ if isinstance(score, str):
+ if score.isdigit(): # Check if the string is a valid integer
+ score = int(score)
+ else:
+ continue # Skip if the string is not a valid integer
+
scores_and_counts[score] = scores_and_counts.get(score, 0) + 1
return scores_and_counts
-
-
-async def fetch_average_score_for_custom_code_run(evaluation_id: str) -> float:
- eval_scenarios = await EvaluationScenarioDB.find(
- EvaluationScenarioDB.evaluation.id == ObjectId(evaluation_id)
- ).to_list()
-
- list_of_scores = []
- for scenario in eval_scenarios:
- score = scenario.score
- if not scenario.score:
- score = 0
- list_of_scores.append(round(float(score), 2))
-
- average_score = sum(list_of_scores) / len(list_of_scores)
- return average_score
From 1243050a64da473057c2076ea2868f81787c588c Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 16:47:31 +0100
Subject: [PATCH 071/268] refactor (backend): ensured that exception is
  properly raised with its status code
---
.../models/api/evaluation_model.py | 5 +-
.../routers/human_evaluation_router.py | 83 ++++++++++++-------
2 files changed, 53 insertions(+), 35 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/evaluation_model.py b/agenta-backend/agenta_backend/models/api/evaluation_model.py
index 8852e4a242..fac3fcb64b 100644
--- a/agenta-backend/agenta_backend/models/api/evaluation_model.py
+++ b/agenta-backend/agenta_backend/models/api/evaluation_model.py
@@ -130,8 +130,8 @@ class HumanEvaluation(BaseModel):
testset_id: str
testset_name: str
status: str
- created_at: datetime
- updated_at: datetime
+ created_at: str
+ updated_at: str
class HumanEvaluationScenario(BaseModel):
@@ -141,7 +141,6 @@ class HumanEvaluationScenario(BaseModel):
outputs: List[HumanEvaluationScenarioOutput]
vote: Optional[str]
score: Optional[Union[str, int]]
- evaluation: Optional[str]
correct_answer: Optional[str]
is_pinned: Optional[bool]
note: Optional[str]
diff --git a/agenta-backend/agenta_backend/routers/human_evaluation_router.py b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
index e32d313bb5..5a40a91d0a 100644
--- a/agenta-backend/agenta_backend/routers/human_evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
@@ -64,15 +64,15 @@ async def create_evaluation(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
new_human_evaluation_db = await evaluation_service.create_new_human_evaluation(
payload, request.state.user_id
)
- return converters.human_evaluation_db_to_simple_evaluation_output(
+ return await converters.human_evaluation_db_to_simple_evaluation_output(
new_human_evaluation_db
)
except KeyError:
@@ -95,6 +95,7 @@ async def fetch_list_human_evaluations(
Returns:
List[HumanEvaluation]: A list of evaluations.
"""
+
try:
if isCloudEE():
has_permission = await check_action_access(
@@ -105,14 +106,15 @@ async def fetch_list_human_evaluations(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
return await evaluation_service.fetch_list_human_evaluations(app_id)
except Exception as e:
- raise HTTPException(status_code=500, detail=str(e)) from e
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(e)) from e
@router.get("/{evaluation_id}/", response_model=HumanEvaluation)
@@ -142,14 +144,15 @@ async def fetch_human_evaluation(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
return await evaluation_service.fetch_human_evaluation(human_evaluation)
except Exception as e:
- raise HTTPException(status_code=500, detail=str(e)) from e
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(e)) from e
@router.get(
@@ -180,6 +183,7 @@ async def fetch_evaluation_scenarios(
status_code=404,
detail=f"Evaluation with id {evaluation_id} not found",
)
+
if isCloudEE():
has_permission = await check_action_access(
user_uid=request.state.user_id,
@@ -189,8 +193,8 @@ async def fetch_evaluation_scenarios(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
@@ -202,7 +206,11 @@ async def fetch_evaluation_scenarios(
return eval_scenarios
except Exception as e:
- raise HTTPException(status_code=500, detail=str(e)) from e
+ import traceback
+
+ traceback.print_exc()
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(e)) from e
@router.put("/{evaluation_id}/", operation_id="update_human_evaluation")
@@ -219,6 +227,7 @@ async def update_human_evaluation(
Returns:
None: A 204 No Content status code, indicating that the update was successful.
"""
+
try:
human_evaluation = await db_manager.fetch_human_evaluation_by_id(evaluation_id)
if not human_evaluation:
@@ -232,8 +241,8 @@ async def update_human_evaluation(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
@@ -283,8 +292,8 @@ async def update_evaluation_scenario_router(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
@@ -298,7 +307,8 @@ async def update_evaluation_scenario_router(
import traceback
traceback.print_exc()
- raise HTTPException(status_code=500, detail=str(e)) from e
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(e)) from e
@router.get("/evaluation_scenario/{evaluation_scenario_id}/score/")
@@ -334,8 +344,8 @@ async def get_evaluation_scenario_score_router(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
@@ -344,7 +354,8 @@ async def get_evaluation_scenario_score_router(
"score": evaluation_scenario.score,
}
except Exception as e:
- raise HTTPException(status_code=500, detail=str(e)) from e
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(e)) from e
@router.put("/evaluation_scenario/{evaluation_scenario_id}/score/")
@@ -379,17 +390,20 @@ async def update_evaluation_scenario_score_router(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
- evaluation_scenario.score = payload.score
- await evaluation_scenario.save()
+ await db_manager.update_human_evaluation_scenario(
+ evaluation_scenario_id=str(evaluation_scenario.id), # type: ignore
+ values_to_update=payload.dict()
+ )
return Response(status_code=status.HTTP_204_NO_CONTENT)
except Exception as e:
- raise HTTPException(status_code=500, detail=str(e)) from e
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(e)) from e
@router.get("/{evaluation_id}/results/", operation_id="fetch_results")
@@ -421,8 +435,8 @@ async def fetch_results(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
@@ -436,10 +450,14 @@ async def fetch_results(
)
return {"results_data": results}
except Exception as e:
- raise HTTPException(status_code=500, detail=str(e)) from e
+ import traceback
+
+ traceback.print_exc()
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(e)) from e
-@router.delete("/", response_model=List[str])
+@router.delete("/", response_model=List[str])
async def delete_evaluations(
delete_evaluations: DeleteEvaluation,
request: Request,
@@ -465,8 +483,8 @@ async def delete_evaluations(
)
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- return JSONResponse(
- {"detail": error_msg},
+ raise HTTPException(
+ detail=error_msg,
status_code=403,
)
@@ -475,4 +493,5 @@ async def delete_evaluations(
)
return Response(status_code=status.HTTP_204_NO_CONTENT)
except Exception as e:
- raise HTTPException(status_code=500, detail=str(e)) from e
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(e)) from e
From 9f9bf430369fdd924d9bdba46ac8a5c8f637256e Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 16:48:59 +0100
Subject: [PATCH 072/268] refactor (backend): created human evaluation variants
model, improved human evaluation model and relationships
---
.../agenta_backend/models/db_models.py | 62 ++++++++++++++-----
1 file changed, 45 insertions(+), 17 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 3d9db714e9..2550528150 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -99,6 +99,9 @@ class AppDB(Base):
evaluation = relationship(
"EvaluationDB", cascade="all, delete-orphan", backref="app"
)
+ human_evaluation = relationship(
+ "HumanEvaluationDB", cascade="all, delete-orphan", backref="app"
+ )
class DeploymentDB(Base):
@@ -188,8 +191,8 @@ class AppVariantDB(Base):
user = relationship("UserDB", foreign_keys=[user_id])
modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
base = relationship("VariantBaseDB")
- revisions = relationship(
- "AppVariantRevisionsDB", cascade="all, delete-orphan", backref="variant"
+ variant_revision = relationship(
+ "AppVariantRevisionsDB", cascade="all, delete-orphan", backref="variant_revision"
)
@@ -359,8 +362,8 @@ class EvaluatorConfigDB(Base):
user = relationship("UserDB")
-class HumanEvaluationDB(Base):
- __tablename__ = "human_evaluations"
+class HumanEvaluationVariantDB(Base):
+ __tablename__ = "human_evaluation_variants"
id = Column(
UUID(as_uuid=True),
@@ -369,22 +372,35 @@ class HumanEvaluationDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
- user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
- status = Column(String)
- evaluation_type = Column(String)
+ human_evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE")
+ )
variant_id = Column(
UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
)
- variant = relationship("AppVariantDB")
variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
- variant_revision = relationship("AppVariantRevisionsDB")
+
+ variant = relationship("AppVariantDB", backref="evaluation_variant")
+ variant_revision = relationship("AppVariantRevisionsDB", backref="evaluation_variant_revision")
+
+
+class HumanEvaluationDB(Base):
+ __tablename__ = "human_evaluations"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
+ status = Column(String)
+ evaluation_type = Column(String)
testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
- testset = relationship("TestSetDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -392,6 +408,11 @@ class HumanEvaluationDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ testset = relationship("TestSetDB")
+ evaluation_variant = relationship("HumanEvaluationVariantDB", cascade="all, delete-orphan", backref="human_evaluation")
+ evaluation_scenario = relationship("HumanEvaluationScenarioDB", cascade="all, delete-orphan", backref="evaluation_scenario")
+
class HumanEvaluationScenarioDB(Base):
__tablename__ = "human_evaluations_scenarios"
@@ -404,13 +425,13 @@ class HumanEvaluationScenarioDB(Base):
nullable=False,
)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("human_evaluations.id"))
- evaluation = relationship("HumanEvaluationDB")
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE")
+ )
inputs = Column(JSONB) # List of HumanEvaluationScenarioInput
outputs = Column(JSONB) # List of HumanEvaluationScenarioOutput
vote = Column(String)
- score = Column(JSONB)
+ score = Column(String)
correct_answer = Column(String)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -510,6 +531,13 @@ class EvaluationDB(Base):
class EvaluationEvaluatorConfigDB(Base):
__tablename__ = "evaluation_evaluator_configs"
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
evaluation_id = Column(
UUID(as_uuid=True), ForeignKey("evaluations.id"), primary_key=True
)
From dcb490c58eb6112d1845a6604356ffe365786262 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 16:49:56 +0100
Subject: [PATCH 073/268] chore (backend): format codebase with black@23.12.0
---
.../agenta_backend/models/converters.py | 33 ++++++++--------
.../agenta_backend/models/db_models.py | 20 ++++++++--
.../routers/human_evaluation_router.py | 6 +--
.../agenta_backend/services/db_manager.py | 38 +++++++++++--------
.../services/evaluation_service.py | 27 ++++++++-----
.../services/results_service.py | 7 ++--
.../agenta_backend/tests/conftest.py | 6 +--
7 files changed, 81 insertions(+), 56 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index 92eab66aef..c58134c627 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -107,8 +107,8 @@ async def human_evaluation_db_to_simple_evaluation_output(
return SimpleEvaluationOutput(
id=str(human_evaluation_db.id),
app_id=str(human_evaluation_db.app_id),
- status=human_evaluation_db.status, # type: ignore
- evaluation_type=human_evaluation_db.evaluation_type, # type: ignore
+ status=human_evaluation_db.status, # type: ignore
+ evaluation_type=human_evaluation_db.evaluation_type, # type: ignore
variant_ids=[
str(evaluation_variant.variant_id)
for evaluation_variant in evaluation_variants
@@ -156,7 +156,7 @@ async def human_evaluation_db_to_pydantic(
evaluation_db: HumanEvaluationDB,
) -> HumanEvaluation:
evaluation_variants = await db_manager.fetch_human_evaluation_variants(
- human_evaluation_id=str(evaluation_db.id) # type: ignore
+ human_evaluation_id=str(evaluation_db.id) # type: ignore
)
revisions = []
@@ -179,16 +179,16 @@ async def human_evaluation_db_to_pydantic(
app_id=str(evaluation_db.app_id),
user_id=str(evaluation_db.user_id),
user_username=evaluation_db.user.username or "",
- status=evaluation_db.status, # type: ignore
- evaluation_type=evaluation_db.evaluation_type, # type: ignore
+ status=evaluation_db.status, # type: ignore
+ evaluation_type=evaluation_db.evaluation_type, # type: ignore
variant_ids=variants_ids,
variant_names=variants_names,
testset_id=str(evaluation_db.testset_id),
testset_name=evaluation_db.testset.name,
variants_revision_ids=variants_revision_ids,
revisions=revisions,
- created_at=str(evaluation_db.created_at), # type: ignore
- updated_at=str(evaluation_db.updated_at), # type: ignore
+ created_at=str(evaluation_db.created_at), # type: ignore
+ updated_at=str(evaluation_db.updated_at), # type: ignore
)
@@ -198,13 +198,13 @@ def human_evaluation_scenario_db_to_pydantic(
return HumanEvaluationScenario(
id=str(evaluation_scenario_db.id),
evaluation_id=evaluation_id,
- inputs=evaluation_scenario_db.inputs, # type: ignore
- outputs=evaluation_scenario_db.outputs, # type: ignore
- vote=evaluation_scenario_db.vote, # type: ignore
- score=evaluation_scenario_db.score, # type: ignore
- correct_answer=evaluation_scenario_db.correct_answer, # type: ignore
- is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore
- note=evaluation_scenario_db.note or "", # type: ignore
+ inputs=evaluation_scenario_db.inputs, # type: ignore
+ outputs=evaluation_scenario_db.outputs, # type: ignore
+ vote=evaluation_scenario_db.vote, # type: ignore
+ score=evaluation_scenario_db.score, # type: ignore
+ correct_answer=evaluation_scenario_db.correct_answer, # type: ignore
+ is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore
+ note=evaluation_scenario_db.note or "", # type: ignore
)
@@ -293,9 +293,8 @@ def app_variant_db_to_pydantic(
async def app_variant_db_to_output(app_variant_db: AppVariantDB) -> AppVariantResponse:
- if (
- isinstance(app_variant_db.base_id, uuid.UUID)
- and isinstance(app_variant_db.base.deployment_id, uuid.UUID)
+ if isinstance(app_variant_db.base_id, uuid.UUID) and isinstance(
+ app_variant_db.base.deployment_id, uuid.UUID
):
deployment = await db_manager.get_deployment_by_id(
str(app_variant_db.base.deployment_id)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 2550528150..21abf8bc8e 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -192,7 +192,9 @@ class AppVariantDB(Base):
modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
base = relationship("VariantBaseDB")
variant_revision = relationship(
- "AppVariantRevisionsDB", cascade="all, delete-orphan", backref="variant_revision"
+ "AppVariantRevisionsDB",
+ cascade="all, delete-orphan",
+ backref="variant_revision",
)
@@ -383,7 +385,9 @@ class HumanEvaluationVariantDB(Base):
)
variant = relationship("AppVariantDB", backref="evaluation_variant")
- variant_revision = relationship("AppVariantRevisionsDB", backref="evaluation_variant_revision")
+ variant_revision = relationship(
+ "AppVariantRevisionsDB", backref="evaluation_variant_revision"
+ )
class HumanEvaluationDB(Base):
@@ -410,8 +414,16 @@ class HumanEvaluationDB(Base):
user = relationship("UserDB")
testset = relationship("TestSetDB")
- evaluation_variant = relationship("HumanEvaluationVariantDB", cascade="all, delete-orphan", backref="human_evaluation")
- evaluation_scenario = relationship("HumanEvaluationScenarioDB", cascade="all, delete-orphan", backref="evaluation_scenario")
+ evaluation_variant = relationship(
+ "HumanEvaluationVariantDB",
+ cascade="all, delete-orphan",
+ backref="human_evaluation",
+ )
+ evaluation_scenario = relationship(
+ "HumanEvaluationScenarioDB",
+ cascade="all, delete-orphan",
+ backref="evaluation_scenario",
+ )
class HumanEvaluationScenarioDB(Base):
diff --git a/agenta-backend/agenta_backend/routers/human_evaluation_router.py b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
index 5a40a91d0a..63abfada95 100644
--- a/agenta-backend/agenta_backend/routers/human_evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
@@ -396,8 +396,8 @@ async def update_evaluation_scenario_score_router(
)
await db_manager.update_human_evaluation_scenario(
- evaluation_scenario_id=str(evaluation_scenario.id), # type: ignore
- values_to_update=payload.dict()
+ evaluation_scenario_id=str(evaluation_scenario.id), # type: ignore
+ values_to_update=payload.dict(),
)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@@ -457,7 +457,7 @@ async def fetch_results(
raise HTTPException(status_code=status_code, detail=str(e)) from e
-@router.delete("/", response_model=List[str])
+@router.delete("/", response_model=List[str])
async def delete_evaluations(
delete_evaluations: DeleteEvaluation,
request: Request,
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index f17d1086ac..d5d0163b88 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1970,8 +1970,7 @@ async def create_human_evaluation(
# create variants for human evaluation
await create_human_evaluation_variants(
- human_evaluation_id=str(human_evaluation.id),
- variants_ids=variants_ids
+ human_evaluation_id=str(human_evaluation.id), variants_ids=variants_ids
)
return human_evaluation
@@ -1991,10 +1990,12 @@ async def fetch_human_evaluation_variants(human_evaluation_id: str):
result = await session.execute(
select(HumanEvaluationVariantDB)
.options(
- joinedload(HumanEvaluationVariantDB.variant)
- .load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
- joinedload(HumanEvaluationVariantDB.variant_revision)
- .load_only(AppVariantRevisionsDB.revision, AppVariantRevisionsDB.id) # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant).load_only(
+ AppVariantDB.id, AppVariantDB.variant_name
+ ), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant_revision).load_only(
+ AppVariantRevisionsDB.revision, AppVariantRevisionsDB.id
+ ), # type: ignore
)
.filter_by(human_evaluation_id=uuid.UUID(human_evaluation_id))
)
@@ -2002,7 +2003,9 @@ async def fetch_human_evaluation_variants(human_evaluation_id: str):
return evaluation_variants
-async def create_human_evaluation_variants(human_evaluation_id: str, variants_ids: List[str]):
+async def create_human_evaluation_variants(
+ human_evaluation_id: str, variants_ids: List[str]
+):
"""
Creates human evaluation variants.
@@ -2035,7 +2038,7 @@ async def create_human_evaluation_variants(human_evaluation_id: str, variants_id
human_evaluation_variant = HumanEvaluationVariantDB(
human_evaluation_id=uuid.UUID(human_evaluation_id),
variant_id=variant.id, # type: ignore
- variant_revision_id=variant_revision.id # type: ignore
+ variant_revision_id=variant_revision.id, # type: ignore
)
session.add(human_evaluation_variant)
@@ -2057,8 +2060,8 @@ async def fetch_human_evaluation_by_id(
result = await session.execute(
select(HumanEvaluationDB)
.options(
- joinedload(HumanEvaluationDB.user).load_only(UserDB.username), # type: ignore
- joinedload(HumanEvaluationDB.testset).load_only(TestSetDB.name) # type: ignore
+ joinedload(HumanEvaluationDB.user).load_only(UserDB.username), # type: ignore
+ joinedload(HumanEvaluationDB.testset).load_only(TestSetDB.name), # type: ignore
)
.filter_by(id=uuid.UUID(evaluation_id))
)
@@ -2118,7 +2121,7 @@ async def create_human_evaluation_scenario(
user_id: str,
app: AppDB,
evaluation_id: str,
- evaluation_extend: Dict[str, Any]
+ evaluation_extend: Dict[str, Any],
):
"""
Creates a human evaluation scenario.
@@ -2148,7 +2151,9 @@ async def create_human_evaluation_scenario(
await session.commit()
-async def update_human_evaluation_scenario(evaluation_scenario_id: str, values_to_update: dict):
+async def update_human_evaluation_scenario(
+ evaluation_scenario_id: str, values_to_update: dict
+):
"""Updates human evaluation scenario with the specified values.
Args:
@@ -2161,12 +2166,15 @@ async def update_human_evaluation_scenario(evaluation_scenario_id: str, values_t
async with db_engine.get_session() as session:
result = await session.execute(
- select(HumanEvaluationScenarioDB)
- .filter_by(id=uuid.UUID(evaluation_scenario_id))
+ select(HumanEvaluationScenarioDB).filter_by(
+ id=uuid.UUID(evaluation_scenario_id)
+ )
)
human_evaluation_scenario = result.scalars().one_or_none()
if not human_evaluation_scenario:
- raise NoResultFound(f"Human evaluation scenario with id {evaluation_scenario_id} not found")
+ raise NoResultFound(
+ f"Human evaluation scenario with id {evaluation_scenario_id} not found"
+ )
for key, value in values_to_update.items():
if hasattr(human_evaluation_scenario, key):
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index 63736f0f43..ccda51e314 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -96,7 +96,9 @@ async def prepare_csvdata_and_create_evaluation_scenario(
for name in payload_inputs
]
except KeyError:
- await db_manager.delete_human_evaluation(evaluation_id=str(new_evaluation.id))
+ await db_manager.delete_human_evaluation(
+ evaluation_id=str(new_evaluation.id)
+ )
msg = f"""
Columns in the test set should match the names of the inputs in the variant.
Inputs names in variant are: {[variant_input for variant_input in payload_inputs]} while
@@ -125,7 +127,7 @@ async def prepare_csvdata_and_create_evaluation_scenario(
user_id=str(user.id),
app=app,
evaluation_id=str(new_evaluation.id),
- evaluation_extend=evaluation_scenario_extend_payload
+ evaluation_extend=evaluation_scenario_extend_payload,
)
@@ -142,8 +144,7 @@ async def update_human_evaluation_service(
# Update the evaluation
await db_manager.update_human_evaluation(
- evaluation_id=str(evaluation.id),
- values_to_update=update_payload.dict()
+ evaluation_id=str(evaluation.id), values_to_update=update_payload.dict()
)
@@ -190,7 +191,7 @@ async def fetch_human_evaluation_scenarios_for_evaluation(
eval_scenarios = [
converters.human_evaluation_scenario_db_to_pydantic(
evaluation_scenario_db=human_evaluation_scenario,
- evaluation_id=str(human_evaluation.id)
+ evaluation_id=str(human_evaluation.id),
)
for human_evaluation_scenario in human_evaluation_scenarios
]
@@ -217,10 +218,16 @@ async def update_human_evaluation_scenario(
values_to_update = {}
payload = evaluation_scenario_data.dict()
- if payload["score"] is not None and evaluation_type == EvaluationType.single_model_test:
+ if (
+ payload["score"] is not None
+ and evaluation_type == EvaluationType.single_model_test
+ ):
values_to_update["score"] = payload["score"]
-
- if payload["vote"] is not None and evaluation_type == EvaluationType.human_a_b_testing:
+
+ if (
+ payload["vote"] is not None
+ and evaluation_type == EvaluationType.human_a_b_testing
+ ):
values_to_update["vote"] = payload["vote"]
if payload["outputs"] is not None:
@@ -254,7 +261,7 @@ async def update_human_evaluation_scenario(
await db_manager.update_human_evaluation_scenario(
evaluation_scenario_id=str(evaluation_scenario_db.id),
- values_to_update=values_to_update
+ values_to_update=values_to_update,
)
@@ -386,7 +393,7 @@ async def create_new_human_evaluation(
status=payload.status,
evaluation_type=payload.evaluation_type,
testset_id=payload.testset_id,
- variants_ids=payload.variant_ids
+ variants_ids=payload.variant_ids,
)
if human_evaluation is None:
raise HTTPException(
diff --git a/agenta-backend/agenta_backend/services/results_service.py b/agenta-backend/agenta_backend/services/results_service.py
index 96b0aa6f3e..acde0ca162 100644
--- a/agenta-backend/agenta_backend/services/results_service.py
+++ b/agenta-backend/agenta_backend/services/results_service.py
@@ -32,8 +32,7 @@ async def fetch_results_for_evaluation(evaluation: HumanEvaluationDB):
human_evaluation_id=str(evaluation.id)
)
results["variants"] = [
- str(evaluation_variant.variant_id)
- for evaluation_variant in evaluation_variants
+ str(evaluation_variant.variant_id) for evaluation_variant in evaluation_variants
]
variant_names: list[str] = []
@@ -48,7 +47,7 @@ async def fetch_results_for_evaluation(evaluation: HumanEvaluationDB):
results["variant_names"] = variant_names
results["nb_of_rows"] = len(evaluation_scenarios)
- if evaluation.evaluation_type == EvaluationType.human_a_b_testing: # type: ignore
+ if evaluation.evaluation_type == EvaluationType.human_a_b_testing: # type: ignore
results.update(
await _compute_stats_for_human_a_b_testing_evaluation(evaluation_scenarios)
)
@@ -66,7 +65,7 @@ async def _compute_stats_for_evaluation(evaluation_scenarios: list, classes: lis
async def _compute_stats_for_human_a_b_testing_evaluation(
- evaluation_scenarios: Sequence[EvaluationScenarioDB]
+ evaluation_scenarios: Sequence[EvaluationScenarioDB],
):
results = {}
results["variants_votes_data"] = {}
diff --git a/agenta-backend/agenta_backend/tests/conftest.py b/agenta-backend/agenta_backend/tests/conftest.py
index 7656afd98f..e78d0f2b4c 100644
--- a/agenta-backend/agenta_backend/tests/conftest.py
+++ b/agenta-backend/agenta_backend/tests/conftest.py
@@ -13,13 +13,13 @@ def event_loop():
policy = asyncio.get_event_loop_policy()
res = policy.new_event_loop()
asyncio.set_event_loop(res)
- res._close = res.close # type: ignore
+ res._close = res.close # type: ignore
# Initialize database and create tables
res.run_until_complete(DBEngine().init_db())
yield res
- res.run_until_complete(DBEngine().close()) # close connections to database
- res.run_until_complete(DBEngine().remove_db()) # drop database
+ res.run_until_complete(DBEngine().close()) # close connections to database
+ res.run_until_complete(DBEngine().remove_db()) # drop database
res._close() # close event loop # type: ignore
From c338930eec2b5b205aaa0a325fc6d2d3d3866896 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 16:51:03 +0100
Subject: [PATCH 074/268] chore (tools): reintroduce mongo services
---
docker-compose.yml | 70 +++++++++++++++++++++++-----------------------
1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 658696bed4..59babfa635 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -61,8 +61,8 @@ services:
"/api",
]
depends_on:
- # mongo:
- # condition: service_healthy
+ mongo:
+ condition: service_healthy
postgres:
condition: service_healthy
restart: always
@@ -87,38 +87,38 @@ services:
- NEXT_PUBLIC_POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7
restart: always
- # mongo:
- # image: mongo:5.0
- # environment:
- # MONGO_INITDB_ROOT_USERNAME: username
- # MONGO_INITDB_ROOT_PASSWORD: password
- # volumes:
- # - mongodb_data:/data/db
- # ports:
- # - "27017:27017"
- # networks:
- # - agenta-network
- # healthcheck:
- # test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"]
- # interval: 10s
- # timeout: 10s
- # retries: 20
- # restart: always
+ mongo:
+ image: mongo:5.0
+ environment:
+ MONGO_INITDB_ROOT_USERNAME: username
+ MONGO_INITDB_ROOT_PASSWORD: password
+ volumes:
+ - mongodb_data:/data/db
+ ports:
+ - "27017:27017"
+ networks:
+ - agenta-network
+ healthcheck:
+ test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"]
+ interval: 10s
+ timeout: 10s
+ retries: 20
+ restart: always
- # mongo_express:
- # image: mongo-express:0.54.0
- # environment:
- # ME_CONFIG_MONGODB_ADMINUSERNAME: username
- # ME_CONFIG_MONGODB_ADMINPASSWORD: password
- # ME_CONFIG_MONGODB_SERVER: mongo
- # ports:
- # - "8081:8081"
- # networks:
- # - agenta-network
- # depends_on:
- # mongo:
- # condition: service_healthy
- # restart: always
+ mongo_express:
+ image: mongo-express:0.54.0
+ environment:
+ ME_CONFIG_MONGODB_ADMINUSERNAME: username
+ ME_CONFIG_MONGODB_ADMINPASSWORD: password
+ ME_CONFIG_MONGODB_SERVER: mongo
+ ports:
+ - "8081:8081"
+ networks:
+ - agenta-network
+ depends_on:
+ mongo:
+ condition: service_healthy
+ restart: always
redis:
image: redis:latest
@@ -156,7 +156,7 @@ services:
- ./agenta-backend/agenta_backend:/app/agenta_backend
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- # - mongo
+ - mongo
- postgres
- rabbitmq
- redis
@@ -209,7 +209,7 @@ networks:
name: agenta-network
volumes:
- # mongodb_data:
+ mongodb_data:
redis_data:
nextjs_cache:
postgresdb-data:
From 669c1d4c0ac741adfac9e9f5b3d5545a28cfaaba Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 17:13:28 +0100
Subject: [PATCH 075/268] refactor (backend): created human evaluation variant
db model, added cascade to human evaluation related schemas and formatted
code
---
.../agenta_backend/models/db_models.py | 170 +++++++++++++-----
1 file changed, 126 insertions(+), 44 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index cf820be808..9dcf5b589a 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -9,7 +9,6 @@
DateTime,
Boolean,
ForeignKey,
- Float,
Enum,
)
from sqlalchemy.orm import relationship
@@ -87,11 +86,20 @@ class AppDB(Base):
user = relationship("UserDB")
variant = relationship("AppVariantDB", cascade="all, delete-orphan", backref="app")
- evaluator_config = relationship("EvaluatorConfigDB", cascade="all, delete-orphan", backref="app")
+ evaluator_config = relationship(
+ "EvaluatorConfigDB", cascade="all, delete-orphan", backref="app"
+ )
testset = relationship("TestSetDB", cascade="all, delete-orphan", backref="app")
base = relationship("DeploymentDB", cascade="all, delete-orphan", backref="app")
- deployment = relationship("VariantBaseDB", cascade="all, delete-orphan", backref="app")
- evaluation = relationship("EvaluationDB", cascade="all, delete-orphan", backref="app")
+ deployment = relationship(
+ "VariantBaseDB", cascade="all, delete-orphan", backref="app"
+ )
+ evaluation = relationship(
+ "EvaluationDB", cascade="all, delete-orphan", backref="app"
+ )
+ human_evaluation = relationship(
+ "HumanEvaluationDB", cascade="all, delete-orphan", backref="app"
+ )
class DeploymentDB(Base):
@@ -134,7 +142,9 @@ class VariantBaseDB(Base):
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_name = Column(String)
image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
+ deployment_id = Column(
+ UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL")
+ )
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -179,8 +189,12 @@ class AppVariantDB(Base):
user = relationship("UserDB", foreign_keys=[user_id])
modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
base = relationship("VariantBaseDB")
- revisions = relationship("AppVariantRevisionsDB", cascade="all, delete-orphan", backref="variant")
-
+ variant_revision = relationship(
+ "AppVariantRevisionsDB",
+ cascade="all, delete-orphan",
+ backref="variant_revision",
+ )
+
class AppVariantRevisionsDB(Base):
__tablename__ = "app_variant_revisions"
@@ -192,7 +206,9 @@ class AppVariantRevisionsDB(Base):
unique=True,
nullable=False,
)
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="CASCADE"))
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="CASCADE")
+ )
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
@@ -223,18 +239,23 @@ class AppEnvironmentDB(Base):
name = Column(String)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
revision = Column(Integer)
- deployed_app_variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
+ deployed_app_variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
deployed_app_variant_revision_id = Column(
- UUID(as_uuid=True),
- ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
+ )
+ deployment_id = Column(
+ UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL")
)
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
user = relationship("UserDB")
- environment_revisions = relationship("AppEnvironmentRevisionDB", cascade="all, delete-orphan", backref="environment")
+ environment_revisions = relationship(
+ "AppEnvironmentRevisionDB", cascade="all, delete-orphan", backref="environment"
+ )
deployed_app_variant = relationship("AppVariantDB")
deployed_app_variant_revision = relationship("AppVariantRevisionsDB")
@@ -249,13 +270,17 @@ class AppEnvironmentRevisionDB(Base):
unique=True,
nullable=False,
)
- environment_id = Column(UUID(as_uuid=True), ForeignKey("environments.id", ondelete="CASCADE"))
+ environment_id = Column(
+ UUID(as_uuid=True), ForeignKey("environments.id", ondelete="CASCADE")
+ )
revision = Column(Integer)
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
deployed_app_variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
- deployment_id = Column(UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL"))
+ deployment_id = Column(
+ UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL")
+ )
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -337,6 +362,32 @@ class EvaluatorConfigDB(Base):
user = relationship("UserDB")
+class HumanEvaluationVariantDB(Base):
+ __tablename__ = "human_evaluation_variants"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ human_evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
+ variant_revision_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
+ )
+
+ variant = relationship("AppVariantDB", backref="evaluation_variant")
+ variant_revision = relationship(
+ "AppVariantRevisionsDB", backref="evaluation_variant_revision"
+ )
+
+
class HumanEvaluationDB(Base):
__tablename__ = "human_evaluations"
@@ -347,20 +398,11 @@ class HumanEvaluationDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
- app = relationship("AppDB")
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
status = Column(String)
evaluation_type = Column(String)
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
- variant = relationship("AppVariantDB")
- variant_revision_id = Column(
- UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
- )
- variant_revision = relationship("AppVariantRevisionsDB")
testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
- testset = relationship("TestSetDB")
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -368,6 +410,19 @@ class HumanEvaluationDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
+ user = relationship("UserDB")
+ testset = relationship("TestSetDB")
+ evaluation_variant = relationship(
+ "HumanEvaluationVariantDB",
+ cascade="all, delete-orphan",
+ backref="human_evaluation",
+ )
+ evaluation_scenario = relationship(
+ "HumanEvaluationScenarioDB",
+ cascade="all, delete-orphan",
+ backref="evaluation_scenario",
+ )
+
class HumanEvaluationScenarioDB(Base):
__tablename__ = "human_evaluations_scenarios"
@@ -380,13 +435,13 @@ class HumanEvaluationScenarioDB(Base):
nullable=False,
)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- user = relationship("UserDB")
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("human_evaluations.id"))
- evaluation = relationship("HumanEvaluationDB")
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE")
+ )
inputs = Column(JSONB) # List of HumanEvaluationScenarioInput
outputs = Column(JSONB) # List of HumanEvaluationScenarioOutput
vote = Column(String)
- score = Column(JSONB)
+ score = Column(String)
correct_answer = Column(String)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -408,14 +463,15 @@ class EvaluationAggregatedResultDB(Base):
unique=True,
nullable=False,
)
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE")
+ )
evaluator_config_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
+ UUID(as_uuid=True), ForeignKey("evaluators_configs.id", ondelete="SET NULL")
)
result = Column(JSONB) # Result
- evaluation = relationship("EvaluationDB", back_populates="aggregated_results")
- evaluator_config = relationship("EvaluatorConfigDB")
+ evaluator_config = relationship("EvaluatorConfigDB", backref="evaluator_config")
class EvaluationScenarioResultDB(Base):
@@ -429,13 +485,11 @@ class EvaluationScenarioResultDB(Base):
nullable=False,
)
evaluation_scenario_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id")
+ UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id", ondelete="CASCADE")
)
- evaluation_scenario = relationship("EvaluationScenarioDB", back_populates="results")
evaluator_config_id = Column(
UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
)
- evaluator_config = relationship("EvaluatorConfigDB")
result = Column(JSONB) # Result
@@ -452,8 +506,12 @@ class EvaluationDB(Base):
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
status = Column(JSONB) # Result
- testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL"))
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
+ testset_id = Column(
+ UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
@@ -472,21 +530,39 @@ class EvaluationDB(Base):
variant = relationship("AppVariantDB")
variant_revision = relationship("AppVariantRevisionsDB")
aggregated_results = relationship(
- "EvaluationAggregatedResultDB", back_populates="evaluation"
+ "EvaluationAggregatedResultDB",
+ cascade="all, delete-orphan",
+ backref="evaluation",
)
evaluation_scenarios = relationship(
"EvaluationScenarioDB", cascade="all, delete-orphan", backref="evaluation"
)
+ evaluator_configs = relationship(
+ "EvaluationEvaluatorConfigDB",
+ cascade="all, delete-orphan",
+ backref="evaluation",
+ )
class EvaluationEvaluatorConfigDB(Base):
__tablename__ = "evaluation_evaluator_configs"
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
evaluation_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluations.id"), primary_key=True
+ UUID(as_uuid=True),
+ ForeignKey("evaluations.id", ondelete="CASCADE"),
+ primary_key=True,
)
evaluator_config_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluators_configs.id"), primary_key=True
+ UUID(as_uuid=True),
+ ForeignKey("evaluators_configs.id", ondelete="SET NULL"),
+ primary_key=True,
)
@@ -501,8 +577,12 @@ class EvaluationScenarioDB(Base):
nullable=False,
)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE"))
- variant_id = Column(UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL"))
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
inputs = Column(JSONB) # List of EvaluationScenarioInput
outputs = Column(JSONB) # List of EvaluationScenarioOutput
correct_answers = Column(JSONB) # List of CorrectAnswer
@@ -520,7 +600,9 @@ class EvaluationScenarioDB(Base):
user = relationship("UserDB")
variant = relationship("AppVariantDB")
results = relationship(
- "EvaluationScenarioResultDB", back_populates="evaluation_scenario"
+ "EvaluationScenarioResultDB",
+ cascade="all, delete-orphan",
+ backref="evaluation_scenario",
)
From 9c2d4c3ba40c81a005dbf835e52c15b93df7e292 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 14 Jun 2024 17:14:38 +0100
Subject: [PATCH 076/268] minor refactor (backend): remove redundant imports
and added cascade to schemas referencing evaluations table
---
.../agenta_backend/models/db_models.py | 36 ++++++++++++-------
.../agenta_backend/services/db_manager.py | 3 --
2 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 21abf8bc8e..9dcf5b589a 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -2,7 +2,6 @@
from typing import Any, Dict, List, Optional
import uuid_utils.compat as uuid
-from pydantic import BaseModel, Field
from sqlalchemy import (
Column,
String,
@@ -10,10 +9,9 @@
DateTime,
Boolean,
ForeignKey,
- Float,
Enum,
)
-from sqlalchemy.orm import relationship, declarative_base
+from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import UUID, JSONB
from agenta_backend.models.base import Base
@@ -465,13 +463,14 @@ class EvaluationAggregatedResultDB(Base):
unique=True,
nullable=False,
)
- evaluation_id = Column(UUID(as_uuid=True), ForeignKey("evaluations.id"))
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE")
+ )
evaluator_config_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
+ UUID(as_uuid=True), ForeignKey("evaluators_configs.id", ondelete="SET NULL")
)
result = Column(JSONB) # Result
- evaluation = relationship("EvaluationDB", back_populates="aggregated_results")
evaluator_config = relationship("EvaluatorConfigDB", backref="evaluator_config")
@@ -486,13 +485,11 @@ class EvaluationScenarioResultDB(Base):
nullable=False,
)
evaluation_scenario_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id")
+ UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id", ondelete="CASCADE")
)
- evaluation_scenario = relationship("EvaluationScenarioDB", back_populates="results")
evaluator_config_id = Column(
UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
)
- evaluator_config = relationship("EvaluatorConfigDB")
result = Column(JSONB) # Result
@@ -533,11 +530,18 @@ class EvaluationDB(Base):
variant = relationship("AppVariantDB")
variant_revision = relationship("AppVariantRevisionsDB")
aggregated_results = relationship(
- "EvaluationAggregatedResultDB", back_populates="evaluation"
+ "EvaluationAggregatedResultDB",
+ cascade="all, delete-orphan",
+ backref="evaluation",
)
evaluation_scenarios = relationship(
"EvaluationScenarioDB", cascade="all, delete-orphan", backref="evaluation"
)
+ evaluator_configs = relationship(
+ "EvaluationEvaluatorConfigDB",
+ cascade="all, delete-orphan",
+ backref="evaluation",
+ )
class EvaluationEvaluatorConfigDB(Base):
@@ -551,10 +555,14 @@ class EvaluationEvaluatorConfigDB(Base):
nullable=False,
)
evaluation_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluations.id"), primary_key=True
+ UUID(as_uuid=True),
+ ForeignKey("evaluations.id", ondelete="CASCADE"),
+ primary_key=True,
)
evaluator_config_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluators_configs.id"), primary_key=True
+ UUID(as_uuid=True),
+ ForeignKey("evaluators_configs.id", ondelete="SET NULL"),
+ primary_key=True,
)
@@ -592,7 +600,9 @@ class EvaluationScenarioDB(Base):
user = relationship("UserDB")
variant = relationship("AppVariantDB")
results = relationship(
- "EvaluationScenarioResultDB", back_populates="evaluation_scenario"
+ "EvaluationScenarioResultDB",
+ cascade="all, delete-orphan",
+ backref="evaluation_scenario",
)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index d5d0163b88..9ce486318f 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -838,9 +838,6 @@ async def get_users_by_ids(user_ids: List):
Args:
user_ids (List): A list of user IDs to retrieve.
-
- Returns:
- List: A list of dictionaries representing the retrieved users.
"""
async with db_engine.get_session() as session:
From bd7e853c6f3d43120d449a343b531ccf81dd185e Mon Sep 17 00:00:00 2001
From: ashrafchowdury
Date: Sat, 15 Jun 2024 13:47:26 +0600
Subject: [PATCH 077/268] fix: cleared create variant model input data after
creating a variant
---
agenta-web/src/components/Playground/NewVariantModal.tsx | 4 ++++
agenta-web/src/components/Playground/Playground.tsx | 3 +++
2 files changed, 7 insertions(+)
diff --git a/agenta-web/src/components/Playground/NewVariantModal.tsx b/agenta-web/src/components/Playground/NewVariantModal.tsx
index 34b5c0542c..ff20f3bceb 100644
--- a/agenta-web/src/components/Playground/NewVariantModal.tsx
+++ b/agenta-web/src/components/Playground/NewVariantModal.tsx
@@ -13,6 +13,7 @@ interface Props {
setNewVariantName: (value: string) => void
newVariantName: string
setTemplateVariantName: (value: string) => void
+ templateVariantName: string
}
const useStyles = createUseStyles({
@@ -29,6 +30,7 @@ const NewVariantModal: React.FC = ({
setNewVariantName,
newVariantName,
setTemplateVariantName,
+ templateVariantName,
}) => {
const classes = useStyles()
const [variantPlaceHolder, setVariantPlaceHolder] = useState("Source Variant")
@@ -70,6 +72,7 @@ const NewVariantModal: React.FC = ({
data-cy="new-variant-modal-select"
placeholder="Select a variant"
onChange={handleTemplateVariantChange}
+ value={templateVariantName}
options={variants.map((variant) => ({
value: variant.variantName,
label: (
@@ -84,6 +87,7 @@ const NewVariantModal: React.FC = ({
diff --git a/agenta-web/src/components/Playground/Playground.tsx b/agenta-web/src/components/Playground/Playground.tsx
index 1777822149..fd3854a32b 100644
--- a/agenta-web/src/components/Playground/Playground.tsx
+++ b/agenta-web/src/components/Playground/Playground.tsx
@@ -89,6 +89,8 @@ const Playground: React.FC = () => {
setVariants((prevState: any) => [...prevState, newVariant])
setActiveKey(updateNewVariantName)
setUnsavedVariants((prev) => ({...prev, [newVariant.variantName!]: false}))
+ setTemplateVariantName("")
+ setNewVariantName("")
} catch (error) {
message.error("Failed to add new variant. Please try again later.")
console.error("Error adding new variant:", error)
@@ -403,6 +405,7 @@ const Playground: React.FC = () => {
setNewVariantName={setNewVariantName}
newVariantName={newVariantName}
setTemplateVariantName={setTemplateVariantName}
+ templateVariantName={templateVariantName}
/>
)
From ca546af60bea4275c9e40a5be2bbe1b1901ba173 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Sun, 16 Jun 2024 18:50:51 +0100
Subject: [PATCH 078/268] fix(frontend): path to evaluation results after
deletion
---
.../evaluations/evaluationScenarios/EvaluationScenarios.tsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx b/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
index 573491eb47..3519da87d7 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
@@ -292,7 +292,7 @@ const EvaluationScenarios: React.FC = () => {
message: "Are you sure you want to delete this evaluation?",
onOk: () =>
deleteEvaluations([evaluationId])
- .then(() => router.push(`/apps/${appId}/evaluations`))
+ .then(() => router.push(`/apps/${appId}/evaluations/results`))
.catch(console.error),
})
}
From db18a659c067ff6febaf725d7c44e47db398f5c8 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Mon, 17 Jun 2024 17:51:36 +0100
Subject: [PATCH 079/268] fix(frontend): human eval inputs overflowing when
values are long
---
.../ABTestingEvaluationTable.tsx | 1 +
.../SingleModelEvaluationTable.tsx | 1 +
.../Playground/ParamsForm/ParamsForm.tsx | 38 +++++++++----------
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/agenta-web/src/components/EvaluationTable/ABTestingEvaluationTable.tsx b/agenta-web/src/components/EvaluationTable/ABTestingEvaluationTable.tsx
index 8f209da494..1ea3f32709 100644
--- a/agenta-web/src/components/EvaluationTable/ABTestingEvaluationTable.tsx
+++ b/agenta-web/src/components/EvaluationTable/ABTestingEvaluationTable.tsx
@@ -374,6 +374,7 @@ const ABTestingEvaluationTable: React.FC = ({
),
+ width: 300,
dataIndex: "inputs",
render: (_: any, record: ABTestingEvaluationTableRow, rowIndex: number) => {
return (
diff --git a/agenta-web/src/components/EvaluationTable/SingleModelEvaluationTable.tsx b/agenta-web/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
index e6af1b5162..b008f76ff8 100644
--- a/agenta-web/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
+++ b/agenta-web/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
@@ -417,6 +417,7 @@ const SingleModelEvaluationTable: React.FC
= ({
),
+ width: 300,
dataIndex: "inputs",
render: (_: any, record: SingleModelEvaluationRow, rowIndex: number) => {
return (
diff --git a/agenta-web/src/components/Playground/ParamsForm/ParamsForm.tsx b/agenta-web/src/components/Playground/ParamsForm/ParamsForm.tsx
index b213497e93..53ad106781 100644
--- a/agenta-web/src/components/Playground/ParamsForm/ParamsForm.tsx
+++ b/agenta-web/src/components/Playground/ParamsForm/ParamsForm.tsx
@@ -28,12 +28,11 @@ const useStyles = createUseStyles((theme: JSSTheme) => ({
borderRadius: 6,
},
paramValueContainer: {
- border: `1px solid ${theme.colorBorder}`,
- width: "100%",
- borderRadius: theme.borderRadius,
- padding: theme.paddingSM,
- maxHeight: 300,
- overflowY: "scroll",
+ "&:disabled": {
+ color: "inherit",
+ backgroundColor: "inherit",
+ cursor: "text",
+ },
},
}))
@@ -109,20 +108,19 @@ const ParamsForm: React.FC = ({
alt={param.name}
/>
)}
- {isPlaygroundComponent ? (
-
- onParamChange?.(param.name, e.target.value)
- }
- autoSize={{minRows: 2, maxRows: 8}}
- />
- ) : (
- {param.value}
- )}
+
+ onParamChange?.(param.name, e.target.value)}
+ disabled={!isPlaygroundComponent}
+ autoSize={{minRows: 2, maxRows: 8}}
+ />
)
From dff0f425c06da9653ddd35f860ae03aceeceaf04 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 17 Jun 2024 19:12:57 +0200
Subject: [PATCH 080/268] fix(sdk): fix passing api key
---
agenta-backend/agenta_backend/services/app_manager.py | 2 +-
agenta-cli/agenta/sdk/agenta_init.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index dfd9f0c021..d41599de5a 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -103,7 +103,7 @@ async def start_variant(
if isCloudEE():
api_key = await api_key_service.create_api_key(
str(db_app_variant.user.uid),
- workspace_id=str(db_app_variant.workspace),
+ workspace_id=str(db_app_variant.workspace.id),
expiration_date=None,
hidden=True,
)
diff --git a/agenta-cli/agenta/sdk/agenta_init.py b/agenta-cli/agenta/sdk/agenta_init.py
index 3e4f522bdd..27698881a8 100644
--- a/agenta-cli/agenta/sdk/agenta_init.py
+++ b/agenta-cli/agenta/sdk/agenta_init.py
@@ -91,7 +91,7 @@ def init(
"Warning: Your configuration will not be saved permanently since base_id is not provided."
)
- self.config = Config(base_id=self.base_id, host=self.host) # type: ignore
+ self.config = Config(base_id=self.base_id, host=self.host, api_key=self.api_key) # type: ignore
class Config:
From 611779608b651ffe9c61288e8309bcdd1ee536b5 Mon Sep 17 00:00:00 2001
From: Abram
Date: Mon, 17 Jun 2024 18:23:21 +0100
Subject: [PATCH 081/268] chore (sdk): bump version to a pre-release version
(0.17.4a0)
---
agenta-cli/pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index 4c697d05d8..e9864c0874 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.3"
+version = "0.17.4a0"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
From b8aa83a39f86c8dad0a1290f6e026fe658bdfb81 Mon Sep 17 00:00:00 2001
From: ashrafchowdury
Date: Tue, 18 Jun 2024 12:43:02 +0600
Subject: [PATCH 082/268] refactor: removed unnecessary code
---
agenta-web/src/components/Playground/NewVariantModal.tsx | 5 +----
agenta-web/src/components/Playground/Playground.tsx | 3 ---
2 files changed, 1 insertion(+), 7 deletions(-)
diff --git a/agenta-web/src/components/Playground/NewVariantModal.tsx b/agenta-web/src/components/Playground/NewVariantModal.tsx
index ff20f3bceb..627e47c3c1 100644
--- a/agenta-web/src/components/Playground/NewVariantModal.tsx
+++ b/agenta-web/src/components/Playground/NewVariantModal.tsx
@@ -13,7 +13,6 @@ interface Props {
setNewVariantName: (value: string) => void
newVariantName: string
setTemplateVariantName: (value: string) => void
- templateVariantName: string
}
const useStyles = createUseStyles({
@@ -30,7 +29,6 @@ const NewVariantModal: React.FC = ({
setNewVariantName,
newVariantName,
setTemplateVariantName,
- templateVariantName,
}) => {
const classes = useStyles()
const [variantPlaceHolder, setVariantPlaceHolder] = useState("Source Variant")
@@ -63,6 +61,7 @@ const NewVariantModal: React.FC = ({
onCancel={() => setIsModalOpen(false)}
centered
okButtonProps={{disabled: !isInputValid}} // Disable OK button if input is not valid
+ destroyOnClose
>
diff --git a/agenta-web/src/components/Playground/Playground.tsx b/agenta-web/src/components/Playground/Playground.tsx
index fd3854a32b..1777822149 100644
--- a/agenta-web/src/components/Playground/Playground.tsx
+++ b/agenta-web/src/components/Playground/Playground.tsx
@@ -89,8 +89,6 @@ const Playground: React.FC = () => {
setVariants((prevState: any) => [...prevState, newVariant])
setActiveKey(updateNewVariantName)
setUnsavedVariants((prev) => ({...prev, [newVariant.variantName!]: false}))
- setTemplateVariantName("")
- setNewVariantName("")
} catch (error) {
message.error("Failed to add new variant. Please try again later.")
console.error("Error adding new variant:", error)
@@ -405,7 +403,6 @@ const Playground: React.FC = () => {
setNewVariantName={setNewVariantName}
newVariantName={newVariantName}
setTemplateVariantName={setTemplateVariantName}
- templateVariantName={templateVariantName}
/>
)
From f407d31054ab9da29cb2d9b5147c859575f5c6a1 Mon Sep 17 00:00:00 2001
From: ashrafchowdury
Date: Tue, 18 Jun 2024 14:51:22 +0600
Subject: [PATCH 083/268] refactor: removed style-props type code duplication
---
agenta-web/src/components/AppSelector/AppSelector.tsx | 5 +----
agenta-web/src/components/AppSelector/TipsAndFeatures.tsx | 5 +----
agenta-web/src/components/AppSelector/Welcome.tsx | 5 +----
.../src/components/AppSelector/modals/AddNewAppModal.tsx | 5 +----
.../src/components/AppSelector/modals/MaxAppModal.tsx | 5 +----
.../src/components/AppSelector/modals/WriteOwnAppModal.tsx | 5 +----
.../components/Evaluations/AutomaticEvaluationResult.tsx | 6 +-----
.../EvaluationCardView/EvaluationVariantCard.tsx | 6 +-----
.../src/components/Evaluations/HumanEvaluationResult.tsx | 6 +-----
.../HumanEvaluationModal/HumanEvaluationModal.tsx | 6 +-----
agenta-web/src/components/Layout/Layout.tsx | 4 ++--
.../Playground/AddToTestSetDrawer/AddToTestSetDrawer.tsx | 6 +-----
agenta-web/src/components/Playground/Views/TestView.tsx | 6 +-----
.../src/components/SecondaryButton/SecondaryButton.tsx | 4 ----
agenta-web/src/lib/Types.ts | 4 ++++
15 files changed, 18 insertions(+), 60 deletions(-)
diff --git a/agenta-web/src/components/AppSelector/AppSelector.tsx b/agenta-web/src/components/AppSelector/AppSelector.tsx
index 5cc43b43b4..bd24ec2ffa 100644
--- a/agenta-web/src/components/AppSelector/AppSelector.tsx
+++ b/agenta-web/src/components/AppSelector/AppSelector.tsx
@@ -2,7 +2,7 @@ import {useState, useEffect, useMemo} from "react"
import {PlusOutlined} from "@ant-design/icons"
import {Input, Modal, ConfigProvider, theme, Card, Button, notification, Divider} from "antd"
import AppCard from "./AppCard"
-import {Template, GenericObject} from "@/lib/Types"
+import {Template, GenericObject, StyleProps} from "@/lib/Types"
import {useAppTheme} from "../Layout/ThemeContextProvider"
import TipsAndFeatures from "./TipsAndFeatures"
import Welcome from "./Welcome"
@@ -22,9 +22,6 @@ import {LlmProvider, getAllProviderLlmKeys} from "@/lib/helpers/llmProviders"
import ResultComponent from "../ResultComponent/ResultComponent"
import {dynamicContext} from "@/lib/helpers/dynamic"
-type StyleProps = {
- themeMode: "dark" | "light"
-}
const useStyles = createUseStyles({
container: ({themeMode}: StyleProps) => ({
diff --git a/agenta-web/src/components/AppSelector/TipsAndFeatures.tsx b/agenta-web/src/components/AppSelector/TipsAndFeatures.tsx
index 1414cd6672..7505327171 100644
--- a/agenta-web/src/components/AppSelector/TipsAndFeatures.tsx
+++ b/agenta-web/src/components/AppSelector/TipsAndFeatures.tsx
@@ -3,15 +3,12 @@ import {BulbFilled} from "@ant-design/icons"
import {Space} from "antd"
import {useAppTheme} from "../Layout/ThemeContextProvider"
import {MDXProvider} from "@mdx-js/react"
+import {StyleProps} from "@/lib/Types"
import slide1 from "../../welcome-highlights/tip1.mdx"
import slide2 from "../../welcome-highlights/tip2.mdx"
import {createUseStyles} from "react-jss"
-type StyleProps = {
- themeMode: "dark" | "light"
-}
-
const useStyles = createUseStyles({
container: ({themeMode}: StyleProps) => ({
backgroundColor: themeMode === "dark" ? "#000" : "rgba(0,0,0,0.03)",
diff --git a/agenta-web/src/components/AppSelector/Welcome.tsx b/agenta-web/src/components/AppSelector/Welcome.tsx
index 0d06a3bb91..711490b3d6 100644
--- a/agenta-web/src/components/AppSelector/Welcome.tsx
+++ b/agenta-web/src/components/AppSelector/Welcome.tsx
@@ -3,10 +3,7 @@ import React from "react"
import {useAppTheme} from "../Layout/ThemeContextProvider"
import {createUseStyles} from "react-jss"
import {CheckCircleFilled, ClockCircleOutlined} from "@ant-design/icons"
-
-type StyleProps = {
- themeMode: "dark" | "light"
-}
+import {StyleProps} from "@/lib/Types"
const useStyles = createUseStyles({
head: {
diff --git a/agenta-web/src/components/AppSelector/modals/AddNewAppModal.tsx b/agenta-web/src/components/AppSelector/modals/AddNewAppModal.tsx
index c40e5778ef..c458a758be 100644
--- a/agenta-web/src/components/AppSelector/modals/AddNewAppModal.tsx
+++ b/agenta-web/src/components/AppSelector/modals/AddNewAppModal.tsx
@@ -3,10 +3,7 @@ import {AppstoreAddOutlined, CodeOutlined} from "@ant-design/icons"
import {Col, Modal, Row, Typography} from "antd"
import React from "react"
import {createUseStyles} from "react-jss"
-
-type StyleProps = {
- themeMode: "dark" | "light"
-}
+import {StyleProps} from "@/lib/Types"
const useStyles = createUseStyles({
modal: {
diff --git a/agenta-web/src/components/AppSelector/modals/MaxAppModal.tsx b/agenta-web/src/components/AppSelector/modals/MaxAppModal.tsx
index ea634fbd98..1885a43f16 100644
--- a/agenta-web/src/components/AppSelector/modals/MaxAppModal.tsx
+++ b/agenta-web/src/components/AppSelector/modals/MaxAppModal.tsx
@@ -3,10 +3,7 @@ import {AppstoreAddOutlined, CodeOutlined} from "@ant-design/icons"
import {Col, Modal, Row, Typography} from "antd"
import React from "react"
import {createUseStyles} from "react-jss"
-
-type StyleProps = {
- themeMode: "dark" | "light"
-}
+import {StyleProps} from "@/lib/Types"
const useStyles = createUseStyles({
modal: {
diff --git a/agenta-web/src/components/AppSelector/modals/WriteOwnAppModal.tsx b/agenta-web/src/components/AppSelector/modals/WriteOwnAppModal.tsx
index a0242f6e8c..c00d7c1829 100644
--- a/agenta-web/src/components/AppSelector/modals/WriteOwnAppModal.tsx
+++ b/agenta-web/src/components/AppSelector/modals/WriteOwnAppModal.tsx
@@ -6,10 +6,7 @@ import React, {useEffect, useRef} from "react"
import {createUseStyles} from "react-jss"
import YouTube, {YouTubeProps} from "react-youtube"
import {isDemo} from "@/lib/helpers/utils"
-
-type StyleProps = {
- themeMode: "dark" | "light"
-}
+import {StyleProps} from "@/lib/Types"
const useStyles = createUseStyles({
modal: ({themeMode}: StyleProps) => ({
diff --git a/agenta-web/src/components/Evaluations/AutomaticEvaluationResult.tsx b/agenta-web/src/components/Evaluations/AutomaticEvaluationResult.tsx
index 251fbc6319..c7a00dac7c 100644
--- a/agenta-web/src/components/Evaluations/AutomaticEvaluationResult.tsx
+++ b/agenta-web/src/components/Evaluations/AutomaticEvaluationResult.tsx
@@ -7,7 +7,7 @@ import {Button, Spin, Statistic, Table, Typography} from "antd"
import {useRouter} from "next/router"
import {useEffect, useState} from "react"
import {ColumnsType} from "antd/es/table"
-import {Evaluation, GenericObject} from "@/lib/Types"
+import {Evaluation, GenericObject, StyleProps} from "@/lib/Types"
import {DeleteOutlined} from "@ant-design/icons"
import {EvaluationFlow, EvaluationType} from "@/lib/enums"
import {createUseStyles} from "react-jss"
@@ -41,10 +41,6 @@ interface EvaluationListTableDataType {
variant_revision_ids: string[]
}
-type StyleProps = {
- themeMode: "dark" | "light"
-}
-
const useStyles = createUseStyles({
container: {
marginBottom: 20,
diff --git a/agenta-web/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx b/agenta-web/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx
index 8cdb13ced6..c778a7417a 100644
--- a/agenta-web/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx
+++ b/agenta-web/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx
@@ -1,14 +1,10 @@
import {useAppTheme} from "@/components/Layout/ThemeContextProvider"
-import {Evaluation, Variant} from "@/lib/Types"
+import {Evaluation, Variant, StyleProps} from "@/lib/Types"
import {Typography} from "antd"
import React from "react"
import {createUseStyles} from "react-jss"
import {VARIANT_COLORS} from "."
-type StyleProps = {
- themeMode: "dark" | "light"
-}
-
const useStyles = createUseStyles({
root: ({themeMode}: StyleProps) => ({
flex: 1,
diff --git a/agenta-web/src/components/Evaluations/HumanEvaluationResult.tsx b/agenta-web/src/components/Evaluations/HumanEvaluationResult.tsx
index 5eee97ec44..811adf13c0 100644
--- a/agenta-web/src/components/Evaluations/HumanEvaluationResult.tsx
+++ b/agenta-web/src/components/Evaluations/HumanEvaluationResult.tsx
@@ -4,7 +4,7 @@ import {Button, Spin, Statistic, Table, Typography} from "antd"
import {useRouter} from "next/router"
import {useEffect, useState} from "react"
import {ColumnsType} from "antd/es/table"
-import {EvaluationResponseType} from "@/lib/Types"
+import {EvaluationResponseType, StyleProps} from "@/lib/Types"
import {DeleteOutlined} from "@ant-design/icons"
import {EvaluationFlow, EvaluationType} from "@/lib/enums"
import {createUseStyles} from "react-jss"
@@ -47,10 +47,6 @@ export interface HumanEvaluationListTableDataType {
variantNames: string[]
}
-type StyleProps = {
- themeMode: "dark" | "light"
-}
-
const useStyles = createUseStyles({
container: {
marginBottom: 20,
diff --git a/agenta-web/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx b/agenta-web/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx
index 983324b13a..74396dab4a 100644
--- a/agenta-web/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx
+++ b/agenta-web/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx
@@ -1,5 +1,5 @@
import React, {useEffect, useState} from "react"
-import {GenericObject, JSSTheme, Parameter, Variant} from "@/lib/Types"
+import {GenericObject, JSSTheme, Parameter, Variant, StyleProps} from "@/lib/Types"
import {fetchVariants} from "@/services/api"
import {createNewEvaluation} from "@/services/human-evaluations/api"
import {isDemo} from "@/lib/helpers/utils"
@@ -16,10 +16,6 @@ import EvaluationErrorModal from "../Evaluations/EvaluationErrorModal"
import {dynamicComponent} from "@/lib/helpers/dynamic"
import {useLoadTestsetsList} from "@/services/testsets/api"
-type StyleProps = {
- themeMode: "dark" | "light"
-}
-
const useStyles = createUseStyles((theme: JSSTheme) => ({
evaluationContainer: {
border: "1px solid lightgrey",
diff --git a/agenta-web/src/components/Layout/Layout.tsx b/agenta-web/src/components/Layout/Layout.tsx
index 38662ec4a6..16a07a58a8 100644
--- a/agenta-web/src/components/Layout/Layout.tsx
+++ b/agenta-web/src/components/Layout/Layout.tsx
@@ -28,11 +28,11 @@ import moonIcon from "@/media/night.png"
import sunIcon from "@/media/sun.png"
import {useProfileData} from "@/contexts/profile.context"
import {ThemeProvider} from "react-jss"
+import {StyleProps as MainStyleProps} from "@/lib/Types"
const {Content, Footer} = Layout
-type StyleProps = {
- themeMode: "dark" | "light"
+interface StyleProps extends MainStyleProps {
footerHeight: number
}
diff --git a/agenta-web/src/components/Playground/AddToTestSetDrawer/AddToTestSetDrawer.tsx b/agenta-web/src/components/Playground/AddToTestSetDrawer/AddToTestSetDrawer.tsx
index ba7d4f2a8c..1d835e28dd 100644
--- a/agenta-web/src/components/Playground/AddToTestSetDrawer/AddToTestSetDrawer.tsx
+++ b/agenta-web/src/components/Playground/AddToTestSetDrawer/AddToTestSetDrawer.tsx
@@ -1,6 +1,6 @@
import AlertPopup from "@/components/AlertPopup/AlertPopup"
import {useAppTheme} from "../../Layout/ThemeContextProvider"
-import {ChatMessage, ChatRole, GenericObject, testset} from "@/lib/Types"
+import {ChatMessage, ChatRole, GenericObject, testset, StyleProps} from "@/lib/Types"
import {removeKeys, renameVariables} from "@/lib/helpers/utils"
import {
createNewTestset,
@@ -29,10 +29,6 @@ import {useLocalStorage, useUpdateEffect} from "usehooks-ts"
import ChatInputs from "@/components/ChatInputs/ChatInputs"
import _ from "lodash"
-type StyleProps = {
- themeMode: "dark" | "light"
-}
-
const useStyles = createUseStyles({
footer: {
display: "flex",
diff --git a/agenta-web/src/components/Playground/Views/TestView.tsx b/agenta-web/src/components/Playground/Views/TestView.tsx
index 4d9cb7ae61..ec54a974a2 100644
--- a/agenta-web/src/components/Playground/Views/TestView.tsx
+++ b/agenta-web/src/components/Playground/Views/TestView.tsx
@@ -2,7 +2,7 @@ import React, {useContext, useEffect, useRef, useState} from "react"
import {Button, Input, Card, Row, Col, Space, Form, Modal} from "antd"
import {CaretRightOutlined, CloseCircleOutlined, PlusOutlined} from "@ant-design/icons"
import {callVariant} from "@/services/api"
-import {ChatMessage, ChatRole, GenericObject, JSSTheme, Parameter, Variant} from "@/lib/Types"
+import {ChatMessage, ChatRole, GenericObject, JSSTheme, Parameter, Variant, StyleProps} from "@/lib/Types"
import {batchExecute, randString, removeKeys} from "@/lib/helpers/utils"
import LoadTestsModal from "../LoadTestsModal"
import AddToTestSetDrawer from "../AddToTestSetDrawer/AddToTestSetDrawer"
@@ -30,10 +30,6 @@ const promptRevision: any = dynamicService("promptVersioning/api")
dayjs.extend(relativeTime)
dayjs.extend(duration)
-type StyleProps = {
- themeMode: "dark" | "light"
-}
-
const {TextArea} = Input
const LOADING_TEXT = "Loading..."
diff --git a/agenta-web/src/components/SecondaryButton/SecondaryButton.tsx b/agenta-web/src/components/SecondaryButton/SecondaryButton.tsx
index b67380055f..0a6b78e5a5 100644
--- a/agenta-web/src/components/SecondaryButton/SecondaryButton.tsx
+++ b/agenta-web/src/components/SecondaryButton/SecondaryButton.tsx
@@ -10,10 +10,6 @@ type SecondaryBtnProps = {
onClick: () => void
}
-type StyleProps = {
- themeMode: "dark" | "light"
-}
-
const SecondaryButton: React.FC = ({children, ...props}) => {
const {appTheme} = useAppTheme()
diff --git a/agenta-web/src/lib/Types.ts b/agenta-web/src/lib/Types.ts
index c742244029..374d054a1d 100644
--- a/agenta-web/src/lib/Types.ts
+++ b/agenta-web/src/lib/Types.ts
@@ -488,3 +488,7 @@ export type PaginationQuery = {
page: number
pageSize: number
}
+
+export type StyleProps = {
+ themeMode: "dark" | "light"
+}
From 44733b773ad9ad35ebcdc693da9387763ebd9fba Mon Sep 17 00:00:00 2001
From: mmabrouk <4510758+mmabrouk@users.noreply.github.com>
Date: Tue, 18 Jun 2024 08:51:43 +0000
Subject: [PATCH 084/268] Bump versions
---
agenta-backend/pyproject.toml | 2 +-
agenta-cli/pyproject.toml | 2 +-
agenta-web/package-lock.json | 4 ++--
agenta-web/package.json | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/pyproject.toml b/agenta-backend/pyproject.toml
index ac288e62be..5553638939 100644
--- a/agenta-backend/pyproject.toml
+++ b/agenta-backend/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta_backend"
-version = "0.17.3"
+version = "0.17.4"
description = ""
authors = ["Mahmoud Mabrouk "]
readme = "README.md"
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index e9864c0874..2c3f012e46 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.4a0"
+version = "0.17.4"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
diff --git a/agenta-web/package-lock.json b/agenta-web/package-lock.json
index 9af6f3747b..47de851a13 100644
--- a/agenta-web/package-lock.json
+++ b/agenta-web/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "agenta",
- "version": "0.17.3",
+ "version": "0.17.4",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "agenta",
- "version": "0.17.3",
+ "version": "0.17.4",
"dependencies": {
"@ant-design/colors": "^7.0.0",
"@ant-design/icons": "^5.3.7",
diff --git a/agenta-web/package.json b/agenta-web/package.json
index 5efd10c16f..e50a0b2888 100644
--- a/agenta-web/package.json
+++ b/agenta-web/package.json
@@ -1,6 +1,6 @@
{
"name": "agenta",
- "version": "0.17.3",
+ "version": "0.17.4",
"private": true,
"engines": {
"node": ">=18"
From 26cbbe2cf403cba3265f14c78e82cc8c57cfacaa Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 16:55:59 +0100
Subject: [PATCH 085/268] refactor (backend): modified ondelete behaviour for
evaluation config and evaluation scenario result models
---
agenta-backend/agenta_backend/models/db_models.py | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 9dcf5b589a..f2d7d1099f 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -86,9 +86,6 @@ class AppDB(Base):
user = relationship("UserDB")
variant = relationship("AppVariantDB", cascade="all, delete-orphan", backref="app")
- evaluator_config = relationship(
- "EvaluatorConfigDB", cascade="all, delete-orphan", backref="app"
- )
testset = relationship("TestSetDB", cascade="all, delete-orphan", backref="app")
base = relationship("DeploymentDB", cascade="all, delete-orphan", backref="app")
deployment = relationship(
@@ -347,7 +344,7 @@ class EvaluatorConfigDB(Base):
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="SET NULL"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
name = Column(String)
evaluator_key = Column(String)
@@ -488,7 +485,7 @@ class EvaluationScenarioResultDB(Base):
UUID(as_uuid=True), ForeignKey("evaluation_scenarios.id", ondelete="CASCADE")
)
evaluator_config_id = Column(
- UUID(as_uuid=True), ForeignKey("evaluators_configs.id")
+ UUID(as_uuid=True), ForeignKey("evaluators_configs.id", ondelete="SET NULL")
)
result = Column(JSONB) # Result
From 568d0c2818351ad253f00fc5ac5153e044054a32 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 16:56:47 +0100
Subject: [PATCH 086/268] refactor (backend): improve code clarity for
remove_app service function
---
.../agenta_backend/services/app_manager.py | 22 ++++++++++---------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index 86ae8c81d9..6522fe42e0 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -308,23 +308,25 @@ async def remove_app(app: AppDB):
logger.error(error_msg)
raise ValueError(error_msg)
+ app_variants = await db_manager.list_app_variants(str(app.id))
try:
- app_variants = await db_manager.list_app_variants(str(app.id))
for app_variant_db in app_variants:
await terminate_and_remove_app_variant(app_variant_db=app_variant_db)
logger.info(
f"Successfully deleted app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name}."
)
-
- if len(app_variants) <= 1: # Failsafe in case something went wrong before
- logger.debug("remove_app_related_resources")
- await remove_app_related_resources(str(app.id))
-
except Exception as e:
- logger.error(
- f"An error occurred while deleting app {app.id} and its associated resources: {str(e)}"
- )
- raise e from None
+ # Failsafe: in case something went wrong,
+ # delete app and its related resources
+ try:
+ if len(app_variants) <= 1:
+ logger.debug("remove_app_related_resources")
+ await remove_app_related_resources(str(app.id))
+ except Exception as e:
+ logger.error(
+ f"An error occurred while deleting app {app.id} and its associated resources: {str(e)}"
+ )
+ raise e from None
async def update_variant_parameters(
From dcf3879a067a9af5cd5813381cdae60de90feadc Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 16:57:28 +0100
Subject: [PATCH 087/268] refactor (backend): move
fetch_evaluations_by_resource to db_manager and added function to remove
testsets by ids
---
.../agenta_backend/services/db_manager.py | 59 ++++++++++++++++++-
.../services/evaluation_service.py | 21 -------
2 files changed, 58 insertions(+), 22 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 9ce486318f..ffaef622b4 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -18,7 +18,7 @@
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
from agenta_backend.models.db_engine import db_engine
-from sqlalchemy.orm import selectinload, joinedload, aliased, load_only
+from sqlalchemy.orm import joinedload, aliased, load_only
from agenta_backend.models.api.api_models import (
App,
@@ -73,6 +73,7 @@
AppVariantRevisionsDB,
HumanEvaluationVariantDB,
EvaluationScenarioResultDB,
+ EvaluationEvaluatorConfigDB,
EvaluationAggregatedResultDB,
)
@@ -1627,6 +1628,23 @@ async def remove_environment(environment_db: AppEnvironmentDB):
await session.commit()
+async def remove_testsets(testset_ids: List[str]):
+ """
+ Removes testsets.
+
+ Args:
+ testset_ids (List[str]): The testset identifiers
+ """
+
+ async with db_engine.get_session() as session:
+ query = select(TestSetDB).where(TestSetDB.id.in_(testset_ids))
+ result = await session.execute(query)
+ testsets = result.scalars().all()
+ for testset in testsets:
+ await session.delete(testset)
+ await session.commit()
+
+
async def remove_app_testsets(app_id: str):
"""Returns a list of testsets owned by an app.
@@ -2632,6 +2650,45 @@ async def list_evaluations(app_id: str):
return evaluations
+async def fetch_evaluations_by_resource(resource_type: str, resource_ids: List[str]):
+ """
+    Fetches evaluations by resource.
+
+ Args:
+ resource_type: The resource type
+ resource_ids: The resource identifiers
+
+ Returns:
+ The evaluations by resource.
+
+ Raises:
+        HTTPException: 400 if resource_type is not supported
+ """
+
+ ids = list(map(uuid.UUID, resource_ids))
+
+ async with db_engine.get_session() as session:
+ if resource_type == "variant":
+ query = select(EvaluationDB).filter(EvaluationDB.variant_id.in_(ids))
+ elif resource_type == "testset":
+ query = select(EvaluationDB).filter(EvaluationDB.testset_id.in_(ids))
+ elif resource_type == "evaluator_config":
+ query = (
+ select(EvaluationDB)
+ .join(EvaluationDB.evaluator_configs)
+ .filter(EvaluationEvaluatorConfigDB.evaluator_config_id.in_(ids))
+ )
+ else:
+ raise HTTPException(
+ status_code=400,
+ detail=f"resource_type {resource_type} is not supported",
+ )
+
+ result = await session.execute(query)
+ res = result.scalars().all()
+ return res
+
+
async def delete_evaluations(evaluation_ids: List[str]) -> None:
"""Delete evaluations based on the ids provided from the db.
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index ccda51e314..750c9e257e 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -548,24 +548,3 @@ def remove_duplicates(csvdata):
unique_entries.append(entry)
return unique_entries
-
-
-async def fetch_evaluations_by_resource(resource_type: str, resource_ids: List[str]):
- ids = list(map(uuid.UUID, resource_ids))
-
- async with db_engine.get_session() as session:
- if resource_type == "variant":
- query = select(EvaluationDB).filter(EvaluationDB.variant_id.in_(ids))
- elif resource_type == "testset":
- query = select(EvaluationDB).filter(EvaluationDB.testset_id.in_(ids))
- # elif resource_type == "evaluator_config":
- # query = select(EvaluationDB).filter(EvaluationDB.evaluators_configs_id.in_(ids))
- else:
- raise HTTPException(
- status_code=400,
- detail=f"resource_type {resource_type} is not supported",
- )
-
- result = await session.execute(query)
- res = result.scalars().all()
- return res
From 8983b27c618cfcdf0d5adaad7ef649f79812d5b9 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 16:58:36 +0100
Subject: [PATCH 088/268] refactor (backend): update
fetch_evaluations_by_resource import from evaluation_service to db_manager
and added traceback to trace error
---
.../agenta_backend/routers/evaluation_router.py | 11 ++++++-----
.../agenta_backend/routers/evaluators_router.py | 5 ++++-
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index 7e21e05091..98557f6982 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -24,8 +24,6 @@
from agenta_backend.commons.models.shared_models import Permission
from agenta_backend.commons.utils.permissions import check_action_access
-from beanie import PydanticObjectId as ObjectId
-
router = APIRouter()
logger = logging.getLogger(__name__)
@@ -34,7 +32,7 @@
@router.get(
"/by_resource/",
- response_model=List[ObjectId],
+ response_model=List[str],
)
async def fetch_evaluation_ids(
app_id: str,
@@ -73,11 +71,14 @@ async def fetch_evaluation_ids(
{"detail": error_msg},
status_code=403,
)
- evaluations = await evaluation_service.fetch_evaluations_by_resource(
+ evaluations = await db_manager.fetch_evaluations_by_resource(
resource_type, resource_ids
)
- return list(map(lambda x: x.id, evaluations))
+ return list(map(lambda x: str(x.id), evaluations))
except Exception as exc:
+ import traceback
+
+ traceback.print_exc()
raise HTTPException(status_code=500, detail=str(exc))
diff --git a/agenta-backend/agenta_backend/routers/evaluators_router.py b/agenta-backend/agenta_backend/routers/evaluators_router.py
index b0fce13c8d..4892c5d6ea 100644
--- a/agenta-backend/agenta_backend/routers/evaluators_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluators_router.py
@@ -184,7 +184,7 @@ async def update_evaluator_config(
)
evaluators_configs = await evaluator_manager.update_evaluator_config(
- evaluator_config_id=evaluator_config_id, updates=payload
+ evaluator_config_id=evaluator_config_id, updates=payload.model_dump()
)
return evaluators_configs
except Exception as e:
@@ -222,6 +222,9 @@ async def delete_evaluator_config(evaluator_config_id: str, request: Request):
success = await evaluator_manager.delete_evaluator_config(evaluator_config_id)
return success
except Exception as e:
+ import traceback
+
+ traceback.print_exc()
raise HTTPException(
status_code=500, detail=f"Error deleting evaluator configuration: {str(e)}"
)
From 2dc3e219b10d705869bf263ca18b1486f6416aea Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 17:03:18 +0100
Subject: [PATCH 089/268] minor refactor (backend): revert back to using
jsonresponse for 403 exceptions
---
.../agenta_backend/routers/evaluation_router.py | 8 ++++----
agenta-backend/agenta_backend/routers/testset_router.py | 4 ++--
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index 98557f6982..e883b70237 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -263,8 +263,8 @@ async def fetch_evaluation_scenarios(
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
logger.error(error_msg)
- raise HTTPException(
- detail=error_msg,
+ return JSONResponse(
+ {"detail": error_msg},
status_code=403,
)
@@ -362,8 +362,8 @@ async def fetch_evaluation(
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
logger.error(error_msg)
- raise HTTPException(
- detail=error_msg,
+ return JSONResponse(
+ {"detail": error_msg},
status_code=403,
)
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 5a28380b80..6df0a42a29 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -336,8 +336,8 @@ async def get_testsets(
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
logger.error(error_msg)
- raise HTTPException(
- detail=error_msg,
+ return JSONResponse(
+ {"detail": error_msg},
status_code=403,
)
From 8da15c9e64c3da8f2c5d8a2f319c652fb242074b Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 17:21:07 +0100
Subject: [PATCH 090/268] refactor (backend): prefetch variant revision and fix
failing /configs endpoint
---
.../agenta_backend/routers/configs_router.py | 20 ++++++++++---------
.../agenta_backend/services/db_manager.py | 12 ++++++++++-
2 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index 05e367f674..fe4342c4bf 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -115,7 +115,7 @@ async def get_config(
# in case environment_name is provided, find the variant deployed
if environment_name:
app_environments = await db_manager.list_environments(
- app_id=str(base_db.app.ref.id)
+ app_id=str(base_db.app_id) # type: ignore
)
found_variant_revision = next(
(
@@ -130,20 +130,21 @@ async def get_config(
status_code=400,
detail=f"Environment name {environment_name} not found for base {base_id}",
)
- if str(found_variant_revision.base.id) != base_id:
+ if str(found_variant_revision.base_id) != base_id:
raise HTTPException(
status_code=400,
detail=f"Environment {environment_name} does not deploy base {base_id}",
)
+
variant_revision = found_variant_revision.revision
- config = found_variant_revision.config
+ config = {"name": found_variant_revision.config_name, "parameters": found_variant_revision.config_parameters}
elif config_name:
variants_db = await db_manager.list_variants_for_base(base_db)
found_variant = next(
(
variant_db
for variant_db in variants_db
- if variant_db.config_name == config_name
+ if variant_db.config_name == config_name # type: ignore
),
None,
)
@@ -153,12 +154,13 @@ async def get_config(
detail=f"Config name {config_name} not found for base {base_id}",
)
variant_revision = found_variant.revision
- config = found_variant.config
- logger.debug(config.parameters)
+ config = {"name": found_variant.config_name, "parameters": found_variant.config_parameters}
+
+ assert "name" and "parameters" in config, "'name' and 'parameters' not found in "
return GetConfigResponse(
- config_name=config.config_name,
- current_version=variant_revision,
- parameters=config.parameters,
+ config_name=config["name"], # type: ignore
+ current_version=variant_revision, # type: ignore
+ parameters=config["parameters"], # type: ignore
)
except HTTPException as e:
logger.error(f"get_config http exception: {e.detail}")
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index ffaef622b4..1fa49961e7 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1412,7 +1412,17 @@ async def list_environments(app_id: str, **kwargs: dict):
async with db_engine.get_session() as session:
result = await session.execute(
- select(AppEnvironmentDB).filter_by(app_id=uuid.UUID(app_id))
+ select(AppEnvironmentDB)
+ .options(
+ joinedload(AppEnvironmentDB.deployed_app_variant_revision)
+ .load_only(
+ AppVariantRevisionsDB.base_id, # type: ignore
+ AppVariantRevisionsDB.revision, # type: ignore
+ AppVariantRevisionsDB.config_name, # type: ignore
+ AppVariantRevisionsDB.config_parameters # type: ignore
+ )
+ )
+ .filter_by(app_id=uuid.UUID(app_id))
)
environments_db = result.scalars().all()
return environments_db
From d806afc9397b6cf1b83a42a0b922b2af603d6aa4 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 17:38:35 +0100
Subject: [PATCH 091/268] minor refactor (backend): improve code readability
for create_image
---
.../agenta_backend/services/db_manager.py | 46 ++++++++-----------
1 file changed, 18 insertions(+), 28 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 1fa49961e7..29d9cf6cde 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -3,28 +3,21 @@
import logging
from pathlib import Path
from urllib.parse import urlparse
-from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from fastapi import HTTPException
-from fastapi.responses import JSONResponse
from agenta_backend.models import converters
from agenta_backend.utils.common import isCloudEE
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.services.json_importer_helper import get_json
from sqlalchemy import func
from sqlalchemy.future import select
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
-from agenta_backend.models.db_engine import db_engine
from sqlalchemy.orm import joinedload, aliased, load_only
-from agenta_backend.models.api.api_models import (
- App,
- Template,
-)
-
if isCloudEE():
from agenta_backend.commons.services import db_manager_ee
from agenta_backend.commons.utils.permissions import check_rbac_permission
@@ -78,15 +71,15 @@
)
from agenta_backend.models.shared_models import (
- HumanEvaluationScenarioInput,
Result,
ConfigDB,
+ TemplateType,
CorrectAnswer,
AggregatedResult,
EvaluationScenarioResult,
EvaluationScenarioInput,
EvaluationScenarioOutput,
- TemplateType,
+ HumanEvaluationScenarioInput,
)
@@ -502,20 +495,9 @@ async def create_image(
ImageDB: The created image.
"""
- # Validate image type
- valid_image_types = ["image", "zip"]
- if image_type not in valid_image_types:
- raise Exception("Invalid image type")
-
- # Validate either docker_id or template_uri, but not both
- if (docker_id is None) == (template_uri is None):
- raise Exception("Provide either docker_id or template_uri, but not both")
-
- # Validate docker_id or template_uri based on image_type
- if image_type == "image" and docker_id is None:
- raise Exception("Docker id must be provided for type image")
- elif image_type == "zip" and template_uri is None:
- raise Exception("template_uri must be provided for type zip")
+ assert image_type == TemplateType.IMAGE.value and docker_id is None, "docker_id must be provided for type image"
+ assert image_type == TemplateType.ZIP.value and docker_id is None, "template_uri must be provided for zip image"
+ assert (docker_id is None) == (template_uri is None), "Provide either docker_id or template_uri, but not both"
async with db_engine.get_session() as session:
image = ImageDB(
@@ -523,11 +505,19 @@ async def create_image(
user_id=user.id,
)
- if image_type == "zip":
- image.type = "zip" # type: ignore
+ image_types = {
+ "zip": TemplateType.ZIP.value,
+ "image": TemplateType.IMAGE.value
+ }
+
+ image_type_value = image_types.get(image_type)
+ if image_type_value is None:
+ raise ValueError(f"Invalid image_type: {image_type}")
+
+ image.type = image_type_value # type: ignore
+ if image_type_value == "zip":
image.template_uri = template_uri # type: ignore
- elif image_type == "image":
- image.type = "image" # type: ignore
+ else:
image.tags = tags # type: ignore
image.docker_id = docker_id # type: ignore
From 9f2fb2b7034fcb54271ec625239b4a5813b5db2a Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 17:39:53 +0100
Subject: [PATCH 092/268] chore (backend): format codebase with black@23.12.0
---
.../agenta_backend/routers/configs_router.py | 2 +-
.../agenta_backend/services/app_manager.py | 2 +-
.../agenta_backend/services/db_manager.py | 28 ++++++++++---------
3 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index fe4342c4bf..a7c9a531d6 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -156,7 +156,7 @@ async def get_config(
variant_revision = found_variant.revision
config = {"name": found_variant.config_name, "parameters": found_variant.config_parameters}
- assert "name" and "parameters" in config, "'name' and 'parameters' not found in "
+ assert "name" and "parameters" in config, "'name' and 'parameters' not found in configuration"
return GetConfigResponse(
config_name=config["name"], # type: ignore
current_version=variant_revision, # type: ignore
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index 6522fe42e0..8960f11a44 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -316,7 +316,7 @@ async def remove_app(app: AppDB):
f"Successfully deleted app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name}."
)
except Exception as e:
- # Failsafe: in case something went wrong,
+ # Failsafe: in case something went wrong,
# delete app and its related resources
try:
if len(app_variants) <= 1:
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 29d9cf6cde..1465b7d3cd 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -495,9 +495,15 @@ async def create_image(
ImageDB: The created image.
"""
- assert image_type == TemplateType.IMAGE.value and docker_id is None, "docker_id must be provided for type image"
- assert image_type == TemplateType.ZIP.value and docker_id is None, "template_uri must be provided for zip image"
- assert (docker_id is None) == (template_uri is None), "Provide either docker_id or template_uri, but not both"
+ assert (
+ image_type == TemplateType.IMAGE.value and docker_id is None
+ ), "docker_id must be provided for type image"
+ assert (
+ image_type == TemplateType.ZIP.value and docker_id is None
+ ), "template_uri must be provided for zip image"
+ assert (docker_id is None) == (
+ template_uri is None
+ ), "Provide either docker_id or template_uri, but not both"
async with db_engine.get_session() as session:
image = ImageDB(
@@ -505,10 +511,7 @@ async def create_image(
user_id=user.id,
)
- image_types = {
- "zip": TemplateType.ZIP.value,
- "image": TemplateType.IMAGE.value
- }
+ image_types = {"zip": TemplateType.ZIP.value, "image": TemplateType.IMAGE.value}
image_type_value = image_types.get(image_type)
if image_type_value is None:
@@ -1404,12 +1407,11 @@ async def list_environments(app_id: str, **kwargs: dict):
result = await session.execute(
select(AppEnvironmentDB)
.options(
- joinedload(AppEnvironmentDB.deployed_app_variant_revision)
- .load_only(
- AppVariantRevisionsDB.base_id, # type: ignore
- AppVariantRevisionsDB.revision, # type: ignore
- AppVariantRevisionsDB.config_name, # type: ignore
- AppVariantRevisionsDB.config_parameters # type: ignore
+ joinedload(AppEnvironmentDB.deployed_app_variant_revision).load_only(
+ AppVariantRevisionsDB.base_id, # type: ignore
+ AppVariantRevisionsDB.revision, # type: ignore
+ AppVariantRevisionsDB.config_name, # type: ignore
+ AppVariantRevisionsDB.config_parameters, # type: ignore
)
)
.filter_by(app_id=uuid.UUID(app_id))
From 909ca6755e6a8dd84c0d7fd77a5911d74571b5d9 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 18:32:24 +0100
Subject: [PATCH 093/268] minor refactor (backend): added logic to remove app
related resources
---
agenta-backend/agenta_backend/services/app_manager.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index 8960f11a44..fa18b7bd3d 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -315,6 +315,11 @@ async def remove_app(app: AppDB):
logger.info(
f"Successfully deleted app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name}."
)
+
+ if len(app_variants) <= 1:
+ logger.debug("remove_app_related_resources")
+ await remove_app_related_resources(str(app.id))
+
except Exception as e:
# Failsafe: in case something went wrong,
# delete app and its related resources
From e42df10015b73b67dcf71cca0f256a5a4b12a109 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 20:36:56 +0100
Subject: [PATCH 094/268] minor refactor (backend): revert assertion back to
conditional check for image creation
---
.../agenta_backend/services/db_manager.py | 23 +++++++++++--------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 1465b7d3cd..aa581a7f48 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -495,15 +495,20 @@ async def create_image(
ImageDB: The created image.
"""
- assert (
- image_type == TemplateType.IMAGE.value and docker_id is None
- ), "docker_id must be provided for type image"
- assert (
- image_type == TemplateType.ZIP.value and docker_id is None
- ), "template_uri must be provided for zip image"
- assert (docker_id is None) == (
- template_uri is None
- ), "Provide either docker_id or template_uri, but not both"
+ # Validate image type
+ valid_image_types = ["image", "zip"]
+ if image_type not in valid_image_types:
+ raise Exception("Invalid image type")
+
+ # Validate either docker_id or template_uri, but not both
+ if (docker_id is None) == (template_uri is None):
+ raise Exception("Provide either docker_id or template_uri, but not both")
+
+ # Validate docker_id or template_uri based on image_type
+ if image_type == "image" and docker_id is None:
+ raise Exception("Docker id must be provided for type image")
+ elif image_type == "zip" and template_uri is None:
+ raise Exception("template_uri must be provided for type zip")
async with db_engine.get_session() as session:
image = ImageDB(
From 35946c3354b66ab902da7ad79bcb5160740d7b3c Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 20:58:39 +0100
Subject: [PATCH 095/268] minor refactor (backend): change model_dump() to
dict() when updating evaluator config
---
agenta-backend/agenta_backend/routers/evaluators_router.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/routers/evaluators_router.py b/agenta-backend/agenta_backend/routers/evaluators_router.py
index 4892c5d6ea..3cd966d982 100644
--- a/agenta-backend/agenta_backend/routers/evaluators_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluators_router.py
@@ -184,7 +184,7 @@ async def update_evaluator_config(
)
evaluators_configs = await evaluator_manager.update_evaluator_config(
- evaluator_config_id=evaluator_config_id, updates=payload.model_dump()
+ evaluator_config_id=evaluator_config_id, updates=payload.dict()
)
return evaluators_configs
except Exception as e:
From df7e48d04ea08268591dafb44b83a02c3faea73b Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 18 Jun 2024 21:19:37 +0100
Subject: [PATCH 096/268] refactor (tools): installed sqlalchemy-json for jsonb
mutation tracking
---
agenta-backend/poetry.lock | 185 +++++++++++++++++++++++++---------
agenta-backend/pyproject.toml | 3 +-
2 files changed, 137 insertions(+), 51 deletions(-)
diff --git a/agenta-backend/poetry.lock b/agenta-backend/poetry.lock
index bfa1888b20..20bb34137c 100644
--- a/agenta-backend/poetry.lock
+++ b/agenta-backend/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "aioboto3"
@@ -208,6 +208,17 @@ files = [
[package.dependencies]
vine = ">=5.0.0,<6.0.0"
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
+ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
+]
+
[[package]]
name = "anyio"
version = "3.7.1"
@@ -2315,55 +2326,113 @@ files = [
[[package]]
name = "pydantic"
-version = "1.10.15"
-description = "Data validation and settings management using python type hints"
+version = "2.7.4"
+description = "Data validation using Python type hints"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"},
- {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"},
- {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50f1666a9940d3d68683c9d96e39640f709d7a72ff8702987dab1761036206bb"},
- {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82790d4753ee5d00739d6cb5cf56bceb186d9d6ce134aca3ba7befb1eedbc2c8"},
- {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d207d5b87f6cbefbdb1198154292faee8017d7495a54ae58db06762004500d00"},
- {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e49db944fad339b2ccb80128ffd3f8af076f9f287197a480bf1e4ca053a866f0"},
- {file = "pydantic-1.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:d3b5c4cbd0c9cb61bbbb19ce335e1f8ab87a811f6d589ed52b0254cf585d709c"},
- {file = "pydantic-1.10.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c3d5731a120752248844676bf92f25a12f6e45425e63ce22e0849297a093b5b0"},
- {file = "pydantic-1.10.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c365ad9c394f9eeffcb30a82f4246c0006417f03a7c0f8315d6211f25f7cb654"},
- {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3287e1614393119c67bd4404f46e33ae3be3ed4cd10360b48d0a4459f420c6a3"},
- {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be51dd2c8596b25fe43c0a4a59c2bee4f18d88efb8031188f9e7ddc6b469cf44"},
- {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6a51a1dd4aa7b3f1317f65493a182d3cff708385327c1c82c81e4a9d6d65b2e4"},
- {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4e316e54b5775d1eb59187f9290aeb38acf620e10f7fd2f776d97bb788199e53"},
- {file = "pydantic-1.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:0d142fa1b8f2f0ae11ddd5e3e317dcac060b951d605fda26ca9b234b92214986"},
- {file = "pydantic-1.10.15-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7ea210336b891f5ea334f8fc9f8f862b87acd5d4a0cbc9e3e208e7aa1775dabf"},
- {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3453685ccd7140715e05f2193d64030101eaad26076fad4e246c1cc97e1bb30d"},
- {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bea1f03b8d4e8e86702c918ccfd5d947ac268f0f0cc6ed71782e4b09353b26f"},
- {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:005655cabc29081de8243126e036f2065bd7ea5b9dff95fde6d2c642d39755de"},
- {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:af9850d98fc21e5bc24ea9e35dd80a29faf6462c608728a110c0a30b595e58b7"},
- {file = "pydantic-1.10.15-cp37-cp37m-win_amd64.whl", hash = "sha256:d31ee5b14a82c9afe2bd26aaa405293d4237d0591527d9129ce36e58f19f95c1"},
- {file = "pydantic-1.10.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5e09c19df304b8123938dc3c53d3d3be6ec74b9d7d0d80f4f4b5432ae16c2022"},
- {file = "pydantic-1.10.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7ac9237cd62947db00a0d16acf2f3e00d1ae9d3bd602b9c415f93e7a9fc10528"},
- {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:584f2d4c98ffec420e02305cf675857bae03c9d617fcfdc34946b1160213a948"},
- {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbc6989fad0c030bd70a0b6f626f98a862224bc2b1e36bfc531ea2facc0a340c"},
- {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d573082c6ef99336f2cb5b667b781d2f776d4af311574fb53d908517ba523c22"},
- {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6bd7030c9abc80134087d8b6e7aa957e43d35714daa116aced57269a445b8f7b"},
- {file = "pydantic-1.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:3350f527bb04138f8aff932dc828f154847fbdc7a1a44c240fbfff1b57f49a12"},
- {file = "pydantic-1.10.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:51d405b42f1b86703555797270e4970a9f9bd7953f3990142e69d1037f9d9e51"},
- {file = "pydantic-1.10.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a980a77c52723b0dc56640ced396b73a024d4b74f02bcb2d21dbbac1debbe9d0"},
- {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67f1a1fb467d3f49e1708a3f632b11c69fccb4e748a325d5a491ddc7b5d22383"},
- {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:676ed48f2c5bbad835f1a8ed8a6d44c1cd5a21121116d2ac40bd1cd3619746ed"},
- {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:92229f73400b80c13afcd050687f4d7e88de9234d74b27e6728aa689abcf58cc"},
- {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2746189100c646682eff0bce95efa7d2e203420d8e1c613dc0c6b4c1d9c1fde4"},
- {file = "pydantic-1.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:394f08750bd8eaad714718812e7fab615f873b3cdd0b9d84e76e51ef3b50b6b7"},
- {file = "pydantic-1.10.15-py3-none-any.whl", hash = "sha256:28e552a060ba2740d0d2aabe35162652c1459a0b9069fe0db7f4ee0e18e74d58"},
- {file = "pydantic-1.10.15.tar.gz", hash = "sha256:ca832e124eda231a60a041da4f013e3ff24949d94a01154b137fc2f2a43c3ffb"},
-]
-
-[package.dependencies]
-typing-extensions = ">=4.2.0"
-
-[package.extras]
-dotenv = ["python-dotenv (>=0.10.4)"]
-email = ["email-validator (>=1.0.3)"]
+ {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
+ {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
+]
+
+[package.dependencies]
+annotated-types = ">=0.4.0"
+pydantic-core = "2.18.4"
+typing-extensions = ">=4.6.1"
+
+[package.extras]
+email = ["email-validator (>=2.0.0)"]
+
+[[package]]
+name = "pydantic-core"
+version = "2.18.4"
+description = "Core functionality for Pydantic validation and serialization"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
+ {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
+ {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
+ {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
+ {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
+ {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
+ {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
+ {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
+ {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
+ {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
+ {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
+ {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
+ {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
+ {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
+ {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
+ {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
+ {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
+ {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
+ {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
+ {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
+ {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pyjwt"
@@ -2631,7 +2700,6 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -3069,6 +3137,23 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3_binary"]
+[[package]]
+name = "sqlalchemy-json"
+version = "0.7.0"
+description = "JSON type with nested change tracking for SQLAlchemy"
+optional = false
+python-versions = ">= 3.6"
+files = [
+ {file = "sqlalchemy-json-0.7.0.tar.gz", hash = "sha256:620d0b26f648f21a8fa9127df66f55f83a5ab4ae010e5397a5c6989a08238561"},
+ {file = "sqlalchemy_json-0.7.0-py3-none-any.whl", hash = "sha256:27881d662ca18363a4ac28175cc47ea2a6f2bef997ae1159c151026b741818e6"},
+]
+
+[package.dependencies]
+sqlalchemy = ">=0.7"
+
+[package.extras]
+dev = ["pytest"]
+
[[package]]
name = "starkbank-ecdsa"
version = "2.2.0"
@@ -3814,4 +3899,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "20b3e4e19a02e246d051de3e7853ef1e2b9f45ebafd9512c71bf1f9e3ddd8d24"
+content-hash = "7118cded062bfcd960f08cc272d4eab330222cc1e51e9e03e7b728931642e0b2"
diff --git a/agenta-backend/pyproject.toml b/agenta-backend/pyproject.toml
index 3d071913b8..b9224c29a0 100644
--- a/agenta-backend/pyproject.toml
+++ b/agenta-backend/pyproject.toml
@@ -9,7 +9,7 @@ packages = [{include = "agenta_backend"}]
[tool.poetry.dependencies]
python = "^3.9"
fastapi = "^0.109.1"
-pydantic = "^1.10.7"
+pydantic = "^2.7.4"
docker = "7.1.0"
toml = "^0.10.2"
uvicorn = "^0.22.0"
@@ -38,6 +38,7 @@ sqlalchemy = "^2.0.30"
asyncpg = "^0.29.0"
psycopg2-binary = "^2.9.9"
uuid-utils = "^0.7.0"
+sqlalchemy-json = "^0.7.0"
[tool.poetry.group.dev.dependencies]
pytest = "^7.3.1"
From f75c46abe6cdcbb202619e52d4b061b113dbd7a7 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 09:24:17 +0100
Subject: [PATCH 097/268] refactor (backend): update testset and user api
models
---
agenta-backend/agenta_backend/models/api/testset_model.py | 2 +-
agenta-backend/agenta_backend/models/api/user_models.py | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/testset_model.py b/agenta-backend/agenta_backend/models/api/testset_model.py
index 48621ddd1f..46dfc965e5 100644
--- a/agenta-backend/agenta_backend/models/api/testset_model.py
+++ b/agenta-backend/agenta_backend/models/api/testset_model.py
@@ -45,7 +45,7 @@ class NewTestset(BaseModel):
class TestSetOutputResponse(BaseModel):
- id: str = Field(..., alias="_id")
+ id: str
name: str
created_at: str
diff --git a/agenta-backend/agenta_backend/models/api/user_models.py b/agenta-backend/agenta_backend/models/api/user_models.py
index a2f5f64f9a..bbecff33df 100644
--- a/agenta-backend/agenta_backend/models/api/user_models.py
+++ b/agenta-backend/agenta_backend/models/api/user_models.py
@@ -4,8 +4,8 @@
class TimestampModel(BaseModel):
- created_at: datetime = Field(datetime.now(timezone.utc))
- updated_at: datetime = Field(datetime.now(timezone.utc))
+ created_at: str = Field(str(datetime.now(timezone.utc)))
+ updated_at: str = Field(str(datetime.now(timezone.utc)))
class User(TimestampModel):
@@ -13,10 +13,10 @@ class User(TimestampModel):
uid: str
username: str
email: str # switch to EmailStr when langchain support pydantic>=2.1
- organizations: Optional[List[str]]
+ organizations: Optional[List[str]] = None
class UserUpdate(BaseModel):
username: Optional[str]
email: Optional[str]
- updated_at: datetime = Field(datetime.now(timezone.utc))
+ updated_at: str = Field(str(datetime.now(timezone.utc)))
From 500fd1d5d03ed833ed5115a7b26abe3e46b03806 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 09:26:21 +0100
Subject: [PATCH 098/268] refactor (backend): fix 500 error in user-profile and
improve get_single_testset router
---
.../agenta_backend/routers/testset_router.py | 42 ++++++++++---------
.../agenta_backend/routers/user_profile.py | 2 +
2 files changed, 25 insertions(+), 19 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 6df0a42a29..f552c71e6b 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -369,26 +369,30 @@ async def get_single_testset(
Returns:
The requested testset if found, else an HTTPException.
"""
- test_set = await db_manager.fetch_testset_by_id(testset_id=testset_id)
- if isCloudEE():
- has_permission = await check_action_access(
- user_uid=request.state.user_id,
- object=test_set,
- permission=Permission.VIEW_TESTSET,
- )
- logger.debug(f"User has Permission to view Testset: {has_permission}")
- if not has_permission:
- error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
- logger.error(error_msg)
- return JSONResponse(
- {"detail": error_msg},
- status_code=403,
- )
- if test_set is None:
- raise HTTPException(status_code=404, detail="testset not found")
-
- return testset_db_to_pydantic(test_set)
+ try:
+ test_set = await db_manager.fetch_testset_by_id(testset_id=testset_id)
+ if isCloudEE():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ object=test_set,
+ permission=Permission.VIEW_TESTSET,
+ )
+ logger.debug(f"User has Permission to view Testset: {has_permission}")
+ if not has_permission:
+ error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
+ logger.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ if test_set is None:
+ raise HTTPException(status_code=404, detail="testset not found")
+ return testset_db_to_pydantic(test_set)
+ except Exception as exc:
+ status_code = exc.status_code if hasattr(exc, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code=status_code, detail=str(exc))
@router.delete("/", response_model=List[str], operation_id="delete_testsets")
diff --git a/agenta-backend/agenta_backend/routers/user_profile.py b/agenta-backend/agenta_backend/routers/user_profile.py
index d0e3332472..c02a6c127a 100644
--- a/agenta-backend/agenta_backend/routers/user_profile.py
+++ b/agenta-backend/agenta_backend/routers/user_profile.py
@@ -18,6 +18,8 @@ async def user_profile(
uid=str(user.uid),
username=str(user.username),
email=str(user.email),
+ created_at=str(user.created_at),
+ updated_at=str(user.updated_at)
).dict(exclude_unset=True)
except Exception as e:
From 8783d32e94f1371881b250dd1c3ed7e4ee485923 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 09:27:30 +0100
Subject: [PATCH 099/268] refactor (backend): improve update functions for
 evaluator_config and app_variant parameters, and improve the logic for
 removing an app variant
---
.../agenta_backend/services/app_manager.py | 11 +++++----
.../agenta_backend/services/db_manager.py | 23 ++++++++++++-------
2 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index fa18b7bd3d..0886827db7 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -222,7 +222,7 @@ async def terminate_and_remove_app_variant(
raise
image = base_db.image
- logger.debug("is_last_variant_for_image {image}")
+ logger.debug(f"is_last_variant_for_image {image}")
if not isinstance(base_db.image_id, uuid.UUID): # type: ignore
logger.debug(
@@ -255,6 +255,9 @@ async def terminate_and_remove_app_variant(
logger.error(f"Failed to remove image {image} {e}")
finally:
await db_manager.remove_image(image)
+
+ # remove app variant
+ await db_manager.remove_app_variant_from_db(app_variant_db)
else:
# remove variant + config
logger.debug("remove_app_variant_from_db")
@@ -263,7 +266,7 @@ async def terminate_and_remove_app_variant(
app_variants = await db_manager.list_app_variants(app_id)
logger.debug(f"Count of app variants available: {len(app_variants)}")
if (
- len(app_variants) <= 1
+ len(app_variants) == 0
): # remove app related resources if the length of the app variants hit 0
logger.debug("remove_app_related_resources")
await remove_app_related_resources(app_id)
@@ -316,7 +319,7 @@ async def remove_app(app: AppDB):
f"Successfully deleted app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name}."
)
- if len(app_variants) <= 1:
+ if len(app_variants) == 0:
logger.debug("remove_app_related_resources")
await remove_app_related_resources(str(app.id))
@@ -324,7 +327,7 @@ async def remove_app(app: AppDB):
# Failsafe: in case something went wrong,
# delete app and its related resources
try:
- if len(app_variants) <= 1:
+ if len(app_variants) == 0:
logger.debug("remove_app_related_resources")
await remove_app_related_resources(str(app.id))
except Exception as e:
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index aa581a7f48..9427d3780c 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1385,7 +1385,7 @@ async def update_app_environment_deployed_variant_revision(
f"App variant revision {deployed_variant_revision} not found"
)
- app_environment.deployed_app_variant_revision = app_variant_revision
+ app_environment.deployed_app_variant_revision_id = app_variant_revision.id
await session.commit()
await session.refresh(app_environment)
@@ -1761,9 +1761,7 @@ async def update_variant_parameters(
raise NoResultFound(f"App variant with id {app_variant_id} not found")
# Update associated ConfigDB parameters
- for key, value in parameters.items():
- if hasattr(app_variant_db.config_parameters, key):
- setattr(app_variant_db.config_parameters, key, value)
+ app_variant_db.config_parameters.update(parameters)
# ...and variant versioning
app_variant_db.revision += 1 # type: ignore
@@ -1771,6 +1769,7 @@ async def update_variant_parameters(
# Save updated ConfigDB
await session.commit()
+ await session.refresh(app_variant_db)
variant_revision = AppVariantRevisionsDB(
variant_id=app_variant_db.id,
@@ -1833,10 +1832,17 @@ async def fetch_testset_by_id(testset_id: str) -> Optional[TestSetDB]:
TestSetDB: The fetched testset, or None if no testset was found.
"""
- assert testset_id is not None, "testset_id cannot be None"
+ if not isinstance(testset_id, str) or not testset_id:
+ raise ValueError(f"testset_id {testset_id} must be a non-empty string")
+
+ try:
+ testset_uuid = uuid.UUID(testset_id)
+ except ValueError as e:
+ raise ValueError(f"testset_id {testset_id} is not a valid UUID") from e
+
async with db_engine.get_session() as session:
result = await session.execute(
- select(TestSetDB).filter_by(id=uuid.UUID(testset_id))
+ select(TestSetDB).filter_by(id=testset_uuid)
)
testset = result.scalars().one_or_none()
return testset
@@ -2964,9 +2970,10 @@ async def update_evaluator_config(
f"Evaluator config with id {evaluator_config_id} not found"
)
+ # Update evaluator config settings values
for key, value in updates.items():
- if hasattr(evaluator_config.settings_values, key):
- setattr(evaluator_config.settings_values, key, value)
+ if hasattr(evaluator_config, key):
+ setattr(evaluator_config, key, value)
await session.commit()
await session.refresh(evaluator_config)
From 69c1919421f1c23c7cfd9ef95d7fb85aded3471b Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 09:28:00 +0100
Subject: [PATCH 100/268] minor refactor (frontend): make use of record.id
instead of record._id
---
agenta-web/src/pages/apps/[app_id]/testsets/index.tsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-web/src/pages/apps/[app_id]/testsets/index.tsx b/agenta-web/src/pages/apps/[app_id]/testsets/index.tsx
index 3faf4680d6..bf588391ec 100644
--- a/agenta-web/src/pages/apps/[app_id]/testsets/index.tsx
+++ b/agenta-web/src/pages/apps/[app_id]/testsets/index.tsx
@@ -154,7 +154,7 @@ export default function Testsets() {
loading={isTestsetsLoading}
onRow={(record) => {
return {
- onClick: () => router.push(`/apps/${appId}/testsets/${record._id}`),
+ onClick: () => router.push(`/apps/${appId}/testsets/${record.id}`),
}
}}
/>
From 019eb1fdf0de06dfe2e5b2a739d916f638ca06d6 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 09:28:41 +0100
Subject: [PATCH 101/268] feat (backend): make use of mutable_json_type for
jsonb mutation tracking
---
.../agenta_backend/models/db_models.py | 46 ++++++++++++-------
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index f2d7d1099f..e7f8579659 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -1,5 +1,4 @@
from datetime import datetime, timezone
-from typing import Any, Dict, List, Optional
import uuid_utils.compat as uuid
from sqlalchemy import (
@@ -12,6 +11,7 @@
Enum,
)
from sqlalchemy.orm import relationship
+from sqlalchemy_json import mutable_json_type
from sqlalchemy.dialects.postgresql import UUID, JSONB
from agenta_backend.models.base import Base
@@ -174,7 +174,9 @@ class AppVariantDB(Base):
base_name = Column(String)
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
config_name = Column(String, nullable=False)
- config_parameters = Column(JSONB, nullable=False, default=dict)
+ config_parameters = Column(
+ mutable_json_type(dbtype=JSONB, nested=True), nullable=False, default=dict
+ )
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -210,7 +212,9 @@ class AppVariantRevisionsDB(Base):
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_id = Column(UUID(as_uuid=True), ForeignKey("bases.id"))
config_name = Column(String, nullable=False)
- config_parameters = Column(JSONB, nullable=False, default=dict)
+ config_parameters = Column(
+ mutable_json_type(dbtype=JSONB, nested=True), nullable=False, default=dict
+ )
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -321,7 +325,7 @@ class TestSetDB(Base):
)
name = Column(String)
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
- csvdata = Column(JSONB)
+ csvdata = Column(mutable_json_type(dbtype=JSONB, nested=True))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
@@ -348,7 +352,7 @@ class EvaluatorConfigDB(Base):
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
name = Column(String)
evaluator_key = Column(String)
- settings_values = Column(JSONB, default=dict)
+ settings_values = Column(mutable_json_type(dbtype=JSONB, nested=True), default=dict)
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -435,8 +439,12 @@ class HumanEvaluationScenarioDB(Base):
evaluation_id = Column(
UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE")
)
- inputs = Column(JSONB) # List of HumanEvaluationScenarioInput
- outputs = Column(JSONB) # List of HumanEvaluationScenarioOutput
+ inputs = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of HumanEvaluationScenarioInput
+ outputs = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of HumanEvaluationScenarioOutput
vote = Column(String)
score = Column(String)
correct_answer = Column(String)
@@ -466,7 +474,7 @@ class EvaluationAggregatedResultDB(Base):
evaluator_config_id = Column(
UUID(as_uuid=True), ForeignKey("evaluators_configs.id", ondelete="SET NULL")
)
- result = Column(JSONB) # Result
+ result = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
evaluator_config = relationship("EvaluatorConfigDB", backref="evaluator_config")
@@ -487,7 +495,7 @@ class EvaluationScenarioResultDB(Base):
evaluator_config_id = Column(
UUID(as_uuid=True), ForeignKey("evaluators_configs.id", ondelete="SET NULL")
)
- result = Column(JSONB) # Result
+ result = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
class EvaluationDB(Base):
@@ -502,7 +510,7 @@ class EvaluationDB(Base):
)
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
- status = Column(JSONB) # Result
+ status = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
testset_id = Column(
UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL")
)
@@ -512,9 +520,9 @@ class EvaluationDB(Base):
variant_revision_id = Column(
UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
)
- average_cost = Column(JSONB) # Result
- total_cost = Column(JSONB) # Result
- average_latency = Column(JSONB) # Result
+ average_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
+ total_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
+ average_latency = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
@@ -580,9 +588,15 @@ class EvaluationScenarioDB(Base):
variant_id = Column(
UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
)
- inputs = Column(JSONB) # List of EvaluationScenarioInput
- outputs = Column(JSONB) # List of EvaluationScenarioOutput
- correct_answers = Column(JSONB) # List of CorrectAnswer
+ inputs = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of EvaluationScenarioInput
+ outputs = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of EvaluationScenarioOutput
+ correct_answers = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of CorrectAnswer
is_pinned = Column(Boolean)
note = Column(String)
latency = Column(Integer)
From 43015b940d65a146f23322020f18eff35885e251 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 09:29:29 +0100
Subject: [PATCH 102/268] chore (backend): format codebase with black@23.12.0
---
agenta-backend/agenta_backend/routers/testset_router.py | 2 +-
agenta-backend/agenta_backend/routers/user_profile.py | 2 +-
agenta-backend/agenta_backend/services/db_manager.py | 4 +---
3 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index f552c71e6b..4e1801a3a5 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -391,7 +391,7 @@ async def get_single_testset(
raise HTTPException(status_code=404, detail="testset not found")
return testset_db_to_pydantic(test_set)
except Exception as exc:
- status_code = exc.status_code if hasattr(exc, "status_code") else 500 # type: ignore
+ status_code = exc.status_code if hasattr(exc, "status_code") else 500 # type: ignore
raise HTTPException(status_code=status_code, detail=str(exc))
diff --git a/agenta-backend/agenta_backend/routers/user_profile.py b/agenta-backend/agenta_backend/routers/user_profile.py
index c02a6c127a..6b787a6fd9 100644
--- a/agenta-backend/agenta_backend/routers/user_profile.py
+++ b/agenta-backend/agenta_backend/routers/user_profile.py
@@ -19,7 +19,7 @@ async def user_profile(
username=str(user.username),
email=str(user.email),
created_at=str(user.created_at),
- updated_at=str(user.updated_at)
+ updated_at=str(user.updated_at),
).dict(exclude_unset=True)
except Exception as e:
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 9427d3780c..6519566956 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1841,9 +1841,7 @@ async def fetch_testset_by_id(testset_id: str) -> Optional[TestSetDB]:
raise ValueError(f"testset_id {testset_id} is not a valid UUID") from e
async with db_engine.get_session() as session:
- result = await session.execute(
- select(TestSetDB).filter_by(id=testset_uuid)
- )
+ result = await session.execute(select(TestSetDB).filter_by(id=testset_uuid))
testset = result.scalars().one_or_none()
return testset
From 5f39d213a8997190580eac2b3fdd366651051aed Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 09:44:30 +0100
Subject: [PATCH 103/268] minor refactor (frontend): change testset interface
from _id to id
---
agenta-web/src/lib/Types.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-web/src/lib/Types.ts b/agenta-web/src/lib/Types.ts
index c742244029..418e8ba962 100644
--- a/agenta-web/src/lib/Types.ts
+++ b/agenta-web/src/lib/Types.ts
@@ -5,7 +5,7 @@ import {GlobalToken} from "antd"
export type JSSTheme = GlobalToken & {isDark: boolean}
export interface testset {
- _id: string
+ id: string
name: string
created_at: string
}
From 81c48d47d9c3dbb0eddc00a7a02f9180a8728983 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 09:59:12 +0100
Subject: [PATCH 104/268] refactor (backend & frontend): revert record.id back
 to record._id
---
agenta-backend/agenta_backend/models/api/testset_model.py | 2 +-
agenta-backend/agenta_backend/routers/testset_router.py | 2 +-
agenta-web/src/lib/Types.ts | 2 +-
agenta-web/src/pages/apps/[app_id]/testsets/index.tsx | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/testset_model.py b/agenta-backend/agenta_backend/models/api/testset_model.py
index 46dfc965e5..48621ddd1f 100644
--- a/agenta-backend/agenta_backend/models/api/testset_model.py
+++ b/agenta-backend/agenta_backend/models/api/testset_model.py
@@ -45,7 +45,7 @@ class NewTestset(BaseModel):
class TestSetOutputResponse(BaseModel):
- id: str
+ id: str = Field(..., alias="_id")
name: str
created_at: str
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 4e1801a3a5..1139b9ccb0 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -347,7 +347,7 @@ async def get_testsets(
testsets = await db_manager.fetch_testsets_by_app_id(app_id=app_id)
return [
TestSetOutputResponse(
- id=str(testset.id), # type: ignore
+ _id=str(testset.id), # type: ignore
name=testset.name,
created_at=str(testset.created_at),
)
diff --git a/agenta-web/src/lib/Types.ts b/agenta-web/src/lib/Types.ts
index 418e8ba962..c742244029 100644
--- a/agenta-web/src/lib/Types.ts
+++ b/agenta-web/src/lib/Types.ts
@@ -5,7 +5,7 @@ import {GlobalToken} from "antd"
export type JSSTheme = GlobalToken & {isDark: boolean}
export interface testset {
- id: string
+ _id: string
name: string
created_at: string
}
diff --git a/agenta-web/src/pages/apps/[app_id]/testsets/index.tsx b/agenta-web/src/pages/apps/[app_id]/testsets/index.tsx
index bf588391ec..3faf4680d6 100644
--- a/agenta-web/src/pages/apps/[app_id]/testsets/index.tsx
+++ b/agenta-web/src/pages/apps/[app_id]/testsets/index.tsx
@@ -154,7 +154,7 @@ export default function Testsets() {
loading={isTestsetsLoading}
onRow={(record) => {
return {
- onClick: () => router.push(`/apps/${appId}/testsets/${record.id}`),
+ onClick: () => router.push(`/apps/${appId}/testsets/${record._id}`),
}
}}
/>
From 07d030653a59df4b3884cd8ec89eb2e04d6cdcbd Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 12:26:36 +0100
Subject: [PATCH 105/268] refactor (backend): resolve OSS migration bugs
---
.../models/api/evaluation_model.py | 76 +++++++++----------
.../agenta_backend/models/converters.py | 9 ++-
.../agenta_backend/models/db_models.py | 5 +-
.../routers/human_evaluation_router.py | 4 +-
.../services/evaluation_service.py | 19 ++---
5 files changed, 60 insertions(+), 53 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/evaluation_model.py b/agenta-backend/agenta_backend/models/api/evaluation_model.py
index fac3fcb64b..8b77616aa4 100644
--- a/agenta-backend/agenta_backend/models/api/evaluation_model.py
+++ b/agenta-backend/agenta_backend/models/api/evaluation_model.py
@@ -10,14 +10,14 @@ class Evaluator(BaseModel):
key: str
direct_use: bool
settings_template: dict
- description: Optional[str]
+ description: Optional[str] = None
class EvaluatorConfig(BaseModel):
id: str
name: str
evaluator_key: str
- settings_values: Optional[Dict[str, Any]]
+ settings_values: Optional[Dict[str, Any]] = None
created_at: str
updated_at: str
@@ -67,13 +67,13 @@ class Evaluation(BaseModel):
variant_names: List[str]
variant_revision_ids: List[str]
revisions: List[str]
- testset_id: Optional[str]
- testset_name: Optional[str]
+ testset_id: Optional[str] = None
+ testset_name: Optional[str] = None
status: Result
aggregated_results: List[AggregatedResult]
- average_cost: Optional[Result]
- total_cost: Optional[Result]
- average_latency: Optional[Result]
+ average_cost: Optional[Result] = None
+ total_cost: Optional[Result] = None
+ average_latency: Optional[Result] = None
created_at: datetime
updated_at: datetime
@@ -87,7 +87,7 @@ class SimpleEvaluationOutput(BaseModel):
class HumanEvaluationUpdate(BaseModel):
- status: Optional[EvaluationStatusEnum]
+ status: Optional[EvaluationStatusEnum] = None
class EvaluationScenarioResult(BaseModel):
@@ -103,8 +103,8 @@ class EvaluationScenarioInput(BaseModel):
class EvaluationScenarioOutput(BaseModel):
result: Result
- cost: Optional[float]
- latency: Optional[float]
+ cost: Optional[float] = None
+ latency: Optional[float] = None
class HumanEvaluationScenarioInput(BaseModel):
@@ -135,26 +135,26 @@ class HumanEvaluation(BaseModel):
class HumanEvaluationScenario(BaseModel):
- id: Optional[str]
+ id: Optional[str] = None
evaluation_id: str
inputs: List[HumanEvaluationScenarioInput]
outputs: List[HumanEvaluationScenarioOutput]
- vote: Optional[str]
- score: Optional[Union[str, int]]
- correct_answer: Optional[str]
- is_pinned: Optional[bool]
- note: Optional[str]
+ vote: Optional[str] = None
+ score: Optional[Union[str, int]] = None
+ correct_answer: Optional[str] = None
+ is_pinned: Optional[bool] = None
+ note: Optional[str] = None
class HumanEvaluationScenarioUpdate(BaseModel):
- vote: Optional[str]
- score: Optional[Union[str, int]]
+ vote: Optional[str] = None
+ score: Optional[Union[str, int]] = None
# will be used when running custom code evaluation
- correct_answer: Optional[str]
- outputs: Optional[List[HumanEvaluationScenarioOutput]]
- inputs: Optional[List[HumanEvaluationScenarioInput]]
- is_pinned: Optional[bool]
- note: Optional[str]
+ correct_answer: Optional[str] = None
+ outputs: Optional[List[HumanEvaluationScenarioOutput]] = None
+ inputs: Optional[List[HumanEvaluationScenarioInput]] = None
+ is_pinned: Optional[bool] = None
+ note: Optional[str] = None
class CorrectAnswer(BaseModel):
@@ -163,25 +163,25 @@ class CorrectAnswer(BaseModel):
class EvaluationScenario(BaseModel):
- id: Optional[str]
+ id: Optional[str] = None
evaluation_id: str
inputs: List[EvaluationScenarioInput]
outputs: List[EvaluationScenarioOutput]
- correct_answers: Optional[List[CorrectAnswer]]
- is_pinned: Optional[bool]
- note: Optional[str]
+ correct_answers: Optional[List[CorrectAnswer]] = None
+ is_pinned: Optional[bool] = None
+ note: Optional[str] = None
results: List[EvaluationScenarioResult]
class EvaluationScenarioUpdate(BaseModel):
- vote: Optional[str]
- score: Optional[Any]
+ vote: Optional[str] = None
+ score: Optional[Any] = None
# will be used when running custom code evaluation
- correct_answer: Optional[str]
- outputs: Optional[List[EvaluationScenarioOutput]]
- inputs: Optional[List[EvaluationScenarioInput]]
- is_pinned: Optional[bool]
- note: Optional[str]
+ correct_answer: Optional[str] = None
+ outputs: Optional[List[EvaluationScenarioOutput]] = None
+ inputs: Optional[List[EvaluationScenarioInput]] = None
+ is_pinned: Optional[bool] = None
+ note: Optional[str] = None
class EvaluationScenarioScoreUpdate(BaseModel):
@@ -259,8 +259,8 @@ class NewEvaluation(BaseModel):
evaluators_configs: List[str]
testset_id: str
rate_limit: LLMRunRateLimit
- lm_providers_keys: Optional[Dict[LMProvidersEnum, str]]
- correct_answer_column: Optional[str]
+ lm_providers_keys: Optional[Dict[LMProvidersEnum, str]] = None
+ correct_answer_column: Optional[str] = None
class NewEvaluatorConfig(BaseModel):
@@ -271,6 +271,6 @@ class NewEvaluatorConfig(BaseModel):
class UpdateEvaluatorConfig(BaseModel):
- name: Optional[str]
- evaluator_key: Optional[str]
+ name: Optional[str] = None
+ evaluator_key: Optional[str] = None
settings_values: Optional[dict]
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index c58134c627..46a985e5ed 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -215,7 +215,14 @@ async def aggregated_result_of_evaluation_to_pydantic(evaluation_id: str) -> Lis
)
for aggregated_result in aggregated_results:
evaluator_config_dict = (
- aggregated_result.evaluator_config.__dict__
+ {
+ "id": str(aggregated_result.evaluator_config.id),
+ "name": aggregated_result.evaluator_config.name,
+ "evaluator_key": aggregated_result.evaluator_config.evaluator_key,
+ "settings_values": aggregated_result.evaluator_config.settings_values,
+ "created_at": str(aggregated_result.evaluator_config.created_at),
+ "updated_at": str(aggregated_result.evaluator_config.updated_at),
+ }
if isinstance(aggregated_result.evaluator_config_id, uuid.UUID)
else None
)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index e7f8579659..5b80e46165 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -85,7 +85,9 @@ class AppDB(Base):
)
user = relationship("UserDB")
- variant = relationship("AppVariantDB", cascade="all, delete-orphan", backref="app")
+ variant = relationship(
+ "AppVariantDB", cascade="all, delete-orphan", back_populates="app"
+ )
testset = relationship("TestSetDB", cascade="all, delete-orphan", backref="app")
base = relationship("DeploymentDB", cascade="all, delete-orphan", backref="app")
deployment = relationship(
@@ -185,6 +187,7 @@ class AppVariantDB(Base):
)
image = relationship("ImageDB")
+ app = relationship("AppDB", back_populates="variant")
user = relationship("UserDB", foreign_keys=[user_id])
modified_by = relationship("UserDB", foreign_keys=[modified_by_id])
base = relationship("VariantBaseDB")
diff --git a/agenta-backend/agenta_backend/routers/human_evaluation_router.py b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
index 63abfada95..9dcb16c940 100644
--- a/agenta-backend/agenta_backend/routers/human_evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
@@ -263,7 +263,7 @@ async def update_evaluation_scenario_router(
evaluation_id: str,
evaluation_scenario_id: str,
evaluation_type: EvaluationType,
- evaluation_scenario: HumanEvaluationScenarioUpdate,
+ payload: HumanEvaluationScenarioUpdate,
request: Request,
):
"""Updates an evaluation scenario's vote or score based on its type.
@@ -299,7 +299,7 @@ async def update_evaluation_scenario_router(
await update_human_evaluation_scenario(
evaluation_scenario_db,
- evaluation_scenario,
+ payload,
evaluation_type,
)
return Response(status_code=status.HTTP_204_NO_CONTENT)
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index 750c9e257e..cf45a4761f 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -216,21 +216,18 @@ async def update_human_evaluation_scenario(
"""
values_to_update = {}
- payload = evaluation_scenario_data.dict()
+ payload = evaluation_scenario_data.dict(exclude_unset=True)
if (
- payload["score"] is not None
+ hasattr(payload, "score")
and evaluation_type == EvaluationType.single_model_test
):
values_to_update["score"] = payload["score"]
- if (
- payload["vote"] is not None
- and evaluation_type == EvaluationType.human_a_b_testing
- ):
+ if hasattr(payload, "vote") and evaluation_type == EvaluationType.human_a_b_testing:
values_to_update["vote"] = payload["vote"]
- if payload["outputs"] is not None:
+ if hasattr(payload, "outputs"):
new_outputs = [
HumanEvaluationScenarioOutput(
variant_id=output["variant_id"],
@@ -240,7 +237,7 @@ async def update_human_evaluation_scenario(
]
values_to_update["outputs"] = new_outputs
- if payload["inputs"] is not None:
+ if hasattr(payload, "inputs"):
new_inputs = [
HumanEvaluationScenarioInput(
input_name=input_item["input_name"],
@@ -250,13 +247,13 @@ async def update_human_evaluation_scenario(
]
values_to_update["inputs"] = new_inputs
- if payload["is_pinned"] is not None:
+ if hasattr(payload, "is_pinned"):
values_to_update["is_pinned"] = payload["is_pinned"]
- if payload["note"] is not None:
+ if hasattr(payload, "note"):
values_to_update["note"] = payload["note"]
- if payload["correct_answer"] is not None:
+ if hasattr(payload, "correct_answer"):
values_to_update["correct_answer"] = payload["correct_answer"]
await db_manager.update_human_evaluation_scenario(
From 96b8ebe0a4623fbf84f0f57be6f2d0459dbcbf94 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 13:35:28 +0100
Subject: [PATCH 106/268] minor refactor (backend): simplify
update_human_evaluation_scenario logic
---
.../routers/human_evaluation_router.py | 3 +--
.../services/evaluation_service.py | 19 ++++++++-----------
2 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/human_evaluation_router.py b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
index 9dcb16c940..f71f7616f9 100644
--- a/agenta-backend/agenta_backend/routers/human_evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
@@ -1,12 +1,11 @@
from typing import List, Dict
-from fastapi.responses import JSONResponse
-from agenta_backend.utils.common import APIRouter, isCloudEE
from fastapi import HTTPException, Body, Request, status, Response
from agenta_backend.models import converters
from agenta_backend.services import db_manager
from agenta_backend.services import results_service
from agenta_backend.services import evaluation_service
+from agenta_backend.utils.common import APIRouter, isCloudEE
from agenta_backend.models.api.evaluation_model import (
DeleteEvaluation,
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index cf45a4761f..aaff1c8927 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -218,16 +218,13 @@ async def update_human_evaluation_scenario(
values_to_update = {}
payload = evaluation_scenario_data.dict(exclude_unset=True)
- if (
- hasattr(payload, "score")
- and evaluation_type == EvaluationType.single_model_test
- ):
- values_to_update["score"] = payload["score"]
+ if "score" in payload and evaluation_type == EvaluationType.single_model_test:
+ values_to_update["score"] = str(payload["score"])
- if hasattr(payload, "vote") and evaluation_type == EvaluationType.human_a_b_testing:
+ if "vote" in payload and evaluation_type == EvaluationType.human_a_b_testing:
values_to_update["vote"] = payload["vote"]
- if hasattr(payload, "outputs"):
+ if "outputs" in payload:
new_outputs = [
HumanEvaluationScenarioOutput(
variant_id=output["variant_id"],
@@ -237,7 +234,7 @@ async def update_human_evaluation_scenario(
]
values_to_update["outputs"] = new_outputs
- if hasattr(payload, "inputs"):
+ if "inputs" in payload:
new_inputs = [
HumanEvaluationScenarioInput(
input_name=input_item["input_name"],
@@ -247,13 +244,13 @@ async def update_human_evaluation_scenario(
]
values_to_update["inputs"] = new_inputs
- if hasattr(payload, "is_pinned"):
+ if "is_pinned" in payload:
values_to_update["is_pinned"] = payload["is_pinned"]
- if hasattr(payload, "note"):
+ if "note" in payload:
values_to_update["note"] = payload["note"]
- if hasattr(payload, "correct_answer"):
+ if "correct_answer" in payload:
values_to_update["correct_answer"] = payload["correct_answer"]
await db_manager.update_human_evaluation_scenario(
From 517dd496853e6a42dd09e6b5b60e9c63cbd139a6 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 16:38:47 +0100
Subject: [PATCH 107/268] refactor (cli): fix failing backend endpoint for
variant serve
---
.../agenta_backend/models/db_models.py | 15 +++++++-------
.../agenta_backend/routers/variants_router.py | 9 +++++++++
.../agenta_backend/services/app_manager.py | 20 ++++++++++---------
.../services/container_manager.py | 4 +++-
.../agenta_backend/services/db_manager.py | 5 ++++-
5 files changed, 35 insertions(+), 18 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 5b80e46165..4443c067e9 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -89,9 +89,9 @@ class AppDB(Base):
"AppVariantDB", cascade="all, delete-orphan", back_populates="app"
)
testset = relationship("TestSetDB", cascade="all, delete-orphan", backref="app")
- base = relationship("DeploymentDB", cascade="all, delete-orphan", backref="app")
- deployment = relationship(
- "VariantBaseDB", cascade="all, delete-orphan", backref="app"
+ deployment = relationship("DeploymentDB", cascade="all, delete-orphan", back_populates="app")
+ base = relationship(
+ "VariantBaseDB", cascade="all, delete-orphan", back_populates="app"
)
evaluation = relationship(
"EvaluationDB", cascade="all, delete-orphan", backref="app"
@@ -111,7 +111,7 @@ class DeploymentDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
container_name = Column(String)
container_id = Column(String)
@@ -125,6 +125,7 @@ class DeploymentDB(Base):
)
user = relationship("UserDB")
+ app = relationship("AppDB", back_populates="deployment")
class VariantBaseDB(Base):
@@ -137,10 +138,10 @@ class VariantBaseDB(Base):
unique=True,
nullable=False,
)
- app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id"))
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_name = Column(String)
- image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
+ image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id", ondelete="SET NULL"))
deployment_id = Column(
UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL")
)
@@ -151,10 +152,10 @@ class VariantBaseDB(Base):
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
- # app = relationship("AppDB", back_populates="base")
user = relationship("UserDB")
image = relationship("ImageDB")
deployment = relationship("DeploymentDB")
+ app = relationship("AppDB", back_populates="base")
class AppVariantDB(Base):
diff --git a/agenta-backend/agenta_backend/routers/variants_router.py b/agenta-backend/agenta_backend/routers/variants_router.py
index 77dc7971d9..f087ffed5c 100644
--- a/agenta-backend/agenta_backend/routers/variants_router.py
+++ b/agenta-backend/agenta_backend/routers/variants_router.py
@@ -254,12 +254,21 @@ async def update_variant_image(
db_app_variant, image, request.state.user_id
)
except ValueError as e:
+ import traceback
+
+ traceback.print_exc()
detail = f"Error while trying to update the app variant: {str(e)}"
raise HTTPException(status_code=500, detail=detail)
except DockerException as e:
+ import traceback
+
+ traceback.print_exc()
detail = f"Docker error while trying to update the app variant: {str(e)}"
raise HTTPException(status_code=500, detail=detail)
except Exception as e:
+ import traceback
+
+ traceback.print_exc()
detail = f"Unexpected error while trying to update the app variant: {str(e)}"
raise HTTPException(status_code=500, detail=detail)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index 17bdde8c26..bea5584829 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -14,13 +14,11 @@
)
from agenta_backend.models.db_models import (
AppVariantDB,
- AppEnvironmentDB,
AppDB,
)
from agenta_backend.services import (
db_manager,
- evaluator_manager,
)
from agenta_backend.utils.common import (
@@ -74,6 +72,7 @@ async def start_variant(
ValueError: If the app variant does not have a corresponding image in the database.
RuntimeError: If there is an error starting the Docker container.
"""
+
try:
logger.debug(
"Starting variant %s with image name %s and tags %s and app_name %s and organization %s and workspace %s",
@@ -109,6 +108,7 @@ async def start_variant(
hidden=True,
)
env_vars.update({"AGENTA_API_KEY": api_key})
+
deployment = await deployment_manager.start_service(
app_variant_db=db_app_variant, env_vars=env_vars
)
@@ -144,7 +144,9 @@ async def update_variant_image(
valid_image = await deployment_manager.validate_image(image)
if not valid_image:
raise ValueError("Image could not be found in registry.")
- deployment = await db_manager.get_deployment_by_id(app_variant_db.base.deployment)
+
+ base = await db_manager.fetch_base_by_id(str(app_variant_db.base_id))
+ deployment = await db_manager.get_deployment_by_id(str(base.deployment_id))
await deployment_manager.stop_and_delete_service(deployment)
await db_manager.remove_deployment(str(deployment.id))
@@ -153,6 +155,7 @@ async def update_variant_image(
await deployment_manager.remove_image(app_variant_db.base.image)
await db_manager.remove_image(app_variant_db.base.image)
+
# Create a new image instance
db_image = await db_manager.create_image(
image_type="image",
@@ -167,7 +170,7 @@ async def update_variant_image(
await db_manager.update_base(str(app_variant_db.base_id), image=db_image)
# Update variant to remove configuration
await db_manager.update_variant_parameters(
- app_variant_db=app_variant_db, parameters={}, user_uid=user_uid
+ str(app_variant_db.id), parameters={}, user_uid=user_uid
)
# Update variant with new image
app_variant_db = await db_manager.update_app_variant(app_variant_db, image=db_image)
@@ -318,7 +321,7 @@ async def remove_app(app: AppDB):
logger.info(
f"Successfully deleted app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name}."
)
-
+ print("LEN: ", len(app_variants))
if len(app_variants) == 0:
logger.debug("remove_app_related_resources")
await remove_app_related_resources(str(app.id))
@@ -327,14 +330,13 @@ async def remove_app(app: AppDB):
# Failsafe: in case something went wrong,
# delete app and its related resources
try:
- if len(app_variants) == 0:
- logger.debug("remove_app_related_resources")
- await remove_app_related_resources(str(app.id))
+ logger.debug("remove_app_related_resources")
+ await remove_app_related_resources(str(app.id))
except Exception as e:
logger.error(
f"An error occurred while deleting app {app.id} and its associated resources: {str(e)}"
)
- raise e from None
+ raise e
async def update_variant_parameters(
diff --git a/agenta-backend/agenta_backend/services/container_manager.py b/agenta-backend/agenta_backend/services/container_manager.py
index aa3412d5f4..7575c1efa3 100644
--- a/agenta-backend/agenta_backend/services/container_manager.py
+++ b/agenta-backend/agenta_backend/services/container_manager.py
@@ -30,7 +30,7 @@
async def build_image(app_db: AppDB, base_name: str, tar_file: UploadFile) -> Image:
app_name = app_db.app_name
- user_id = app_db.user.id
+ user_id = str(app_db.user_id)
image_name = f"agentaai/{app_name.lower()}_{base_name.lower()}:latest"
# Get event loop
@@ -104,6 +104,7 @@ def build_image_job(
dockerfile = "Dockerfile.cloud"
else:
dockerfile = "Dockerfile"
+
image, build_log = client.images.build(
path=str(temp_dir),
tag=image_name,
@@ -114,6 +115,7 @@ def build_image_job(
)
for line in build_log:
logger.info(line)
+
pydantic_image = Image(
type="image",
docker_id=image.id,
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 6519566956..874fb8d8a5 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -203,7 +203,9 @@ async def fetch_app_variant_by_id(
select(AppVariantDB)
.options(
joinedload(AppVariantDB.base),
+ joinedload(AppVariantDB.user).load_only(UserDB.uid), # type: ignore
joinedload(AppVariantDB.app),
+ joinedload(AppVariantDB.image).load_only(ImageDB.docker_id, ImageDB.tags) # type: ignore
)
.filter_by(id=uuid.UUID(app_variant_id))
)
@@ -2555,7 +2557,8 @@ async def update_app_variant(
setattr(app_variant, key, value)
await session.commit()
- await session.refresh(app_variant)
+ await session.refresh(app_variant, attribute_names=["user", "app", "image", "base"])
+
return app_variant
From 70269ea671cc8fa11a93eac7f8ad7b8cc683b188 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 19 Jun 2024 22:36:06 +0100
Subject: [PATCH 108/268] refactor (backend): fix logic for updating variant
image
---
.../agenta_backend/models/db_models.py | 8 ++++++--
.../agenta_backend/services/app_manager.py | 10 ++++++----
.../agenta_backend/services/db_manager.py | 19 ++++++++++++++-----
3 files changed, 26 insertions(+), 11 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 4443c067e9..17f0f6d1e2 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -89,7 +89,9 @@ class AppDB(Base):
"AppVariantDB", cascade="all, delete-orphan", back_populates="app"
)
testset = relationship("TestSetDB", cascade="all, delete-orphan", backref="app")
- deployment = relationship("DeploymentDB", cascade="all, delete-orphan", back_populates="app")
+ deployment = relationship(
+ "DeploymentDB", cascade="all, delete-orphan", back_populates="app"
+ )
base = relationship(
"VariantBaseDB", cascade="all, delete-orphan", back_populates="app"
)
@@ -141,7 +143,9 @@ class VariantBaseDB(Base):
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_name = Column(String)
- image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id", ondelete="SET NULL"))
+ image_id = Column(
+ UUID(as_uuid=True), ForeignKey("docker_images.id", ondelete="SET NULL")
+ )
deployment_id = Column(
UUID(as_uuid=True), ForeignKey("deployments.id", ondelete="SET NULL")
)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index bea5584829..12a0c83927 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -152,9 +152,9 @@ async def update_variant_image(
await db_manager.remove_deployment(str(deployment.id))
if isOssEE():
- await deployment_manager.remove_image(app_variant_db.base.image)
+ await deployment_manager.remove_image(base.image)
- await db_manager.remove_image(app_variant_db.base.image)
+ await db_manager.remove_image(base.image)
# Create a new image instance
db_image = await db_manager.create_image(
@@ -173,7 +173,9 @@ async def update_variant_image(
str(app_variant_db.id), parameters={}, user_uid=user_uid
)
# Update variant with new image
- app_variant_db = await db_manager.update_app_variant(app_variant_db, image=db_image)
+ app_variant_db = await db_manager.update_app_variant(
+ app_variant_id=str(app_variant_db.id), image_id=db_image.id
+ )
# Start variant
await start_variant(app_variant_db)
@@ -321,7 +323,7 @@ async def remove_app(app: AppDB):
logger.info(
f"Successfully deleted app variant {app_variant_db.app.app_name}/{app_variant_db.variant_name}."
)
- print("LEN: ", len(app_variants))
+
if len(app_variants) == 0:
logger.debug("remove_app_related_resources")
await remove_app_related_resources(str(app.id))
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 874fb8d8a5..004a74f914 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -203,9 +203,9 @@ async def fetch_app_variant_by_id(
select(AppVariantDB)
.options(
joinedload(AppVariantDB.base),
- joinedload(AppVariantDB.user).load_only(UserDB.uid), # type: ignore
+ joinedload(AppVariantDB.user).load_only(UserDB.uid), # type: ignore
joinedload(AppVariantDB.app),
- joinedload(AppVariantDB.image).load_only(ImageDB.docker_id, ImageDB.tags) # type: ignore
+ joinedload(AppVariantDB.image).load_only(ImageDB.docker_id, ImageDB.tags), # type: ignore
)
.filter_by(id=uuid.UUID(app_variant_id))
)
@@ -2542,22 +2542,31 @@ async def remove_base(base_db: VariantBaseDB):
async def update_app_variant(
- app_variant: AppVariantDB,
+ app_variant_id: str,
**kwargs: dict,
) -> AppVariantDB:
"""Update the app variant object in the database with the provided id.
Arguments:
- app_variant (AppVariantDB): The app variant object to update.
+ app_variant_id (str): The ID of the app variant to update.
"""
async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppVariantDB).filter_by(id=uuid.UUID(app_variant_id))
+ )
+ app_variant = result.scalars().one_or_none()
+ if not app_variant:
+ raise NoResultFound(f"App variant with id {app_variant_id} not found")
+
for key, value in kwargs.items():
if hasattr(app_variant, key):
setattr(app_variant, key, value)
await session.commit()
- await session.refresh(app_variant, attribute_names=["user", "app", "image", "base"])
+ await session.refresh(
+ app_variant, attribute_names=["user", "app", "image", "base"]
+ )
return app_variant
From 667eb1c955643772af47db4b535f22a50f59c6dd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 20 Jun 2024 03:42:16 +0000
Subject: [PATCH 109/268] build(deps-dev): bump setuptools from 70.0.0 to
70.1.0 in /agenta-cli
Bumps [setuptools](https://github.com/pypa/setuptools) from 70.0.0 to 70.1.0.
- [Release notes](https://github.com/pypa/setuptools/releases)
- [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst)
- [Commits](https://github.com/pypa/setuptools/compare/v70.0.0...v70.1.0)
---
updated-dependencies:
- dependency-name: setuptools
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
agenta-cli/poetry.lock | 95 ++++++---------------------------------
agenta-cli/pyproject.toml | 2 +-
2 files changed, 15 insertions(+), 82 deletions(-)
diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock
index 599be327fb..ece7ee7cc7 100644
--- a/agenta-cli/poetry.lock
+++ b/agenta-cli/poetry.lock
@@ -15,7 +15,6 @@ files = [
name = "anyio"
version = "4.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -38,7 +37,6 @@ trio = ["trio (>=0.23)"]
name = "asttokens"
version = "2.4.1"
description = "Annotate AST trees with source code positions"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -57,7 +55,6 @@ test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"]
name = "backoff"
version = "2.2.1"
description = "Function decoration for backoff and retry"
-category = "main"
optional = false
python-versions = ">=3.7,<4.0"
files = [
@@ -69,7 +66,6 @@ files = [
name = "cachetools"
version = "5.3.3"
description = "Extensible memoizing collections and decorators"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -81,7 +77,6 @@ files = [
name = "certifi"
version = "2024.2.2"
description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -93,7 +88,6 @@ files = [
name = "charset-normalizer"
version = "3.3.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -193,7 +187,6 @@ files = [
name = "click"
version = "8.1.7"
description = "Composable command line interface toolkit"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -208,7 +201,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""}
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
@@ -220,7 +212,6 @@ files = [
name = "decorator"
version = "5.1.1"
description = "Decorators for Humans"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -232,7 +223,6 @@ files = [
name = "dnspython"
version = "2.6.1"
description = "DNS toolkit"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -253,7 +243,6 @@ wmi = ["wmi (>=1.5.1)"]
name = "docker"
version = "7.1.0"
description = "A Python library for the Docker Engine API."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -276,7 +265,6 @@ websockets = ["websocket-client (>=1.3.0)"]
name = "email-validator"
version = "2.1.1"
description = "A robust email address syntax and deliverability validation library."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -292,7 +280,6 @@ idna = ">=2.0.0"
name = "exceptiongroup"
version = "1.2.1"
description = "Backport of PEP 654 (exception groups)"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -307,7 +294,6 @@ test = ["pytest (>=6)"]
name = "executing"
version = "2.0.1"
description = "Get the currently executing AST node of a frame, and other information"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -322,7 +308,6 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth
name = "fastapi"
version = "0.111.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -350,7 +335,6 @@ all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)"
name = "fastapi-cli"
version = "0.0.4"
description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 🚀"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -368,7 +352,6 @@ standard = ["fastapi", "uvicorn[standard] (>=0.15.0)"]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -380,7 +363,6 @@ files = [
name = "httpcore"
version = "1.0.5"
description = "A minimal low-level HTTP client."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -395,14 +377,13 @@ h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (>=1.0.0,<2.0.0)"]
+socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.26.0)"]
[[package]]
name = "httptools"
version = "0.6.1"
description = "A collection of framework independent HTTP protocol utils."
-category = "main"
optional = false
python-versions = ">=3.8.0"
files = [
@@ -451,7 +432,6 @@ test = ["Cython (>=0.29.24,<0.30.0)"]
name = "httpx"
version = "0.27.0"
description = "The next generation HTTP client."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -462,21 +442,20 @@ files = [
[package.dependencies]
anyio = "*"
certifi = "*"
-httpcore = ">=1.0.0,<2.0.0"
+httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
-cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (>=1.0.0,<2.0.0)"]
+socks = ["socksio (==1.*)"]
[[package]]
name = "idna"
version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -488,7 +467,6 @@ files = [
name = "importlib-metadata"
version = "7.1.0"
description = "Read metadata from Python packages"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -508,7 +486,6 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)",
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -520,7 +497,6 @@ files = [
name = "ipdb"
version = "0.13.13"
description = "IPython-enabled pdb"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
@@ -537,7 +513,6 @@ tomli = {version = "*", markers = "python_version > \"3.6\" and python_version <
name = "ipython"
version = "8.18.0"
description = "IPython: Productive Interactive Computing"
-category = "main"
optional = false
python-versions = ">=3.9"
files = [
@@ -575,7 +550,6 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pa
name = "jedi"
version = "0.19.1"
description = "An autocompletion tool for Python that can be used for text editors."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -595,7 +569,6 @@ testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
name = "jinja2"
version = "3.1.4"
description = "A very fast and expressive template engine."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -613,7 +586,6 @@ i18n = ["Babel (>=2.7)"]
name = "markdown-it-py"
version = "3.0.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -638,7 +610,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
name = "markupsafe"
version = "2.1.5"
description = "Safely add untrusted strings to HTML/XML markup."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -708,7 +679,6 @@ files = [
name = "matplotlib-inline"
version = "0.1.7"
description = "Inline Matplotlib backend for Jupyter"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -723,7 +693,6 @@ traitlets = "*"
name = "mdurl"
version = "0.1.2"
description = "Markdown URL utilities"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -735,7 +704,6 @@ files = [
name = "monotonic"
version = "1.6"
description = "An implementation of time.monotonic() for Python 2 & < 3.3"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -747,7 +715,6 @@ files = [
name = "orjson"
version = "3.10.3"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -803,7 +770,6 @@ files = [
name = "packaging"
version = "24.0"
description = "Core utilities for Python packages"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -815,7 +781,6 @@ files = [
name = "parso"
version = "0.8.4"
description = "A Python Parser"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -831,7 +796,6 @@ testing = ["docopt", "pytest"]
name = "pexpect"
version = "4.9.0"
description = "Pexpect allows easy control of interactive console applications."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -846,7 +810,6 @@ ptyprocess = ">=0.5"
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -862,7 +825,6 @@ testing = ["pytest", "pytest-benchmark"]
name = "posthog"
version = "3.5.0"
description = "Integrate PostHog into any python application."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -886,7 +848,6 @@ test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint"
name = "prompt-toolkit"
version = "3.0.36"
description = "Library for building powerful interactive command lines in Python"
-category = "main"
optional = false
python-versions = ">=3.6.2"
files = [
@@ -901,7 +862,6 @@ wcwidth = "*"
name = "ptyprocess"
version = "0.7.0"
description = "Run a subprocess in a pseudo terminal"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -913,7 +873,6 @@ files = [
name = "pure-eval"
version = "0.2.2"
description = "Safely evaluate AST nodes without side effects"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1038,7 +997,6 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
name = "pygments"
version = "2.18.0"
description = "Pygments is a syntax highlighting package written in Python."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1053,7 +1011,6 @@ windows-terminal = ["colorama (>=0.4.6)"]
name = "pymongo"
version = "4.7.3"
description = "Python driver for MongoDB "
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1135,7 +1092,6 @@ zstd = ["zstandard"]
name = "pytest"
version = "8.2.2"
description = "pytest: simple powerful testing with Python"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -1158,7 +1114,6 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
@@ -1173,7 +1128,6 @@ six = ">=1.5"
name = "python-dotenv"
version = "1.0.1"
description = "Read key-value pairs from a .env file and set them as environment variables"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1188,7 +1142,6 @@ cli = ["click (>=5.0)"]
name = "python-multipart"
version = "0.0.9"
description = "A streaming multipart parser for Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1203,7 +1156,6 @@ dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatc
name = "pywin32"
version = "306"
description = "Python for Window Extensions"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1227,7 +1179,6 @@ files = [
name = "pyyaml"
version = "6.0.1"
description = "YAML parser and emitter for Python"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -1236,6 +1187,7 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -1243,6 +1195,7 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
@@ -1268,6 +1221,7 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -1275,6 +1229,7 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@@ -1284,7 +1239,6 @@ files = [
name = "questionary"
version = "2.0.1"
description = "Python library to build pretty command line user prompts ⭐️"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1299,7 +1253,6 @@ prompt_toolkit = ">=2.0,<=3.0.36"
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1321,7 +1274,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
name = "rich"
version = "13.7.1"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
-category = "main"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -1338,25 +1290,23 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
[[package]]
name = "setuptools"
-version = "70.0.0"
+version = "70.1.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
- {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"},
- {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"},
+ {file = "setuptools-70.1.0-py3-none-any.whl", hash = "sha256:d9b8b771455a97c8a9f3ab3448ebe0b29b5e105f1228bba41028be116985a267"},
+ {file = "setuptools-70.1.0.tar.gz", hash = "sha256:01a1e793faa5bd89abc851fa15d0a0db26f160890c7102cd8dce643e886b47f5"},
]
[package.extras]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
[[package]]
name = "shellingham"
version = "1.5.4"
description = "Tool to Detect Surrounding Shell"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1368,7 +1318,6 @@ files = [
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -1380,7 +1329,6 @@ files = [
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1392,7 +1340,6 @@ files = [
name = "stack-data"
version = "0.6.3"
description = "Extract data from python stack frames and tracebacks for informative displays"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1412,7 +1359,6 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
name = "starlette"
version = "0.37.2"
description = "The little ASGI library that shines."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1431,7 +1377,6 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7
name = "toml"
version = "0.10.2"
description = "Python Library for Tom's Obvious, Minimal Language"
-category = "main"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -1443,7 +1388,6 @@ files = [
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1455,7 +1399,6 @@ files = [
name = "traitlets"
version = "5.14.3"
description = "Traitlets Python configuration system"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1471,7 +1414,6 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,
name = "typer"
version = "0.12.3"
description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1489,7 +1431,6 @@ typing-extensions = ">=3.7.4.3"
name = "typing-extensions"
version = "4.12.0"
description = "Backported and Experimental Type Hints for Python 3.8+"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1501,7 +1442,6 @@ files = [
name = "ujson"
version = "5.10.0"
description = "Ultra fast JSON encoder and decoder for Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1589,7 +1529,6 @@ files = [
name = "urllib3"
version = "2.2.1"
description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1607,7 +1546,6 @@ zstd = ["zstandard (>=0.18.0)"]
name = "uvicorn"
version = "0.30.0"
description = "The lightning-fast ASGI server."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1623,7 +1561,7 @@ httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standar
python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""}
typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
-uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
+uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""}
@@ -1634,7 +1572,6 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)",
name = "uvloop"
version = "0.19.0"
description = "Fast implementation of asyncio event loop on top of libuv"
-category = "main"
optional = false
python-versions = ">=3.8.0"
files = [
@@ -1679,7 +1616,6 @@ test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)"
name = "watchfiles"
version = "0.22.0"
description = "Simple, modern and high performance file watching and code reload in python."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1767,7 +1703,6 @@ anyio = ">=3.0.0"
name = "wcwidth"
version = "0.2.13"
description = "Measures the displayed width of unicode strings in a terminal"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1779,7 +1714,6 @@ files = [
name = "websockets"
version = "12.0"
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1861,7 +1795,6 @@ files = [
name = "zipp"
version = "3.19.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1876,4 +1809,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "6d97347e0ac076de0bbaedfc2fb0ef9db603ca17a80d997444501391bf4a5dd8"
+content-hash = "ec2c2c9036a752dacfcc4584a6c1868e26813b1f6e7578d3ae2796fd119be3a2"
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index 2c3f012e46..a74d7ab8e8 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -34,7 +34,7 @@ cachetools = "^5.3.3"
[tool.poetry.dev-dependencies]
pytest = "^8.2"
-setuptools = "^70.0.0"
+setuptools = "^70.1.0"
[build-system]
requires = ["poetry-core"]
From f39501bb2e2a085b4104ea69d3cac0c56aac42a6 Mon Sep 17 00:00:00 2001
From: ashrafchowdury
Date: Thu, 20 Jun 2024 13:58:08 +0600
Subject: [PATCH 110/268] fix: fixed prettier format issue
---
agenta-web/src/components/AppSelector/AppSelector.tsx | 1 -
.../src/components/Playground/Views/TestView.tsx | 10 +++++++++-
2 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/agenta-web/src/components/AppSelector/AppSelector.tsx b/agenta-web/src/components/AppSelector/AppSelector.tsx
index bd24ec2ffa..2b4260a45b 100644
--- a/agenta-web/src/components/AppSelector/AppSelector.tsx
+++ b/agenta-web/src/components/AppSelector/AppSelector.tsx
@@ -22,7 +22,6 @@ import {LlmProvider, getAllProviderLlmKeys} from "@/lib/helpers/llmProviders"
import ResultComponent from "../ResultComponent/ResultComponent"
import {dynamicContext} from "@/lib/helpers/dynamic"
-
const useStyles = createUseStyles({
container: ({themeMode}: StyleProps) => ({
marginTop: 10,
diff --git a/agenta-web/src/components/Playground/Views/TestView.tsx b/agenta-web/src/components/Playground/Views/TestView.tsx
index ec54a974a2..cf39a95834 100644
--- a/agenta-web/src/components/Playground/Views/TestView.tsx
+++ b/agenta-web/src/components/Playground/Views/TestView.tsx
@@ -2,7 +2,15 @@ import React, {useContext, useEffect, useRef, useState} from "react"
import {Button, Input, Card, Row, Col, Space, Form, Modal} from "antd"
import {CaretRightOutlined, CloseCircleOutlined, PlusOutlined} from "@ant-design/icons"
import {callVariant} from "@/services/api"
-import {ChatMessage, ChatRole, GenericObject, JSSTheme, Parameter, Variant, StyleProps} from "@/lib/Types"
+import {
+ ChatMessage,
+ ChatRole,
+ GenericObject,
+ JSSTheme,
+ Parameter,
+ Variant,
+ StyleProps,
+} from "@/lib/Types"
import {batchExecute, randString, removeKeys} from "@/lib/helpers/utils"
import LoadTestsModal from "../LoadTestsModal"
import AddToTestSetDrawer from "../AddToTestSetDrawer/AddToTestSetDrawer"
From b0ad9fe55d4681cab64f7ac4d48dbeba8c4f850e Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Thu, 20 Jun 2024 10:52:39 +0100
Subject: [PATCH 111/268] test(frontend): modified match regex to get app_id
from url
---
agenta-web/cypress/support/commands/evaluations.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-web/cypress/support/commands/evaluations.ts b/agenta-web/cypress/support/commands/evaluations.ts
index 040f3f112c..71be863e68 100644
--- a/agenta-web/cypress/support/commands/evaluations.ts
+++ b/agenta-web/cypress/support/commands/evaluations.ts
@@ -47,7 +47,7 @@ Cypress.Commands.add("createVariant", () => {
cy.url().should("include", "/playground")
cy.url().then((url) => {
- app_id = url.match(/\/apps\/([a-zA-Z0-9]+)\/playground/)[1]
+ app_id = url.match(/\/apps\/([a-fA-F0-9-]+)\/playground/)[1]
cy.wrap(app_id).as("app_id")
})
From 936a87d7b0bcc486a0c77e9040480a0dff92559d Mon Sep 17 00:00:00 2001
From: Abram
Date: Thu, 20 Jun 2024 11:17:32 +0100
Subject: [PATCH 112/268] refactor (tests): added agenta-web service to test
compose
---
docker-compose.test.yml | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/docker-compose.test.yml b/docker-compose.test.yml
index 47f90a6e77..4cdc508c01 100644
--- a/docker-compose.test.yml
+++ b/docker-compose.test.yml
@@ -63,6 +63,26 @@ services:
networks:
- agenta-network
+ agenta-web:
+ build:
+ context: ./agenta-web
+ dockerfile: dev.Dockerfile
+ volumes:
+ - ./agenta-web/src:/app/src
+ - ./agenta-web/public:/app/public
+ - .nextjs_cache:/app/.next
+ ports:
+ - "3000:3000"
+ networks:
+ - agenta-network
+ labels:
+ - "traefik.http.routers.agenta-web.rule=PathPrefix(`/`)"
+ - "traefik.http.routers.agenta-web.entrypoints=web"
+ - "traefik.http.services.agenta-web.loadbalancer.server.port=3000"
+ environment:
+ - NEXT_PUBLIC_POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7
+ restart: always
+
redis:
image: redis:latest
container_name: agenta-redis-test
From 14c89a8534d2ad1ad23cf96bd28407f83438cb83 Mon Sep 17 00:00:00 2001
From: Abram
Date: Thu, 20 Jun 2024 15:25:15 +0100
Subject: [PATCH 113/268] minor refactor (tools): remove agenta-web service
from test compose and update step 2 in run-frontend-tests workflow
---
.github/workflows/run-frontend-tests.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/run-frontend-tests.yml b/.github/workflows/run-frontend-tests.yml
index e87e35fa39..dcaac9f211 100644
--- a/.github/workflows/run-frontend-tests.yml
+++ b/.github/workflows/run-frontend-tests.yml
@@ -33,7 +33,7 @@ jobs:
NEXT_PUBLIC_OPENAI_API_KEY: ${{ secrets.NEXT_PUBLIC_OPENAI_API_KEY }}
run: |
sudo apt install curl -y
- docker-compose -f "docker-compose.test.yml" up -d --build
+ docker-compose -f "docker-compose.yml" up -d --build
- name: Restart Backend Service To Fetch Template(s)
run: docker container restart agenta-backend-test
From 9e3dff91e317fb2be529eac3f52b35f6dec58607 Mon Sep 17 00:00:00 2001
From: Abram
Date: Thu, 20 Jun 2024 15:34:45 +0100
Subject: [PATCH 114/268] minor refactor (tools): modified step 2 for
run-frontend-tests workflow
---
.github/workflows/run-frontend-tests.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/run-frontend-tests.yml b/.github/workflows/run-frontend-tests.yml
index dcaac9f211..684ba3552c 100644
--- a/.github/workflows/run-frontend-tests.yml
+++ b/.github/workflows/run-frontend-tests.yml
@@ -33,7 +33,7 @@ jobs:
NEXT_PUBLIC_OPENAI_API_KEY: ${{ secrets.NEXT_PUBLIC_OPENAI_API_KEY }}
run: |
sudo apt install curl -y
- docker-compose -f "docker-compose.yml" up -d --build
+ OPENAI_API_KEY=${{ secrets.NEXT_PUBLIC_OPENAI_API_KEY }} ENVIRONMENT=github docker-compose -f "docker-compose.test.yml" up -d --build
- name: Restart Backend Service To Fetch Template(s)
run: docker container restart agenta-backend-test
From 718361d6a8467bd03d2264151e3f9e1def088290 Mon Sep 17 00:00:00 2001
From: Abram
Date: Thu, 20 Jun 2024 15:38:21 +0100
Subject: [PATCH 115/268] minor refactor (tools): remove external: true from
test compose
---
docker-compose.test.yml | 1 -
1 file changed, 1 deletion(-)
diff --git a/docker-compose.test.yml b/docker-compose.test.yml
index 4cdc508c01..fbacfd81c6 100644
--- a/docker-compose.test.yml
+++ b/docker-compose.test.yml
@@ -151,7 +151,6 @@ services:
networks:
agenta-network:
name: agenta-network
- external: true
volumes:
postgresdb-data:
From 64473bbbaf8044032260ab0fa203b61cf8338025 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 21 Jun 2024 03:22:56 +0000
Subject: [PATCH 116/268] build(deps): bump importlib-metadata from 7.1.0 to
7.2.0 in /agenta-cli
Bumps [importlib-metadata](https://github.com/python/importlib_metadata) from 7.1.0 to 7.2.0.
- [Release notes](https://github.com/python/importlib_metadata/releases)
- [Changelog](https://github.com/python/importlib_metadata/blob/main/NEWS.rst)
- [Commits](https://github.com/python/importlib_metadata/compare/v7.1.0...v7.2.0)
---
updated-dependencies:
- dependency-name: importlib-metadata
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
agenta-cli/poetry.lock | 95 +++++++-----------------------------------
1 file changed, 14 insertions(+), 81 deletions(-)
diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock
index 599be327fb..d03c872afe 100644
--- a/agenta-cli/poetry.lock
+++ b/agenta-cli/poetry.lock
@@ -15,7 +15,6 @@ files = [
name = "anyio"
version = "4.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -38,7 +37,6 @@ trio = ["trio (>=0.23)"]
name = "asttokens"
version = "2.4.1"
description = "Annotate AST trees with source code positions"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -57,7 +55,6 @@ test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"]
name = "backoff"
version = "2.2.1"
description = "Function decoration for backoff and retry"
-category = "main"
optional = false
python-versions = ">=3.7,<4.0"
files = [
@@ -69,7 +66,6 @@ files = [
name = "cachetools"
version = "5.3.3"
description = "Extensible memoizing collections and decorators"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -81,7 +77,6 @@ files = [
name = "certifi"
version = "2024.2.2"
description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -93,7 +88,6 @@ files = [
name = "charset-normalizer"
version = "3.3.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -193,7 +187,6 @@ files = [
name = "click"
version = "8.1.7"
description = "Composable command line interface toolkit"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -208,7 +201,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""}
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
@@ -220,7 +212,6 @@ files = [
name = "decorator"
version = "5.1.1"
description = "Decorators for Humans"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -232,7 +223,6 @@ files = [
name = "dnspython"
version = "2.6.1"
description = "DNS toolkit"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -253,7 +243,6 @@ wmi = ["wmi (>=1.5.1)"]
name = "docker"
version = "7.1.0"
description = "A Python library for the Docker Engine API."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -276,7 +265,6 @@ websockets = ["websocket-client (>=1.3.0)"]
name = "email-validator"
version = "2.1.1"
description = "A robust email address syntax and deliverability validation library."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -292,7 +280,6 @@ idna = ">=2.0.0"
name = "exceptiongroup"
version = "1.2.1"
description = "Backport of PEP 654 (exception groups)"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -307,7 +294,6 @@ test = ["pytest (>=6)"]
name = "executing"
version = "2.0.1"
description = "Get the currently executing AST node of a frame, and other information"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -322,7 +308,6 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth
name = "fastapi"
version = "0.111.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -350,7 +335,6 @@ all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)"
name = "fastapi-cli"
version = "0.0.4"
description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 🚀"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -368,7 +352,6 @@ standard = ["fastapi", "uvicorn[standard] (>=0.15.0)"]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -380,7 +363,6 @@ files = [
name = "httpcore"
version = "1.0.5"
description = "A minimal low-level HTTP client."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -395,14 +377,13 @@ h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (>=1.0.0,<2.0.0)"]
+socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.26.0)"]
[[package]]
name = "httptools"
version = "0.6.1"
description = "A collection of framework independent HTTP protocol utils."
-category = "main"
optional = false
python-versions = ">=3.8.0"
files = [
@@ -451,7 +432,6 @@ test = ["Cython (>=0.29.24,<0.30.0)"]
name = "httpx"
version = "0.27.0"
description = "The next generation HTTP client."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -462,21 +442,20 @@ files = [
[package.dependencies]
anyio = "*"
certifi = "*"
-httpcore = ">=1.0.0,<2.0.0"
+httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
-cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (>=1.0.0,<2.0.0)"]
+socks = ["socksio (==1.*)"]
[[package]]
name = "idna"
version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -486,29 +465,27 @@ files = [
[[package]]
name = "importlib-metadata"
-version = "7.1.0"
+version = "7.2.0"
description = "Read metadata from Python packages"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"},
- {file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"},
+ {file = "importlib_metadata-7.2.0-py3-none-any.whl", hash = "sha256:04e4aad329b8b948a5711d394fa8759cb80f009225441b4f2a02bd4d8e5f426c"},
+ {file = "importlib_metadata-7.2.0.tar.gz", hash = "sha256:3ff4519071ed42740522d494d04819b666541b9752c43012f85afb2cc220fcc6"},
]
[package.dependencies]
zipp = ">=0.5"
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
-testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
+test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -520,7 +497,6 @@ files = [
name = "ipdb"
version = "0.13.13"
description = "IPython-enabled pdb"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
@@ -537,7 +513,6 @@ tomli = {version = "*", markers = "python_version > \"3.6\" and python_version <
name = "ipython"
version = "8.18.0"
description = "IPython: Productive Interactive Computing"
-category = "main"
optional = false
python-versions = ">=3.9"
files = [
@@ -575,7 +550,6 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pa
name = "jedi"
version = "0.19.1"
description = "An autocompletion tool for Python that can be used for text editors."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -595,7 +569,6 @@ testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
name = "jinja2"
version = "3.1.4"
description = "A very fast and expressive template engine."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -613,7 +586,6 @@ i18n = ["Babel (>=2.7)"]
name = "markdown-it-py"
version = "3.0.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -638,7 +610,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
name = "markupsafe"
version = "2.1.5"
description = "Safely add untrusted strings to HTML/XML markup."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -708,7 +679,6 @@ files = [
name = "matplotlib-inline"
version = "0.1.7"
description = "Inline Matplotlib backend for Jupyter"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -723,7 +693,6 @@ traitlets = "*"
name = "mdurl"
version = "0.1.2"
description = "Markdown URL utilities"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -735,7 +704,6 @@ files = [
name = "monotonic"
version = "1.6"
description = "An implementation of time.monotonic() for Python 2 & < 3.3"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -747,7 +715,6 @@ files = [
name = "orjson"
version = "3.10.3"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -803,7 +770,6 @@ files = [
name = "packaging"
version = "24.0"
description = "Core utilities for Python packages"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -815,7 +781,6 @@ files = [
name = "parso"
version = "0.8.4"
description = "A Python Parser"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -831,7 +796,6 @@ testing = ["docopt", "pytest"]
name = "pexpect"
version = "4.9.0"
description = "Pexpect allows easy control of interactive console applications."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -846,7 +810,6 @@ ptyprocess = ">=0.5"
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -862,7 +825,6 @@ testing = ["pytest", "pytest-benchmark"]
name = "posthog"
version = "3.5.0"
description = "Integrate PostHog into any python application."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -886,7 +848,6 @@ test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint"
name = "prompt-toolkit"
version = "3.0.36"
description = "Library for building powerful interactive command lines in Python"
-category = "main"
optional = false
python-versions = ">=3.6.2"
files = [
@@ -901,7 +862,6 @@ wcwidth = "*"
name = "ptyprocess"
version = "0.7.0"
description = "Run a subprocess in a pseudo terminal"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -913,7 +873,6 @@ files = [
name = "pure-eval"
version = "0.2.2"
description = "Safely evaluate AST nodes without side effects"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1038,7 +997,6 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
name = "pygments"
version = "2.18.0"
description = "Pygments is a syntax highlighting package written in Python."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1053,7 +1011,6 @@ windows-terminal = ["colorama (>=0.4.6)"]
name = "pymongo"
version = "4.7.3"
description = "Python driver for MongoDB "
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1135,7 +1092,6 @@ zstd = ["zstandard"]
name = "pytest"
version = "8.2.2"
description = "pytest: simple powerful testing with Python"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -1158,7 +1114,6 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
-category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
@@ -1173,7 +1128,6 @@ six = ">=1.5"
name = "python-dotenv"
version = "1.0.1"
description = "Read key-value pairs from a .env file and set them as environment variables"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1188,7 +1142,6 @@ cli = ["click (>=5.0)"]
name = "python-multipart"
version = "0.0.9"
description = "A streaming multipart parser for Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1203,7 +1156,6 @@ dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatc
name = "pywin32"
version = "306"
description = "Python for Window Extensions"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1227,7 +1179,6 @@ files = [
name = "pyyaml"
version = "6.0.1"
description = "YAML parser and emitter for Python"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -1236,6 +1187,7 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -1243,6 +1195,7 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
@@ -1268,6 +1221,7 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -1275,6 +1229,7 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@@ -1284,7 +1239,6 @@ files = [
name = "questionary"
version = "2.0.1"
description = "Python library to build pretty command line user prompts ⭐️"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1299,7 +1253,6 @@ prompt_toolkit = ">=2.0,<=3.0.36"
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1321,7 +1274,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
name = "rich"
version = "13.7.1"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
-category = "main"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -1340,7 +1292,6 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
name = "setuptools"
version = "70.0.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
-category = "dev"
optional = false
python-versions = ">=3.8"
files = [
@@ -1356,7 +1307,6 @@ testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metad
name = "shellingham"
version = "1.5.4"
description = "Tool to Detect Surrounding Shell"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1368,7 +1318,6 @@ files = [
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -1380,7 +1329,6 @@ files = [
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1392,7 +1340,6 @@ files = [
name = "stack-data"
version = "0.6.3"
description = "Extract data from python stack frames and tracebacks for informative displays"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1412,7 +1359,6 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
name = "starlette"
version = "0.37.2"
description = "The little ASGI library that shines."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1431,7 +1377,6 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7
name = "toml"
version = "0.10.2"
description = "Python Library for Tom's Obvious, Minimal Language"
-category = "main"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -1443,7 +1388,6 @@ files = [
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1455,7 +1399,6 @@ files = [
name = "traitlets"
version = "5.14.3"
description = "Traitlets Python configuration system"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1471,7 +1414,6 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,
name = "typer"
version = "0.12.3"
description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1489,7 +1431,6 @@ typing-extensions = ">=3.7.4.3"
name = "typing-extensions"
version = "4.12.0"
description = "Backported and Experimental Type Hints for Python 3.8+"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1501,7 +1442,6 @@ files = [
name = "ujson"
version = "5.10.0"
description = "Ultra fast JSON encoder and decoder for Python"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1589,7 +1529,6 @@ files = [
name = "urllib3"
version = "2.2.1"
description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1607,7 +1546,6 @@ zstd = ["zstandard (>=0.18.0)"]
name = "uvicorn"
version = "0.30.0"
description = "The lightning-fast ASGI server."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1623,7 +1561,7 @@ httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standar
python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""}
typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
-uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
+uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""}
@@ -1634,7 +1572,6 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)",
name = "uvloop"
version = "0.19.0"
description = "Fast implementation of asyncio event loop on top of libuv"
-category = "main"
optional = false
python-versions = ">=3.8.0"
files = [
@@ -1679,7 +1616,6 @@ test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)"
name = "watchfiles"
version = "0.22.0"
description = "Simple, modern and high performance file watching and code reload in python."
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1767,7 +1703,6 @@ anyio = ">=3.0.0"
name = "wcwidth"
version = "0.2.13"
description = "Measures the displayed width of unicode strings in a terminal"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1779,7 +1714,6 @@ files = [
name = "websockets"
version = "12.0"
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
@@ -1861,7 +1795,6 @@ files = [
name = "zipp"
version = "3.19.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
-category = "main"
optional = false
python-versions = ">=3.8"
files = [
From 04075eb301e51e6b5067378c835f899fa1989586 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 21 Jun 2024 13:03:20 +0200
Subject: [PATCH 117/268] fix human evaluations
---
.../migrations/mongo_to_postgres/migration.py | 47 ++++++++++++++-----
1 file changed, 36 insertions(+), 11 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index cd56d3dab4..9529256819 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -11,27 +11,27 @@
# Assuming agenta_backend.models.db_models contains your SQLAlchemy models
from agenta_backend.models.db_models import (
- EvaluationAggregatedResultDB,
- Base,
UserDB,
ImageDB,
AppDB,
DeploymentDB,
VariantBaseDB,
AppVariantDB,
- AppVariantRevisionsDB,
AppEnvironmentDB,
AppEnvironmentRevisionDB,
TemplateDB,
TestSetDB,
EvaluatorConfigDB,
HumanEvaluationDB,
+ HumanEvaluationVariantDB,
HumanEvaluationScenarioDB,
+ EvaluationAggregatedResultDB,
+ EvaluationScenarioResultDB,
EvaluationDB,
+ EvaluationEvaluatorConfigDB,
EvaluationScenarioDB,
IDsMappingDB,
- EvaluationEvaluatorConfigDB,
- EvaluationScenarioResultDB,
+ AppVariantRevisionsDB,
)
from agenta_backend.migrations.mongo_to_postgres.utils import (
@@ -294,27 +294,49 @@ async def transform_evaluator_config(config):
}
+async def convert_human_evaluations_associated_variants(
+ variants, variants_revisions, evaluation_id
+):
+ """Convert variant and revision ObjectIds to UUIDs and structure them."""
+ associated_variants = []
+ for variant_id, revision_id in zip(variants, variants_revisions):
+ variant_uuid = await get_mapped_uuid(variant_id)
+ revision_uuid = await get_mapped_uuid(revision_id)
+ associated_variants.append(
+ {
+ "human_evaluation_id": evaluation_id,
+ "variant_id": variant_uuid,
+ "variant_revision_id": revision_uuid,
+ }
+ )
+ return associated_variants
+
+
async def transform_human_evaluation(evaluation):
app_uuid = await get_mapped_uuid(evaluation["app"].id)
user_uuid = await get_mapped_uuid(evaluation["user"].id)
test_set_uuid = await get_mapped_uuid(evaluation["testset"].id)
- variant_uuid = await get_mapped_uuid(evaluation["variants"][0])
- revision_uuid = await get_mapped_uuid(evaluation["variants_revisions"][0])
evaluation_uuid = generate_uuid()
+
await store_mapping("human_evaluations", evaluation["_id"], evaluation_uuid)
- return {
+
+ transformed_evaluation = {
"id": evaluation_uuid,
"app_id": app_uuid,
"user_id": user_uuid,
"status": evaluation["status"],
"evaluation_type": evaluation["evaluation_type"],
- "variant_id": variant_uuid,
- "variant_revision_id": revision_uuid,
"testset_id": test_set_uuid,
"created_at": get_datetime(evaluation.get("created_at")),
"updated_at": get_datetime(evaluation.get("updated_at")),
}
+ associated_variants = await convert_human_evaluations_associated_variants(
+ evaluation["variants"], evaluation["variants_revisions"], evaluation_uuid
+ )
+
+ return transformed_evaluation, associated_variants
+
async def transform_human_evaluation_scenario(scenario):
user_uuid = await get_mapped_uuid(scenario["user"].id)
@@ -465,7 +487,10 @@ async def main():
"evaluators_configs", EvaluatorConfigDB, transform_evaluator_config
)
await migrate_collection(
- "human_evaluations", HumanEvaluationDB, transform_human_evaluation
+ "human_evaluations",
+ HumanEvaluationDB,
+ transform_human_evaluation,
+ HumanEvaluationVariantDB,
)
await migrate_collection(
"human_evaluations_scenarios",
From f93ba1d6812667422a61fab29e8ffef249b355cd Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 21 Jun 2024 13:04:14 +0200
Subject: [PATCH 118/268] add human evaluations variant
---
.../migrations/mongo_to_postgres/utils.py | 55 +++++++++----------
1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index 2e64dedf25..abe759acb5 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -11,7 +11,6 @@
import uuid_utils.compat as uuid
from sqlalchemy.future import select
-
from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.db_models import (
@@ -35,7 +34,9 @@ async def drop_all_tables():
"""Drop all tables in the database."""
async with db_engine.engine.begin() as conn:
await conn.run_sync(Base.metadata.reflect)
- await conn.run_sync(Base.metadata.drop_all)
+ # Drop all tables with CASCADE option
+ for table in reversed(Base.metadata.sorted_tables):
+ await conn.execute(text(f"DROP TABLE IF EXISTS {table.name} CASCADE"))
async def create_all_tables(tables):
@@ -51,24 +52,20 @@ async def store_mapping(table_name, mongo_id, uuid):
"""Store the mapping of MongoDB ObjectId to UUID in the mapping table."""
id = generate_uuid()
async with db_engine.get_session() as session:
- async with session.begin():
- mapping = IDsMappingDB(
- id=id, table_name=table_name, objectid=str(mongo_id), uuid=uuid
- )
- session.add(mapping)
+ mapping = IDsMappingDB(
+ id=id, table_name=table_name, objectid=str(mongo_id), uuid=uuid
+ )
+ session.add(mapping)
await session.commit()
async def get_mapped_uuid(mongo_id):
"""Retrieve the mapped UUID for a given MongoDB ObjectId."""
async with db_engine.get_session() as session:
- async with session.begin():
- stmt = select(IDsMappingDB.uuid).filter(
- IDsMappingDB.objectid == str(mongo_id)
- )
- result = await session.execute(stmt)
- row = result.first()
- return row[0] if row else None
+ stmt = select(IDsMappingDB.uuid).filter(IDsMappingDB.objectid == str(mongo_id))
+ result = await session.execute(stmt)
+ row = result.first()
+ return row[0] if row else None
def get_datetime(value):
@@ -140,19 +137,19 @@ async def migrate_collection(
mongo_db[collection_name].find().skip(skip).limit(BATCH_SIZE)
),
)
- async with session.begin():
- for document in batch:
- if association_model:
- (
- transformed_document,
- associated_entities,
- ) = await transformation_func(document)
- session.add(model_class(**transformed_document))
- for assoc_entity in associated_entities:
- session.add(association_model(**assoc_entity))
- else:
- transformed_document = await transformation_func(document)
- session.add(model_class(**transformed_document))
- migrated_docs += 1
- await session.commit()
+ for document in batch:
+ if association_model:
+ (
+ transformed_document,
+ associated_entities,
+ ) = await transformation_func(document)
+ session.add(model_class(**transformed_document))
+ for assoc_entity in associated_entities:
+ session.add(association_model(**assoc_entity))
+ else:
+ transformed_document = await transformation_func(document)
+ session.add(model_class(**transformed_document))
+ await session.commit()
+ migrated_docs += 1
+
update_migration_report(collection_name, total_docs, migrated_docs)
From e79d71328bff8558c246c797e26b1de145e980e0 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 21 Jun 2024 16:37:23 +0100
Subject: [PATCH 119/268] refactor (tests): resolve failing backend tests
---
.../agenta_backend/models/api/user_models.py | 11 +-
.../routers/evaluation_router.py | 4 +-
.../tests/variants_main_router/conftest.py | 140 +++---
.../test_app_variant_router.py | 111 +++--
.../test_variant_evaluators_router.py | 399 ++++++++++--------
.../test_variant_testset_router.py | 171 ++++----
.../test_variant_versioning_deployment.py | 81 ++--
.../test_user_profile.py | 37 +-
8 files changed, 566 insertions(+), 388 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/user_models.py b/agenta-backend/agenta_backend/models/api/user_models.py
index bbecff33df..10c4573e99 100644
--- a/agenta-backend/agenta_backend/models/api/user_models.py
+++ b/agenta-backend/agenta_backend/models/api/user_models.py
@@ -1,7 +1,8 @@
from typing import Optional, List
-from pydantic import BaseModel, Field
from datetime import datetime, timezone
+from pydantic import BaseModel, Field
+
class TimestampModel(BaseModel):
created_at: str = Field(str(datetime.now(timezone.utc)))
@@ -9,14 +10,14 @@ class TimestampModel(BaseModel):
class User(TimestampModel):
- id: Optional[str]
+ id: Optional[str] = None
uid: str
username: str
- email: str # switch to EmailStr when langchain support pydantic>=2.1
+ email: str
organizations: Optional[List[str]] = None
class UserUpdate(BaseModel):
- username: Optional[str]
- email: Optional[str]
+ username: Optional[str] = None
+ email: Optional[str] = None
updated_at: str = Field(str(datetime.now(timezone.utc)))
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index e883b70237..4e94e059a0 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -215,8 +215,8 @@ async def fetch_evaluation_results(evaluation_id: str, request: Request):
status_code=403,
)
- results = await converters.aggregated_result_to_pydantic(
- evaluation.aggregated_results
+ results = await converters.aggregated_result_of_evaluation_to_pydantic(
+ str(evaluation.id)
)
return {"results": results, "evaluation_id": evaluation_id}
except Exception as exc:
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py b/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
index bf4a1008aa..31616fc424 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
@@ -3,16 +3,19 @@
import logging
from datetime import datetime, timezone
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.shared_models import ConfigDB
from agenta_backend.models.db_models import (
AppDB,
UserDB,
+ DeploymentDB,
VariantBaseDB,
ImageDB,
AppVariantDB,
)
import httpx
+from sqlalchemy.future import select
# Initialize logger
@@ -32,67 +35,106 @@
async def get_first_user_object():
"""Get the user object from the database or create a new one if not found."""
- user = await UserDB.find_one(UserDB.uid == "0")
- if user is None:
- create_user = UserDB(uid="0")
- await create_user.create()
-
- return create_user
- return user
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(UserDB).filter_by(uid="0"))
+ user = result.scalars().first()
+ if user is None:
+ create_user = UserDB(uid="0")
+ session.add(create_user)
+ await session.commit()
+ await session.refresh(create_user)
+ return create_user
+ return user
@pytest.fixture()
async def get_second_user_object():
"""Create a second user object."""
- user = await UserDB.find_one(UserDB.uid == "1")
- if user is None:
- create_user = UserDB(
- uid="1", username="test_user1", email="test_user1@email.com"
- )
- await create_user.create()
-
- return create_user
- return user
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(UserDB).filter_by(uid="1"))
+ user = result.scalars().first()
+ if user is None:
+ create_user = UserDB(
+ uid="1", username="test_user1", email="test_user1@email.com"
+ )
+ session.add(create_user)
+ await session.commit()
+ await session.refresh(create_user)
+ return create_user
+ return user
@pytest.fixture()
async def get_first_user_app(get_first_user_object):
user = await get_first_user_object
- app = AppDB(app_name="myapp", user=user)
- await app.create()
+ async with db_engine.get_session() as session:
+ app = AppDB(app_name="myapp", user_id=user.id)
+ session.add(app)
+ await session.commit()
+ await session.refresh(app)
- db_image = ImageDB(
- docker_id="sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
- tags="agentaai/templates_v2:local_test_prompt",
- user=user,
- )
- await db_image.create()
+ db_image = ImageDB(
+ docker_id="sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ tags="agentaai/templates_v2:local_test_prompt",
+ user_id=user.id,
+ )
+ session.add(db_image)
+ await session.commit()
+ await session.refresh(db_image)
- db_config = ConfigDB(
- config_name="default",
- parameters={},
- )
+ db_config = ConfigDB(
+ config_name="default",
+ parameters={},
+ )
- db_base = VariantBaseDB(base_name="app", image=db_image, user=user, app=app)
- await db_base.create()
-
- appvariant = AppVariantDB(
- app=app,
- variant_name="app",
- image=db_image,
- user=user,
- parameters={},
- base_name="app",
- config_name="default",
- base=db_base,
- revision=0,
- modified_by=user,
- config=db_config,
- )
- await appvariant.create()
- return appvariant, user, app, db_image, db_config, db_base
+ db_deployment = DeploymentDB(
+ app_id=app.id,
+ user_id=user.id,
+ container_name="container_a_test",
+ container_id="w243e34red",
+ uri="http://localhost/app/w243e34red",
+ status="stale"
+ )
+ session.add(db_deployment)
+
+ db_base = VariantBaseDB(
+ base_name="app",
+ image_id=db_image.id,
+ user_id=user.id,
+ app_id=app.id,
+ deployment_id=db_deployment.id
+ )
+ session.add(db_base)
+ await session.commit()
+ await session.refresh(db_base)
+
+ appvariant = AppVariantDB(
+ app_id=app.id,
+ variant_name="app",
+ image_id=db_image.id,
+ user_id=user.id,
+ config_parameters={},
+ base_name="app",
+ config_name="default",
+ base_id=db_base.id,
+ revision=0,
+ modified_by_id=user.id,
+ )
+ session.add(appvariant)
+ await session.commit()
+ await session.refresh(appvariant)
+
+ return appvariant, user, app, db_image, db_config, db_base
+
+
+@pytest.fixture(scope="session")
+async def fetch_user():
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(UserDB).filter_by(uid="0"))
+ user = result.scalars().first()
+ return user
@pytest.fixture()
@@ -144,12 +186,6 @@ def app_from_template():
}
-@pytest.fixture(scope="session")
-async def fetch_user():
- user = await UserDB.find_one(UserDB.uid == "0", fetch_links=True)
- return user
-
-
@pytest.fixture()
def update_app_variant_parameters():
return {
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_app_variant_router.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_app_variant_router.py
index 880ecdab84..8dc9e8b169 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_app_variant_router.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_app_variant_router.py
@@ -6,14 +6,18 @@
from agenta_backend.routers import app_router
from agenta_backend.services import db_manager
+from agenta_backend.models.db_engine import db_engine
+from agenta_backend.models.shared_models import ConfigDB
from agenta_backend.models.db_models import (
AppDB,
+ DeploymentDB,
VariantBaseDB,
ImageDB,
- ConfigDB,
AppVariantDB,
)
+from sqlalchemy.future import select
+
# Initialize http client
test_client = httpx.AsyncClient()
@@ -34,9 +38,7 @@
@pytest.mark.asyncio
-async def test_create_app(get_first_user_object):
- user = await get_first_user_object
-
+async def test_create_app():
response = await test_client.post(
f"{BACKEND_API_HOST}/apps/",
json={
@@ -59,42 +61,61 @@ async def test_list_apps():
@pytest.mark.asyncio
async def test_create_app_variant(get_first_user_object):
user = await get_first_user_object
- app = await AppDB.find_one(AppDB.app_name == "app_variant_test")
- db_image = ImageDB(
- docker_id="sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
- tags="agentaai/templates_v2:local_test_prompt",
- user=user,
- )
- await db_image.create()
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name="app_variant_test")
+ )
+ app = result.scalars().first()
- db_config = ConfigDB(
- config_name="default",
- parameters={},
- )
+ db_image = ImageDB(
+ docker_id="sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ tags="agentaai/templates_v2:local_test_prompt",
+ user_id=user.id,
+ )
+ session.add(db_image)
+ await session.commit()
- db_base = VariantBaseDB(
- base_name="app",
- app=app,
- user=user,
- image=db_image,
- )
- await db_base.create()
-
- appvariant = AppVariantDB(
- app=app,
- variant_name="app",
- image=db_image,
- user=user,
- parameters={},
- base_name="app",
- config_name="default",
- revision=0,
- modified_by=user,
- base=db_base,
- config=db_config,
- )
- await appvariant.create()
+ db_config = ConfigDB(
+ config_name="default",
+ parameters={},
+ )
+
+ db_deployment = DeploymentDB(
+ app_id=app.id,
+ user_id=user.id,
+ container_name="container_a_test",
+ container_id="w243e34red",
+ uri="http://localhost/app/w243e34red",
+ status="stale"
+ )
+ session.add(db_deployment)
+ await session.commit()
+
+ db_base = VariantBaseDB(
+ base_name="app",
+ app_id=app.id,
+ user_id=user.id,
+ image_id=db_image.id,
+ deployment_id=db_deployment.id
+ )
+ session.add(db_base)
+ await session.commit()
+
+ appvariant = AppVariantDB(
+ app_id=app.id,
+ variant_name="app",
+ image_id=db_image.id,
+ user_id=user.id,
+ config_parameters={},
+ base_name="app",
+ config_name="default",
+ revision=0,
+ modified_by_id=user.id,
+ base_id=db_base.id,
+ )
+ session.add(appvariant)
+ await session.commit()
response = await test_client.get(f"{BACKEND_API_HOST}/apps/{str(app.id)}/variants/")
assert response.status_code == 200
@@ -103,9 +124,14 @@ async def test_create_app_variant(get_first_user_object):
@pytest.mark.asyncio
async def test_list_app_variants():
- app_db = await AppDB.find_one(AppDB.app_name == "app_variant_test")
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name="app_variant_test")
+ )
+ app = result.scalars().first()
+
response = await test_client.get(
- f"{BACKEND_API_HOST}/apps/{str(app_db.id)}/variants/"
+ f"{BACKEND_API_HOST}/apps/{str(app.id)}/variants/"
)
assert response.status_code == 200
@@ -114,7 +140,12 @@ async def test_list_app_variants():
@pytest.mark.asyncio
async def test_list_environments():
- app = await AppDB.find_one(AppDB.app_name == "app_variant_test")
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name="app_variant_test")
+ )
+ app = result.scalars().first()
+
response = await test_client.get(
f"{BACKEND_API_HOST}/apps/{str(app.id)}/environments/"
)
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
index 9ff8ad60b2..fcbebea5bd 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
@@ -3,6 +3,7 @@
import pytest
import asyncio
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.api.evaluation_model import EvaluationStatusEnum
from agenta_backend.models.db_models import (
AppDB,
@@ -13,6 +14,9 @@
EvaluationScenarioDB,
)
+from sqlalchemy.future import select
+from sqlalchemy.orm import joinedload
+
# Initialize http client
test_client = httpx.AsyncClient()
@@ -56,182 +60,138 @@ async def test_get_evaluators_endpoint():
async def test_create_auto_exact_match_evaluator_config(
auto_exact_match_evaluator_config,
):
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- payload = auto_exact_match_evaluator_config
- payload["app_id"] = str(app.id)
- payload["settings_values"]["correct_answer_key"] = "correct_answer"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = result.scalars().first()
- response = await test_client.post(
- f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
- )
- assert response.status_code == 200
- assert response.json()["evaluator_key"] == payload["evaluator_key"]
- assert response.json()["settings_values"] == payload["settings_values"]
+ payload = auto_exact_match_evaluator_config
+ payload["app_id"] = str(app.id)
+ payload["settings_values"]["correct_answer_key"] = "correct_answer"
+
+ response = await test_client.post(
+ f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
+ )
+ assert response.status_code == 200
+ assert response.json()["evaluator_key"] == payload["evaluator_key"]
+ assert response.json()["settings_values"] == payload["settings_values"]
@pytest.mark.asyncio
async def test_create_auto_similarity_match_evaluator_config(
auto_similarity_match_evaluator_config,
):
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- payload = auto_similarity_match_evaluator_config
- payload["app_id"] = str(app.id)
- payload["settings_values"]["correct_answer_key"] = "correct_answer"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = result.scalars().first()
- response = await test_client.post(
- f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
- )
- assert response.status_code == 200
- assert response.json()["evaluator_key"] == payload["evaluator_key"]
- assert response.json()["settings_values"] == payload["settings_values"]
+ payload = auto_similarity_match_evaluator_config
+ payload["app_id"] = str(app.id)
+ payload["settings_values"]["correct_answer_key"] = "correct_answer"
+
+ response = await test_client.post(
+ f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
+ )
+ assert response.status_code == 200
+ assert response.json()["evaluator_key"] == payload["evaluator_key"]
+ assert response.json()["settings_values"] == payload["settings_values"]
@pytest.mark.asyncio
async def test_create_auto_regex_test_evaluator_config(
auto_regex_test_evaluator_config,
):
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- payload = auto_regex_test_evaluator_config
- payload["app_id"] = str(app.id)
- payload["settings_values"]["regex_pattern"] = "^ig\\d{3}$"
- payload["settings_values"]["correct_answer_key"] = "correct_answer"
-
- response = await test_client.post(
- f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
- )
- assert response.status_code == 200
- assert response.json()["evaluator_key"] == payload["evaluator_key"]
- assert response.json()["settings_values"] == payload["settings_values"]
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = result.scalars().first()
+
+ payload = auto_regex_test_evaluator_config
+ payload["app_id"] = str(app.id)
+ payload["settings_values"]["regex_pattern"] = "^ig\\d{3}$"
+ payload["settings_values"]["correct_answer_key"] = "correct_answer"
+
+ response = await test_client.post(
+ f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
+ )
+ assert response.status_code == 200
+ assert response.json()["evaluator_key"] == payload["evaluator_key"]
+ assert response.json()["settings_values"] == payload["settings_values"]
@pytest.mark.asyncio
async def test_create_auto_webhook_test_evaluator_config(
auto_webhook_test_evaluator_config,
):
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- payload = auto_webhook_test_evaluator_config
- payload["app_id"] = str(app.id)
- payload["settings_values"]["correct_answer_key"] = "correct_answer"
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = result.scalars().first()
- response = await test_client.post(
- f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
- )
- assert response.status_code == 200
- assert response.json()["evaluator_key"] == payload["evaluator_key"]
- assert response.json()["settings_values"] == payload["settings_values"]
+ payload = auto_webhook_test_evaluator_config
+ payload["app_id"] = str(app.id)
+ payload["settings_values"]["correct_answer_key"] = "correct_answer"
+
+ response = await test_client.post(
+ f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
+ )
+ assert response.status_code == 200
+ assert response.json()["evaluator_key"] == payload["evaluator_key"]
+ assert response.json()["settings_values"] == payload["settings_values"]
@pytest.mark.asyncio
async def test_create_auto_ai_critique_evaluator_config(
auto_ai_critique_evaluator_config,
):
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- payload = auto_ai_critique_evaluator_config
- payload["app_id"] = str(app.id)
- payload["settings_values"]["correct_answer_key"] = "correct_answer"
-
- response = await test_client.post(
- f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
- )
- assert response.status_code == 200
- assert response.json()["evaluator_key"] == payload["evaluator_key"]
- assert response.json()["settings_values"] == payload["settings_values"]
-
-
-@pytest.mark.asyncio
-async def test_get_evaluator_configs():
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- response = await test_client.get(
- f"{BACKEND_API_HOST}/evaluators/configs/?app_id={str(app.id)}",
- timeout=timeout,
- )
- assert response.status_code == 200
- assert type(response.json()) == list
-
-
-@pytest.mark.asyncio
-async def test_create_evaluation_auto_exact_match():
- await create_evaluation_with_evaluator("auto_exact_match_evaluator_config")
-
-
-@pytest.mark.asyncio
-async def test_create_evaluation_auto_similarity_match():
- await create_evaluation_with_evaluator("auto_similarity_match_evaluator_config")
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = result.scalars().first()
+ payload = auto_ai_critique_evaluator_config
+ payload["app_id"] = str(app.id)
+ payload["settings_values"]["correct_answer_key"] = "correct_answer"
-@pytest.mark.asyncio
-async def test_create_evaluation_auto_regex_test():
- await create_evaluation_with_evaluator("auto_regex_test_evaluator_config")
+ response = await test_client.post(
+ f"{BACKEND_API_HOST}/evaluators/configs/", json=payload, timeout=timeout
+ )
+ assert response.status_code == 200
+ assert response.json()["evaluator_key"] == payload["evaluator_key"]
+ assert response.json()["settings_values"] == payload["settings_values"]
@pytest.mark.asyncio
-async def test_create_evaluation_auto_webhook_test():
- await create_evaluation_with_evaluator("auto_webhook_test_evaluator_config")
-
+async def test_get_evaluator_configs():
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = result.scalars().first()
-@pytest.mark.asyncio
-async def test_create_evaluation_auto_ai_critique():
- await create_evaluation_with_evaluator("auto_ai_critique_evaluator_config")
+ response = await test_client.get(
+ f"{BACKEND_API_HOST}/evaluators/configs/?app_id={str(app.id)}",
+ timeout=timeout,
+ )
+ assert response.status_code == 200
+ assert type(response.json()) == list
-async def create_evaluation_with_evaluator(evaluator_config_name):
- # Fetch app, app_variant and testset
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- app_variant = await AppVariantDB.find_one(AppVariantDB.app.id == app.id)
- testset = await TestSetDB.find_one(TestSetDB.app.id == app.id)
-
- # Prepare payload
- payload = {
- "app_id": str(app.id),
- "variant_ids": [str(app_variant.id)],
- "evaluators_configs": [],
- "testset_id": str(testset.id),
- "lm_providers_keys": {"OPENAI_API_KEY": OPEN_AI_KEY},
- "rate_limit": {
- "batch_size": 10,
- "max_retries": 3,
- "retry_delay": 3,
- "delay_between_batches": 5,
- },
- }
-
- # Fetch evaluator configs
+async def fetch_evaluation_results(evaluation_id):
response = await test_client.get(
- f"{BACKEND_API_HOST}/evaluators/configs/?app_id={payload['app_id']}",
- timeout=timeout,
- )
- list_of_configs_ids = []
- evaluator_configs = response.json()
- for evaluator_config in evaluator_configs:
- if evaluator_config["evaluator_key"] == evaluator_config_name:
- list_of_configs_ids.append(evaluator_config["id"])
-
- # Update payload with list of configs ids
- payload["evaluators_configs"] = list_of_configs_ids
-
- # Sleep for 10 seconds (to allow the llm app container start completely)
- await asyncio.sleep(10)
-
- # Make request to create evaluation
- response = await test_client.post(
- f"{BACKEND_API_HOST}/evaluations/", json=payload, timeout=timeout
+ f"{BACKEND_API_HOST}/evaluations/{evaluation_id}/results/", timeout=timeout
)
- response_data = response.json()[0]
+ response_data = response.json()
+ print("Response Data: ", response_data)
assert response.status_code == 200
- assert response_data["app_id"] == payload["app_id"]
- assert (
- response_data["status"]["value"]
- == EvaluationStatusEnum.EVALUATION_STARTED.value
- )
- assert response_data is not None
-
- # Wait for evaluation to finish
- evaluation_id = response_data["id"]
- await wait_for_evaluation_to_finish(evaluation_id)
-
- # Fetch evaluation results
- await fetch_evaluation_results(evaluation_id)
+ assert response_data["evaluation_id"] == evaluation_id
async def wait_for_evaluation_to_finish(evaluation_id):
@@ -244,6 +204,7 @@ async def wait_for_evaluation_to_finish(evaluation_id):
)
response_data = response.json()
if response_data["status"]["value"] == EvaluationStatusEnum.EVALUATION_FINISHED:
+ await fetch_evaluation_results(evaluation_id)
assert True
return
await asyncio.sleep(intervals)
@@ -253,47 +214,142 @@ async def wait_for_evaluation_to_finish(evaluation_id):
), f"Evaluation status did not become '{EvaluationStatusEnum.EVALUATION_FINISHED}' within the specified polling time"
-async def fetch_evaluation_results(evaluation_id):
- response = await test_client.get(
- f"{BACKEND_API_HOST}/evaluations/{evaluation_id}/results/", timeout=timeout
- )
- response_data = response.json()
+async def create_evaluation_with_evaluator(evaluator_config_name):
+ # Fetch app, app_variant and testset
+ async with db_engine.get_session() as session:
+ app_result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = app_result.scalars().first()
- assert response.status_code == 200
- assert response_data["evaluation_id"] == evaluation_id
+ app_variant_result = await session.execute(
+ select(AppVariantDB).filter_by(app_id=app.id)
+ )
+ app_variant = app_variant_result.scalars().first()
+
+ testset_result = await session.execute(
+ select(TestSetDB).filter_by(app_id=app.id)
+ )
+ testset = testset_result.scalars().first()
+
+ # Prepare payload
+ payload = {
+ "app_id": str(app.id),
+ "variant_ids": [str(app_variant.id)],
+ "evaluators_configs": [],
+ "testset_id": str(testset.id),
+ "lm_providers_keys": {"OPENAI_API_KEY": OPEN_AI_KEY},
+ "rate_limit": {
+ "batch_size": 10,
+ "max_retries": 3,
+ "retry_delay": 3,
+ "delay_between_batches": 5,
+ },
+ }
+
+ # Fetch evaluator configs
+ response = await test_client.get(
+ f"{BACKEND_API_HOST}/evaluators/configs/?app_id={payload['app_id']}",
+ timeout=timeout,
+ )
+ list_of_configs_ids = []
+ evaluator_configs = response.json()
+ for evaluator_config in evaluator_configs:
+ if evaluator_config["evaluator_key"] == evaluator_config_name:
+ list_of_configs_ids.append(evaluator_config["id"])
+
+ # Update payload with list of configs ids
+ payload["evaluators_configs"] = list_of_configs_ids
+
+ # Sleep for 10 seconds (to allow the llm app container start completely)
+ await asyncio.sleep(10)
+
+ # Make request to create evaluation
+ response = await test_client.post(
+ f"{BACKEND_API_HOST}/evaluations/", json=payload, timeout=timeout
+ )
+ response_data = response.json()[0]
+
+ assert response.status_code == 200
+ assert response_data["app_id"] == payload["app_id"]
+ assert (
+ response_data["status"]["value"]
+ == EvaluationStatusEnum.EVALUATION_STARTED.value
+ )
+ assert response_data is not None
+
+ # Wait for evaluation to finish
+ evaluation_id = response_data["id"]
+ await wait_for_evaluation_to_finish(evaluation_id)
+
+
+@pytest.mark.asyncio
+async def test_create_evaluation_auto_exact_match():
+ await create_evaluation_with_evaluator("auto_exact_match_evaluator_config")
+
+
+@pytest.mark.asyncio
+async def test_create_evaluation_auto_similarity_match():
+ await create_evaluation_with_evaluator("auto_similarity_match_evaluator_config")
+
+
+@pytest.mark.asyncio
+async def test_create_evaluation_auto_regex_test():
+ await create_evaluation_with_evaluator("auto_regex_test_evaluator_config")
+
+
+@pytest.mark.asyncio
+async def test_create_evaluation_auto_webhook_test():
+ await create_evaluation_with_evaluator("auto_webhook_test_evaluator_config")
+
+
+@pytest.mark.asyncio
+async def test_create_evaluation_auto_ai_critique():
+ await create_evaluation_with_evaluator("auto_ai_critique_evaluator_config")
@pytest.mark.asyncio
async def test_delete_evaluator_config():
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- response = await test_client.get(
- f"{BACKEND_API_HOST}/evaluators/configs/?app_id={str(app.id)}",
- timeout=timeout,
- )
- list_of_deleted_configs = []
- evaluator_configs = response.json()
- for evaluator_config in evaluator_configs:
- response = await test_client.delete(
- f"{BACKEND_API_HOST}/evaluators/configs/{str(evaluator_config['id'])}/",
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = result.scalars().first()
+
+ response = await test_client.get(
+ f"{BACKEND_API_HOST}/evaluators/configs/?app_id={str(app.id)}",
timeout=timeout,
)
- list_of_deleted_configs.append(response.json())
+ list_of_deleted_configs = []
+ evaluator_configs = response.json()
+ for evaluator_config in evaluator_configs:
+ response = await test_client.delete(
+ f"{BACKEND_API_HOST}/evaluators/configs/{str(evaluator_config['id'])}/",
+ timeout=timeout,
+ )
+ list_of_deleted_configs.append(response.json())
- count_of_deleted_configs = sum(list_of_deleted_configs)
- assert len(evaluator_configs) == count_of_deleted_configs
+ count_of_deleted_configs = sum(list_of_deleted_configs)
+ assert len(evaluator_configs) == count_of_deleted_configs
@pytest.mark.asyncio
async def test_evaluation_scenario_match_evaluation_testset_length():
- evaluations = await EvaluationDB.find(
- fetch_links=True
- ).to_list() # will return only one in this case
- evaluation = evaluations[0]
- evaluation_scenario_count = await EvaluationScenarioDB.find(
- EvaluationScenarioDB.evaluation.id == evaluation.id
- ).count()
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(EvaluationDB).options(
+ joinedload(EvaluationDB.testset)
+ )
+ )
+ evaluations = result.scalars().all()
+
+ evaluation = evaluations[0]
+ evaluation_scenarios_result = await session.execute(
+ select(EvaluationScenarioDB).filter_by(evaluation_id=evaluation.id)
+ )
+ evaluation_scenarios = evaluation_scenarios_result.scalars().all()
- assert evaluation_scenario_count == len(evaluation.testset.csvdata)
+ assert len(evaluation_scenarios) == len(evaluation.testset.csvdata)
@pytest.mark.asyncio
@@ -302,8 +358,17 @@ async def test_remove_running_template_app_container():
# Connect to the Docker daemon
client = docker.from_env()
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- deployment = await DeploymentDB.find_one(DeploymentDB.app.id == app.id)
+ async with db_engine.get_session() as session:
+ app_result = await session.execute(
+ select(AppDB).filter_by(app_name=APP_NAME)
+ )
+ app = app_result.scalars().first()
+
+        deployment_result = await session.execute(
+ select(DeploymentDB).filter_by(app_id=app.id)
+ )
+ deployment = deployment_result.scalars().first()
+
try:
# Retrieve container
container = client.containers.get(deployment.container_name)
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_testset_router.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_testset_router.py
index ce397ba346..f12a811a0e 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_testset_router.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_testset_router.py
@@ -1,13 +1,15 @@
import os
-from pathlib import Path
+import httpx
+import pytest
+from sqlalchemy.future import select
+
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.db_models import (
AppDB,
TestSetDB,
)
-import httpx
-import pytest
# Initialize http client
@@ -28,98 +30,115 @@
@pytest.mark.asyncio
async def test_create_testset():
- app = await AppDB.find_one(AppDB.app_name == "app_variant_test")
-
- payload = {
- "name": "create_testset_main",
- "csvdata": [
- {
- "country": "Comoros",
- "correct_answer": "The capital of Comoros is Moroni",
- },
- {
- "country": "Kyrgyzstan",
- "correct_answer": "The capital of Kyrgyzstan is Bishkek",
- },
- {
- "country": "Azerbaijan",
- "correct_answer": "The capital of Azerbaijan is Baku",
- },
- ],
- }
- response = await test_client.post(
- f"{BACKEND_API_HOST}/testsets/{str(app.id)}/", json=payload
- )
- assert response.status_code == 200
- assert response.json()["name"] == payload["name"]
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ app = result.scalars().first()
+
+ payload = {
+ "name": "create_testset_main",
+ "csvdata": [
+ {
+ "country": "Comoros",
+ "correct_answer": "The capital of Comoros is Moroni",
+ },
+ {
+ "country": "Kyrgyzstan",
+ "correct_answer": "The capital of Kyrgyzstan is Bishkek",
+ },
+ {
+ "country": "Azerbaijan",
+ "correct_answer": "The capital of Azerbaijan is Baku",
+ },
+ ],
+ }
+ response = await test_client.post(
+ f"{BACKEND_API_HOST}/testsets/{str(app.id)}/", json=payload
+ )
+ assert response.status_code == 200
+ assert response.json()["name"] == payload["name"]
@pytest.mark.asyncio
async def test_update_testset():
- app = await AppDB.find_one(AppDB.app_name == "app_variant_test")
- testset = await TestSetDB.find_one(TestSetDB.app.id == app.id)
-
- payload = {
- "name": "update_testset",
- "csvdata": [
- {
- "country": "Comoros",
- "correct_answer": "The capital of Comoros is Moroni",
- },
- {
- "country": "Kyrgyzstan",
- "correct_answer": "The capital of Kyrgyzstan is Bishkek",
- },
- {
- "country": "Azerbaijan",
- "correct_answer": "The capital of Azerbaijan is Baku",
- },
- ],
- }
- response = await test_client.put(
- f"{BACKEND_API_HOST}/testsets/{str(testset.id)}/", json=payload
- )
-
- assert response.status_code == 200
- assert response.json()["_id"] == str(testset.id)
- assert response.json()["status"] == "success"
- assert response.json()["message"] == "testset updated successfully"
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ app = result.scalars().first()
+
+ testset_result = await session.execute(select(TestSetDB).filter_by(app_id=app.id))
+ testset = testset_result.scalars().first()
+
+ payload = {
+ "name": "update_testset",
+ "csvdata": [
+ {
+ "country": "Comoros",
+ "correct_answer": "The capital of Comoros is Moroni",
+ },
+ {
+ "country": "Kyrgyzstan",
+ "correct_answer": "The capital of Kyrgyzstan is Bishkek",
+ },
+ {
+ "country": "Azerbaijan",
+ "correct_answer": "The capital of Azerbaijan is Baku",
+ },
+ ],
+ }
+ response = await test_client.put(
+ f"{BACKEND_API_HOST}/testsets/{str(testset.id)}/", json=payload
+ )
+
+ assert response.status_code == 200
+ assert response.json()["_id"] == str(testset.id)
+ assert response.json()["status"] == "success"
+ assert response.json()["message"] == "testset updated successfully"
@pytest.mark.asyncio
async def test_get_testsets():
- app = await AppDB.find_one(AppDB.app_name == "app_variant_test")
- response = await test_client.get(
- f"{BACKEND_API_HOST}/testsets/?app_id={str(app.id)}"
- )
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ app = result.scalars().first()
+
+ response = await test_client.get(
+ f"{BACKEND_API_HOST}/testsets/?app_id={str(app.id)}"
+ )
- assert response.status_code == 200
- assert len(response.json()) == 1
+ assert response.status_code == 200
+ assert len(response.json()) == 1
@pytest.mark.asyncio()
async def test_get_testset():
- app = await AppDB.find_one(AppDB.app_name == "app_variant_test")
- testset = await TestSetDB.find_one(TestSetDB.app.id == app.id)
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ app = result.scalars().first()
- response = await test_client.get(f"{BACKEND_API_HOST}/testsets/{str(testset.id)}/")
+ testset_result = await session.execute(select(TestSetDB).filter_by(app_id=app.id))
+ testset = testset_result.scalars().first()
- assert response.status_code == 200
- assert response.json()["name"] == testset.name
- assert response.json()["id"] == str(testset.id)
+ response = await test_client.get(f"{BACKEND_API_HOST}/testsets/{str(testset.id)}/")
+
+ assert response.status_code == 200
+ assert response.json()["name"] == testset.name
+ assert response.json()["id"] == str(testset.id)
@pytest.mark.asyncio
async def test_delete_testsets():
- app = await AppDB.find_one(AppDB.app_name == "app_variant_test")
- testsets = await TestSetDB.find(TestSetDB.app.id == app.id).to_list()
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ app = result.scalars().first()
+
+ testset_result = await session.execute(select(TestSetDB).filter_by(app_id=app.id))
+ testsets = testset_result.scalars().all()
- testset_ids = [str(testset.id) for testset in testsets]
- payload = {"testset_ids": testset_ids}
+ testset_ids = [str(testset.id) for testset in testsets]
+ payload = {"testset_ids": testset_ids}
- response = await test_client.request(
- method="DELETE", url=f"{BACKEND_API_HOST}/testsets/", json=payload
- )
+ response = await test_client.request(
+ method="DELETE", url=f"{BACKEND_API_HOST}/testsets/", json=payload
+ )
- assert response.status_code == 200
- assert response.json() == testset_ids
+ assert response.status_code == 200
+ assert response.json() == testset_ids
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_versioning_deployment.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_versioning_deployment.py
index 405801405c..cfc82a1479 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_versioning_deployment.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_versioning_deployment.py
@@ -3,6 +3,9 @@
import pytest
import random
+from sqlalchemy.future import select
+
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.db_models import (
AppDB,
TestSetDB,
@@ -27,41 +30,51 @@
@pytest.mark.asyncio
async def test_update_app_variant_parameters(app_variant_parameters_updated):
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- testset = await TestSetDB.find_one(TestSetDB.app.id == app.id)
- app_variant = await AppVariantDB.find_one(
- AppVariantDB.app.id == app.id, AppVariantDB.variant_name == "app.default"
- )
- for _ in VARIANT_DEPLOY_ENVIRONMENTS:
- parameters = app_variant_parameters_updated
- parameters["temperature"] = random.uniform(0.9, 1.5)
- parameters["frequence_penalty"] = random.uniform(0.9, 1.5)
- parameters["frequence_penalty"] = random.uniform(0.9, 1.5)
- parameters["inputs"] = [{"name": list(testset.csvdata[0].keys())[0]}]
- payload = {"parameters": parameters}
-
- response = await test_client.put(
- f"{BACKEND_API_HOST}/variants/{str(app_variant.id)}/parameters/",
- json=payload,
- )
- assert response.status_code == 200
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
+ app = result.scalars().first()
+
+ testset_result = await session.execute(select(TestSetDB).filter_by(app_id=app.id))
+ testset = testset_result.scalars().first()
+
+ app_variant_result = await session.execute(select(AppVariantDB).filter_by(app_id=app.id, variant_name="app.default"))
+ app_variant = app_variant_result.scalars().first()
+
+ for _ in VARIANT_DEPLOY_ENVIRONMENTS:
+ parameters = app_variant_parameters_updated
+ parameters["temperature"] = random.uniform(0.9, 1.5)
+ parameters["frequence_penalty"] = random.uniform(0.9, 1.5)
+ parameters["frequence_penalty"] = random.uniform(0.9, 1.5)
+ parameters["inputs"] = [{"name": list(testset.csvdata[0].keys())[0]}]
+ payload = {"parameters": parameters}
+
+ response = await test_client.put(
+ f"{BACKEND_API_HOST}/variants/{str(app_variant.id)}/parameters/",
+ json=payload,
+ )
+ assert response.status_code == 200
@pytest.mark.asyncio
async def test_deploy_to_environment(deploy_to_environment_payload):
- app = await AppDB.find_one(AppDB.app_name == APP_NAME)
- app_variant = await AppVariantDB.find_one(AppVariantDB.app.id == app.id)
- list_of_response_status_codes = []
- for environment in VARIANT_DEPLOY_ENVIRONMENTS:
- payload = deploy_to_environment_payload
- payload["variant_id"] = str(app_variant.id)
- payload["environment_name"] = environment
-
- response = await test_client.post(
- f"{BACKEND_API_HOST}/environments/deploy/", json=payload, timeout=timeout
- )
- list_of_response_status_codes.append(response.status_code)
-
- assert (
- list_of_response_status_codes.count(200) == 3
- ), "The list does not contain 3 occurrences of 200 status code"
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
+ app = result.scalars().first()
+
+ app_variant_result = await session.execute(select(AppVariantDB).filter_by(app_id=app.id))
+ app_variant = app_variant_result.scalars().first()
+
+ list_of_response_status_codes = []
+ for environment in VARIANT_DEPLOY_ENVIRONMENTS:
+ payload = deploy_to_environment_payload
+ payload["variant_id"] = str(app_variant.id)
+ payload["environment_name"] = environment
+
+ response = await test_client.post(
+ f"{BACKEND_API_HOST}/environments/deploy/", json=payload, timeout=timeout
+ )
+ list_of_response_status_codes.append(response.status_code)
+
+ assert (
+ list_of_response_status_codes.count(200) == 3
+ ), "The list does not contain 3 occurrences of 200 status code"
diff --git a/agenta-backend/agenta_backend/tests/variants_user_profile_router/test_user_profile.py b/agenta-backend/agenta_backend/tests/variants_user_profile_router/test_user_profile.py
index a1df5dd30c..222e6384d5 100644
--- a/agenta-backend/agenta_backend/tests/variants_user_profile_router/test_user_profile.py
+++ b/agenta-backend/agenta_backend/tests/variants_user_profile_router/test_user_profile.py
@@ -1,8 +1,11 @@
import os
+
import httpx
import pytest
+from sqlalchemy.future import select
from agenta_backend.models.db_models import UserDB
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.api.user_models import User
@@ -20,15 +23,25 @@
@pytest.mark.asyncio
async def test_user_profile():
- user_db = await UserDB.find_one(UserDB.uid == "0")
- user_db_dict = User(
- id=str(user_db.id),
- uid=str(user_db.uid),
- username=str(user_db.username),
- email=str(user_db.email),
- ).dict(exclude_unset=True)
-
- response = await test_client.get(f"{BACKEND_API_HOST}/profile/")
-
- assert response.status_code == 200
- assert response.json() == user_db_dict
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(UserDB).filter_by(uid="0"))
+ user_db = result.scalars().first()
+ if not user_db:
+ assert False
+
+ user_db_dict = User(
+ id=str(user_db.id),
+ uid=str(user_db.uid),
+ username=str(user_db.username),
+ email=str(user_db.email),
+ created_at=str(user_db.created_at),
+ updated_at=str(user_db.updated_at)
+ ).dict(exclude_unset=True)
+
+ response = await test_client.get(f"{BACKEND_API_HOST}/profile/")
+
+ assert response.status_code == 200
+ assert response.json()["id"] == user_db_dict["id"]
+ assert response.json()["uid"] == user_db_dict["uid"]
+ assert response.json()["email"] == user_db_dict["email"]
+ assert response.json()["username"] == user_db_dict["username"]
From 1d6e20409560fd5fbcf4a77d322810f581a2148b Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 21 Jun 2024 16:45:34 +0100
Subject: [PATCH 120/268] chore (tests): format tests/ codebase with
black@23.12.0
---
.../agenta_backend/routers/configs_router.py | 28 +++++++++----
.../tests/variants_main_router/conftest.py | 4 +-
.../test_app_variant_router.py | 8 ++--
.../test_variant_evaluators_router.py | 42 +++++--------------
.../test_variant_testset_router.py | 37 +++++++++++-----
.../test_variant_versioning_deployment.py | 16 +++++--
.../test_user_profile.py | 2 +-
7 files changed, 75 insertions(+), 62 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index a7c9a531d6..0ba8b96659 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -115,7 +115,7 @@ async def get_config(
# in case environment_name is provided, find the variant deployed
if environment_name:
app_environments = await db_manager.list_environments(
- app_id=str(base_db.app_id) # type: ignore
+ app_id=str(base_db.app_id) # type: ignore
)
found_variant_revision = next(
(
@@ -130,6 +130,7 @@ async def get_config(
status_code=400,
detail=f"Environment name {environment_name} not found for base {base_id}",
)
+
if str(found_variant_revision.base_id) != base_id:
raise HTTPException(
status_code=400,
@@ -137,14 +138,17 @@ async def get_config(
)
variant_revision = found_variant_revision.revision
- config = {"name": found_variant_revision.config_name, "parameters": found_variant_revision.config_parameters}
+ config = {
+ "name": found_variant_revision.config_name,
+ "parameters": found_variant_revision.config_parameters,
+ }
elif config_name:
variants_db = await db_manager.list_variants_for_base(base_db)
found_variant = next(
(
variant_db
for variant_db in variants_db
- if variant_db.config_name == config_name # type: ignore
+ if variant_db.config_name == config_name # type: ignore
),
None,
)
@@ -153,14 +157,20 @@ async def get_config(
status_code=400,
detail=f"Config name {config_name} not found for base {base_id}",
)
- variant_revision = found_variant.revision
- config = {"name": found_variant.config_name, "parameters": found_variant.config_parameters}
- assert "name" and "parameters" in config, "'name' and 'parameters' not found in configuration"
+ variant_revision = found_variant.revision
+ config = {
+ "name": found_variant.config_name,
+ "parameters": found_variant.config_parameters,
+ }
+
+    assert (
+        "name" in config and "parameters" in config  # type: ignore
+    ), "'name' and 'parameters' not found in configuration"
return GetConfigResponse(
- config_name=config["name"], # type: ignore
- current_version=variant_revision, # type: ignore
- parameters=config["parameters"], # type: ignore
+ config_name=config["name"], # type: ignore
+ current_version=variant_revision, # type: ignore
+ parameters=config["parameters"], # type: ignore
)
except HTTPException as e:
logger.error(f"get_config http exception: {e.detail}")
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py b/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
index 31616fc424..1d74dca441 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/conftest.py
@@ -95,7 +95,7 @@ async def get_first_user_app(get_first_user_object):
container_name="container_a_test",
container_id="w243e34red",
uri="http://localhost/app/w243e34red",
- status="stale"
+ status="stale",
)
session.add(db_deployment)
@@ -104,7 +104,7 @@ async def get_first_user_app(get_first_user_object):
image_id=db_image.id,
user_id=user.id,
app_id=app.id,
- deployment_id=db_deployment.id
+ deployment_id=db_deployment.id,
)
session.add(db_base)
await session.commit()
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_app_variant_router.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_app_variant_router.py
index 8dc9e8b169..f04bfddd5b 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_app_variant_router.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_app_variant_router.py
@@ -87,7 +87,7 @@ async def test_create_app_variant(get_first_user_object):
container_name="container_a_test",
container_id="w243e34red",
uri="http://localhost/app/w243e34red",
- status="stale"
+ status="stale",
)
session.add(db_deployment)
await session.commit()
@@ -97,7 +97,7 @@ async def test_create_app_variant(get_first_user_object):
app_id=app.id,
user_id=user.id,
image_id=db_image.id,
- deployment_id=db_deployment.id
+ deployment_id=db_deployment.id,
)
session.add(db_base)
await session.commit()
@@ -130,9 +130,7 @@ async def test_list_app_variants():
)
app = result.scalars().first()
- response = await test_client.get(
- f"{BACKEND_API_HOST}/apps/{str(app.id)}/variants/"
- )
+ response = await test_client.get(f"{BACKEND_API_HOST}/apps/{str(app.id)}/variants/")
assert response.status_code == 200
assert len(response.json()) == 1
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
index fcbebea5bd..8dfa23c326 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
@@ -61,9 +61,7 @@ async def test_create_auto_exact_match_evaluator_config(
auto_exact_match_evaluator_config,
):
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
payload = auto_exact_match_evaluator_config
@@ -83,9 +81,7 @@ async def test_create_auto_similarity_match_evaluator_config(
auto_similarity_match_evaluator_config,
):
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
payload = auto_similarity_match_evaluator_config
@@ -105,9 +101,7 @@ async def test_create_auto_regex_test_evaluator_config(
auto_regex_test_evaluator_config,
):
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
payload = auto_regex_test_evaluator_config
@@ -128,9 +122,7 @@ async def test_create_auto_webhook_test_evaluator_config(
auto_webhook_test_evaluator_config,
):
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
payload = auto_webhook_test_evaluator_config
@@ -150,9 +142,7 @@ async def test_create_auto_ai_critique_evaluator_config(
auto_ai_critique_evaluator_config,
):
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
payload = auto_ai_critique_evaluator_config
@@ -170,9 +160,7 @@ async def test_create_auto_ai_critique_evaluator_config(
@pytest.mark.asyncio
async def test_get_evaluator_configs():
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
response = await test_client.get(
@@ -217,9 +205,7 @@ async def wait_for_evaluation_to_finish(evaluation_id):
async def create_evaluation_with_evaluator(evaluator_config_name):
# Fetch app, app_variant and testset
async with db_engine.get_session() as session:
- app_result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ app_result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = app_result.scalars().first()
app_variant_result = await session.execute(
@@ -311,9 +297,7 @@ async def test_create_evaluation_auto_ai_critique():
@pytest.mark.asyncio
async def test_delete_evaluator_config():
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
response = await test_client.get(
@@ -337,9 +321,7 @@ async def test_delete_evaluator_config():
async def test_evaluation_scenario_match_evaluation_testset_length():
async with db_engine.get_session() as session:
result = await session.execute(
- select(EvaluationDB).options(
- joinedload(EvaluationDB.testset)
- )
+ select(EvaluationDB).options(joinedload(EvaluationDB.testset))
)
evaluations = result.scalars().all()
@@ -359,12 +341,10 @@ async def test_remove_running_template_app_container():
# Connect to the Docker daemon
client = docker.from_env()
async with db_engine.get_session() as session:
- app_result = await session.execute(
- select(AppDB).filter_by(app_name=APP_NAME)
- )
+ app_result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = app_result.scalars().first()
- deployment_result =await session.execute(
+ deployment_result = await session.execute(
select(DeploymentDB).filter_by(app_id=app.id)
)
deployment = deployment_result.scalars().first()
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_testset_router.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_testset_router.py
index f12a811a0e..24c7d62a6d 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_testset_router.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_testset_router.py
@@ -11,7 +11,6 @@
)
-
# Initialize http client
test_client = httpx.AsyncClient()
timeout = httpx.Timeout(timeout=5, read=None, write=5)
@@ -31,7 +30,9 @@
@pytest.mark.asyncio
async def test_create_testset():
async with db_engine.get_session() as session:
- result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ result = await session.execute(
+ select(AppDB).filter_by(app_name="app_variant_test")
+ )
app = result.scalars().first()
payload = {
@@ -61,10 +62,14 @@ async def test_create_testset():
@pytest.mark.asyncio
async def test_update_testset():
async with db_engine.get_session() as session:
- result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ result = await session.execute(
+ select(AppDB).filter_by(app_name="app_variant_test")
+ )
app = result.scalars().first()
- testset_result = await session.execute(select(TestSetDB).filter_by(app_id=app.id))
+ testset_result = await session.execute(
+ select(TestSetDB).filter_by(app_id=app.id)
+ )
testset = testset_result.scalars().first()
payload = {
@@ -97,7 +102,9 @@ async def test_update_testset():
@pytest.mark.asyncio
async def test_get_testsets():
async with db_engine.get_session() as session:
- result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ result = await session.execute(
+ select(AppDB).filter_by(app_name="app_variant_test")
+ )
app = result.scalars().first()
response = await test_client.get(
@@ -111,13 +118,19 @@ async def test_get_testsets():
@pytest.mark.asyncio()
async def test_get_testset():
async with db_engine.get_session() as session:
- result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ result = await session.execute(
+ select(AppDB).filter_by(app_name="app_variant_test")
+ )
app = result.scalars().first()
- testset_result = await session.execute(select(TestSetDB).filter_by(app_id=app.id))
+ testset_result = await session.execute(
+ select(TestSetDB).filter_by(app_id=app.id)
+ )
testset = testset_result.scalars().first()
- response = await test_client.get(f"{BACKEND_API_HOST}/testsets/{str(testset.id)}/")
+ response = await test_client.get(
+ f"{BACKEND_API_HOST}/testsets/{str(testset.id)}/"
+ )
assert response.status_code == 200
assert response.json()["name"] == testset.name
@@ -127,10 +140,14 @@ async def test_get_testset():
@pytest.mark.asyncio
async def test_delete_testsets():
async with db_engine.get_session() as session:
- result = await session.execute(select(AppDB).filter_by(app_name="app_variant_test"))
+ result = await session.execute(
+ select(AppDB).filter_by(app_name="app_variant_test")
+ )
app = result.scalars().first()
- testset_result = await session.execute(select(TestSetDB).filter_by(app_id=app.id))
+ testset_result = await session.execute(
+ select(TestSetDB).filter_by(app_id=app.id)
+ )
testsets = testset_result.scalars().all()
testset_ids = [str(testset.id) for testset in testsets]
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_versioning_deployment.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_versioning_deployment.py
index cfc82a1479..50e8e1fcf1 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_versioning_deployment.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_versioning_deployment.py
@@ -34,10 +34,14 @@ async def test_update_app_variant_parameters(app_variant_parameters_updated):
result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
- testset_result = await session.execute(select(TestSetDB).filter_by(app_id=app.id))
+ testset_result = await session.execute(
+ select(TestSetDB).filter_by(app_id=app.id)
+ )
testset = testset_result.scalars().first()
- app_variant_result = await session.execute(select(AppVariantDB).filter_by(app_id=app.id, variant_name="app.default"))
+ app_variant_result = await session.execute(
+ select(AppVariantDB).filter_by(app_id=app.id, variant_name="app.default")
+ )
app_variant = app_variant_result.scalars().first()
for _ in VARIANT_DEPLOY_ENVIRONMENTS:
@@ -61,7 +65,9 @@ async def test_deploy_to_environment(deploy_to_environment_payload):
result = await session.execute(select(AppDB).filter_by(app_name=APP_NAME))
app = result.scalars().first()
- app_variant_result = await session.execute(select(AppVariantDB).filter_by(app_id=app.id))
+ app_variant_result = await session.execute(
+ select(AppVariantDB).filter_by(app_id=app.id)
+ )
app_variant = app_variant_result.scalars().first()
list_of_response_status_codes = []
@@ -71,7 +77,9 @@ async def test_deploy_to_environment(deploy_to_environment_payload):
payload["environment_name"] = environment
response = await test_client.post(
- f"{BACKEND_API_HOST}/environments/deploy/", json=payload, timeout=timeout
+ f"{BACKEND_API_HOST}/environments/deploy/",
+ json=payload,
+ timeout=timeout,
)
list_of_response_status_codes.append(response.status_code)
diff --git a/agenta-backend/agenta_backend/tests/variants_user_profile_router/test_user_profile.py b/agenta-backend/agenta_backend/tests/variants_user_profile_router/test_user_profile.py
index 222e6384d5..651794eab1 100644
--- a/agenta-backend/agenta_backend/tests/variants_user_profile_router/test_user_profile.py
+++ b/agenta-backend/agenta_backend/tests/variants_user_profile_router/test_user_profile.py
@@ -35,7 +35,7 @@ async def test_user_profile():
username=str(user_db.username),
email=str(user_db.email),
created_at=str(user_db.created_at),
- updated_at=str(user_db.updated_at)
+ updated_at=str(user_db.updated_at),
).dict(exclude_unset=True)
response = await test_client.get(f"{BACKEND_API_HOST}/profile/")
From 28c188c01ea3198865dedec23b91fef7847a852c Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 21 Jun 2024 18:10:15 +0100
Subject: [PATCH 121/268] refactor (backend): rewrite user_service to make use
of sqlalchemy orm
---
.../agenta_backend/services/user_service.py | 71 ++++++++++++++-----
1 file changed, 54 insertions(+), 17 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/user_service.py b/agenta-backend/agenta_backend/services/user_service.py
index be7714c802..555fae5d42 100644
--- a/agenta-backend/agenta_backend/services/user_service.py
+++ b/agenta-backend/agenta_backend/services/user_service.py
@@ -1,34 +1,71 @@
-import os
+import uuid
+
+from sqlalchemy.future import select
+from sqlalchemy.exc import NoResultFound
+
from agenta_backend.utils.common import isCloud
+from agenta_backend.models.db_engine import db_engine
if isCloud():
from agenta_backend.commons.models.db_models import UserDB_ as UserDB
else:
from agenta_backend.models.db_models import UserDB
+
from agenta_backend.models.api.user_models import User, UserUpdate
async def create_new_user(payload: User) -> UserDB:
- user_instance = UserDB(
- uid=payload.uid,
- username=payload.username,
- email=payload.email,
- )
- user = await user_instance.create()
- return user
+ """
+ This function creates a new user.
+ Args:
+ payload (User): The payload data to create the user.
-async def update_user(user_uid: str, payload: UserUpdate) -> UserDB:
- user = await UserDB.find_one(UserDB.uid == user_uid, fetch_links=True)
+ Returns:
+ UserDB: The created user object.
+ """
+
+ async with db_engine.get_session() as session:
+ user = UserDB(
+ uid=payload.uid,
+ username=payload.username,
+ email=payload.email,
+ )
+
+ session.add(user)
+ await session.commit()
+ await session.refresh(user)
- if user is not None:
- values_to_update = {key: value for key, value in payload.dict()}
- await user.update({"$set": values_to_update})
return user
- raise NotFound("Credentials not found. Please try again!")
-class NotFound(Exception):
- """Custom exception for credentials not found"""
+async def update_user(user_uid: str, payload: UserUpdate) -> UserDB:
+ """
+ This function updates the user.
+
+ Args:
+ user_uid (str): The supertokens session id of the user
+ payload (UserUpdate): The payload to update the user information with
- pass
+ Returns:
+ UserDB: The updated user object
+
+ Raises:
+ NoResultFound: User with session id xxxx not found.
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(select(UserDB).filter_by(uid=user_uid))
+ user = result.scalars().first()
+
+ if not user:
+ raise NoResultFound(f"User with session id {user_uid} not found.")
+
+ for key, value in payload.dict(exclude_unset=True):
+ if hasattr(user, key):
+ setattr(user, key, value)
+
+ await session.commit()
+ await session.refresh(user)
+
+ return user
From a9e616912451654c522bb7d1125f81fcf36f2a98 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 21 Jun 2024 18:12:23 +0100
Subject: [PATCH 122/268] refactor (db): rewrite db_engine to initialize
 postgres x mongodb engines based on feature flag
---
.../agenta_backend/models/db_engine.py | 61 +++++++++++++++----
1 file changed, 50 insertions(+), 11 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index 30aa53262d..db321af8b5 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -19,6 +19,10 @@
APIKeyDB,
WorkspaceDB,
OrganizationDB,
+ InvitationDB,
+ UserOrganizationDB,
+ UserWorkspaceDB,
+ WorkspaceMemberDB,
AppDB_ as AppDB,
UserDB_ as UserDB,
ImageDB_ as ImageDB,
@@ -77,7 +81,7 @@
]
if isCloudEE():
- models.extend([OrganizationDB, WorkspaceDB, APIKeyDB]) # type: ignore
+ models.extend([OrganizationDB, WorkspaceDB, APIKeyDB, InvitationDB, UserOrganizationDB, UserWorkspaceDB, WorkspaceMemberDB]) # type: ignore
# Configure and set logging level
@@ -87,13 +91,23 @@
class DBEngine:
"""
- Database engine to initialize SQLAlchemy and return the engine based on mode.
+ Database engine to initialize SQLAlchemy (and beanie)
"""
def __init__(self) -> None:
self.mode = os.environ.get("DATABASE_MODE", "v2")
- self.db_url = f"{os.environ.get('POSTGRES_URI')}"
- self.engine = create_async_engine(url=self.db_url)
+ self.postgres_uri = os.environ.get("POSTGRES_URI", None)
+ self.mongo_uri = os.environ.get("MONGODB_URI")
+
+ async def initialize_async_postgres(self):
+ """
+ Initialize PostgreSQL database engine and sessions.
+ """
+
+ if not self.postgres_uri:
+ raise ValueError("Postgres URI cannot be None.")
+
+ self.engine = create_async_engine(self.postgres_uri)
self.async_session_maker = async_sessionmaker(
bind=self.engine, class_=AsyncSession, expire_on_commit=False
)
@@ -101,22 +115,47 @@ def __init__(self) -> None:
session_factory=self.async_session_maker, scopefunc=current_task
)
+ async with self.engine.begin() as conn:
+ # Drop and create tables if needed
+ for model in models:
+ await conn.run_sync(model.metadata.create_all)
+ logger.info(f"Using PostgreSQL database...")
+
+ async def initialize_mongodb(self):
+ """
+ Initializes the mongodb async driver and beanie documents.
+
+ Raises:
+ ValueError: It looks like one of the following packages are not installed: beanie, motor. Exception: ImportError message
+ """
+
+ try:
+ from beanie import init_beanie # type: ignore
+ from motor.motor_asyncio import AsyncIOMotorClient # type: ignore
+ except ImportError as exc:
+ raise ValueError(f"It looks like one of the following packages are not installed: beanie, motor. Exception: {str(exc)}")
+
+ db_name = f"agenta_{self.mode}"
+ client = AsyncIOMotorClient(self.mongo_uri)
+ await init_beanie(database=client[db_name], document_models=[SpanDB])
+ logger.info(f"Using {db_name} mongo database...")
+
async def init_db(self):
"""
Initialize the database based on the mode and create all tables.
"""
- async with self.engine.begin() as conn:
- # Drop all existing tables (if needed)
- # await conn.run_sync(Base.metadata.drop_all)
- # Create tables
- for model in models:
- await conn.run_sync(model.metadata.create_all)
- logger.info(f"Using {self.mode} database...")
+
+ if isCloudEE():
+ await self.initialize_mongodb()
+ await self.initialize_async_postgres()
+ else:
+ await self.initialize_async_postgres()
async def remove_db(self) -> None:
"""
Remove the database based on the mode.
"""
+
async with self.engine.begin() as conn:
for model in models:
await conn.run_sync(model.metadata.drop_all)
From 256f132d456c7c0cbd298ebf31e5923f89cf8b42 Mon Sep 17 00:00:00 2001
From: Abram
Date: Sat, 22 Jun 2024 09:32:05 +0100
Subject: [PATCH 123/268] refactor (backend): improve service function to
create a new user
---
.../agenta_backend/services/user_service.py | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/user_service.py b/agenta-backend/agenta_backend/services/user_service.py
index 555fae5d42..7c5131e9b0 100644
--- a/agenta-backend/agenta_backend/services/user_service.py
+++ b/agenta-backend/agenta_backend/services/user_service.py
@@ -11,26 +11,22 @@
else:
from agenta_backend.models.db_models import UserDB
-from agenta_backend.models.api.user_models import User, UserUpdate
+from agenta_backend.models.api.user_models import UserUpdate
-async def create_new_user(payload: User) -> UserDB:
+async def create_new_user(payload: dict) -> UserDB:
"""
This function creates a new user.
Args:
- payload (User): The payload data to create the user.
+ payload (dict): The payload data to create the user.
Returns:
UserDB: The created user object.
"""
async with db_engine.get_session() as session:
- user = UserDB(
- uid=payload.uid,
- username=payload.username,
- email=payload.email,
- )
+ user = UserDB(**payload)
session.add(user)
await session.commit()
From 405a1f6631b30e0c47145d7c1045afab5ccd6f84 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Sun, 23 Jun 2024 16:01:01 +0200
Subject: [PATCH 124/268] add db name and create db on postgres startup if
 it doesn't exist
---
docker-assets/postgres/init-db.sql | 1 +
docker-compose.yml | 13 ++++++++-----
2 files changed, 9 insertions(+), 5 deletions(-)
create mode 100644 docker-assets/postgres/init-db.sql
diff --git a/docker-assets/postgres/init-db.sql b/docker-assets/postgres/init-db.sql
new file mode 100644
index 0000000000..9bd429417f
--- /dev/null
+++ b/docker-assets/postgres/init-db.sql
@@ -0,0 +1 @@
+CREATE DATABASE agenta_oss;
diff --git a/docker-compose.yml b/docker-compose.yml
index 59babfa635..c65c102680 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -15,7 +15,7 @@ services:
build: ./agenta-backend
environment:
- MONGODB_URI=mongodb://username:password@mongo:27017
- - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_oss
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=development
- DATABASE_MODE=v2
@@ -146,7 +146,7 @@ services:
command: >
watchmedo auto-restart --directory=./agenta_backend --pattern=*.py --recursive -- celery -A agenta_backend.main.celery_app worker --concurrency=1 --loglevel=INFO
environment:
- - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_oss
- MONGODB_URI=mongodb://username:password@mongo:27017
- REDIS_URL=redis://redis:6379/0
- CELERY_BROKER_URL=amqp://guest@rabbitmq//
@@ -172,12 +172,14 @@ services:
environment:
POSTGRES_USER: username
POSTGRES_PASSWORD: password
+ POSTGRES_DB: agenta_oss
ports:
- "5432:5432"
networks:
- agenta-network
volumes:
- - postgresdb-data:/var/lib/postgresql/data/
+ - postgresdb-data:/var/lib/postgresql/data/
+ - ./docker-assets/postgres/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
@@ -194,12 +196,13 @@ services:
PGADMIN_SERVER_PORT: 5432
PGADMIN_SERVER_USER: "username"
PGADMIN_SERVER_PASSWORD: "password"
+ PGADMIN_SERVER_DB: agenta_oss
ports:
- "5050:80"
networks:
- agenta-network
volumes:
- - pgadmin-data:/var/lib/pgadmin
+ - pgadmin-data:/var/lib/pgadmin
depends_on:
postgres:
condition: service_healthy
@@ -213,4 +216,4 @@ volumes:
redis_data:
nextjs_cache:
postgresdb-data:
- pgadmin-data:
\ No newline at end of file
+ pgadmin-data:
From 8f9262d93721ad1d119a13cddceb8285a892714d Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Sun, 23 Jun 2024 17:23:42 +0100
Subject: [PATCH 125/268] fix(frontend): rows not expanding in evaluation
comparison view
---
.../pages/evaluations/evaluationCompare/EvaluationCompare.tsx | 2 +-
agenta-web/src/services/evaluations/api/index.ts | 2 ++
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
index 0069b9fe0b..c3b325c643 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
@@ -544,7 +544,7 @@ const EvaluationCompareMode: React.FC = () => {
ref={gridRef as any}
rowData={rows}
columnDefs={colDefs}
- getRowId={(params) => params.data.id}
+ getRowId={(params) => params.data.rowId}
headerHeight={64}
/>
diff --git a/agenta-web/src/services/evaluations/api/index.ts b/agenta-web/src/services/evaluations/api/index.ts
index 301bf6c994..98612464d8 100644
--- a/agenta-web/src/services/evaluations/api/index.ts
+++ b/agenta-web/src/services/evaluations/api/index.ts
@@ -14,6 +14,7 @@ import {
} from "@/lib/Types"
import {getTagColors} from "@/lib/helpers/colors"
import {stringToNumberInRange} from "@/lib/helpers/utils"
+import {v4 as uuidv4} from "uuid"
import exactMatchImg from "@/media/target.png"
import similarityImg from "@/media/transparency.png"
import regexImg from "@/media/programming.png"
@@ -239,6 +240,7 @@ export const fetchAllComparisonResults = async (evaluationIds: string[]) => {
rows.push({
id: inputValuesStr,
+ rowId: uuidv4(),
inputs: inputNames
.map((name) => ({name, value: data[name]}))
.filter((ip) => ip.value !== undefined),
From c133fcdac359b87c307e5735cd776a8a2e38e989 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Jun 2024 03:38:20 +0000
Subject: [PATCH 126/268] build(deps): bump importlib-metadata from 7.2.0 to
7.2.1 in /agenta-cli
Bumps [importlib-metadata](https://github.com/python/importlib_metadata) from 7.2.0 to 7.2.1.
- [Release notes](https://github.com/python/importlib_metadata/releases)
- [Changelog](https://github.com/python/importlib_metadata/blob/main/NEWS.rst)
- [Commits](https://github.com/python/importlib_metadata/compare/v7.2.0...v7.2.1)
---
updated-dependencies:
- dependency-name: importlib-metadata
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
agenta-cli/poetry.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock
index a090925b39..a3200a3b76 100644
--- a/agenta-cli/poetry.lock
+++ b/agenta-cli/poetry.lock
@@ -465,13 +465,13 @@ files = [
[[package]]
name = "importlib-metadata"
-version = "7.2.0"
+version = "7.2.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-7.2.0-py3-none-any.whl", hash = "sha256:04e4aad329b8b948a5711d394fa8759cb80f009225441b4f2a02bd4d8e5f426c"},
- {file = "importlib_metadata-7.2.0.tar.gz", hash = "sha256:3ff4519071ed42740522d494d04819b666541b9752c43012f85afb2cc220fcc6"},
+ {file = "importlib_metadata-7.2.1-py3-none-any.whl", hash = "sha256:ffef94b0b66046dd8ea2d619b701fe978d9264d38f3998bc4c27ec3b146a87c8"},
+ {file = "importlib_metadata-7.2.1.tar.gz", hash = "sha256:509ecb2ab77071db5137c655e24ceb3eee66e7bbc6574165d0d114d9fc4bbe68"},
]
[package.dependencies]
From 089c0aba59e22a4b0e0369659cce4b305bbf9010 Mon Sep 17 00:00:00 2001
From: aakrem <6608260+aakrem@users.noreply.github.com>
Date: Mon, 24 Jun 2024 08:52:15 +0000
Subject: [PATCH 127/268] Bump versions
---
agenta-backend/pyproject.toml | 2 +-
agenta-cli/pyproject.toml | 2 +-
agenta-web/package-lock.json | 4 ++--
agenta-web/package.json | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/pyproject.toml b/agenta-backend/pyproject.toml
index 5553638939..f1adf3fcf3 100644
--- a/agenta-backend/pyproject.toml
+++ b/agenta-backend/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta_backend"
-version = "0.17.4"
+version = "0.17.5"
description = ""
authors = ["Mahmoud Mabrouk "]
readme = "README.md"
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index a74d7ab8e8..a3fc9fb07b 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.4"
+version = "0.17.5"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
diff --git a/agenta-web/package-lock.json b/agenta-web/package-lock.json
index 47de851a13..76ac9d6c43 100644
--- a/agenta-web/package-lock.json
+++ b/agenta-web/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "agenta",
- "version": "0.17.4",
+ "version": "0.17.5",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "agenta",
- "version": "0.17.4",
+ "version": "0.17.5",
"dependencies": {
"@ant-design/colors": "^7.0.0",
"@ant-design/icons": "^5.3.7",
diff --git a/agenta-web/package.json b/agenta-web/package.json
index e50a0b2888..3ad495c93c 100644
--- a/agenta-web/package.json
+++ b/agenta-web/package.json
@@ -1,6 +1,6 @@
{
"name": "agenta",
- "version": "0.17.4",
+ "version": "0.17.5",
"private": true,
"engines": {
"node": ">=18"
From c96d25a347415027e56ee2d61612cd1eed5368ac Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 24 Jun 2024 12:21:55 +0200
Subject: [PATCH 128/268] remove comment from chatgpt
---
.../agenta_backend/migrations/mongo_to_postgres/migration.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index 9529256819..8784e808f2 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -9,7 +9,6 @@
from sqlalchemy.dialects.postgresql import UUID
import uuid_utils.compat as uuid
-# Assuming agenta_backend.models.db_models contains your SQLAlchemy models
from agenta_backend.models.db_models import (
UserDB,
ImageDB,
From 1417acaa77a362232ab62c3b4454b94e81db323b Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 24 Jun 2024 12:28:01 +0200
Subject: [PATCH 129/268] add separated messages
---
.../agenta_backend/migrations/mongo_to_postgres/utils.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index abe759acb5..ba83cb0269 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -37,6 +37,7 @@ async def drop_all_tables():
# Drop all tables with CASCADE option
for table in reversed(Base.metadata.sorted_tables):
await conn.execute(text(f"DROP TABLE IF EXISTS {table.name} CASCADE"))
+ print("All tables are dropped.")
async def create_all_tables(tables):
@@ -45,7 +46,7 @@ async def create_all_tables(tables):
for table in tables:
print(f"====================== Creating table for {table.__name__}")
await conn.run_sync(table.metadata.create_all)
- print("All tables dropped and created.")
+ print("All tables are created.")
async def store_mapping(table_name, mongo_id, uuid):
From 808439c5be4967e91454a07addb8a496fd94f8ff Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 24 Jun 2024 15:45:06 +0200
Subject: [PATCH 130/268] add printing traceback
---
.../agenta_backend/migrations/mongo_to_postgres/migration.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index 8784e808f2..ba525c1060 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -511,8 +511,11 @@ async def main():
print("Migration completed successfully.")
except Exception as e:
+ import traceback
+
print(f"\n====================== Error ======================\n")
print(f"Error occurred: {e}")
+ traceback.print_exc()
finally:
print_migration_report()
From 316fa202ceb45de30c983dd08b1c7f4eb2b2369e Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 24 Jun 2024 16:27:36 +0200
Subject: [PATCH 131/268] improve migration output
---
.../migrations/mongo_to_postgres/migration.py | 2 +-
.../migrations/mongo_to_postgres/utils.py | 21 ++++++++++++-------
agenta-backend/poetry.lock | 5 +++--
agenta-backend/pyproject.toml | 1 +
4 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index ba525c1060..9a78264060 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -508,7 +508,7 @@ async def main():
transform_evaluation_scenario,
EvaluationScenarioResultDB,
)
-
+ print("\n ========================================================")
print("Migration completed successfully.")
except Exception as e:
import traceback
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index ba83cb0269..08dab2a26b 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -1,6 +1,7 @@
import os
import asyncio
from datetime import datetime, timezone
+from tqdm import tqdm
from pymongo import MongoClient
from bson import ObjectId, DBRef
@@ -37,16 +38,16 @@ async def drop_all_tables():
# Drop all tables with CASCADE option
for table in reversed(Base.metadata.sorted_tables):
await conn.execute(text(f"DROP TABLE IF EXISTS {table.name} CASCADE"))
- print("All tables are dropped.")
+ print("\n====================== All tables are dropped.\n")
async def create_all_tables(tables):
"""Create all tables in the database."""
async with db_engine.engine.begin() as conn:
for table in tables:
- print(f"====================== Creating table for {table.__name__}")
+ print(f"Creating table for {table.__name__}")
await conn.run_sync(table.metadata.create_all)
- print("All tables are created.")
+ print("\n====================== All tables are created.\n")
async def store_mapping(table_name, mongo_id, uuid):
@@ -86,7 +87,9 @@ def update_migration_report(collection_name, total_docs, migrated_docs):
def print_migration_report():
- print("\n====================== Migration Report ======================")
+ print(
+ "\n ============================ Migration Report ============================"
+ )
# Headers
headers = ["Table", "Total in MongoDB", "Migrated to PostgreSQL"]
@@ -124,14 +127,16 @@ async def migrate_collection(
collection_name, model_class, transformation_func, association_model=None
):
"""General function to migrate a collection to a SQL table."""
- print(
- f"\n====================== Migrating {collection_name}... ======================\n"
- )
+ print(f"\n")
total_docs = mongo_db[collection_name].count_documents({})
migrated_docs = 0
async with db_engine.get_session() as session:
- for skip in range(0, total_docs, BATCH_SIZE):
+ for skip in tqdm(
+ range(0, total_docs, BATCH_SIZE),
+ total=(total_docs - 1) // BATCH_SIZE + 1,
+ desc=f"Migrating: {collection_name}",
+ ):
batch = await asyncio.get_event_loop().run_in_executor(
None,
lambda: list(
diff --git a/agenta-backend/poetry.lock b/agenta-backend/poetry.lock
index 20bb34137c..16b80ad6c5 100644
--- a/agenta-backend/poetry.lock
+++ b/agenta-backend/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "aioboto3"
@@ -2700,6 +2700,7 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -3899,4 +3900,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "7118cded062bfcd960f08cc272d4eab330222cc1e51e9e03e7b728931642e0b2"
+content-hash = "27f39a7d172ca89900f8feb0fd495ddc55977f205571823a19842417976be8c2"
diff --git a/agenta-backend/pyproject.toml b/agenta-backend/pyproject.toml
index d9a714281c..cf624168cc 100644
--- a/agenta-backend/pyproject.toml
+++ b/agenta-backend/pyproject.toml
@@ -39,6 +39,7 @@ asyncpg = "^0.29.0"
psycopg2-binary = "^2.9.9"
uuid-utils = "^0.7.0"
sqlalchemy-json = "^0.7.0"
+tqdm = "^4.66.4"
[tool.poetry.group.dev.dependencies]
pytest = "^7.3.1"
From 92145d3cd10ef7c632c208fc4d85b25f6ca6e271 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 24 Jun 2024 16:47:30 +0200
Subject: [PATCH 132/268] add filtering by table
---
.../migrations/mongo_to_postgres/migration.py | 107 ++++++++++--------
.../migrations/mongo_to_postgres/utils.py | 9 +-
2 files changed, 66 insertions(+), 50 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index 9a78264060..b2fd594e56 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -84,7 +84,7 @@ async def transform_user(user):
async def transform_image(image):
user_uuid = await get_mapped_uuid(
- image["user"].id if isinstance(image["user"], DBRef) else image["user"]
+ "users", image["user"].id if isinstance(image["user"], DBRef) else image["user"]
)
image_uuid = generate_uuid()
await store_mapping("docker_images", image["_id"], image_uuid)
@@ -102,7 +102,7 @@ async def transform_image(image):
async def transform_app(app):
- user_uuid = await get_mapped_uuid(app["user"].id)
+ user_uuid = await get_mapped_uuid("users", app["user"].id)
app_uuid = generate_uuid()
await store_mapping("app_db", app["_id"], app_uuid)
return {
@@ -115,8 +115,8 @@ async def transform_app(app):
async def transform_deployment(deployment):
- app_uuid = await get_mapped_uuid(deployment["app"].id)
- user_uuid = await get_mapped_uuid(deployment["user"].id)
+ app_uuid = await get_mapped_uuid("app_db", deployment["app"].id)
+ user_uuid = await get_mapped_uuid("users", deployment["user"].id)
deployment_uuid = generate_uuid()
await store_mapping("deployments", deployment["_id"], deployment_uuid)
return {
@@ -133,10 +133,12 @@ async def transform_deployment(deployment):
async def transform_variant_base(base):
- app_uuid = await get_mapped_uuid(base["app"].id)
- user_uuid = await get_mapped_uuid(base["user"].id)
- image_uuid = await get_mapped_uuid(base["image"].id)
- deployment_uuid = base["deployment"] and await get_mapped_uuid(base["deployment"])
+ app_uuid = await get_mapped_uuid("app_db", base["app"].id)
+ user_uuid = await get_mapped_uuid("users", base["user"].id)
+ image_uuid = await get_mapped_uuid("docker_images", base["image"].id)
+ deployment_uuid = base["deployment"] and await get_mapped_uuid(
+ "deployments", base["deployment"]
+ )
base_uuid = generate_uuid()
await store_mapping("bases", base["_id"], base_uuid)
return {
@@ -152,11 +154,11 @@ async def transform_variant_base(base):
async def transform_app_variant(variant):
- app_uuid = await get_mapped_uuid(variant["app"].id)
- image_uuid = await get_mapped_uuid(variant["image"].id)
- user_uuid = await get_mapped_uuid(variant["user"].id)
- modified_by_uuid = await get_mapped_uuid(variant["modified_by"].id)
- base_uuid = await get_mapped_uuid(variant["base"].id)
+ app_uuid = await get_mapped_uuid("app_db", variant["app"].id)
+ image_uuid = await get_mapped_uuid("docker_images", variant["image"].id)
+ user_uuid = await get_mapped_uuid("users", variant["user"].id)
+ modified_by_uuid = await get_mapped_uuid("users", variant["modified_by"].id)
+ base_uuid = await get_mapped_uuid("bases", variant["base"].id)
variant_uuid = generate_uuid()
await store_mapping("app_variants", variant["_id"], variant_uuid)
return {
@@ -177,9 +179,9 @@ async def transform_app_variant(variant):
async def transform_app_variant_revision(revision):
- variant_uuid = await get_mapped_uuid(revision["variant"].id)
- modified_by_uuid = await get_mapped_uuid(revision["modified_by"].id)
- base_uuid = await get_mapped_uuid(revision["base"].id)
+ variant_uuid = await get_mapped_uuid("app_variants", revision["variant"].id)
+ modified_by_uuid = await get_mapped_uuid("users", revision["modified_by"].id)
+ base_uuid = await get_mapped_uuid("bases", revision["base"].id)
revision_uuid = generate_uuid()
await store_mapping("app_variant_revisions", revision["_id"], revision_uuid)
return {
@@ -196,11 +198,15 @@ async def transform_app_variant_revision(revision):
async def transform_app_environment(environment):
- app_uuid = await get_mapped_uuid(environment["app"].id)
- user_uuid = await get_mapped_uuid(environment["user"].id)
- variant_uuid = await get_mapped_uuid(environment["deployed_app_variant"])
- revision_uuid = await get_mapped_uuid(environment["deployed_app_variant_revision"])
- deployment_uuid = await get_mapped_uuid(environment["deployment"])
+ app_uuid = await get_mapped_uuid("app_db", environment["app"].id)
+ user_uuid = await get_mapped_uuid("users", environment["user"].id)
+ variant_uuid = await get_mapped_uuid(
+ "app_variants", environment["deployed_app_variant"]
+ )
+ revision_uuid = await get_mapped_uuid(
+ "app_variant_revisions", environment["deployed_app_variant_revision"]
+ )
+ deployment_uuid = await get_mapped_uuid("deployments", environment["deployment"])
environment_uuid = generate_uuid()
await store_mapping("environments", environment["_id"], environment_uuid)
return {
@@ -217,12 +223,12 @@ async def transform_app_environment(environment):
async def transform_app_environment_revision(revision):
- environment_uuid = await get_mapped_uuid(revision["environment"].id)
- modified_by_uuid = await get_mapped_uuid(revision["modified_by"].id)
+ environment_uuid = await get_mapped_uuid("environments", revision["environment"].id)
+ modified_by_uuid = await get_mapped_uuid("users", revision["modified_by"].id)
variant_revision_uuid = await get_mapped_uuid(
- revision["deployed_app_variant_revision"]
+ "app_variant_revisions", revision["deployed_app_variant_revision"]
)
- deployment_uuid = await get_mapped_uuid(revision["deployment"])
+ deployment_uuid = await get_mapped_uuid("deployments", revision["deployment"])
revision_uuid = generate_uuid()
await store_mapping("environments_revisions", revision["_id"], revision_uuid)
return {
@@ -240,7 +246,6 @@ async def transform_template(template):
template_uuid = generate_uuid()
await store_mapping("templates", template["_id"], template_uuid)
- # Ensure type is correctly mapped to TemplateType enum
template_type = (
TemplateType(template["type"]) if "type" in template else TemplateType.IMAGE
)
@@ -261,8 +266,8 @@ async def transform_template(template):
async def transform_test_set(test_set):
- app_uuid = await get_mapped_uuid(test_set["app"].id)
- user_uuid = await get_mapped_uuid(test_set["user"].id)
+ app_uuid = await get_mapped_uuid("app_db", test_set["app"].id)
+ user_uuid = await get_mapped_uuid("users", test_set["user"].id)
test_set_uuid = generate_uuid()
await store_mapping("testsets", test_set["_id"], test_set_uuid)
return {
@@ -277,8 +282,8 @@ async def transform_test_set(test_set):
async def transform_evaluator_config(config):
- app_uuid = await get_mapped_uuid(config["app"].id)
- user_uuid = await get_mapped_uuid(config["user"].id)
+ app_uuid = await get_mapped_uuid("app_db", config["app"].id)
+ user_uuid = await get_mapped_uuid("users", config["user"].id)
config_uuid = generate_uuid()
await store_mapping("evaluators_configs", config["_id"], config_uuid)
return {
@@ -299,8 +304,8 @@ async def convert_human_evaluations_associated_variants(
"""Convert variant and revision ObjectIds to UUIDs and structure them."""
associated_variants = []
for variant_id, revision_id in zip(variants, variants_revisions):
- variant_uuid = await get_mapped_uuid(variant_id)
- revision_uuid = await get_mapped_uuid(revision_id)
+ variant_uuid = await get_mapped_uuid("app_variants", variant_id)
+ revision_uuid = await get_mapped_uuid("app_variant_revisions", revision_id)
associated_variants.append(
{
"human_evaluation_id": evaluation_id,
@@ -312,9 +317,9 @@ async def convert_human_evaluations_associated_variants(
async def transform_human_evaluation(evaluation):
- app_uuid = await get_mapped_uuid(evaluation["app"].id)
- user_uuid = await get_mapped_uuid(evaluation["user"].id)
- test_set_uuid = await get_mapped_uuid(evaluation["testset"].id)
+ app_uuid = await get_mapped_uuid("app_db", evaluation["app"].id)
+ user_uuid = await get_mapped_uuid("users", evaluation["user"].id)
+ test_set_uuid = await get_mapped_uuid("testsets", evaluation["testset"].id)
evaluation_uuid = generate_uuid()
await store_mapping("human_evaluations", evaluation["_id"], evaluation_uuid)
@@ -338,8 +343,10 @@ async def transform_human_evaluation(evaluation):
async def transform_human_evaluation_scenario(scenario):
- user_uuid = await get_mapped_uuid(scenario["user"].id)
- evaluation_uuid = await get_mapped_uuid(scenario["evaluation"].id)
+ user_uuid = await get_mapped_uuid("users", scenario["user"].id)
+ evaluation_uuid = await get_mapped_uuid(
+ "human_evaluations", scenario["evaluation"].id
+ )
scenario_uuid = generate_uuid()
await store_mapping("human_evaluations_scenarios", scenario["_id"], scenario_uuid)
return {
@@ -362,7 +369,9 @@ async def convert_aggregated_results(results, evaluation_id):
"""Convert evaluator_config ObjectIds in aggregated_results to UUIDs and structure them."""
aggregated_results = []
for result in results:
- evaluator_config_uuid = await get_mapped_uuid(result["evaluator_config"])
+ evaluator_config_uuid = await get_mapped_uuid(
+ "evaluators_configs", result["evaluator_config"]
+ )
result_uuid = generate_uuid()
aggregated_results.append(
{
@@ -379,7 +388,9 @@ async def convert_scenario_aggregated_results(results, scenario_id):
"""Convert evaluator_config ObjectIds in scenario aggregated_results to UUIDs and structure them."""
scenario_aggregated_results = []
for result in results:
- evaluator_config_uuid = await get_mapped_uuid(result["evaluator_config"])
+ evaluator_config_uuid = await get_mapped_uuid(
+ "evaluators_configs", result["evaluator_config"]
+ )
result_uuid = generate_uuid()
scenario_aggregated_results.append(
{
@@ -393,11 +404,13 @@ async def convert_scenario_aggregated_results(results, scenario_id):
async def transform_evaluation(evaluation):
- app_uuid = await get_mapped_uuid(evaluation["app"].id)
- user_uuid = await get_mapped_uuid(evaluation["user"].id)
- test_set_uuid = await get_mapped_uuid(evaluation["testset"].id)
- variant_uuid = await get_mapped_uuid(evaluation["variant"])
- revision_uuid = await get_mapped_uuid(evaluation["variant_revision"])
+ app_uuid = await get_mapped_uuid("app_db", evaluation["app"].id)
+ user_uuid = await get_mapped_uuid("users", evaluation["user"].id)
+ test_set_uuid = await get_mapped_uuid("testsets", evaluation["testset"].id)
+ variant_uuid = await get_mapped_uuid("app_variants", evaluation["variant"])
+ revision_uuid = await get_mapped_uuid(
+ "app_variant_revisions", evaluation["variant_revision"]
+ )
evaluation_uuid = generate_uuid()
await store_mapping("evaluations", evaluation["_id"], evaluation_uuid)
@@ -425,9 +438,9 @@ async def transform_evaluation(evaluation):
async def transform_evaluation_scenario(scenario):
- user_uuid = await get_mapped_uuid(scenario["user"].id)
- evaluation_uuid = await get_mapped_uuid(scenario["evaluation"].id)
- variant_uuid = await get_mapped_uuid(scenario["variant_id"])
+ user_uuid = await get_mapped_uuid("users", scenario["user"].id)
+ evaluation_uuid = await get_mapped_uuid("evaluations", scenario["evaluation"].id)
+ variant_uuid = await get_mapped_uuid("app_variants", scenario["variant_id"])
scenario_uuid = generate_uuid()
await store_mapping("evaluation_scenarios", scenario["_id"], scenario_uuid)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index 08dab2a26b..45e74355db 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -61,10 +61,13 @@ async def store_mapping(table_name, mongo_id, uuid):
await session.commit()
-async def get_mapped_uuid(mongo_id):
- """Retrieve the mapped UUID for a given MongoDB ObjectId."""
+async def get_mapped_uuid(table_name, mongo_id):
+ """Retrieve the mapped UUID for a given MongoDB ObjectId and table name."""
async with db_engine.get_session() as session:
- stmt = select(IDsMappingDB.uuid).filter(IDsMappingDB.objectid == str(mongo_id))
+ stmt = select(IDsMappingDB.uuid).filter(
+ IDsMappingDB.table_name == table_name,
+ IDsMappingDB.objectid == str(mongo_id),
+ )
result = await session.execute(stmt)
row = result.first()
return row[0] if row else None
From 74ba503b314fc6792c46d8bb39cc6a50b64f4b94 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 24 Jun 2024 16:52:01 +0200
Subject: [PATCH 133/268] replace id with _id
---
.../agenta_backend/migrations/mongo_to_postgres/utils.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index 45e74355db..c9c55702ba 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -52,10 +52,10 @@ async def create_all_tables(tables):
async def store_mapping(table_name, mongo_id, uuid):
"""Store the mapping of MongoDB ObjectId to UUID in the mapping table."""
- id = generate_uuid()
+ id_ = generate_uuid()
async with db_engine.get_session() as session:
mapping = IDsMappingDB(
- id=id, table_name=table_name, objectid=str(mongo_id), uuid=uuid
+ id=id_, table_name=table_name, objectid=str(mongo_id), uuid=uuid
)
session.add(mapping)
await session.commit()
From 4c48c403b7a2caa42e73dde64c02002001623631 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 24 Jun 2024 17:18:48 +0200
Subject: [PATCH 134/268] fix base
---
.../agenta_backend/migrations/mongo_to_postgres/utils.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index c9c55702ba..ce994d54d8 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -14,10 +14,8 @@
from agenta_backend.models.db_engine import db_engine
-from agenta_backend.models.db_models import (
- IDsMappingDB,
- Base,
-)
+from agenta_backend.models.db_models import IDsMappingDB
+from agenta_backend.models.base import Base
BATCH_SIZE = 1000
From a950c4008c94cbcb5dd13544cc18f1904fb62d7e Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 25 Jun 2024 08:54:43 +0200
Subject: [PATCH 135/268] add assertion to check length of variants and
variants_revisions
---
.../agenta_backend/migrations/mongo_to_postgres/migration.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index b2fd594e56..894773293c 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -303,6 +303,10 @@ async def convert_human_evaluations_associated_variants(
):
"""Convert variant and revision ObjectIds to UUIDs and structure them."""
associated_variants = []
+ assert len(variants) == len(
+ variants_revisions
+ ), "variants and variants_revisions must have the same length"
+
for variant_id, revision_id in zip(variants, variants_revisions):
variant_uuid = await get_mapped_uuid("app_variants", variant_id)
revision_uuid = await get_mapped_uuid("app_variant_revisions", revision_id)
From 1d201ab5329f244915c11b349b2daff06ac0040c Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 25 Jun 2024 11:14:46 +0200
Subject: [PATCH 136/268] add migration steps
---
.../agenta_backend/migrations/mongo_to_postgres/README.md | 6 ++++++
1 file changed, 6 insertions(+)
create mode 100644 agenta-backend/agenta_backend/migrations/mongo_to_postgres/README.md
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/README.md b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/README.md
new file mode 100644
index 0000000000..0c6f6c4b89
--- /dev/null
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/README.md
@@ -0,0 +1,6 @@
+```bash
+docker ps
+docker exec -it {backend-container-id} bash
+cd /app/agenta_backend/migrations/mongo_to_postgres
+python3 migration.py
+```
\ No newline at end of file
From 92fd40d742f63e8946cb843cb5ef89f969b37c9f Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 25 Jun 2024 13:46:11 +0200
Subject: [PATCH 137/268] fix deployed_app_variant_revision
---
.../migrations/mongo_to_postgres/migration.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index 894773293c..04780119e5 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -203,9 +203,14 @@ async def transform_app_environment(environment):
variant_uuid = await get_mapped_uuid(
"app_variants", environment["deployed_app_variant"]
)
- revision_uuid = await get_mapped_uuid(
- "app_variant_revisions", environment["deployed_app_variant_revision"]
- )
+
+ if environment["deployed_app_variant_revision"] is None:
+ revision_uuid = None
+ else:
+ revision_uuid = await get_mapped_uuid(
+ "app_variant_revisions", environment["deployed_app_variant_revision"].id
+ )
+
deployment_uuid = await get_mapped_uuid("deployments", environment["deployment"])
environment_uuid = generate_uuid()
await store_mapping("environments", environment["_id"], environment_uuid)
From 1d45104bf2f4c7745daaae2f83a1136aa9cc66c4 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 25 Jun 2024 13:04:47 +0100
Subject: [PATCH 138/268] refactor (backend -> db orm): improve queries to
include cloud/ee related joins
---
.../agenta_backend/services/db_manager.py | 280 +++++++++++-------
1 file changed, 169 insertions(+), 111 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 004a74f914..f476af07fb 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -24,6 +24,7 @@
from agenta_backend.commons.services.selectors import get_user_org_and_workspace_id
from agenta_backend.commons.models.db_models import (
+ WorkspaceDB,
AppDB_ as AppDB,
UserDB_ as UserDB,
ImageDB_ as ImageDB,
@@ -62,7 +63,6 @@
)
from agenta_backend.models.db_models import (
TemplateDB,
- EvaluatorConfigDB,
AppVariantRevisionsDB,
HumanEvaluationVariantDB,
EvaluationScenarioResultDB,
@@ -179,8 +179,15 @@ async def fetch_app_by_id(app_id: str) -> AppDB:
assert app_id is not None, "app_id cannot be None"
async with db_engine.get_session() as session:
- result = await session.execute(select(AppDB).filter_by(id=uuid.UUID(app_id)))
- app = result.scalars().one_or_none()
+ base_query = select(AppDB).filter_by(id=uuid.UUID(app_id))
+ if isCloudEE():
+ base_query = base_query.options(
+ joinedload(AppDB.workspace).joinedload(WorkspaceDB.members), # type: ignore
+ joinedload(AppDB.organization)
+ )
+
+ result = await session.execute(base_query)
+ app = result.unique().scalars().one_or_none()
return app
@@ -199,15 +206,23 @@ async def fetch_app_variant_by_id(
assert app_variant_id is not None, "app_variant_id cannot be None"
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppVariantDB)
- .options(
- joinedload(AppVariantDB.base),
+ base_query = select(AppVariantDB).options(
+ joinedload(AppVariantDB.base),
+ joinedload(AppVariantDB.app)
+ )
+ if isCloudEE():
+ query = base_query.options(
+ joinedload(AppVariantDB.user.of_type(UserDB)).load_only(UserDB.uid), # type: ignore
+ joinedload(AppVariantDB.image.of_type(ImageDB)).load_only(ImageDB.docker_id, ImageDB.tags), # type: ignore
+ )
+ else:
+ query = base_query.options(
joinedload(AppVariantDB.user).load_only(UserDB.uid), # type: ignore
- joinedload(AppVariantDB.app),
joinedload(AppVariantDB.image).load_only(ImageDB.docker_id, ImageDB.tags), # type: ignore
)
- .filter_by(id=uuid.UUID(app_variant_id))
+
+ result = await session.execute(
+ query.filter_by(id=uuid.UUID(app_variant_id))
)
app_variant = result.scalars().one_or_none()
return app_variant
@@ -448,15 +463,20 @@ async def create_new_app_variant(
variant.workspace_id = uuid.UUID(workspace)
session.add(variant)
+
+ attributes_to_refresh = [
+ "app",
+ "image",
+ "user",
+ "base",
+ ]
+ if isCloudEE():
+ attributes_to_refresh.extend(["organization", "workspace"])
+
await session.commit()
await session.refresh(
variant,
- attribute_names=[
- "app",
- "image",
- "user",
- "base",
- ],
+ attribute_names=attributes_to_refresh,
) # Ensures the app, image, user and base relationship are loaded
variant_revision = AppVariantRevisionsDB(
@@ -548,8 +568,8 @@ async def create_image(
async def create_deployment(
- app: AppVariantDB,
- user: UserDB,
+ app_id: str,
+ user_id: str,
container_name: str,
container_id: str,
uri: str,
@@ -559,10 +579,10 @@ async def create_deployment(
) -> DeploymentDB:
"""Create a new deployment.
Args:
- app (AppVariantDB): The app variant to create the deployment for.
+ app_id (str): The ID of the app to create the deployment for.
organization (OrganizationDB): The organization that the deployment belongs to.
workspace (WorkspaceDB): The Workspace that the deployment belongs to.
- user (UserDB): The user that the deployment belongs to.
+ user_id (str): The ID of the user that the deployment belongs to.
container_name (str): The name of the container.
container_id (str): The ID of the container.
uri (str): The URI of the container.
@@ -574,8 +594,8 @@ async def create_deployment(
async with db_engine.get_session() as session:
try:
deployment = DeploymentDB(
- app=app,
- user=user,
+ app_id=uuid.UUID(app_id),
+ user_id=uuid.UUID(user_id),
container_name=container_name,
container_id=container_id,
uri=uri,
@@ -827,9 +847,6 @@ async def get_user_with_email(email: str):
async with db_engine.get_session() as session:
result = await session.execute(select(UserDB).filter_by(email=email))
user = result.scalars().one_or_none()
- if user is None:
- logger.error("Failed to get user with email address")
- raise Exception("Error while getting user")
return user
@@ -874,7 +891,7 @@ async def get_orga_image_instance_by_docker_id(
query = query.filter_by(
organization_id=uuid.UUID(organization_id),
- workspace_id=uuid.UUID(workspace_id),
+ workspace_id=workspace_id,
)
result = await session.execute(query)
@@ -912,7 +929,7 @@ async def get_orga_image_instance_by_uri(
query = query.filter_by(
organization_id=uuid.UUID(organization_id),
- workspace_id=uuid.UUID(workspace_id),
+ workspace_id=workspace_id,
)
result = await session.execute(query)
@@ -1054,8 +1071,8 @@ async def list_apps(
user_org_workspace_data = await get_user_org_and_workspace_id(user_uid) # type: ignore
has_permission = await check_rbac_permission( # type: ignore
user_org_workspace_data=user_org_workspace_data,
- workspace_id=uuid.UUID(workspace_id),
- organization_id=uuid.UUID(org_id),
+ workspace_id=workspace_id,
+ organization_id=org_id,
permission=Permission.VIEW_APPLICATION, # type: ignore
)
logger.debug(f"User has Permission to list apps: {has_permission}")
@@ -1068,8 +1085,8 @@ async def list_apps(
async with db_engine.get_session() as session:
result = await session.execute(
select(AppDB).filter_by(
- organization_id=uuid.UUID(org_id),
- workspace_id=uuid.UUID(workspace_id),
+ organization_id=org_id,
+ workspace_id=workspace_id,
)
)
apps = result.unique().scalars().all()
@@ -1281,11 +1298,14 @@ async def fetch_app_environment_by_name_and_appid(
"""
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppEnvironmentDB).filter_by(
- app_id=uuid.UUID(app_id), name=environment_name
- )
+ query = select(AppEnvironmentDB).filter_by(
+ app_id=uuid.UUID(app_id), name=environment_name
)
+ if isCloudEE():
+ query = query.options(
+ joinedload(AppEnvironmentDB.deployed_app_variant.of_type(AppVariantDB)), # type: ignore
+ )
+ result = await session.execute(query)
app_environment = result.scalars().one_or_none()
return app_environment
@@ -1324,9 +1344,12 @@ async def fetch_environment_revisions_for_environment(
"""
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppEnvironmentRevisionDB).filter_by(environment_id=environment.id)
- )
+ query = select(AppEnvironmentRevisionDB).filter_by(environment_id=environment.id)
+ if isCloudEE():
+ query = query.options(
+ joinedload(AppEnvironmentRevisionDB.modified_by.of_type(UserDB)).load_only(UserDB.username) # type: ignore
+ )
+ result = await session.execute(query)
environment_revisions = result.scalars().all()
return environment_revisions
@@ -1342,7 +1365,7 @@ async def fetch_app_environment_revision(revision_id: str) -> AppEnvironmentRevi
result = await session.execute(
select(AppEnvironmentRevisionDB).filter_by(id=uuid.UUID(revision_id))
)
- environment_revision = result.scalars().all()
+ environment_revision = result.scalars().one_or_none()
return environment_revision
@@ -1366,12 +1389,12 @@ async def update_app_environment(
async def update_app_environment_deployed_variant_revision(
- app_environment: AppEnvironmentDB, deployed_variant_revision: str
+ app_environment_id: str, deployed_variant_revision: str
):
"""Updates the deployed variant revision for an app environment
Args:
- app_environment (AppEnvironment): the app environment object
+ app_environment_id (str): the app environment ID
deployed_variant_revision (str): the ID of the deployed variant revision
"""
@@ -1387,7 +1410,11 @@ async def update_app_environment_deployed_variant_revision(
f"App variant revision {deployed_variant_revision} not found"
)
- app_environment.deployed_app_variant_revision_id = app_variant_revision.id
+ app_environment_result = await session.execute(
+ select(AppEnvironmentDB).filter_by(id=uuid.UUID(app_environment_id))
+ )
+ app_environment = app_environment_result.scalars().one_or_none()
+ app_environment.deployed_app_variant_revision_id = app_variant_revision.id # type: ignore
await session.commit()
await session.refresh(app_environment)
@@ -1539,9 +1566,14 @@ async def list_app_variant_revisions_by_variant(
"""
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppVariantRevisionsDB).filter_by(variant_id=app_variant.id)
- )
+ base_query = select(AppVariantRevisionsDB).filter_by(variant_id=app_variant.id)
+ if isCloudEE():
+ base_query = base_query.options(
+ joinedload(AppVariantRevisionsDB.modified_by.of_type(UserDB))
+ .load_only(UserDB.username) # type: ignore
+ )
+
+ result = await session.execute(base_query)
app_variant_revisions = result.scalars().all()
return app_variant_revisions
@@ -1557,15 +1589,18 @@ async def fetch_app_variant_revision(app_variant: str, revision_number: int):
"""
async with db_engine.get_session() as session:
- result = await session.execute(
- select(AppVariantRevisionsDB)
- .options(
- joinedload(AppVariantRevisionsDB.modified_by).load_only(
- UserDB.username
- ) # type: ignore
+ base_query = select(AppVariantRevisionsDB).filter_by(variant_id=uuid.UUID(app_variant), revision=revision_number)
+ if isCloudEE():
+ query = base_query.options(
+ joinedload(AppVariantRevisionsDB.modified_by.of_type(UserDB))
+ .load_only(UserDB.username) # type: ignore
)
- .filter_by(variant_id=uuid.UUID(app_variant), revision=revision_number)
- )
+ else:
+ query = base_query.options(
+ joinedload(AppVariantRevisionsDB.modified_by)
+ .load_only(UserDB.username) # type: ignore
+ )
+ result = await session.execute(query)
app_variant_revisions = result.scalars().one_or_none()
return app_variant_revisions
@@ -1807,7 +1842,7 @@ async def get_app_variant_instance_by_id(variant_id: str) -> AppVariantDB:
async def get_app_variant_revision_by_id(
- variant_revision_id: str, fetch_links=False
+ variant_revision_id: str
) -> AppVariantRevisionsDB:
"""Get the app variant revision object from the database with the provided id.
@@ -1927,14 +1962,18 @@ async def fetch_evaluation_by_id(evaluation_id: str) -> Optional[EvaluationDB]:
assert evaluation_id is not None, "evaluation_id cannot be None"
async with db_engine.get_session() as session:
- result = await session.execute(
- select(EvaluationDB)
- .options(
+ base_query = select(EvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ if isCloudEE():
+ query = base_query.options(
+ joinedload(EvaluationDB.user.of_type(UserDB)).load_only(UserDB.id, UserDB.username), # type: ignore
+ joinedload(EvaluationDB.testset.of_type(TestSetDB)).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+ else:
+ query = base_query.options(
joinedload(EvaluationDB.user).load_only(UserDB.username), # type: ignore
joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
)
- .filter_by(id=uuid.UUID(evaluation_id))
- )
+ result = await session.execute(query)
evaluation = result.scalars().one_or_none()
return evaluation
@@ -1948,14 +1987,18 @@ async def list_human_evaluations(app_id: str):
"""
async with db_engine.get_session() as session:
- result = await session.execute(
- select(HumanEvaluationDB)
- .options(
+ base_query = select(HumanEvaluationDB).filter_by(app_id=uuid.UUID(app_id))
+ if isCloudEE():
+ query = base_query.options(
+ joinedload(HumanEvaluationDB.user.of_type(UserDB)).load_only(UserDB.id, UserDB.username), # type: ignore
+ joinedload(HumanEvaluationDB.testset.of_type(TestSetDB)).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+ else:
+ query = base_query.options(
joinedload(HumanEvaluationDB.user).load_only(UserDB.id, UserDB.username), # type: ignore
joinedload(HumanEvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
)
- .filter_by(app_id=uuid.UUID(app_id))
- )
+ result = await session.execute(query)
human_evaluations = result.scalars().all()
return human_evaluations
@@ -2015,18 +2058,20 @@ async def fetch_human_evaluation_variants(human_evaluation_id: str):
"""
async with db_engine.get_session() as session:
- result = await session.execute(
- select(HumanEvaluationVariantDB)
- .options(
- joinedload(HumanEvaluationVariantDB.variant).load_only(
- AppVariantDB.id, AppVariantDB.variant_name
- ), # type: ignore
- joinedload(HumanEvaluationVariantDB.variant_revision).load_only(
- AppVariantRevisionsDB.revision, AppVariantRevisionsDB.id
- ), # type: ignore
+ base_query = select(HumanEvaluationVariantDB).filter_by(human_evaluation_id=uuid.UUID(human_evaluation_id))
+ if isCloudEE():
+ query = base_query.options(
+ joinedload(HumanEvaluationVariantDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.id, AppVariantRevisionsDB.revision), # type: ignore
)
- .filter_by(human_evaluation_id=uuid.UUID(human_evaluation_id))
- )
+ else:
+ query = base_query.options(
+ joinedload(HumanEvaluationVariantDB.variant)
+ .load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant_revision)
+ .load_only(AppVariantRevisionsDB.revision, AppVariantRevisionsDB.id), # type: ignore
+ )
+ result = await session.execute(query)
evaluation_variants = result.scalars().all()
return evaluation_variants
@@ -2085,14 +2130,18 @@ async def fetch_human_evaluation_by_id(
assert evaluation_id is not None, "evaluation_id cannot be None"
async with db_engine.get_session() as session:
- result = await session.execute(
- select(HumanEvaluationDB)
- .options(
- joinedload(HumanEvaluationDB.user).load_only(UserDB.username), # type: ignore
- joinedload(HumanEvaluationDB.testset).load_only(TestSetDB.name), # type: ignore
+ base_query = select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ if isCloudEE():
+ query = base_query.options(
+ joinedload(HumanEvaluationDB.user.of_type(UserDB)).load_only(UserDB.id, UserDB.username), # type: ignore
+ joinedload(HumanEvaluationDB.testset.of_type(TestSetDB)).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
)
- .filter_by(id=uuid.UUID(evaluation_id))
- )
+ else:
+ query = base_query.options(
+ joinedload(HumanEvaluationDB.user).load_only(UserDB.id, UserDB.username), # type: ignore
+ joinedload(HumanEvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+ result = await session.execute(query)
evaluation = result.scalars().one_or_none()
return evaluation
@@ -2599,7 +2648,7 @@ async def fetch_app_by_name_and_parameters(
query = base_query.filter_by(
organization_id=uuid.UUID(organization_id),
- workspace_id=uuid.UUID(workspace_id),
+ workspace_id=workspace_id,
)
else:
query = base_query.join(UserDB).filter(UserDB.uid == user_uid)
@@ -2640,8 +2689,8 @@ async def create_new_evaluation(
organization is not None and workspace is not None
), "organization and workspace must be provided together"
- evaluation.organization_id = uuid.UUID(organization_id) # type: ignore
- evaluation.workspace_id = uuid.UUID(workspace_id) # type: ignore
+ evaluation.organization_id = uuid.UUID(organization) # type: ignore
+ evaluation.workspace_id = uuid.UUID(workspace) # type: ignore
session.add(evaluation)
await session.commit()
@@ -2660,14 +2709,20 @@ async def list_evaluations(app_id: str):
"""
async with db_engine.get_session() as session:
- result = await session.execute(
- select(EvaluationDB)
- .options(
+ base_query = select(EvaluationDB).filter_by(app_id=uuid.UUID(app_id))
+ if isCloudEE():
+ query = base_query.options(
+ joinedload(EvaluationDB.user.of_type(UserDB)).load_only(UserDB.id, UserDB.username), # type: ignore
+ joinedload(EvaluationDB.testset.of_type(TestSetDB)).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+ else:
+ query = base_query.options(
joinedload(EvaluationDB.user).load_only(UserDB.id, UserDB.username), # type: ignore
joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
- joinedload(EvaluationDB.aggregated_results),
)
- .filter_by(app_id=uuid.UUID(app_id))
+
+ result = await session.execute(
+ query.options(joinedload(EvaluationDB.aggregated_results))
)
evaluations = result.unique().scalars().all()
return evaluations
@@ -2819,9 +2874,21 @@ async def fetch_eval_aggregated_results(evaluation_id: str):
"""
async with db_engine.get_session() as session:
- result = await session.execute(
- select(EvaluationAggregatedResultDB)
- .options(
+ base_query = select(EvaluationAggregatedResultDB).filter_by(evaluation_id=uuid.UUID(evaluation_id))
+ if isCloudEE():
+ query = base_query.options(
+ joinedload(EvaluationAggregatedResultDB.evaluator_config.of_type(EvaluatorConfigDB))
+ .load_only(
+ EvaluatorConfigDB.id, # type: ignore
+ EvaluatorConfigDB.name, # type: ignore
+ EvaluatorConfigDB.evaluator_key, # type: ignore
+ EvaluatorConfigDB.settings_values, # type: ignore
+ EvaluatorConfigDB.created_at, # type: ignore
+ EvaluatorConfigDB.updated_at, # type: ignore
+ )
+ )
+ else:
+ query = base_query.options(
joinedload(EvaluationAggregatedResultDB.evaluator_config).load_only(
EvaluatorConfigDB.id, # type: ignore
EvaluatorConfigDB.name, # type: ignore
@@ -2831,8 +2898,8 @@ async def fetch_eval_aggregated_results(evaluation_id: str):
EvaluatorConfigDB.updated_at, # type: ignore
)
)
- .filter_by(evaluation_id=uuid.UUID(evaluation_id))
- )
+
+ result = await session.execute(query)
aggregated_results = result.scalars().all()
return aggregated_results
@@ -2862,7 +2929,8 @@ async def fetch_evaluator_config(evaluator_config_id: str):
async with db_engine.get_session() as session:
result = await session.execute(
- select(EvaluatorConfigDB).filter_by(id=uuid.UUID(evaluator_config_id))
+ select(EvaluatorConfigDB)
+ .filter_by(id=uuid.UUID(evaluator_config_id))
)
evaluator_config = result.scalars().one_or_none()
return evaluator_config
@@ -2922,8 +2990,6 @@ async def create_evaluator_config(
user_id: str,
name: str,
evaluator_key: str,
- organization=None,
- workspace=None,
settings_values: Optional[Dict[str, Any]] = None,
) -> EvaluatorConfigDB:
"""Create a new evaluator configuration in the database."""
@@ -2938,22 +3004,14 @@ async def create_evaluator_config(
)
if isCloudEE():
- # assert that if organization is provided, workspace is also provided, and vice versa
- assert (
- organization is not None and workspace is not None
- ), "organization and workspace must be provided together"
+ new_evaluator_config.organization_id = app.organization_id
+ new_evaluator_config.workspace_id = app.workspace_id
- new_evaluator_config.organization_id = uuid.UUID(organization)
- new_evaluator_config.workspace_id = uuid.UUID(workspace)
-
- try:
- session.add(new_evaluator_config)
- await session.commit()
- await session.refresh(new_evaluator_config)
+ session.add(new_evaluator_config)
+ await session.commit()
+ await session.refresh(new_evaluator_config)
- return new_evaluator_config
- except Exception as e:
- raise e
+ return new_evaluator_config
async def update_evaluator_config(
From 82e2b92961fa8e5c919a160b56fab3932aa566c1 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 25 Jun 2024 13:10:43 +0100
Subject: [PATCH 139/268] refactor (backend -> api routers): remove redundant
imports, added exception traceback tracking, error handling, and improved
 rbac logic when checking object permissions
---
.../agenta_backend/routers/app_router.py | 3 +
.../agenta_backend/routers/configs_router.py | 56 +++++++++++--------
.../routers/evaluation_router.py | 13 +++--
.../routers/evaluators_router.py | 3 +
.../routers/human_evaluation_router.py | 36 +++++++-----
.../agenta_backend/routers/testset_router.py | 23 ++++----
6 files changed, 82 insertions(+), 52 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/app_router.py b/agenta-backend/agenta_backend/routers/app_router.py
index bed5fbf315..85cb62930b 100644
--- a/agenta-backend/agenta_backend/routers/app_router.py
+++ b/agenta-backend/agenta_backend/routers/app_router.py
@@ -714,5 +714,8 @@ async def list_app_environment_revisions(
app_environment, app_environment_revisions
)
except Exception as e:
+ import traceback
+
+ traceback.print_exc()
logger.exception(f"An error occurred: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index a7c9a531d6..d30b567dd1 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -63,7 +63,7 @@ async def save_config(
)
else:
raise HTTPException(
- status_code=200,
+ status_code=400,
detail="Config name already exists. Please use a different name or set overwrite to True.",
)
else:
@@ -84,7 +84,8 @@ async def save_config(
traceback.print_exc()
logger.error(f"save_config exception ===> {e}")
- raise HTTPException(status_code=500, detail=str(e)) from e
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code, detail=str(e)) from e
@router.get("/", response_model=GetConfigResponse, operation_id="get_config")
@@ -167,7 +168,8 @@ async def get_config(
raise
except Exception as e:
logger.error(f"get_config exception: {e}")
- raise HTTPException(status_code=500, detail=str(e)) from e
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code, detail=str(e))
@router.get(
@@ -175,26 +177,34 @@ async def get_config(
operation_id="get_config_deployment_revision",
)
async def get_config_deployment_revision(request: Request, deployment_revision_id: str):
- environment_revision = await db_manager.fetch_app_environment_revision(
- deployment_revision_id
- )
- if environment_revision is None:
- raise HTTPException(
- 404, f"No environment revision found for {deployment_revision_id}"
+ try:
+ environment_revision = await db_manager.fetch_app_environment_revision(
+ deployment_revision_id
)
+ if environment_revision is None:
+ raise HTTPException(
+ 404, f"No environment revision found for {deployment_revision_id}"
+ )
- variant_revision = await db_manager.fetch_app_variant_revision_by_id(
- str(environment_revision.deployed_app_variant_revision)
- )
- if not variant_revision:
- raise HTTPException(
- 404,
- f"No configuration found for deployment revision {deployment_revision_id}",
+ variant_revision = await db_manager.fetch_app_variant_revision_by_id(
+ str(environment_revision.deployed_app_variant_revision_id)
)
- return GetConfigResponse(
- **variant_revision.config.dict(),
- current_version=environment_revision.revision,
- )
+ if not variant_revision:
+ raise HTTPException(
+ 404,
+ f"No configuration found for deployment revision {deployment_revision_id}",
+ )
+ return GetConfigResponse(
+ **variant_revision.get_config(),
+ current_version=environment_revision.revision, # type: ignore
+ )
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ logger.error(f"get config deployment revision exception ===> {e}")
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code, detail=str(e))
@router.post(
@@ -225,14 +235,14 @@ async def revert_deployment_revision(request: Request, deployment_revision_id: s
status_code=403,
)
- if environment_revision.deployed_app_variant_revision is None:
+ if environment_revision.deployed_app_variant_revision_id is None:
raise HTTPException(
404,
f"No deployed app variant found for deployment revision: {deployment_revision_id}",
)
await db_manager.update_app_environment_deployed_variant_revision(
- environment_revision.environment,
- environment_revision.deployed_app_variant_revision,
+ str(environment_revision.environment_id),
+ str(environment_revision.deployed_app_variant_revision_id),
)
return "Environment was reverted to deployment revision successful"
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index e883b70237..59809a75fb 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -1,4 +1,4 @@
-import secrets
+import random
import logging
from typing import Any, List
@@ -14,7 +14,6 @@
EvaluationScenario,
NewEvaluation,
DeleteEvaluation,
- EvaluationWebhook,
)
from agenta_backend.services.evaluator_manager import (
check_ai_critique_inputs,
@@ -146,6 +145,12 @@ async def create_evaluation(
status_code=400,
detail="columns in the test set should match the names of the inputs in the variant",
)
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code, detail=str(e))
@router.get("/{evaluation_id}/status/", operation_id="fetch_evaluation_status")
@@ -390,10 +395,10 @@ async def delete_evaluations(
try:
if isCloudEE():
- # TODO (abram): improve rbac logic for evaluation permission
+ evaluation_id = random.choice(payload.evaluations_ids)
has_permission = await check_action_access(
user_uid=request.state.user_id,
- # object_id=evaluation_id,
+ object_id=evaluation_id,
object_type="evaluation",
permission=Permission.DELETE_EVALUATION,
)
diff --git a/agenta-backend/agenta_backend/routers/evaluators_router.py b/agenta-backend/agenta_backend/routers/evaluators_router.py
index 3cd966d982..932bc5a6f8 100644
--- a/agenta-backend/agenta_backend/routers/evaluators_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluators_router.py
@@ -188,6 +188,9 @@ async def update_evaluator_config(
)
return evaluators_configs
except Exception as e:
+ import traceback
+
+ traceback.print_exc()
raise HTTPException(
status_code=500, detail=f"Error updating evaluator configuration: {str(e)}"
)
diff --git a/agenta-backend/agenta_backend/routers/human_evaluation_router.py b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
index f71f7616f9..a258784e7a 100644
--- a/agenta-backend/agenta_backend/routers/human_evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
@@ -1,3 +1,4 @@
+import random
from typing import List, Dict
from fastapi import HTTPException, Body, Request, status, Response
@@ -79,6 +80,12 @@ async def create_evaluation(
status_code=400,
detail="columns in the test set should match the names of the inputs in the variant",
)
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ raise HTTPException(status_code, detail=str(e))
@router.get("/", response_model=List[HumanEvaluation])
@@ -473,24 +480,27 @@ async def delete_evaluations(
try:
if isCloudEE():
- for evaluation_id in delete_evaluations.evaluations_ids:
- has_permission = await check_action_access(
- user_uid=request.state.user_id,
- object_id=evaluation_id,
- object_type="evaluation",
- permission=Permission.DELETE_EVALUATION,
+ evaluation_id = random.choice(delete_evaluations.evaluations_ids)
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ object_id=evaluation_id,
+ object_type="human_evaluation",
+ permission=Permission.DELETE_EVALUATION,
+ )
+ if not has_permission:
+ error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
)
- if not has_permission:
- error_msg = f"You do not have permission to perform this action. Please contact your Organization Admin."
- raise HTTPException(
- detail=error_msg,
- status_code=403,
- )
await evaluation_service.delete_human_evaluations(
delete_evaluations.evaluations_ids
)
return Response(status_code=status.HTTP_204_NO_CONTENT)
except Exception as e:
+ import traceback
+
+ traceback.print_exc()
status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
- raise HTTPException(status_code=status_code, detail=str(e)) from e
+ raise HTTPException(status_code=status_code, detail=str(e))
diff --git a/agenta-backend/agenta_backend/routers/testset_router.py b/agenta-backend/agenta_backend/routers/testset_router.py
index 1139b9ccb0..b6691f4965 100644
--- a/agenta-backend/agenta_backend/routers/testset_router.py
+++ b/agenta-backend/agenta_backend/routers/testset_router.py
@@ -1,19 +1,18 @@
import io
import csv
-import uuid
import json
+import random
import logging
import requests
from typing import Optional, List
-from datetime import datetime, timezone
-from bson import ObjectId
from pydantic import ValidationError
+
from fastapi.responses import JSONResponse
+from fastapi import HTTPException, UploadFile, File, Form, Request
+
from agenta_backend.services import db_manager
-from agenta_backend.services.db_manager import get_user
from agenta_backend.utils.common import APIRouter, isCloudEE
-from fastapi import HTTPException, UploadFile, File, Form, Request
from agenta_backend.models.converters import testset_db_to_pydantic
@@ -411,13 +410,13 @@ async def delete_testsets(
"""
if isCloudEE():
- # TODO: improve rbac logic for testset permission
- # has_permission = await check_action_access(
- # user_uid=request.state.user_id,
- # object=test_set,
- # permission=Permission.DELETE_TESTSET,
- # )
- has_permission = False
+ testset_id = random.choice(payload.testset_ids)
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ object_id=testset_id,
+ object_type="testset",
+ permission=Permission.DELETE_TESTSET,
+ )
logger.debug(f"User has Permission to delete Testset: {has_permission}")
if not has_permission:
error_msg = f"You do not have permission to perform this action. Please contact your organization admin."
From e1b389e69e56237616be4772a4574641e04b6906 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 25 Jun 2024 13:11:36 +0100
Subject: [PATCH 140/268] refactor (backend): improve use of relational
 attribute UIDs and remove redundant code
---
.../agenta_backend/services/app_manager.py | 30 +++++++++----------
.../services/deployment_manager.py | 9 +++---
.../services/evaluator_manager.py | 5 ----
.../agenta_backend/tasks/evaluations.py | 21 ++++---------
4 files changed, 26 insertions(+), 39 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index 12a0c83927..8d428367d0 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -80,8 +80,8 @@ async def start_variant(
db_app_variant.image.docker_id,
db_app_variant.image.tags,
db_app_variant.app.app_name,
- db_app_variant.organization if isCloudEE() else None,
- db_app_variant.workspace if isCloudEE() else None,
+ str(db_app_variant.organization_id) if isCloudEE() else None,
+ str(db_app_variant.workspace_id) if isCloudEE() else None,
)
logger.debug("App name is %s", db_app_variant.app.app_name)
# update the env variables
@@ -103,7 +103,7 @@ async def start_variant(
if isCloudEE():
api_key = await api_key_service.create_api_key(
str(db_app_variant.user.uid),
- workspace_id=str(db_app_variant.workspace.id),
+ workspace_id=str(db_app_variant.workspace_id),
expiration_date=None,
hidden=True,
)
@@ -426,14 +426,14 @@ async def add_variant_based_on_image(
if parsed_url.scheme and parsed_url.netloc:
db_image = await db_manager.get_orga_image_instance_by_uri(
template_uri=docker_id_or_template_uri,
- organization_id=str(app.organization.id) if isCloudEE() else None, # type: ignore
- workspace_id=str(app.workspace.id) if isCloudEE() else None, # type: ignore
+ organization_id=str(app.organization_id) if isCloudEE() else None, # type: ignore
+ workspace_id=str(app.workspace_id) if isCloudEE() else None, # type: ignore
)
else:
db_image = await db_manager.get_orga_image_instance_by_docker_id(
docker_id=docker_id_or_template_uri,
- organization_id=str(app.organization.id) if isCloudEE() else None, # type: ignore
- workspace_id=str(app.workspace.id) if isCloudEE() else None, # type: ignore
+ organization_id=str(app.organization_id) if isCloudEE() else None, # type: ignore
+ workspace_id=str(app.workspace_id) if isCloudEE() else None, # type: ignore
)
# Create new image if not exists
@@ -445,8 +445,8 @@ async def add_variant_based_on_image(
template_uri=docker_id_or_template_uri,
deletable=not (is_template_image),
user=user_instance,
- organization=app.organization if isCloudEE() else None, # noqa
- workspace=app.workspace if isCloudEE() else None, # noqa
+ organization=str(app.organization_id) if isCloudEE() else None, # noqa
+ workspace=str(app.workspace_id) if isCloudEE() else None, # noqa
)
else:
docker_id = docker_id_or_template_uri
@@ -456,8 +456,8 @@ async def add_variant_based_on_image(
tags=tags,
deletable=not (is_template_image),
user=user_instance,
- organization=app.organization if isCloudEE() else None, # noqa
- workspace=app.workspace if isCloudEE() else None, # noqa
+ organization=str(app.organization_id) if isCloudEE() else None, # noqa
+ workspace=str(app.workspace_id) if isCloudEE() else None, # noqa
)
# Create config
@@ -474,8 +474,8 @@ async def add_variant_based_on_image(
] # TODO: Change this in SDK2 to directly use base_name
db_base = await db_manager.create_new_variant_base(
app=app,
- organization=app.organization if isCloudEE() else None, # noqa
- workspace=app.workspace if isCloudEE() else None, # noqa
+ organization=str(app.organization_id) if isCloudEE() else None, # noqa
+ workspace=str(app.workspace_id) if isCloudEE() else None, # noqa
user=user_instance,
base_name=base_name, # the first variant always has default base
image=db_image,
@@ -488,8 +488,8 @@ async def add_variant_based_on_image(
variant_name=variant_name,
image=db_image,
user=user_instance,
- organization=app.organization if isCloudEE() else None, # noqa
- workspace=app.workspace if isCloudEE() else None, # noqa
+ organization=str(app.organization_id) if isCloudEE() else None, # noqa
+ workspace=str(app.workspace_id) if isCloudEE() else None, # noqa
base_name=base_name,
base=db_base,
config=config_db,
diff --git a/agenta-backend/agenta_backend/services/deployment_manager.py b/agenta-backend/agenta_backend/services/deployment_manager.py
index 8815b65c3b..e61f671fc2 100644
--- a/agenta-backend/agenta_backend/services/deployment_manager.py
+++ b/agenta-backend/agenta_backend/services/deployment_manager.py
@@ -35,6 +35,7 @@ async def start_service(
else:
uri_path = f"{app_variant_db.user.id}/{app_variant_db.app.app_name}/{app_variant_db.base_name}"
container_name = f"{app_variant_db.app.app_name}-{app_variant_db.base_name}-{app_variant_db.user.id}"
+
logger.debug("Starting service with the following parameters:")
logger.debug(f"image_name: {app_variant_db.image.tags}")
logger.debug(f"uri_path: {uri_path}")
@@ -57,14 +58,14 @@ async def start_service(
)
deployment = await db_manager.create_deployment(
- app=app_variant_db.app,
- user=app_variant_db.user,
+ app_id=str(app_variant_db.app.id),
+ user_id=str(app_variant_db.user.id),
container_name=container_name,
container_id=container_id,
uri=uri,
status="running",
- organization=app_variant_db.organization if isCloudEE() else None,
- workspace=app_variant_db.workspace if isCloudEE() else None,
+ organization=str(app_variant_db.organization_id) if isCloudEE() else None,
+ workspace=str(app_variant_db.workspace_id) if isCloudEE() else None,
)
return deployment
diff --git a/agenta-backend/agenta_backend/services/evaluator_manager.py b/agenta-backend/agenta_backend/services/evaluator_manager.py
index 472c8c4a1b..586c59b282 100644
--- a/agenta-backend/agenta_backend/services/evaluator_manager.py
+++ b/agenta-backend/agenta_backend/services/evaluator_manager.py
@@ -17,7 +17,6 @@
from agenta_backend.models.converters import evaluator_config_db_to_pydantic
from agenta_backend.resources.evaluators.evaluators import get_all_evaluators
from agenta_backend.models.api.evaluation_model import Evaluator, EvaluatorConfig
-from agenta_backend.resources.evaluators import evaluators
def get_evaluators() -> List[Evaluator]:
@@ -87,8 +86,6 @@ async def create_evaluator_config(
user_id=str(app.user_id),
name=name,
evaluator_key=evaluator_key,
- organization=app.organization if isCloudEE() else None, # noqa,
- workspace=app.workspace if isCloudEE() else None, # noqa,
settings_values=settings_values,
)
return evaluator_config_db_to_pydantic(evaluator_config=evaluator_config)
@@ -167,8 +164,6 @@ async def create_ready_to_use_evaluators(app: AppDB):
user_id=str(app.user_id),
name=evaluator.name,
evaluator_key=evaluator.key,
- organization=app.organization if isCloudEE() else None, # noqa,
- workspace=app.workspace if isCloudEE() else None, # noqa,
settings_values=settings_values,
)
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 400e0a8478..f367df67ca 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -1,5 +1,3 @@
-import re
-import os
import asyncio
import logging
import traceback
@@ -8,7 +6,7 @@
from celery import shared_task, states
from agenta_backend.utils.common import isCloudEE
-from agenta_backend.models.db_engine import DBEngine
+from agenta_backend.models.db_engine import db_engine
from agenta_backend.services import (
evaluators_service,
llm_apps_service,
@@ -16,9 +14,6 @@
aggregation_service,
)
from agenta_backend.models.api.evaluation_model import EvaluationStatusEnum
-from agenta_backend.models.db_models import (
- AppDB,
-)
from agenta_backend.models.shared_models import (
AggregatedResult,
CorrectAnswer,
@@ -44,10 +39,6 @@
)
from agenta_backend.services.evaluator_manager import get_evaluators
-if isCloudEE():
- from agenta_backend.commons.models.db_models import AppDB_ as AppDB
-else:
- from agenta_backend.models.db_models import AppDB
# Set logger
logger = logging.getLogger(__name__)
@@ -97,7 +88,7 @@ def evaluate(
try:
# 1. Fetch data from the database
- loop.run_until_complete(DBEngine().init_db())
+ loop.run_until_complete(db_engine.init_db())
app = loop.run_until_complete(fetch_app_by_id(app_id))
app_variant_db = loop.run_until_complete(fetch_app_variant_by_id(variant_id))
assert (
@@ -205,8 +196,8 @@ def evaluate(
is_pinned=False,
note="",
results=error_results,
- organization=app.organization if isCloudEE() else None,
- workspace=app.workspace if isCloudEE() else None,
+ organization=str(app.organization_id) if isCloudEE() else None,
+ workspace=str(app.workspace_id) if isCloudEE() else None,
)
)
continue
@@ -277,8 +268,8 @@ def evaluate(
is_pinned=False,
note="",
results=evaluators_results,
- organization=app.organization if isCloudEE() else None,
- workspace=app.workspace if isCloudEE() else None,
+ organization=str(app.organization_id) if isCloudEE() else None,
+ workspace=str(app.workspace_id) if isCloudEE() else None,
)
)
From 0a1ffc426c662eb7aca3a46acfd719ef47d68370 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 25 Jun 2024 13:12:22 +0100
Subject: [PATCH 141/268] refactor (backend): improve db to api model
converters
---
.../agenta_backend/models/api/api_models.py | 4 +--
.../agenta_backend/models/converters.py | 33 ++++++++++---------
.../agenta_backend/models/db_models.py | 3 ++
3 files changed, 23 insertions(+), 17 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/api_models.py b/agenta-backend/agenta_backend/models/api/api_models.py
index 656a2fc554..540b754dbc 100644
--- a/agenta-backend/agenta_backend/models/api/api_models.py
+++ b/agenta-backend/agenta_backend/models/api/api_models.py
@@ -105,7 +105,7 @@ class AppVariantRevision(BaseModel):
revision: int
modified_by: str
config: ConfigDB
- created_at: datetime
+ created_at: str
class AppVariantOutputExtended(BaseModel):
@@ -139,7 +139,7 @@ class EnvironmentRevision(BaseModel):
modified_by: str
deployed_app_variant_revision: Optional[str]
deployment: Optional[str]
- created_at: datetime
+ created_at: str
class EnvironmentOutputExtended(EnvironmentOutput):
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index 46a985e5ed..06e665d1b6 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -293,8 +293,8 @@ def app_variant_db_to_pydantic(
)
if isCloudEE():
- app_variant.organization_id = str(app_variant_db.organization.id)
- app_variant.workspace_id = str(app_variant_db.workspace.id)
+ app_variant.organization_id = str(app_variant_db.organization_id)
+ app_variant.workspace_id = str(app_variant_db.workspace_id)
return app_variant
@@ -344,8 +344,11 @@ async def app_variant_db_revisions_to_output(
AppVariantRevision(
revision=app_variant_revision_db.revision,
modified_by=app_variant_revision_db.modified_by.username,
- config=app_variant_revision_db.config,
- created_at=app_variant_revision_db.created_at,
+ config={
+ "config_name": app_variant_revision_db.config_name, # type: ignore
+ "parameters": app_variant_revision_db.config_parameters # type: ignore
+ },
+ created_at=str(app_variant_revision_db.created_at),
)
)
return app_variant_revisions
@@ -407,8 +410,8 @@ async def environment_db_and_revision_to_extended_output(
app_environment_revisions_db: List[AppEnvironmentRevisionDB],
) -> EnvironmentOutput:
deployed_app_variant_id = (
- str(environment_db.deployed_app_variant)
- if environment_db.deployed_app_variant
+ str(environment_db.deployed_app_variant_id)
+ if isinstance(environment_db.deployed_app_variant_id, uuid.UUID)
else None
)
if deployed_app_variant_id:
@@ -427,19 +430,19 @@ async def environment_db_and_revision_to_extended_output(
revision=app_environment_revision.revision,
modified_by=app_environment_revision.modified_by.username,
deployed_app_variant_revision=str(
- app_environment_revision.deployed_app_variant_revision
+ app_environment_revision.deployed_app_variant_revision_id
),
- deployment=str(app_environment_revision.deployment),
- created_at=app_environment_revision.created_at,
+ deployment=str(app_environment_revision.deployment_id),
+ created_at=str(app_environment_revision.created_at),
)
)
environment_output_extended = EnvironmentOutputExtended(
name=environment_db.name,
- app_id=str(environment_db.app.id),
+ app_id=str(environment_db.app_id),
deployed_app_variant_id=deployed_app_variant_id,
deployed_variant_name=deployed_variant_name,
deployed_app_variant_revision_id=str(
- environment_db.deployed_app_variant_revision.id
+ environment_db.deployed_app_variant_revision_id
),
revision=environment_db.revision,
revisions=app_environment_revisions,
@@ -447,9 +450,9 @@ async def environment_db_and_revision_to_extended_output(
if isCloudEE():
environment_output_extended.organization_id = str(
- environment_db.organization.id
+ environment_db.organization_id
)
- environment_output_extended.workspace_id = str(environment_db.workspace.id)
+ environment_output_extended.workspace_id = str(environment_db.workspace_id)
return environment_output_extended
@@ -469,8 +472,8 @@ def image_db_to_pydantic(image_db: ImageDB) -> ImageExtended:
)
if isCloudEE():
- image.organization_id = str(image_db.organization.id)
- image.workspace_id = str(image_db.workspace.id)
+ image.organization_id = str(image_db.organization_id)
+ image.workspace_id = str(image_db.workspace_id)
return image
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 17f0f6d1e2..94667c6163 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -233,6 +233,9 @@ class AppVariantRevisionsDB(Base):
modified_by = relationship("UserDB")
base = relationship("VariantBaseDB")
+ def get_config(self) -> dict:
+ return {"config_name": self.config_name, "parameters": self.config_parameters}
+
class AppEnvironmentDB(Base):
__tablename__ = "environments"
From 8f1fe9106538bc7943bb16741e9df6ac43bf23b0 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 25 Jun 2024 13:13:15 +0100
Subject: [PATCH 142/268] chore (backend): format codebase with black@23.12.0
---
.../agenta_backend/models/converters.py | 4 +-
.../agenta_backend/models/db_engine.py | 8 ++-
.../agenta_backend/routers/configs_router.py | 32 +++++----
.../routers/evaluation_router.py | 2 +-
.../routers/human_evaluation_router.py | 2 +-
.../agenta_backend/services/db_manager.py | 70 +++++++++++--------
6 files changed, 70 insertions(+), 48 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index 06e665d1b6..429ad1589d 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -345,8 +345,8 @@ async def app_variant_db_revisions_to_output(
revision=app_variant_revision_db.revision,
modified_by=app_variant_revision_db.modified_by.username,
config={
- "config_name": app_variant_revision_db.config_name, # type: ignore
- "parameters": app_variant_revision_db.config_parameters # type: ignore
+ "config_name": app_variant_revision_db.config_name, # type: ignore
+ "parameters": app_variant_revision_db.config_parameters, # type: ignore
},
created_at=str(app_variant_revision_db.created_at),
)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index db321af8b5..aeec1046c1 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -130,10 +130,12 @@ async def initialize_mongodb(self):
"""
try:
- from beanie import init_beanie # type: ignore
- from motor.motor_asyncio import AsyncIOMotorClient # type: ignore
+ from beanie import init_beanie # type: ignore
+ from motor.motor_asyncio import AsyncIOMotorClient # type: ignore
except ImportError as exc:
- raise ValueError(f"It looks like one of the following packages are not installed: beanie, motor. Exception: {str(exc)}")
+ raise ValueError(
+ f"It looks like one of the following packages are not installed: beanie, motor. Exception: {str(exc)}"
+ )
db_name = f"agenta_{self.mode}"
client = AsyncIOMotorClient(self.mongo_uri)
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index d30b567dd1..767dd203d5 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -84,7 +84,7 @@ async def save_config(
traceback.print_exc()
logger.error(f"save_config exception ===> {e}")
- status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
raise HTTPException(status_code, detail=str(e)) from e
@@ -116,7 +116,7 @@ async def get_config(
# in case environment_name is provided, find the variant deployed
if environment_name:
app_environments = await db_manager.list_environments(
- app_id=str(base_db.app_id) # type: ignore
+ app_id=str(base_db.app_id) # type: ignore
)
found_variant_revision = next(
(
@@ -138,14 +138,17 @@ async def get_config(
)
variant_revision = found_variant_revision.revision
- config = {"name": found_variant_revision.config_name, "parameters": found_variant_revision.config_parameters}
+ config = {
+ "name": found_variant_revision.config_name,
+ "parameters": found_variant_revision.config_parameters,
+ }
elif config_name:
variants_db = await db_manager.list_variants_for_base(base_db)
found_variant = next(
(
variant_db
for variant_db in variants_db
- if variant_db.config_name == config_name # type: ignore
+ if variant_db.config_name == config_name # type: ignore
),
None,
)
@@ -155,20 +158,25 @@ async def get_config(
detail=f"Config name {config_name} not found for base {base_id}",
)
variant_revision = found_variant.revision
- config = {"name": found_variant.config_name, "parameters": found_variant.config_parameters}
+ config = {
+ "name": found_variant.config_name,
+ "parameters": found_variant.config_parameters,
+ }
- assert "name" and "parameters" in config, "'name' and 'parameters' not found in configuration"
+ assert (
+ "name" and "parameters" in config
+ ), "'name' and 'parameters' not found in configuration"
return GetConfigResponse(
- config_name=config["name"], # type: ignore
- current_version=variant_revision, # type: ignore
- parameters=config["parameters"], # type: ignore
+ config_name=config["name"], # type: ignore
+ current_version=variant_revision, # type: ignore
+ parameters=config["parameters"], # type: ignore
)
except HTTPException as e:
logger.error(f"get_config http exception: {e.detail}")
raise
except Exception as e:
logger.error(f"get_config exception: {e}")
- status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
raise HTTPException(status_code, detail=str(e))
@@ -196,14 +204,14 @@ async def get_config_deployment_revision(request: Request, deployment_revision_i
)
return GetConfigResponse(
**variant_revision.get_config(),
- current_version=environment_revision.revision, # type: ignore
+ current_version=environment_revision.revision, # type: ignore
)
except Exception as e:
import traceback
traceback.print_exc()
logger.error(f"get config deployment revision exception ===> {e}")
- status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
raise HTTPException(status_code, detail=str(e))
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index 59809a75fb..5440c4d19a 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -149,7 +149,7 @@ async def create_evaluation(
import traceback
traceback.print_exc()
- status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
raise HTTPException(status_code, detail=str(e))
diff --git a/agenta-backend/agenta_backend/routers/human_evaluation_router.py b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
index a258784e7a..cc35bdae14 100644
--- a/agenta-backend/agenta_backend/routers/human_evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/human_evaluation_router.py
@@ -84,7 +84,7 @@ async def create_evaluation(
import traceback
traceback.print_exc()
- status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
+ status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
raise HTTPException(status_code, detail=str(e))
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index f476af07fb..38798d4319 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -182,8 +182,8 @@ async def fetch_app_by_id(app_id: str) -> AppDB:
base_query = select(AppDB).filter_by(id=uuid.UUID(app_id))
if isCloudEE():
base_query = base_query.options(
- joinedload(AppDB.workspace).joinedload(WorkspaceDB.members), # type: ignore
- joinedload(AppDB.organization)
+ joinedload(AppDB.workspace).joinedload(WorkspaceDB.members), # type: ignore
+ joinedload(AppDB.organization),
)
result = await session.execute(base_query)
@@ -207,13 +207,12 @@ async def fetch_app_variant_by_id(
assert app_variant_id is not None, "app_variant_id cannot be None"
async with db_engine.get_session() as session:
base_query = select(AppVariantDB).options(
- joinedload(AppVariantDB.base),
- joinedload(AppVariantDB.app)
+ joinedload(AppVariantDB.base), joinedload(AppVariantDB.app)
)
if isCloudEE():
query = base_query.options(
joinedload(AppVariantDB.user.of_type(UserDB)).load_only(UserDB.uid), # type: ignore
- joinedload(AppVariantDB.image.of_type(ImageDB)).load_only(ImageDB.docker_id, ImageDB.tags), # type: ignore
+ joinedload(AppVariantDB.image.of_type(ImageDB)).load_only(ImageDB.docker_id, ImageDB.tags), # type: ignore
)
else:
query = base_query.options(
@@ -221,9 +220,7 @@ async def fetch_app_variant_by_id(
joinedload(AppVariantDB.image).load_only(ImageDB.docker_id, ImageDB.tags), # type: ignore
)
- result = await session.execute(
- query.filter_by(id=uuid.UUID(app_variant_id))
- )
+ result = await session.execute(query.filter_by(id=uuid.UUID(app_variant_id)))
app_variant = result.scalars().one_or_none()
return app_variant
@@ -1344,10 +1341,12 @@ async def fetch_environment_revisions_for_environment(
"""
async with db_engine.get_session() as session:
- query = select(AppEnvironmentRevisionDB).filter_by(environment_id=environment.id)
+ query = select(AppEnvironmentRevisionDB).filter_by(
+ environment_id=environment.id
+ )
if isCloudEE():
query = query.options(
- joinedload(AppEnvironmentRevisionDB.modified_by.of_type(UserDB)).load_only(UserDB.username) # type: ignore
+ joinedload(AppEnvironmentRevisionDB.modified_by.of_type(UserDB)).load_only(UserDB.username) # type: ignore
)
result = await session.execute(query)
environment_revisions = result.scalars().all()
@@ -1414,7 +1413,7 @@ async def update_app_environment_deployed_variant_revision(
select(AppEnvironmentDB).filter_by(id=uuid.UUID(app_environment_id))
)
app_environment = app_environment_result.scalars().one_or_none()
- app_environment.deployed_app_variant_revision_id = app_variant_revision.id # type: ignore
+ app_environment.deployed_app_variant_revision_id = app_variant_revision.id # type: ignore
await session.commit()
await session.refresh(app_environment)
@@ -1569,8 +1568,9 @@ async def list_app_variant_revisions_by_variant(
base_query = select(AppVariantRevisionsDB).filter_by(variant_id=app_variant.id)
if isCloudEE():
base_query = base_query.options(
- joinedload(AppVariantRevisionsDB.modified_by.of_type(UserDB))
- .load_only(UserDB.username) # type: ignore
+ joinedload(AppVariantRevisionsDB.modified_by.of_type(UserDB)).load_only(
+ UserDB.username
+ ) # type: ignore
)
result = await session.execute(base_query)
@@ -1589,16 +1589,20 @@ async def fetch_app_variant_revision(app_variant: str, revision_number: int):
"""
async with db_engine.get_session() as session:
- base_query = select(AppVariantRevisionsDB).filter_by(variant_id=uuid.UUID(app_variant), revision=revision_number)
+ base_query = select(AppVariantRevisionsDB).filter_by(
+ variant_id=uuid.UUID(app_variant), revision=revision_number
+ )
if isCloudEE():
query = base_query.options(
- joinedload(AppVariantRevisionsDB.modified_by.of_type(UserDB))
- .load_only(UserDB.username) # type: ignore
+ joinedload(AppVariantRevisionsDB.modified_by.of_type(UserDB)).load_only(
+ UserDB.username
+ ) # type: ignore
)
else:
query = base_query.options(
- joinedload(AppVariantRevisionsDB.modified_by)
- .load_only(UserDB.username) # type: ignore
+ joinedload(AppVariantRevisionsDB.modified_by).load_only(
+ UserDB.username
+ ) # type: ignore
)
result = await session.execute(query)
app_variant_revisions = result.scalars().one_or_none()
@@ -1842,7 +1846,7 @@ async def get_app_variant_instance_by_id(variant_id: str) -> AppVariantDB:
async def get_app_variant_revision_by_id(
- variant_revision_id: str
+ variant_revision_id: str,
) -> AppVariantRevisionsDB:
"""Get the app variant revision object from the database with the provided id.
@@ -2058,7 +2062,9 @@ async def fetch_human_evaluation_variants(human_evaluation_id: str):
"""
async with db_engine.get_session() as session:
- base_query = select(HumanEvaluationVariantDB).filter_by(human_evaluation_id=uuid.UUID(human_evaluation_id))
+ base_query = select(HumanEvaluationVariantDB).filter_by(
+ human_evaluation_id=uuid.UUID(human_evaluation_id)
+ )
if isCloudEE():
query = base_query.options(
joinedload(HumanEvaluationVariantDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
@@ -2066,10 +2072,12 @@ async def fetch_human_evaluation_variants(human_evaluation_id: str):
)
else:
query = base_query.options(
- joinedload(HumanEvaluationVariantDB.variant)
- .load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
- joinedload(HumanEvaluationVariantDB.variant_revision)
- .load_only(AppVariantRevisionsDB.revision, AppVariantRevisionsDB.id), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant).load_only(
+ AppVariantDB.id, AppVariantDB.variant_name
+ ), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant_revision).load_only(
+ AppVariantRevisionsDB.revision, AppVariantRevisionsDB.id
+ ), # type: ignore
)
result = await session.execute(query)
evaluation_variants = result.scalars().all()
@@ -2874,11 +2882,16 @@ async def fetch_eval_aggregated_results(evaluation_id: str):
"""
async with db_engine.get_session() as session:
- base_query = select(EvaluationAggregatedResultDB).filter_by(evaluation_id=uuid.UUID(evaluation_id))
+ base_query = select(EvaluationAggregatedResultDB).filter_by(
+ evaluation_id=uuid.UUID(evaluation_id)
+ )
if isCloudEE():
query = base_query.options(
- joinedload(EvaluationAggregatedResultDB.evaluator_config.of_type(EvaluatorConfigDB))
- .load_only(
+ joinedload(
+ EvaluationAggregatedResultDB.evaluator_config.of_type(
+ EvaluatorConfigDB
+ )
+ ).load_only(
EvaluatorConfigDB.id, # type: ignore
EvaluatorConfigDB.name, # type: ignore
EvaluatorConfigDB.evaluator_key, # type: ignore
@@ -2929,8 +2942,7 @@ async def fetch_evaluator_config(evaluator_config_id: str):
async with db_engine.get_session() as session:
result = await session.execute(
- select(EvaluatorConfigDB)
- .filter_by(id=uuid.UUID(evaluator_config_id))
+ select(EvaluatorConfigDB).filter_by(id=uuid.UUID(evaluator_config_id))
)
evaluator_config = result.scalars().one_or_none()
return evaluator_config
From 80538cd33b58e6ebdca3ff9adbece84079c9efaf Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Tue, 25 Jun 2024 14:15:20 +0100
Subject: [PATCH 143/268] fix(frontend): prevents duplicate entries when saving
new testset
---
.../src/components/TestSetTable/TestsetTable.tsx | 14 +++++++++++---
.../apps/[app_id]/testsets/[testset_id]/index.tsx | 2 +-
2 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/agenta-web/src/components/TestSetTable/TestsetTable.tsx b/agenta-web/src/components/TestSetTable/TestsetTable.tsx
index dad1316aca..9bfc913c4f 100644
--- a/agenta-web/src/components/TestSetTable/TestsetTable.tsx
+++ b/agenta-web/src/components/TestSetTable/TestsetTable.tsx
@@ -155,6 +155,7 @@ const TestsetTable: React.FC = ({mode}) => {
const [columnDefs, setColumnDefs] = useState<{field: string; [key: string]: any}[]>([])
const [inputValues, setInputValues] = useStateCallback(columnDefs.map((col) => col.field))
const [focusedRowData, setFocusedRowData] = useState()
+ const [isDuplicate, setIsDuplicate] = useState({bool: false, id: ""})
const gridRef = useRef(null)
const [selectedRow, setSelectedRow] = useState([])
@@ -438,12 +439,17 @@ const TestsetTable: React.FC = ({mode}) => {
const onSaveData = async () => {
try {
setIsLoading(true)
- const afterSave = (response: AxiosResponse) => {
+ const afterSave = (response: AxiosResponse, isNew?: boolean) => {
if (response.status === 200) {
setUnSavedChanges(false, () => {
mssgModal("success", "Changes saved successfully!")
})
setIsLoading(false)
+ if (isNew) {
+ // createNewTestset returns id prop
+ // updateTestset returns _id prop
+ setIsDuplicate({bool: true, id: response.data.id || response.data._id})
+ }
}
}
@@ -452,8 +458,10 @@ const TestsetTable: React.FC = ({mode}) => {
setIsModalOpen(true)
setIsLoading(false)
} else {
- const response = await createNewTestset(appId, testsetName, rowData)
- afterSave(response)
+ const response = isDuplicate.bool
+ ? await updateTestset(isDuplicate.id, testsetName, rowData)
+ : await createNewTestset(appId, testsetName, rowData)
+ afterSave(response, true)
}
} else if (mode === "edit") {
if (!testsetName) {
diff --git a/agenta-web/src/pages/apps/[app_id]/testsets/[testset_id]/index.tsx b/agenta-web/src/pages/apps/[app_id]/testsets/[testset_id]/index.tsx
index 7518ed7d0f..9e9a4a47bf 100644
--- a/agenta-web/src/pages/apps/[app_id]/testsets/[testset_id]/index.tsx
+++ b/agenta-web/src/pages/apps/[app_id]/testsets/[testset_id]/index.tsx
@@ -1,4 +1,4 @@
-import React, {useState, useEffect} from "react"
+import React from "react"
import TestsetTable from "@/components/TestSetTable/TestsetTable"
const testsetDisplay = () => {
From f3d89c05ac4acd1d2af0df63cd1ed31cae5ef01a Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Tue, 25 Jun 2024 15:04:44 +0100
Subject: [PATCH 144/268] fix: improved modifications
---
.../components/TestSetTable/TestsetTable.tsx | 36 +++++++++----------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/agenta-web/src/components/TestSetTable/TestsetTable.tsx b/agenta-web/src/components/TestSetTable/TestsetTable.tsx
index 9bfc913c4f..355695dd3b 100644
--- a/agenta-web/src/components/TestSetTable/TestsetTable.tsx
+++ b/agenta-web/src/components/TestSetTable/TestsetTable.tsx
@@ -155,7 +155,8 @@ const TestsetTable: React.FC = ({mode}) => {
const [columnDefs, setColumnDefs] = useState<{field: string; [key: string]: any}[]>([])
const [inputValues, setInputValues] = useStateCallback(columnDefs.map((col) => col.field))
const [focusedRowData, setFocusedRowData] = useState()
- const [isDuplicate, setIsDuplicate] = useState({bool: false, id: ""})
+ const [writeMode, setWriteMode] = useState(mode)
+ const [duplicateTestsetId, setDuplicateTestsetId] = useState(undefined)
const gridRef = useRef(null)
const [selectedRow, setSelectedRow] = useState([])
@@ -203,7 +204,7 @@ const TestsetTable: React.FC = ({mode}) => {
ADD_BUTTON_COL,
]
setColumnDefs(newColDefs)
- if (mode === "create") {
+ if (writeMode === "create") {
const initialRowData = Array(3).fill({})
const separateRowData = initialRowData.map(() => {
return colData.reduce((acc, curr) => ({...acc, [curr.field]: ""}), {})
@@ -214,7 +215,7 @@ const TestsetTable: React.FC = ({mode}) => {
setInputValues(newColDefs.filter((col) => !!col.field).map((col) => col.field))
}
- if (mode === "edit" && testset_id) {
+ if (writeMode === "edit" && testset_id) {
setLoading(true)
fetchTestset(testset_id as string).then((data) => {
setTestsetName(data.name)
@@ -225,7 +226,7 @@ const TestsetTable: React.FC = ({mode}) => {
})),
)
})
- } else if (mode === "create" && appId) {
+ } else if (writeMode === "create" && appId) {
setLoading(true)
;(async () => {
const backendVariants = await fetchVariants(appId)
@@ -239,7 +240,7 @@ const TestsetTable: React.FC = ({mode}) => {
applyColData([])
})
}
- }, [mode, testset_id, appId])
+ }, [writeMode, testset_id, appId])
const updateTable = (inputValues: string[]) => {
const dataColumns = columnDefs.filter((colDef) => colDef.field !== "")
@@ -439,35 +440,34 @@ const TestsetTable: React.FC = ({mode}) => {
const onSaveData = async () => {
try {
setIsLoading(true)
- const afterSave = (response: AxiosResponse, isNew?: boolean) => {
+ const afterSave = (response: AxiosResponse) => {
if (response.status === 200) {
setUnSavedChanges(false, () => {
mssgModal("success", "Changes saved successfully!")
})
setIsLoading(false)
- if (isNew) {
- // createNewTestset returns id prop
- // updateTestset returns _id prop
- setIsDuplicate({bool: true, id: response.data.id || response.data._id})
- }
+ setWriteMode("edit")
}
}
- if (mode === "create") {
+ if (writeMode === "create") {
if (!testsetName) {
setIsModalOpen(true)
setIsLoading(false)
} else {
- const response = isDuplicate.bool
- ? await updateTestset(isDuplicate.id, testsetName, rowData)
- : await createNewTestset(appId, testsetName, rowData)
- afterSave(response, true)
+ const response = await createNewTestset(appId, testsetName, rowData)
+ afterSave(response)
+ setDuplicateTestsetId(response.data.id)
}
- } else if (mode === "edit") {
+ } else if (writeMode === "edit") {
if (!testsetName) {
setIsModalOpen(true)
} else {
- const response = await updateTestset(testset_id as string, testsetName, rowData)
+ const response = await updateTestset(
+ (duplicateTestsetId || testset_id) as string,
+ testsetName,
+ rowData,
+ )
afterSave(response)
}
}
From 7d016c3f61a2b395be60e440bec1e4b83af7da90 Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Tue, 25 Jun 2024 16:29:35 +0200
Subject: [PATCH 145/268] fix(tool): [bug] Incorrect Configuration for Variants
fix migration
fixed migration for app variant parameters
Closes: [bug] Incorrect Configuration for Variants
---
.../agenta_backend/migrations/mongo_to_postgres/migration.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index 04780119e5..a41a84b2b8 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -172,7 +172,7 @@ async def transform_app_variant(variant):
"base_name": variant.get("base_name"),
"base_id": base_uuid,
"config_name": variant["config_name"],
- "config_parameters": variant["config"],
+ "config_parameters": variant["config"]["parameters"],
"created_at": get_datetime(variant.get("created_at")),
"updated_at": get_datetime(variant.get("updated_at")),
}
From a990a949143dcc8bd4aeabb14740c92738db42f4 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Tue, 25 Jun 2024 15:53:21 +0100
Subject: [PATCH 146/268] refactor(frontend): modified state name
---
agenta-web/src/components/TestSetTable/TestsetTable.tsx | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/agenta-web/src/components/TestSetTable/TestsetTable.tsx b/agenta-web/src/components/TestSetTable/TestsetTable.tsx
index 355695dd3b..936fc8189d 100644
--- a/agenta-web/src/components/TestSetTable/TestsetTable.tsx
+++ b/agenta-web/src/components/TestSetTable/TestsetTable.tsx
@@ -156,7 +156,7 @@ const TestsetTable: React.FC = ({mode}) => {
const [inputValues, setInputValues] = useStateCallback(columnDefs.map((col) => col.field))
const [focusedRowData, setFocusedRowData] = useState()
const [writeMode, setWriteMode] = useState(mode)
- const [duplicateTestsetId, setDuplicateTestsetId] = useState(undefined)
+ const [testsetId, setTestsetId] = useState(undefined)
const gridRef = useRef(null)
const [selectedRow, setSelectedRow] = useState([])
@@ -457,14 +457,14 @@ const TestsetTable: React.FC = ({mode}) => {
} else {
const response = await createNewTestset(appId, testsetName, rowData)
afterSave(response)
- setDuplicateTestsetId(response.data.id)
+ setTestsetId(response.data.id)
}
} else if (writeMode === "edit") {
if (!testsetName) {
setIsModalOpen(true)
} else {
const response = await updateTestset(
- (duplicateTestsetId || testset_id) as string,
+ (testsetId || testset_id) as string,
testsetName,
rowData,
)
From 582b60d0b3fd27ea7b6fd37a89ca22069eaed167 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 25 Jun 2024 18:16:00 +0200
Subject: [PATCH 147/268] fix human evaluation variants results
---
.../agenta_backend/migrations/mongo_to_postgres/migration.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index a41a84b2b8..6d65bf071e 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -356,7 +356,9 @@ async def transform_human_evaluation_scenario(scenario):
evaluation_uuid = await get_mapped_uuid(
"human_evaluations", scenario["evaluation"].id
)
+ variant_uuid = str(await get_mapped_uuid("app_variants", scenario["vote"]))
scenario_uuid = generate_uuid()
+
await store_mapping("human_evaluations_scenarios", scenario["_id"], scenario_uuid)
return {
"id": scenario_uuid,
@@ -364,7 +366,7 @@ async def transform_human_evaluation_scenario(scenario):
"evaluation_id": evaluation_uuid,
"inputs": scenario["inputs"],
"outputs": scenario["outputs"],
- "vote": scenario.get("vote"),
+ "vote": variant_uuid,
"score": scenario.get("score"),
"correct_answer": scenario.get("correct_answer"),
"created_at": get_datetime(scenario.get("created_at")),
From b396da1b01d82cd299b1c5e6cdcdecc8bfe9390f Mon Sep 17 00:00:00 2001
From: aakrem
Date: Tue, 25 Jun 2024 18:25:16 +0200
Subject: [PATCH 148/268] fix tqdm progress length
---
.../agenta_backend/migrations/mongo_to_postgres/migration.py | 2 +-
.../agenta_backend/migrations/mongo_to_postgres/utils.py | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index 6d65bf071e..d2d5958733 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -532,7 +532,7 @@ async def main():
transform_evaluation_scenario,
EvaluationScenarioResultDB,
)
- print("\n ========================================================")
+ print("\n========================================================")
print("Migration completed successfully.")
except Exception as e:
import traceback
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index ce994d54d8..ac9f80e763 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -137,6 +137,7 @@ async def migrate_collection(
range(0, total_docs, BATCH_SIZE),
total=(total_docs - 1) // BATCH_SIZE + 1,
desc=f"Migrating: {collection_name}",
+ ncols=85,
):
batch = await asyncio.get_event_loop().run_in_executor(
None,
From 0c6520fdf9cf4f2188a5dd1297eede6158bb066b Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 26 Jun 2024 10:48:38 +0200
Subject: [PATCH 149/268] move all previous migrations to a separate folder
---
.../v0_10_0_to_v0_11_0/20240131130415_updating_app_environment.py | 0
.../20240131132738_create_app_environment_revision.py | 0
.../v0_11_0_to_v0_12_0/20240126100524_models_revamp.py | 0
.../v0_11_0_to_v0_12_0/20240126144938_drop_organization_model.py | 0
.../20240509122536_evaluation_scenario_correct_answer.py | 0
.../v0_16_0_1_to_v0_16_0_2/20240603145957_code_evaluators.py | 0
.../{ => mongo}/v0_16_0_to_v0_16_0_1/20240602150517_evaluators.py | 0
.../v0_7_0_to_v0_8_0/20240110001454_initial_migration.py | 0
.../20240110132547_create_exact_match_evaluator_config.py | 0
.../v0_7_0_to_v0_8_0/20240110165900_evaluations_revamp.py | 0
.../20240112120721_evaluation_scenarios_revamp.py | 0
.../20240112120740_human_a_b_evaluation_scenarios.py | 0
.../20240112120800_human_single_model_evaluation_scenarios.py | 0
.../20240113131802_new_evaluation_results_aggregation.py | 0
.../20240113204909_change_odmantic_reference_to_link.py | 0
.../v0_8_0_to_v0_9_0/20240130133518_updating_app_variant.py | 0
.../20240130133544_create_app_variant_revision.py | 0
.../v0_8_0_to_v0_9_0/20240130133603_updating_app_environment.py | 0
.../v0_8_0_to_v0_9_0/20240130140202_connection_evaluation.py | 0
.../v0_9_0_to_v10_0_0/20240124174954_evaluation_error_handling.py | 0
.../20240124225808_evaluation_scenario_error_handling.py | 0
21 files changed, 0 insertions(+), 0 deletions(-)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_10_0_to_v0_11_0/20240131130415_updating_app_environment.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_10_0_to_v0_11_0/20240131132738_create_app_environment_revision.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_11_0_to_v0_12_0/20240126100524_models_revamp.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_11_0_to_v0_12_0/20240126144938_drop_organization_model.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_12_0_to_v0_16_0/20240509122536_evaluation_scenario_correct_answer.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_16_0_1_to_v0_16_0_2/20240603145957_code_evaluators.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_16_0_to_v0_16_0_1/20240602150517_evaluators.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_7_0_to_v0_8_0/20240110001454_initial_migration.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_7_0_to_v0_8_0/20240110132547_create_exact_match_evaluator_config.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_7_0_to_v0_8_0/20240110165900_evaluations_revamp.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_7_0_to_v0_8_0/20240112120721_evaluation_scenarios_revamp.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_7_0_to_v0_8_0/20240112120740_human_a_b_evaluation_scenarios.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_7_0_to_v0_8_0/20240112120800_human_single_model_evaluation_scenarios.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_7_0_to_v0_8_0/20240113131802_new_evaluation_results_aggregation.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_7_0_to_v0_8_0/20240113204909_change_odmantic_reference_to_link.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_8_0_to_v0_9_0/20240130133518_updating_app_variant.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_8_0_to_v0_9_0/20240130133544_create_app_variant_revision.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_8_0_to_v0_9_0/20240130133603_updating_app_environment.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_8_0_to_v0_9_0/20240130140202_connection_evaluation.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_9_0_to_v10_0_0/20240124174954_evaluation_error_handling.py (100%)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/v0_9_0_to_v10_0_0/20240124225808_evaluation_scenario_error_handling.py (100%)
diff --git a/agenta-backend/agenta_backend/migrations/v0_10_0_to_v0_11_0/20240131130415_updating_app_environment.py b/agenta-backend/agenta_backend/migrations/mongo/v0_10_0_to_v0_11_0/20240131130415_updating_app_environment.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_10_0_to_v0_11_0/20240131130415_updating_app_environment.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_10_0_to_v0_11_0/20240131130415_updating_app_environment.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_10_0_to_v0_11_0/20240131132738_create_app_environment_revision.py b/agenta-backend/agenta_backend/migrations/mongo/v0_10_0_to_v0_11_0/20240131132738_create_app_environment_revision.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_10_0_to_v0_11_0/20240131132738_create_app_environment_revision.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_10_0_to_v0_11_0/20240131132738_create_app_environment_revision.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_11_0_to_v0_12_0/20240126100524_models_revamp.py b/agenta-backend/agenta_backend/migrations/mongo/v0_11_0_to_v0_12_0/20240126100524_models_revamp.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_11_0_to_v0_12_0/20240126100524_models_revamp.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_11_0_to_v0_12_0/20240126100524_models_revamp.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_11_0_to_v0_12_0/20240126144938_drop_organization_model.py b/agenta-backend/agenta_backend/migrations/mongo/v0_11_0_to_v0_12_0/20240126144938_drop_organization_model.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_11_0_to_v0_12_0/20240126144938_drop_organization_model.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_11_0_to_v0_12_0/20240126144938_drop_organization_model.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_12_0_to_v0_16_0/20240509122536_evaluation_scenario_correct_answer.py b/agenta-backend/agenta_backend/migrations/mongo/v0_12_0_to_v0_16_0/20240509122536_evaluation_scenario_correct_answer.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_12_0_to_v0_16_0/20240509122536_evaluation_scenario_correct_answer.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_12_0_to_v0_16_0/20240509122536_evaluation_scenario_correct_answer.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_16_0_1_to_v0_16_0_2/20240603145957_code_evaluators.py b/agenta-backend/agenta_backend/migrations/mongo/v0_16_0_1_to_v0_16_0_2/20240603145957_code_evaluators.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_16_0_1_to_v0_16_0_2/20240603145957_code_evaluators.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_16_0_1_to_v0_16_0_2/20240603145957_code_evaluators.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_16_0_to_v0_16_0_1/20240602150517_evaluators.py b/agenta-backend/agenta_backend/migrations/mongo/v0_16_0_to_v0_16_0_1/20240602150517_evaluators.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_16_0_to_v0_16_0_1/20240602150517_evaluators.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_16_0_to_v0_16_0_1/20240602150517_evaluators.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240110001454_initial_migration.py b/agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240110001454_initial_migration.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240110001454_initial_migration.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240110001454_initial_migration.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240110132547_create_exact_match_evaluator_config.py b/agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240110132547_create_exact_match_evaluator_config.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240110132547_create_exact_match_evaluator_config.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240110132547_create_exact_match_evaluator_config.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240110165900_evaluations_revamp.py b/agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240110165900_evaluations_revamp.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240110165900_evaluations_revamp.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240110165900_evaluations_revamp.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240112120721_evaluation_scenarios_revamp.py b/agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240112120721_evaluation_scenarios_revamp.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240112120721_evaluation_scenarios_revamp.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240112120721_evaluation_scenarios_revamp.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240112120740_human_a_b_evaluation_scenarios.py b/agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240112120740_human_a_b_evaluation_scenarios.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240112120740_human_a_b_evaluation_scenarios.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240112120740_human_a_b_evaluation_scenarios.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240112120800_human_single_model_evaluation_scenarios.py b/agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240112120800_human_single_model_evaluation_scenarios.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240112120800_human_single_model_evaluation_scenarios.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240112120800_human_single_model_evaluation_scenarios.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240113131802_new_evaluation_results_aggregation.py b/agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240113131802_new_evaluation_results_aggregation.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240113131802_new_evaluation_results_aggregation.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240113131802_new_evaluation_results_aggregation.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240113204909_change_odmantic_reference_to_link.py b/agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240113204909_change_odmantic_reference_to_link.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_7_0_to_v0_8_0/20240113204909_change_odmantic_reference_to_link.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_7_0_to_v0_8_0/20240113204909_change_odmantic_reference_to_link.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_8_0_to_v0_9_0/20240130133518_updating_app_variant.py b/agenta-backend/agenta_backend/migrations/mongo/v0_8_0_to_v0_9_0/20240130133518_updating_app_variant.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_8_0_to_v0_9_0/20240130133518_updating_app_variant.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_8_0_to_v0_9_0/20240130133518_updating_app_variant.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_8_0_to_v0_9_0/20240130133544_create_app_variant_revision.py b/agenta-backend/agenta_backend/migrations/mongo/v0_8_0_to_v0_9_0/20240130133544_create_app_variant_revision.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_8_0_to_v0_9_0/20240130133544_create_app_variant_revision.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_8_0_to_v0_9_0/20240130133544_create_app_variant_revision.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_8_0_to_v0_9_0/20240130133603_updating_app_environment.py b/agenta-backend/agenta_backend/migrations/mongo/v0_8_0_to_v0_9_0/20240130133603_updating_app_environment.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_8_0_to_v0_9_0/20240130133603_updating_app_environment.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_8_0_to_v0_9_0/20240130133603_updating_app_environment.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_8_0_to_v0_9_0/20240130140202_connection_evaluation.py b/agenta-backend/agenta_backend/migrations/mongo/v0_8_0_to_v0_9_0/20240130140202_connection_evaluation.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_8_0_to_v0_9_0/20240130140202_connection_evaluation.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_8_0_to_v0_9_0/20240130140202_connection_evaluation.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_9_0_to_v10_0_0/20240124174954_evaluation_error_handling.py b/agenta-backend/agenta_backend/migrations/mongo/v0_9_0_to_v10_0_0/20240124174954_evaluation_error_handling.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_9_0_to_v10_0_0/20240124174954_evaluation_error_handling.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_9_0_to_v10_0_0/20240124174954_evaluation_error_handling.py
diff --git a/agenta-backend/agenta_backend/migrations/v0_9_0_to_v10_0_0/20240124225808_evaluation_scenario_error_handling.py b/agenta-backend/agenta_backend/migrations/mongo/v0_9_0_to_v10_0_0/20240124225808_evaluation_scenario_error_handling.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/v0_9_0_to_v10_0_0/20240124225808_evaluation_scenario_error_handling.py
rename to agenta-backend/agenta_backend/migrations/mongo/v0_9_0_to_v10_0_0/20240124225808_evaluation_scenario_error_handling.py
From 1002e642ec4aead5f0bd1f1700302e230aba10c8 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 26 Jun 2024 10:49:24 +0200
Subject: [PATCH 150/268] add backup
---
agenta-backend/agenta_backend/migrations/{ => mongo}/backup.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename agenta-backend/agenta_backend/migrations/{ => mongo}/backup.py (100%)
diff --git a/agenta-backend/agenta_backend/migrations/backup.py b/agenta-backend/agenta_backend/migrations/mongo/backup.py
similarity index 100%
rename from agenta-backend/agenta_backend/migrations/backup.py
rename to agenta-backend/agenta_backend/migrations/mongo/backup.py
From fdaf060acab6df85109085a06feeda4285d2123b Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Wed, 26 Jun 2024 12:23:22 +0200
Subject: [PATCH 151/268] fix(backend): 1817 fix access to variant revision
fix the access to variant revision for deleted variants
Closes: 1817
---
agenta-backend/agenta_backend/models/converters.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index cb73ea5976..1f89ffa117 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -160,7 +160,7 @@ async def human_evaluation_db_to_pydantic(
variant_revision = await db_manager.get_app_variant_revision_by_id(
str(variant_revision_id)
)
- revision = variant_revision.revision
+ revision = variant_revision.revision if variant_revision else ""
revisions.append(str(revision))
return HumanEvaluation(
From bdcfdf5ba82b8478fdfcdd28182e5f8caaa0c019 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 26 Jun 2024 21:46:07 +0100
Subject: [PATCH 152/268] minor refactor (backend): ensure that when
docker_images are deleted, the referenced pk in app_variant is removed
---
agenta-backend/agenta_backend/models/db_models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 94667c6163..a45774b308 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -175,7 +175,7 @@ class AppVariantDB(Base):
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
variant_name = Column(String)
revision = Column(Integer)
- image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id"))
+ image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id", ondelete="SET NULL"))
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_name = Column(String)
From 50a3b037f462f322782eb266a05990e307c8e48b Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 26 Jun 2024 21:49:30 +0100
Subject: [PATCH 153/268] refactor (backend): deploy current LLM app
configuration to production when sent to backend
---
.../agenta_backend/routers/configs_router.py | 12 ++++++++++++
agenta-backend/agenta_backend/services/db_manager.py | 9 ++-------
2 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/configs_router.py b/agenta-backend/agenta_backend/routers/configs_router.py
index 767dd203d5..890707999e 100644
--- a/agenta-backend/agenta_backend/routers/configs_router.py
+++ b/agenta-backend/agenta_backend/routers/configs_router.py
@@ -61,6 +61,13 @@ async def save_config(
parameters=payload.parameters,
user_uid=request.state.user_id,
)
+
+ logger.debug("Deploying to production environment")
+ await db_manager.deploy_to_environment(
+ environment_name="production",
+ variant_id=str(variant_to_overwrite.id),
+ user_uid=request.state.user_id,
+ )
else:
raise HTTPException(
status_code=400,
@@ -76,6 +83,7 @@ async def save_config(
parameters=payload.parameters,
user_uid=request.state.user_id,
)
+
except HTTPException as e:
logger.error(f"save_config http exception ===> {e.detail}")
raise
@@ -175,6 +183,9 @@ async def get_config(
logger.error(f"get_config http exception: {e.detail}")
raise
except Exception as e:
+ import traceback
+
+ traceback.print_exc()
logger.error(f"get_config exception: {e}")
status_code = e.status_code if hasattr(e, "status_code") else 500 # type: ignore
raise HTTPException(status_code, detail=str(e))
@@ -202,6 +213,7 @@ async def get_config_deployment_revision(request: Request, deployment_revision_i
404,
f"No configuration found for deployment revision {deployment_revision_id}",
)
+
return GetConfigResponse(
**variant_revision.get_config(),
current_version=environment_revision.revision, # type: ignore
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 38798d4319..565257983b 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -211,6 +211,7 @@ async def fetch_app_variant_by_id(
)
if isCloudEE():
query = base_query.options(
+ joinedload(AppVariantDB.organization),
joinedload(AppVariantDB.user.of_type(UserDB)).load_only(UserDB.uid), # type: ignore
joinedload(AppVariantDB.image.of_type(ImageDB)).load_only(ImageDB.docker_id, ImageDB.tags), # type: ignore
)
@@ -1257,14 +1258,8 @@ async def deploy_to_environment(
if environment_db is None:
raise ValueError(f"Environment {environment_name} not found")
- # TODO: Modify below to add logic to disable redeployment of the same variant revision here and in front-end
- # if environment_db.deployed_app_variant_ == app_variant_db.id:
- # raise ValueError(
- # f"Variant {app_variant_db.app.app_name}/{app_variant_db.variant_name} is already deployed to the environment {environment_name}"
- # )
-
# Update the environment with the new variant name
- environment_db.revision += 1 # type: ignore
+ environment_db.revision = app_variant_revision_db.revision # type: ignore
environment_db.deployed_app_variant_id = app_variant_db.id
environment_db.deployed_app_variant_revision_id = app_variant_revision_db.id
environment_db.deployment_id = deployment.id
From a6afc7fffc04ea246d9e24db2d0ddddd7cc71230 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 26 Jun 2024 22:53:14 +0100
Subject: [PATCH 154/268] refactor (backend): ensures that a 404 exception is
raised in the case a revision does not exist for a variant
---
.../agenta_backend/routers/variants_router.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/routers/variants_router.py b/agenta-backend/agenta_backend/routers/variants_router.py
index f087ffed5c..81bf24eba6 100644
--- a/agenta-backend/agenta_backend/routers/variants_router.py
+++ b/agenta-backend/agenta_backend/routers/variants_router.py
@@ -424,6 +424,7 @@ async def get_variant_revisions(variant_id: str, request: Request):
async def get_variant_revision(variant_id: str, revision_number: int, request: Request):
logger.debug("getting variant revision: ", variant_id, revision_number)
try:
+ assert variant_id != "undefined", "Variant id is required to retrieve variant revision"
app_variant = await db_manager.fetch_app_variant_by_id(
app_variant_id=variant_id
)
@@ -446,7 +447,14 @@ async def get_variant_revision(variant_id: str, revision_number: int, request: R
app_variant_revision = await db_manager.fetch_app_variant_revision(
variant_id, revision_number
)
+ if not app_variant_revision:
+ raise HTTPException(404, detail=f"Revision {revision_number} does not exist for variant '{app_variant.variant_name}'. Please check the available revisions and try again.")
+
return await converters.app_variant_db_revision_to_output(app_variant_revision)
except Exception as e:
+ import traceback
+
+ traceback.print_exc()
logger.exception(f"An error occurred: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
+ status_code = e.status_code if hasattr(e, "status_code") else 500
+ raise HTTPException(status_code=status_code, detail=str(e))
From 3f0779669ac64823df9ac4dd77c8172eb6a516e7 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 26 Jun 2024 22:59:45 +0100
Subject: [PATCH 155/268] refactor (backend): fix error when updating variant
image during CLI re-serving
---
agenta-backend/agenta_backend/routers/app_router.py | 11 ++---------
agenta-backend/agenta_backend/services/app_manager.py | 6 +++---
2 files changed, 5 insertions(+), 12 deletions(-)
diff --git a/agenta-backend/agenta_backend/routers/app_router.py b/agenta-backend/agenta_backend/routers/app_router.py
index 85cb62930b..ae1bc3157a 100644
--- a/agenta-backend/agenta_backend/routers/app_router.py
+++ b/agenta-backend/agenta_backend/routers/app_router.py
@@ -256,7 +256,7 @@ async def create_app(
has_permission = await check_rbac_permission(
user_org_workspace_data=user_org_workspace_data,
- workspace_id=workspace.id,
+ workspace_id=str(workspace.id),
organization=organization,
permission=Permission.CREATE_APPLICATION,
)
@@ -276,7 +276,7 @@ async def create_app(
payload.app_name,
request.state.user_id,
organization_id if isCloudEE() else None,
- workspace.id if isCloudEE() else None,
+ str(workspace.id) if isCloudEE() else None,
)
return CreateAppOutput(app_id=str(app_db.id), app_name=str(app_db.app_name))
except Exception as e:
@@ -600,13 +600,6 @@ async def create_app_and_variant_from_template(
envvars = {} if payload.env_vars is None else payload.env_vars
await app_manager.start_variant(app_variant_db, envvars)
- logger.debug("Step 11: Deploying to production environment")
- await db_manager.deploy_to_environment(
- environment_name="production",
- variant_id=str(app_variant_db.id),
- user_uid=request.state.user_id,
- )
-
logger.debug("End: Successfully created app and variant")
return await converters.app_variant_db_to_output(app_variant_db)
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index 8d428367d0..e88e8ea687 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -163,11 +163,11 @@ async def update_variant_image(
docker_id=image.docker_id,
user=app_variant_db.user,
deletable=True,
- organization=app_variant_db.organization if isCloudEE() else None, # noqa
- workspace=app_variant_db.workspace if isCloudEE() else None, # noqa
+ organization=str(app_variant_db.organization_id) if isCloudEE() else None, # noqa
+ workspace=str(app_variant_db.workspace_id) if isCloudEE() else None, # noqa
)
# Update base with new image
- await db_manager.update_base(str(app_variant_db.base_id), image=db_image)
+ await db_manager.update_base(str(app_variant_db.base_id), image_id=db_image.id)
# Update variant to remove configuration
await db_manager.update_variant_parameters(
str(app_variant_db.id), parameters={}, user_uid=user_uid
From 183456a370885d4c9ce81bba00adb65010652383 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 26 Jun 2024 23:02:14 +0100
Subject: [PATCH 156/268] chore (backend): format codebase with black@23.12.0
---
agenta-backend/agenta_backend/models/db_models.py | 4 +++-
agenta-backend/agenta_backend/routers/variants_router.py | 9 +++++++--
agenta-backend/agenta_backend/services/app_manager.py | 4 +++-
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index a45774b308..b9ff6e319e 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -175,7 +175,9 @@ class AppVariantDB(Base):
app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
variant_name = Column(String)
revision = Column(Integer)
- image_id = Column(UUID(as_uuid=True), ForeignKey("docker_images.id", ondelete="SET NULL"))
+ image_id = Column(
+ UUID(as_uuid=True), ForeignKey("docker_images.id", ondelete="SET NULL")
+ )
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
base_name = Column(String)
diff --git a/agenta-backend/agenta_backend/routers/variants_router.py b/agenta-backend/agenta_backend/routers/variants_router.py
index 81bf24eba6..c0b4f595b7 100644
--- a/agenta-backend/agenta_backend/routers/variants_router.py
+++ b/agenta-backend/agenta_backend/routers/variants_router.py
@@ -424,7 +424,9 @@ async def get_variant_revisions(variant_id: str, request: Request):
async def get_variant_revision(variant_id: str, revision_number: int, request: Request):
logger.debug("getting variant revision: ", variant_id, revision_number)
try:
- assert variant_id != "undefined", "Variant id is required to retrieve variant revision"
+ assert (
+ variant_id != "undefined"
+ ), "Variant id is required to retrieve variant revision"
app_variant = await db_manager.fetch_app_variant_by_id(
app_variant_id=variant_id
)
@@ -448,7 +450,10 @@ async def get_variant_revision(variant_id: str, revision_number: int, request: R
variant_id, revision_number
)
if not app_variant_revision:
- raise HTTPException(404, detail=f"Revision {revision_number} does not exist for variant '{app_variant.variant_name}'. Please check the available revisions and try again.")
+ raise HTTPException(
+ 404,
+ detail=f"Revision {revision_number} does not exist for variant '{app_variant.variant_name}'. Please check the available revisions and try again.",
+ )
return await converters.app_variant_db_revision_to_output(app_variant_revision)
except Exception as e:
diff --git a/agenta-backend/agenta_backend/services/app_manager.py b/agenta-backend/agenta_backend/services/app_manager.py
index e88e8ea687..78c707bd3d 100644
--- a/agenta-backend/agenta_backend/services/app_manager.py
+++ b/agenta-backend/agenta_backend/services/app_manager.py
@@ -163,7 +163,9 @@ async def update_variant_image(
docker_id=image.docker_id,
user=app_variant_db.user,
deletable=True,
- organization=str(app_variant_db.organization_id) if isCloudEE() else None, # noqa
+ organization=str(app_variant_db.organization_id)
+ if isCloudEE()
+ else None, # noqa
workspace=str(app_variant_db.workspace_id) if isCloudEE() else None, # noqa
)
# Update base with new image
From 9b6009f15416aef66a29cdc1d351058f8a902c9f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 27 Jun 2024 03:18:08 +0000
Subject: [PATCH 157/268] build(deps): bump pymongo from 4.7.3 to 4.8.0 in
/agenta-cli
Bumps [pymongo](https://github.com/mongodb/mongo-python-driver) from 4.7.3 to 4.8.0.
- [Release notes](https://github.com/mongodb/mongo-python-driver/releases)
- [Changelog](https://github.com/mongodb/mongo-python-driver/blob/master/doc/changelog.rst)
- [Commits](https://github.com/mongodb/mongo-python-driver/compare/4.7.3...4.8.0)
---
updated-dependencies:
- dependency-name: pymongo
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
agenta-cli/poetry.lock | 115 +++++++++++++++++++----------------------
1 file changed, 53 insertions(+), 62 deletions(-)
diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock
index a090925b39..10987fcfbb 100644
--- a/agenta-cli/poetry.lock
+++ b/agenta-cli/poetry.lock
@@ -1009,71 +1009,61 @@ windows-terminal = ["colorama (>=0.4.6)"]
[[package]]
name = "pymongo"
-version = "4.7.3"
+version = "4.8.0"
description = "Python driver for MongoDB "
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "pymongo-4.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e9580b4537b3cc5d412070caabd1dabdf73fdce249793598792bac5782ecf2eb"},
- {file = "pymongo-4.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:517243b2b189c98004570dd8fc0e89b1a48363d5578b3b99212fa2098b2ea4b8"},
- {file = "pymongo-4.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23b1e9dabd61da1c7deb54d888f952f030e9e35046cebe89309b28223345b3d9"},
- {file = "pymongo-4.7.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03e0f9901ad66c6fb7da0d303461377524d61dab93a4e4e5af44164c5bb4db76"},
- {file = "pymongo-4.7.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9a870824aa54453aee030bac08c77ebcf2fe8999400f0c2a065bebcbcd46b7f8"},
- {file = "pymongo-4.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfd7b3d3f4261bddbb74a332d87581bc523353e62bb9da4027cc7340f6fcbebc"},
- {file = "pymongo-4.7.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4d719a643ea6da46d215a3ba51dac805a773b611c641319558d8576cbe31cef8"},
- {file = "pymongo-4.7.3-cp310-cp310-win32.whl", hash = "sha256:d8b1e06f361f3c66ee694cb44326e1a2e4f93bc9c3a4849ae8547889fca71154"},
- {file = "pymongo-4.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:c450ab2f9397e2d5caa7fddeb4feb30bf719c47c13ae02c0bbb3b71bf4099c1c"},
- {file = "pymongo-4.7.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79cc6459209e885ba097779eaa0fe7f2fa049db39ab43b1731cf8d065a4650e8"},
- {file = "pymongo-4.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6e2287f1e2cc35e73cd74a4867e398a97962c5578a3991c730ef78d276ca8e46"},
- {file = "pymongo-4.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:413506bd48d8c31ee100645192171e4773550d7cb940b594d5175ac29e329ea1"},
- {file = "pymongo-4.7.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cc1febf17646d52b7561caa762f60bdfe2cbdf3f3e70772f62eb624269f9c05"},
- {file = "pymongo-4.7.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dfcf18a49955d50a16c92b39230bd0668ffc9c164ccdfe9d28805182b48fa72"},
- {file = "pymongo-4.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89872041196c008caddf905eb59d3dc2d292ae6b0282f1138418e76f3abd3ad6"},
- {file = "pymongo-4.7.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3ed97b89de62ea927b672ad524de0d23f3a6b4a01c8d10e3d224abec973fbc3"},
- {file = "pymongo-4.7.3-cp311-cp311-win32.whl", hash = "sha256:d2f52b38151e946011d888a8441d3d75715c663fc5b41a7ade595e924e12a90a"},
- {file = "pymongo-4.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:4a4cc91c28e81c0ce03d3c278e399311b0af44665668a91828aec16527082676"},
- {file = "pymongo-4.7.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cb30c8a78f5ebaca98640943447b6a0afcb146f40b415757c9047bf4a40d07b4"},
- {file = "pymongo-4.7.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9cf2069f5d37c398186453589486ea98bb0312214c439f7d320593b61880dc05"},
- {file = "pymongo-4.7.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3564f423958fced8a8c90940fd2f543c27adbcd6c7c6ed6715d847053f6200a0"},
- {file = "pymongo-4.7.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a8af8a38fa6951fff73e6ff955a6188f829b29fed7c5a1b739a306b4aa56fe8"},
- {file = "pymongo-4.7.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a0e81c8dba6d825272867d487f18764cfed3c736d71d7d4ff5b79642acbed42"},
- {file = "pymongo-4.7.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88fc1d146feabac4385ea8ddb1323e584922922641303c8bf392fe1c36803463"},
- {file = "pymongo-4.7.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4225100b2c5d1f7393d7c5d256ceb8b20766830eecf869f8ae232776347625a6"},
- {file = "pymongo-4.7.3-cp312-cp312-win32.whl", hash = "sha256:5f3569ed119bf99c0f39ac9962fb5591eff02ca210fe80bb5178d7a1171c1b1e"},
- {file = "pymongo-4.7.3-cp312-cp312-win_amd64.whl", hash = "sha256:eb383c54c0c8ba27e7712b954fcf2a0905fee82a929d277e2e94ad3a5ba3c7db"},
- {file = "pymongo-4.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a46cffe91912570151617d866a25d07b9539433a32231ca7e7cf809b6ba1745f"},
- {file = "pymongo-4.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c3cba427dac50944c050c96d958c5e643c33a457acee03bae27c8990c5b9c16"},
- {file = "pymongo-4.7.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a5fd893edbeb7fa982f8d44b6dd0186b6cd86c89e23f6ef95049ff72bffe46"},
- {file = "pymongo-4.7.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c168a2fadc8b19071d0a9a4f85fe38f3029fe22163db04b4d5c046041c0b14bd"},
- {file = "pymongo-4.7.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c59c2c9e70f63a7f18a31e367898248c39c068c639b0579623776f637e8f482"},
- {file = "pymongo-4.7.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08165fd82c89d372e82904c3268bd8fe5de44f92a00e97bb1db1785154397d9"},
- {file = "pymongo-4.7.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:397fed21afec4fdaecf72f9c4344b692e489756030a9c6d864393e00c7e80491"},
- {file = "pymongo-4.7.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f903075f8625e2d228f1b9b9a0cf1385f1c41e93c03fd7536c91780a0fb2e98f"},
- {file = "pymongo-4.7.3-cp37-cp37m-win32.whl", hash = "sha256:8ed1132f58c38add6b6138b771d0477a3833023c015c455d9a6e26f367f9eb5c"},
- {file = "pymongo-4.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8d00a5d8fc1043a4f641cbb321da766699393f1b6f87c70fae8089d61c9c9c54"},
- {file = "pymongo-4.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9377b868c38700c7557aac1bc4baae29f47f1d279cc76b60436e547fd643318c"},
- {file = "pymongo-4.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:da4a6a7b4f45329bb135aa5096823637bd5f760b44d6224f98190ee367b6b5dd"},
- {file = "pymongo-4.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:487e2f9277f8a63ac89335ec4f1699ae0d96ebd06d239480d69ed25473a71b2c"},
- {file = "pymongo-4.7.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db3d608d541a444c84f0bfc7bad80b0b897e0f4afa580a53f9a944065d9b633"},
- {file = "pymongo-4.7.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e90af2ad3a8a7c295f4d09a2fbcb9a350c76d6865f787c07fe843b79c6e821d1"},
- {file = "pymongo-4.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e28feb18dc559d50ededba27f9054c79f80c4edd70a826cecfe68f3266807b3"},
- {file = "pymongo-4.7.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f21ecddcba2d9132d5aebd8e959de8d318c29892d0718420447baf2b9bccbb19"},
- {file = "pymongo-4.7.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:26140fbb3f6a9a74bd73ed46d0b1f43d5702e87a6e453a31b24fad9c19df9358"},
- {file = "pymongo-4.7.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:94baa5fc7f7d22c3ce2ac7bd92f7e03ba7a6875f2480e3b97a400163d6eaafc9"},
- {file = "pymongo-4.7.3-cp38-cp38-win32.whl", hash = "sha256:92dd247727dd83d1903e495acc743ebd757f030177df289e3ba4ef8a8c561fad"},
- {file = "pymongo-4.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:1c90c848a5e45475731c35097f43026b88ef14a771dfd08f20b67adc160a3f79"},
- {file = "pymongo-4.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f598be401b416319a535c386ac84f51df38663f7a9d1071922bda4d491564422"},
- {file = "pymongo-4.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:35ba90477fae61c65def6e7d09e8040edfdd3b7fd47c3c258b4edded60c4d625"},
- {file = "pymongo-4.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9aa8735955c70892634d7e61b0ede9b1eefffd3cd09ccabee0ffcf1bdfe62254"},
- {file = "pymongo-4.7.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:82a97d8f7f138586d9d0a0cff804a045cdbbfcfc1cd6bba542b151e284fbbec5"},
- {file = "pymongo-4.7.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de3b9db558930efab5eaef4db46dcad8bf61ac3ddfd5751b3e5ac6084a25e366"},
- {file = "pymongo-4.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0e149217ef62812d3c2401cf0e2852b0c57fd155297ecc4dcd67172c4eca402"},
- {file = "pymongo-4.7.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3a8a1ef4a824f5feb793b3231526d0045eadb5eb01080e38435dfc40a26c3e5"},
- {file = "pymongo-4.7.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d14e5e89a4be1f10efc3d9dcb13eb7a3b2334599cb6bb5d06c6a9281b79c8e22"},
- {file = "pymongo-4.7.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c6bfa29f032fd4fd7b129520f8cdb51ab71d88c2ba0567cccd05d325f963acb5"},
- {file = "pymongo-4.7.3-cp39-cp39-win32.whl", hash = "sha256:1421d0bd2ce629405f5157bd1aaa9b83f12d53a207cf68a43334f4e4ee312b66"},
- {file = "pymongo-4.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:f7ee974f8b9370a998919c55b1050889f43815ab588890212023fecbc0402a6d"},
- {file = "pymongo-4.7.3.tar.gz", hash = "sha256:6354a66b228f2cd399be7429685fb68e07f19110a3679782ecb4fdb68da03831"},
+ {file = "pymongo-4.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f2b7bec27e047e84947fbd41c782f07c54c30c76d14f3b8bf0c89f7413fac67a"},
+ {file = "pymongo-4.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c68fe128a171493018ca5c8020fc08675be130d012b7ab3efe9e22698c612a1"},
+ {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:920d4f8f157a71b3cb3f39bc09ce070693d6e9648fb0e30d00e2657d1dca4e49"},
+ {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52b4108ac9469febba18cea50db972605cc43978bedaa9fea413378877560ef8"},
+ {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:180d5eb1dc28b62853e2f88017775c4500b07548ed28c0bd9c005c3d7bc52526"},
+ {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aec2b9088cdbceb87e6ca9c639d0ff9b9d083594dda5ca5d3c4f6774f4c81b33"},
+ {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0cf61450feadca81deb1a1489cb1a3ae1e4266efd51adafecec0e503a8dcd84"},
+ {file = "pymongo-4.8.0-cp310-cp310-win32.whl", hash = "sha256:8b18c8324809539c79bd6544d00e0607e98ff833ca21953df001510ca25915d1"},
+ {file = "pymongo-4.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e5df28f74002e37bcbdfdc5109799f670e4dfef0fb527c391ff84f078050e7b5"},
+ {file = "pymongo-4.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6b50040d9767197b77ed420ada29b3bf18a638f9552d80f2da817b7c4a4c9c68"},
+ {file = "pymongo-4.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:417369ce39af2b7c2a9c7152c1ed2393edfd1cbaf2a356ba31eb8bcbd5c98dd7"},
+ {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf821bd3befb993a6db17229a2c60c1550e957de02a6ff4dd0af9476637b2e4d"},
+ {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9365166aa801c63dff1a3cb96e650be270da06e3464ab106727223123405510f"},
+ {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc8b8582f4209c2459b04b049ac03c72c618e011d3caa5391ff86d1bda0cc486"},
+ {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e5019f75f6827bb5354b6fef8dfc9d6c7446894a27346e03134d290eb9e758"},
+ {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b5802151fc2b51cd45492c80ed22b441d20090fb76d1fd53cd7760b340ff554"},
+ {file = "pymongo-4.8.0-cp311-cp311-win32.whl", hash = "sha256:4bf58e6825b93da63e499d1a58de7de563c31e575908d4e24876234ccb910eba"},
+ {file = "pymongo-4.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:b747c0e257b9d3e6495a018309b9e0c93b7f0d65271d1d62e572747f4ffafc88"},
+ {file = "pymongo-4.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e6a720a3d22b54183352dc65f08cd1547204d263e0651b213a0a2e577e838526"},
+ {file = "pymongo-4.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:31e4d21201bdf15064cf47ce7b74722d3e1aea2597c6785882244a3bb58c7eab"},
+ {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b804bb4f2d9dc389cc9e827d579fa327272cdb0629a99bfe5b83cb3e269ebf"},
+ {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2fbdb87fe5075c8beb17a5c16348a1ea3c8b282a5cb72d173330be2fecf22f5"},
+ {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd39455b7ee70aabee46f7399b32ab38b86b236c069ae559e22be6b46b2bbfc4"},
+ {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:940d456774b17814bac5ea7fc28188c7a1338d4a233efbb6ba01de957bded2e8"},
+ {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:236bbd7d0aef62e64caf4b24ca200f8c8670d1a6f5ea828c39eccdae423bc2b2"},
+ {file = "pymongo-4.8.0-cp312-cp312-win32.whl", hash = "sha256:47ec8c3f0a7b2212dbc9be08d3bf17bc89abd211901093e3ef3f2adea7de7a69"},
+ {file = "pymongo-4.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:e84bc7707492f06fbc37a9f215374d2977d21b72e10a67f1b31893ec5a140ad8"},
+ {file = "pymongo-4.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:519d1bab2b5e5218c64340b57d555d89c3f6c9d717cecbf826fb9d42415e7750"},
+ {file = "pymongo-4.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:87075a1feb1e602e539bdb1ef8f4324a3427eb0d64208c3182e677d2c0718b6f"},
+ {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f53429515d2b3e86dcc83dadecf7ff881e538c168d575f3688698a8707b80a"},
+ {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdc20cd1e1141b04696ffcdb7c71e8a4a665db31fe72e51ec706b3bdd2d09f36"},
+ {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:284d0717d1a7707744018b0b6ee7801b1b1ff044c42f7be7a01bb013de639470"},
+ {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5bf0eb8b6ef40fa22479f09375468c33bebb7fe49d14d9c96c8fd50355188b0"},
+ {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ecd71b9226bd1d49416dc9f999772038e56f415a713be51bf18d8676a0841c8"},
+ {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0061af6e8c5e68b13f1ec9ad5251247726653c5af3c0bbdfbca6cf931e99216"},
+ {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:658d0170f27984e0d89c09fe5c42296613b711a3ffd847eb373b0dbb5b648d5f"},
+ {file = "pymongo-4.8.0-cp38-cp38-win32.whl", hash = "sha256:3ed1c316718a2836f7efc3d75b4b0ffdd47894090bc697de8385acd13c513a70"},
+ {file = "pymongo-4.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:7148419eedfea9ecb940961cfe465efaba90595568a1fb97585fb535ea63fe2b"},
+ {file = "pymongo-4.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8400587d594761e5136a3423111f499574be5fd53cf0aefa0d0f05b180710b0"},
+ {file = "pymongo-4.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af3e98dd9702b73e4e6fd780f6925352237f5dce8d99405ff1543f3771201704"},
+ {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de3a860f037bb51f968de320baef85090ff0bbb42ec4f28ec6a5ddf88be61871"},
+ {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fc18b3a093f3db008c5fea0e980dbd3b743449eee29b5718bc2dc15ab5088bb"},
+ {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18c9d8f975dd7194c37193583fd7d1eb9aea0c21ee58955ecf35362239ff31ac"},
+ {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:408b2f8fdbeca3c19e4156f28fff1ab11c3efb0407b60687162d49f68075e63c"},
+ {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6564780cafd6abeea49759fe661792bd5a67e4f51bca62b88faab497ab5fe89"},
+ {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d18d86bc9e103f4d3d4f18b85a0471c0e13ce5b79194e4a0389a224bb70edd53"},
+ {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9097c331577cecf8034422956daaba7ec74c26f7b255d718c584faddd7fa2e3c"},
+ {file = "pymongo-4.8.0-cp39-cp39-win32.whl", hash = "sha256:d5428dbcd43d02f6306e1c3c95f692f68b284e6ee5390292242f509004c9e3a8"},
+ {file = "pymongo-4.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:ef7225755ed27bfdb18730c68f6cb023d06c28f2b734597480fb4c0e500feb6f"},
+ {file = "pymongo-4.8.0.tar.gz", hash = "sha256:454f2295875744dc70f1881e4b2eb99cdad008a33574bc8aaf120530f66c0cde"},
]
[package.dependencies]
@@ -1081,6 +1071,7 @@ dnspython = ">=1.16.0,<3.0.0"
[package.extras]
aws = ["pymongo-auth-aws (>=1.1.0,<2.0.0)"]
+docs = ["furo (==2023.9.10)", "readthedocs-sphinx-search (>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-rtd-theme (>=2,<3)", "sphinxcontrib-shellcheck (>=1,<2)"]
encryption = ["certifi", "pymongo-auth-aws (>=1.1.0,<2.0.0)", "pymongocrypt (>=1.6.0,<2.0.0)"]
gssapi = ["pykerberos", "winkerberos (>=0.5.0)"]
ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"]
From 49c8f9dd0f49319675c9e3c8feb10fcc70b346d9 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Thu, 27 Jun 2024 11:30:20 +0200
Subject: [PATCH 158/268] add assertion to prevent duplicated values
---
.../migrations/mongo_to_postgres/utils.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index ac9f80e763..38ff517f28 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -11,7 +11,7 @@
from sqlalchemy.orm import sessionmaker
import uuid_utils.compat as uuid
from sqlalchemy.future import select
-
+from sqlalchemy.exc import NoResultFound
from agenta_backend.models.db_engine import db_engine
from agenta_backend.models.db_models import IDsMappingDB
@@ -59,7 +59,7 @@ async def store_mapping(table_name, mongo_id, uuid):
await session.commit()
-async def get_mapped_uuid(table_name, mongo_id):
+async def get_mapped_uuid(table_name, mongo_id, print_result=False):
"""Retrieve the mapped UUID for a given MongoDB ObjectId and table name."""
async with db_engine.get_session() as session:
stmt = select(IDsMappingDB.uuid).filter(
@@ -67,8 +67,11 @@ async def get_mapped_uuid(table_name, mongo_id):
IDsMappingDB.objectid == str(mongo_id),
)
result = await session.execute(stmt)
- row = result.first()
- return row[0] if row else None
+ try:
+ row = result.one()
+ except NoResultFound:
+ return None
+ return row[0]
def get_datetime(value):
From 6405171c396815b120215391b3f04ee59265c6cc Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Thu, 27 Jun 2024 10:45:03 +0100
Subject: [PATCH 159/268] fix(backend): filtered out blank single model test
scores when calculating result
---
agenta-backend/agenta_backend/services/results_service.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/services/results_service.py b/agenta-backend/agenta_backend/services/results_service.py
index 86425f9fa5..b1ca31a58a 100644
--- a/agenta-backend/agenta_backend/services/results_service.py
+++ b/agenta-backend/agenta_backend/services/results_service.py
@@ -99,7 +99,8 @@ async def fetch_results_for_single_model_test(evaluation_id: str):
scores_and_counts = {}
for result in results:
score = result.score
- scores_and_counts[score] = scores_and_counts.get(score, 0) + 1
+ if score:
+ scores_and_counts[score] = scores_and_counts.get(score, 0) + 1
return scores_and_counts
From 95bf50252f399c845b1477964196318680e1b085 Mon Sep 17 00:00:00 2001
From: Abram
Date: Thu, 27 Jun 2024 15:07:51 +0100
Subject: [PATCH 160/268] minor refactor (backend): resolve NameError: name
'organization_id' is not defined
---
agenta-backend/agenta_backend/services/db_manager.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 565257983b..ffd1539113 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -2827,8 +2827,8 @@ async def create_new_evaluation_scenario(
organization is not None and workspace is not None
), "organization and workspace must be provided together"
- evaluation_scenario.organization_id = organization_id # type: ignore
- evaluation_scenario.workspace_id = workspace_id # type: ignore
+ evaluation_scenario.organization_id = organization # type: ignore
+ evaluation_scenario.workspace_id = workspace # type: ignore
session.add(evaluation_scenario)
await session.commit()
From 23729dfdac5427e17a3ddb0290664bc59a0cd652 Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Thu, 27 Jun 2024 19:19:01 +0200
Subject: [PATCH 161/268] docs(tool): AGE-367 added new cookbook
Closes: AGE-367
---
cookbook/creating_evaluators_with_sdk.ipynb | 470 ++++++++++++++++++++
1 file changed, 470 insertions(+)
create mode 100644 cookbook/creating_evaluators_with_sdk.ipynb
diff --git a/cookbook/creating_evaluators_with_sdk.ipynb b/cookbook/creating_evaluators_with_sdk.ipynb
new file mode 100644
index 0000000000..dd32af728b
--- /dev/null
+++ b/cookbook/creating_evaluators_with_sdk.ipynb
@@ -0,0 +1,470 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Using evaluations with the SDK\n",
+ "In this cookbook we will show how to interact with evaluation in agenta programatically. Either using the SDK (or the raw API). \n",
+ "\n",
+ "We will do the following:\n",
+ "\n",
+ "- Create a test set\n",
+ "- Create and configure an evaluator\n",
+ "- Run an evaluation\n",
+ "- Retrieve the results of evaluations\n",
+ "\n",
+ "We assume that you have already created an LLM application and variants in agenta. \n",
+ "\n",
+ "\n",
+ "### Architectural Overview:\n",
+ "In this scenario, evaluations are executed on the Agenta backend. Specifically, Agenta invokes the LLM application for each row in the test set and subsequently processes the output using the designated evaluator. \n",
+ "This operation is managed through Celery tasks. The interactions with the LLM application are asynchronous, batched, and include retry mechanisms. Additionally, the batching configuration can be adjusted to avoid exceeding the rate limits imposed by the LLM provider.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Setup "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Requirement already satisfied: agenta in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (0.17.5)\n",
+ "Requirement already satisfied: docker<8.0.0,>=6.1.1 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (6.1.2)\n",
+ "Requirement already satisfied: posthog<4.0.0,>=3.1.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (3.3.1)\n",
+ "Requirement already satisfied: click<9.0.0,>=8.1.3 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (8.1.3)\n",
+ "Requirement already satisfied: importlib-metadata<8.0,>=6.7 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (6.8.0)\n",
+ "Requirement already satisfied: pymongo<5.0.0,>=4.6.3 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (4.7.2)\n",
+ "Requirement already satisfied: questionary<3.0,>=1.10 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (1.10.0)\n",
+ "Requirement already satisfied: httpx<0.28,>=0.24 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (0.27.0)\n",
+ "Requirement already satisfied: python-dotenv<2.0.0,>=1.0.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (1.0.0)\n",
+ "Requirement already satisfied: pydantic>=2 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (2.7.4)\n",
+ "Requirement already satisfied: toml<0.11.0,>=0.10.2 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (0.10.2)\n",
+ "Requirement already satisfied: cachetools<6.0.0,>=5.3.3 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (5.3.3)\n",
+ "Requirement already satisfied: python-multipart<0.0.10,>=0.0.6 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (0.0.9)\n",
+ "Requirement already satisfied: fastapi>=0.100.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (0.111.0)\n",
+ "Requirement already satisfied: ipdb>=0.13 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from agenta) (0.13.13)\n",
+ "Requirement already satisfied: urllib3>=1.26.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from docker<8.0.0,>=6.1.1->agenta) (2.2.1)\n",
+ "Requirement already satisfied: requests>=2.26.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from docker<8.0.0,>=6.1.1->agenta) (2.31.0)\n",
+ "Requirement already satisfied: packaging>=14.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from docker<8.0.0,>=6.1.1->agenta) (23.2)\n",
+ "Requirement already satisfied: websocket-client>=0.32.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from docker<8.0.0,>=6.1.1->agenta) (1.5.2)\n",
+ "Requirement already satisfied: orjson>=3.2.1 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi>=0.100.0->agenta) (3.9.15)\n",
+ "Requirement already satisfied: typing-extensions>=4.8.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi>=0.100.0->agenta) (4.9.0)\n",
+ "Requirement already satisfied: ujson!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0,>=4.0.1 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi>=0.100.0->agenta) (5.10.0)\n",
+ "Requirement already satisfied: jinja2>=2.11.2 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi>=0.100.0->agenta) (3.1.2)\n",
+ "Requirement already satisfied: starlette<0.38.0,>=0.37.2 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi>=0.100.0->agenta) (0.37.2)\n",
+ "Requirement already satisfied: uvicorn[standard]>=0.12.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi>=0.100.0->agenta) (0.20.0)\n",
+ "Requirement already satisfied: email_validator>=2.0.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi>=0.100.0->agenta) (2.1.1)\n",
+ "Requirement already satisfied: fastapi-cli>=0.0.2 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi>=0.100.0->agenta) (0.0.4)\n",
+ "Requirement already satisfied: idna>=2.0.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from email_validator>=2.0.0->fastapi>=0.100.0->agenta) (3.2)\n",
+ "Requirement already satisfied: dnspython>=2.0.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from email_validator>=2.0.0->fastapi>=0.100.0->agenta) (2.2.1)\n",
+ "Requirement already satisfied: typer>=0.12.3 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from fastapi-cli>=0.0.2->fastapi>=0.100.0->agenta) (0.12.3)\n",
+ "Requirement already satisfied: httpcore==1.* in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from httpx<0.28,>=0.24->agenta) (1.0.4)\n",
+ "Requirement already satisfied: anyio in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from httpx<0.28,>=0.24->agenta) (3.6.2)\n",
+ "Requirement already satisfied: sniffio in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from httpx<0.28,>=0.24->agenta) (1.2.0)\n",
+ "Requirement already satisfied: certifi in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from httpx<0.28,>=0.24->agenta) (2023.11.17)\n",
+ "Requirement already satisfied: h11<0.15,>=0.13 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from httpcore==1.*->httpx<0.28,>=0.24->agenta) (0.14.0)\n",
+ "Requirement already satisfied: zipp>=0.5 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from importlib-metadata<8.0,>=6.7->agenta) (3.6.0)\n",
+ "Requirement already satisfied: tomli in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipdb>=0.13->agenta) (2.0.1)\n",
+ "Requirement already satisfied: ipython>=7.31.1 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipdb>=0.13->agenta) (8.13.2)\n",
+ "Requirement already satisfied: decorator in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipdb>=0.13->agenta) (5.1.0)\n",
+ "Requirement already satisfied: stack-data in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (0.6.2)\n",
+ "Requirement already satisfied: pexpect>4.3 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (4.8.0)\n",
+ "Requirement already satisfied: prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (3.0.38)\n",
+ "Requirement already satisfied: jedi>=0.16 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (0.18.0)\n",
+ "Requirement already satisfied: pickleshare in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (0.7.5)\n",
+ "Requirement already satisfied: matplotlib-inline in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (0.1.2)\n",
+ "Requirement already satisfied: backcall in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (0.2.0)\n",
+ "Requirement already satisfied: traitlets>=5 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (5.1.0)\n",
+ "Requirement already satisfied: appnope in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (0.1.2)\n",
+ "Requirement already satisfied: pygments>=2.4.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from ipython>=7.31.1->ipdb>=0.13->agenta) (2.10.0)\n",
+ "Requirement already satisfied: parso<0.9.0,>=0.8.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from jedi>=0.16->ipython>=7.31.1->ipdb>=0.13->agenta) (0.8.2)\n",
+ "Requirement already satisfied: MarkupSafe>=2.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from jinja2>=2.11.2->fastapi>=0.100.0->agenta) (2.1.1)\n",
+ "Requirement already satisfied: ptyprocess>=0.5 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from pexpect>4.3->ipython>=7.31.1->ipdb>=0.13->agenta) (0.7.0)\n",
+ "Requirement already satisfied: backoff>=1.10.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from posthog<4.0.0,>=3.1.0->agenta) (1.10.0)\n",
+ "Requirement already satisfied: python-dateutil>2.1 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from posthog<4.0.0,>=3.1.0->agenta) (2.8.2)\n",
+ "Requirement already satisfied: monotonic>=1.5 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from posthog<4.0.0,>=3.1.0->agenta) (1.6)\n",
+ "Requirement already satisfied: six>=1.5 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from posthog<4.0.0,>=3.1.0->agenta) (1.16.0)\n",
+ "Requirement already satisfied: wcwidth in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30->ipython>=7.31.1->ipdb>=0.13->agenta) (0.2.5)\n",
+ "Requirement already satisfied: annotated-types>=0.4.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from pydantic>=2->agenta) (0.5.0)\n",
+ "Requirement already satisfied: pydantic-core==2.18.4 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from pydantic>=2->agenta) (2.18.4)\n",
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from requests>=2.26.0->docker<8.0.0,>=6.1.1->agenta) (2.0.4)\n",
+ "Requirement already satisfied: rich>=10.11.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from typer>=0.12.3->fastapi-cli>=0.0.2->fastapi>=0.100.0->agenta) (12.6.0)\n",
+ "Requirement already satisfied: shellingham>=1.3.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from typer>=0.12.3->fastapi-cli>=0.0.2->fastapi>=0.100.0->agenta) (1.5.4)\n",
+ "Requirement already satisfied: commonmark<0.10.0,>=0.9.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from rich>=10.11.0->typer>=0.12.3->fastapi-cli>=0.0.2->fastapi>=0.100.0->agenta) (0.9.1)\n",
+ "Requirement already satisfied: httptools>=0.5.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from uvicorn[standard]>=0.12.0->fastapi>=0.100.0->agenta) (0.6.1)\n",
+ "Requirement already satisfied: watchfiles>=0.13 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from uvicorn[standard]>=0.12.0->fastapi>=0.100.0->agenta) (0.22.0)\n",
+ "Requirement already satisfied: websockets>=10.4 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from uvicorn[standard]>=0.12.0->fastapi>=0.100.0->agenta) (10.4)\n",
+ "Requirement already satisfied: uvloop!=0.15.0,!=0.15.1,>=0.14.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from uvicorn[standard]>=0.12.0->fastapi>=0.100.0->agenta) (0.19.0)\n",
+ "Requirement already satisfied: pyyaml>=5.1 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from uvicorn[standard]>=0.12.0->fastapi>=0.100.0->agenta) (6.0.1)\n",
+ "Requirement already satisfied: asttokens>=2.1.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from stack-data->ipython>=7.31.1->ipdb>=0.13->agenta) (2.2.1)\n",
+ "Requirement already satisfied: pure-eval in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from stack-data->ipython>=7.31.1->ipdb>=0.13->agenta) (0.2.2)\n",
+ "Requirement already satisfied: executing>=1.2.0 in /Users/mahmoudmabrouk/opt/anaconda3/lib/python3.9/site-packages (from stack-data->ipython>=7.31.1->ipdb>=0.13->agenta) (1.2.0)\n"
+ ]
+ }
+ ],
+ "source": [
+ "! pip install -U agenta"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Configuration Setup\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[App(app_id='666dde95962bbaffdb0072b5', app_name='product-classification'),\n",
+ " App(app_id='666fde62962bbaffdb0073d9', app_name='product-title-generation'),\n",
+ " App(app_id='66704efa962bbaffdb007574', app_name='project-qa'),\n",
+ " App(app_id='6670570b962bbaffdb0075a7', app_name='project-qa-prompt-rewriting'),\n",
+ " App(app_id='667d8cfad1812781f7e375d9', app_name='find_capital')]"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Assuming an application has already been created through the user interface, you will need to obtain the application ID.\n",
+ "# In this example we will use the default template single_prompt which has the prompt \"Determine the capital of {country}\"\n",
+ "\n",
+ "# You can find the application ID in the URL. For example, in the URL https://cloud.agenta.ai/apps/666dde95962bbaffdb0072b5/playground?variant=app.default, the application ID is `666dde95962bbaffdb0072b5`.\n",
+ "from agenta.client.backend.client import AgentaApi\n",
+ "# Let's list the applications\n",
+ "client.apps.list_apps()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "app_id = \"667d8cfad1812781f7e375d9\"\n",
+ "\n",
+ "# You can create the API key under the settings page. If you are using the OSS version, you should keep this as an empty string\n",
+ "api_key = \"EUqJGOUu.xxxx\"\n",
+ "\n",
+ "# Host. \n",
+ "host = \"https://cloud.agenta.ai\"\n",
+ "\n",
+ "# Initialize the client\n",
+ "\n",
+ "client = AgentaApi(base_url=host + \"/api\", api_key=api_key)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create a test set"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'status': 'success',\n",
+ " 'message': 'testset updated successfully',\n",
+ " '_id': '667d8ecfd1812781f7e375eb'}"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from agenta.client.backend.types.new_testset import NewTestset\n",
+ "\n",
+ "csvdata = [\n",
+ " {\"country\": \"france\", \"capital\": \"Paris\"},\n",
+ " {\"country\": \"Germany\", \"capital\": \"paris\"}\n",
+ " ]\n",
+ "\n",
+ "response = client.testsets.create_testset(app_id=app_id, request=NewTestset(name=\"test set\", csvdata=csvdata))\n",
+ "test_set_id = response.id\n",
+ "\n",
+ "# let's now update it\n",
+ "\n",
+ "csvdata = [\n",
+ " {\"country\": \"france\", \"capital\": \"Paris\"},\n",
+ " {\"country\": \"Germany\", \"capital\": \"Berlin\"}\n",
+ " ]\n",
+ "\n",
+ "client.testsets.update_testset(testset_id=test_set_id, request=NewTestset(name=\"test set\", csvdata=csvdata))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Create evaluators"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create an evaluator that performs an exact match comparison on the 'capital' column\n",
+ "# You can find the list of evaluator keys and evaluators and their configurations in https://github.com/Agenta-AI/agenta/blob/main/agenta-backend/agenta_backend/resources/evaluators/evaluators.py\n",
+ "response = client.evaluators.create_new_evaluator_config(app_id=app_id, name=\"capital_evaluator\", evaluator_key=\"auto_exact_match\", settings_values={\"correct_answer_key\": \"capital\"})\n",
+ "exact_match_eval_id = response.id\n",
+ "\n",
+ "code_snippet = \"\"\"\n",
+ "from typing import Dict\n",
+ "\n",
+ "def evaluate(\n",
+ " app_params: Dict[str, str],\n",
+ " inputs: Dict[str, str],\n",
+ " output: str, # output of the llm app\n",
+ " datapoint: Dict[str, str] # contains the testset row \n",
+ ") -> float:\n",
+ " if output and output[0].isupper():\n",
+ " return 1.0\n",
+ " else:\n",
+ " return 0.0\n",
+ "\"\"\"\n",
+ "\n",
+ "response = client.evaluators.create_new_evaluator_config(app_id=app_id, name=\"capital_letter_evaluator\", evaluator_key=\"auto_custom_code_run\", settings_values={\"code\": code_snippet})\n",
+ "letter_match_eval_id = response.id"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[EvaluatorConfig(id='667d8cfbd1812781f7e375e2', name='Exact Match', evaluator_key='auto_exact_match', settings_values={'correct_answer_key': 'correct_answer'}, created_at=datetime.datetime(2024, 6, 26, 12, 22, 31, 775000), updated_at=datetime.datetime(2024, 6, 26, 12, 22, 31, 775000)),\n",
+ " EvaluatorConfig(id='667d8cfbd1812781f7e375e3', name='Contains Json', evaluator_key='auto_contains_json', settings_values={}, created_at=datetime.datetime(2024, 6, 26, 12, 22, 31, 775000), updated_at=datetime.datetime(2024, 6, 26, 12, 22, 31, 775000)),\n",
+ " EvaluatorConfig(id='667d8ed6d1812781f7e375ec', name='capital_evaluator', evaluator_key='auto_exact_match', settings_values={'correct_answer_key': 'capital'}, created_at=datetime.datetime(2024, 6, 26, 12, 22, 31, 775000), updated_at=datetime.datetime(2024, 6, 26, 12, 22, 31, 775000)),\n",
+ " EvaluatorConfig(id='667d8ed6d1812781f7e375ed', name='capital_letter_evaluator', evaluator_key='auto_custom_code_run', settings_values={'code': '\\nfrom typing import Dict\\n\\ndef evaluate(\\n app_params: Dict[str, str],\\n inputs: Dict[str, str],\\n output: str, # output of the llm app\\n datapoint: Dict[str, str] # contains the testset row \\n) -> float:\\n if output and output[0].isupper():\\n return 1.0\\n else:\\n return 0.0\\n'}, created_at=datetime.datetime(2024, 6, 26, 12, 22, 31, 775000), updated_at=datetime.datetime(2024, 6, 26, 12, 22, 31, 775000))]"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# get list of all evaluators\n",
+ "client.evaluators.get_evaluator_configs(app_id=app_id)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Run an evaluation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[AppVariantResponse(app_id='667d8cfad1812781f7e375d9', app_name='find_capital', variant_id='667d8cfbd1812781f7e375df', variant_name='app.default', parameters={'temperature': 1.0, 'model': 'gpt-3.5-turbo', 'max_tokens': -1, 'prompt_system': 'You are an expert in geography.', 'prompt_user': 'What is the capital of {country}?', 'top_p': 1.0, 'frequence_penalty': 0.0, 'presence_penalty': 0.0, 'force_json': 0}, previous_variant_name=None, user_id='666dde45962bbaffdb0072b2', base_name='app', base_id='667d8cfbd1812781f7e375de', config_name='default', uri='https://vmripsmtbzlysdbptjl4hzrbga0ckadr.lambda-url.eu-central-1.on.aws', revision=1, organization_id='666dde45962bbaffdb0072b3', workspace_id='666dde45962bbaffdb0072b4')]\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = client.apps.list_app_variants(app_id=app_id)\n",
+ "print(response)\n",
+ "myvariant_id = response[0].variant_id"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[Evaluation(id='667d98fbd1812781f7e3761a', app_id='667d8cfad1812781f7e375d9', user_id='666dde45962bbaffdb0072b2', user_username='mahmoud+demo', variant_ids=['667d8cfbd1812781f7e375df'], variant_names=['app.default'], variant_revision_ids=['667d8d0dd1812781f7e375e7'], revisions=['1'], testset_id='667d8ecfd1812781f7e375eb', testset_name='test set', status=Result(type='status', value='EVALUATION_STARTED', error=None), aggregated_results=[], average_cost=None, total_cost=None, average_latency=None, created_at=datetime.datetime(2024, 6, 27, 16, 53, 15, 281313, tzinfo=datetime.timezone.utc), updated_at=datetime.datetime(2024, 6, 27, 16, 53, 15, 281328, tzinfo=datetime.timezone.utc))]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Run an evaluation\n",
+ "from agenta.client.backend.types.llm_run_rate_limit import LlmRunRateLimit\n",
+ "response = client.evaluations.create_evaluation(app_id=app_id, variant_ids=[myvariant_id], testset_id=test_set_id, evaluators_configs=[exact_match_eval_id, letter_match_eval_id],\n",
+ " rate_limit=LlmRunRateLimit(\n",
+ " batch_size=10, # number of rows to call in parallel\n",
+ " max_retries=3, # max number of time to retry a failed llm call\n",
+ " retry_delay=2, # delay before retrying a failed llm call\n",
+ " delay_between_batches=5, # delay between batches\n",
+ " ),)\n",
+ "print(response)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'status': {'type': 'status', 'value': 'EVALUATION_FINISHED', 'error': None}}"
+ ]
+ },
+ "execution_count": 47,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# check the status\n",
+ "client.evaluations.fetch_evaluation_status('667d98fbd1812781f7e3761a')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[('capital_evaluator', {'type': 'number', 'value': 0.0, 'error': None}), ('capital_letter_evaluator', {'type': 'number', 'value': 1.0, 'error': None})]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# fetch the overall results\n",
+ "response = client.evaluations.fetch_evaluation_results('667d98fbd1812781f7e3761a')\n",
+ "\n",
+ "results = [(evaluator[\"evaluator_config\"][\"name\"], evaluator[\"result\"]) for evaluator in response[\"results\"]]\n",
+ "# End of Selection"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'inputs': [{'input_name': 'country', 'input_value': 'france'},\n",
+ " {'input_name': 'capital', 'input_value': 'Paris'},\n",
+ " {'input_name': 'country', 'input_value': 'Germany'},\n",
+ " {'input_name': 'capital', 'input_value': 'Berlin'}],\n",
+ " 'data': [{'input_name': 'country',\n",
+ " 'input_value': 'france',\n",
+ " 'scenarios': [{'id': '667d994d72010c439240463a',\n",
+ " 'evaluation_id': '667d98fbd1812781f7e3761a',\n",
+ " 'inputs': [{'name': 'country', 'type': 'text', 'value': 'france'}],\n",
+ " 'outputs': [{'result': {'type': 'text',\n",
+ " 'value': 'The capital of France is Paris.',\n",
+ " 'error': None},\n",
+ " 'cost': 5.1500000000000005e-05,\n",
+ " 'latency': 1.1813}],\n",
+ " 'evaluation': None,\n",
+ " 'correct_answers': [{'key': 'capital', 'value': 'Paris'},\n",
+ " {'key': '', 'value': ''}],\n",
+ " 'is_pinned': False,\n",
+ " 'note': '',\n",
+ " 'results': [{'evaluator_config': '667d8ed6d1812781f7e375ec',\n",
+ " 'result': {'type': 'bool', 'value': False, 'error': None}},\n",
+ " {'evaluator_config': '667d8ed6d1812781f7e375ed',\n",
+ " 'result': {'type': 'number', 'value': 1.0, 'error': None}}]}]},\n",
+ " {'input_name': 'capital', 'input_value': 'Paris', 'scenarios': []},\n",
+ " {'input_name': 'country',\n",
+ " 'input_value': 'Germany',\n",
+ " 'scenarios': [{'id': '667d994d72010c439240463b',\n",
+ " 'evaluation_id': '667d98fbd1812781f7e3761a',\n",
+ " 'inputs': [{'name': 'country', 'type': 'text', 'value': 'Germany'}],\n",
+ " 'outputs': [{'result': {'type': 'text',\n",
+ " 'value': 'The capital of Germany is Berlin.',\n",
+ " 'error': None},\n",
+ " 'cost': 5.1500000000000005e-05,\n",
+ " 'latency': 0.9169}],\n",
+ " 'evaluation': None,\n",
+ " 'correct_answers': [{'key': 'capital', 'value': 'Berlin'},\n",
+ " {'key': '', 'value': ''}],\n",
+ " 'is_pinned': False,\n",
+ " 'note': '',\n",
+ " 'results': [{'evaluator_config': '667d8ed6d1812781f7e375ec',\n",
+ " 'result': {'type': 'bool', 'value': False, 'error': None}},\n",
+ " {'evaluator_config': '667d8ed6d1812781f7e375ed',\n",
+ " 'result': {'type': 'number', 'value': 1.0, 'error': None}}]}]},\n",
+ " {'input_name': 'capital', 'input_value': 'Berlin', 'scenarios': []}]}"
+ ]
+ },
+ "execution_count": 46,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# fetch the detailed results\n",
+ "client.evaluations.fetch_evaluation_scenarios(evaluations_ids='667d98fbd1812781f7e3761a')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "base",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
From dfe9ef99228a63a138ebe03608ffda18c1f276b0 Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Thu, 27 Jun 2024 19:25:01 +0200
Subject: [PATCH 162/268] Rename creating_evaluators_with_sdk.ipynb to
evaluations_with_sdk.ipynb
---
...ating_evaluators_with_sdk.ipynb => evaluations_with_sdk.ipynb} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename cookbook/{creating_evaluators_with_sdk.ipynb => evaluations_with_sdk.ipynb} (100%)
diff --git a/cookbook/creating_evaluators_with_sdk.ipynb b/cookbook/evaluations_with_sdk.ipynb
similarity index 100%
rename from cookbook/creating_evaluators_with_sdk.ipynb
rename to cookbook/evaluations_with_sdk.ipynb
From fe9e1f2c605fde09d6a967360d11713e5fe0f68e Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Thu, 27 Jun 2024 19:42:18 +0200
Subject: [PATCH 163/268] docs(tool): AGE-367 add doc for using sdk
Closes: AGE-367
---
docs/guides/evaluation_from_sdk.mdx | 107 ++++++++++++++++++++++++++++
docs/mint.json | 7 ++
2 files changed, 114 insertions(+)
create mode 100644 docs/guides/evaluation_from_sdk.mdx
diff --git a/docs/guides/evaluation_from_sdk.mdx b/docs/guides/evaluation_from_sdk.mdx
new file mode 100644
index 0000000000..3a2e3620bf
--- /dev/null
+++ b/docs/guides/evaluation_from_sdk.mdx
@@ -0,0 +1,107 @@
+---
+title: "Running Evaluations with SDK"
+---
+
+
+ This guide is also available as a [Jupyter
+ Notebook](https://github.com/Agenta-AI/agenta/blob/main/cookbook/evaluations_with_sdk.ipynb).
+
+
+## Introduction
+
+In this guide, we'll demonstrate how to interact programmatically with evaluations in the Agenta platform using the SDK (or the raw API). This will include:
+
+- Creating a test set
+- Configuring an evaluator
+- Running an evaluation
+- Retrieving the results of evaluations
+
+This assumes that you have already created an LLM application and variants in Agenta.
+
+## Architectural Overview
+
+Evaluations are executed on the Agenta backend. Specifically, Agenta invokes the LLM application for each row in the test set and processes the output using the designated evaluator. Operations are managed through Celery tasks. The interactions with the LLM application are asynchronous, batched, and include retry mechanisms. The batching configuration can be adjusted to avoid exceeding rate limits imposed by the LLM provider.
+
+## Setup
+
+### Installation
+
+Ensure that the Agenta SDK is installed and up-to-date in your development environment:
+
+```bash
+pip install -U agenta
+```
+
+### Configuration
+
+After setting up your environment, you need to configure the SDK:
+
+```python
+from agenta.client.backend.client import AgentaApi
+
+# Set up your application ID and API key
+app_id = "your_app_id"
+api_key = "your_api_key"
+host = "https://cloud.agenta.ai"
+
+# Initialize the client
+client = AgentaApi(base_url=host + "/api", api_key=api_key)
+```
+
+## Create a Test Set
+
+You can create and update test sets as follows:
+
+```python
+from agenta.client.backend.types.new_testset import NewTestset
+
+# Example data for the test set
+csvdata = [
+ {"country": "France", "capital": "Paris"},
+ {"country": "Germany", "capital": "Berlin"}
+]
+
+# Create a new test set
+response = client.testsets.create_testset(app_id=app_id, request=NewTestset(name="Test Set", csvdata=csvdata))
+test_set_id = response.id
+```
+
+## Create Evaluators
+
+Set up evaluators that will assess the performance based on specific criteria:
+
+```python
+# Create an exact match evaluator
+response = client.evaluators.create_new_evaluator_config(
+ app_id=app_id, name="Capital Evaluator", evaluator_key="auto_exact_match",
+ settings_values={"correct_answer_key": "capital"}
+)
+exact_match_eval_id = response.id
+```
+
+## Run an Evaluation
+
+Execute an evaluation using the previously defined test set and evaluators:
+
+```python
+from agenta.client.backend.types.llm_run_rate_limit import LlmRunRateLimit
+
+response = client.evaluations.create_evaluation(
+ app_id=app_id, variant_ids=["your_variant_id"], testset_id=test_set_id,
+ evaluators_configs=[exact_match_eval_id],
+ rate_limit=LlmRunRateLimit(batch_size=10, max_retries=3, retry_delay=2, delay_between_batches=5)
+)
+```
+
+## Retrieve Results
+
+After running the evaluation, fetch the results to see how well the model performed against the test set:
+
+```python
+results = client.evaluations.fetch_evaluation_results("your_evaluation_id")
+print(results)
+```
+
+## Conclusion
+
+This guide covers the basic steps for using the SDK to manage evaluations within Agenta.
diff --git a/docs/mint.json b/docs/mint.json
index 1ae772ad3d..f425367ca4 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -343,7 +343,14 @@
"guides/tutorials/deploy-mistral-model",
"guides/extract_job_information"
]
+ },
+ {
+ "group": "Cookbooks",
+ "pages": [
+ "guides/evaluation_from_sdk"
+ ]
}
+
],
"api": {
"baseUrl": "http://localhost/api"
From 65dc7124226c5f6d074d210da4556a14fe021302 Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Fri, 28 Jun 2024 13:38:49 +0200
Subject: [PATCH 164/268] fix(sdk): AGE-341 added failsafe for usage in litell
added failsafe for usage in litellm so taht even when usage does not exit the app does not throw an error
Closes: AGE-341
---
agenta-cli/agenta/sdk/tracing/callbacks.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/agenta-cli/agenta/sdk/tracing/callbacks.py b/agenta-cli/agenta/sdk/tracing/callbacks.py
index 9ab5e23c19..9a5f931059 100644
--- a/agenta-cli/agenta/sdk/tracing/callbacks.py
+++ b/agenta-cli/agenta/sdk/tracing/callbacks.py
@@ -55,7 +55,7 @@ def log_stream_event(self, kwargs, response_obj, start_time, end_time):
"message": kwargs.get(
"complete_streaming_response"
), # the complete streamed response (only set if `completion(..stream=True)`)
- "usage": response_obj.usage.dict(), # litellm calculates usage
+ "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -69,7 +69,7 @@ def log_success_event(
self._trace.end_span(
outputs={
"message": response_obj.choices[0].message.content,
- "usage": response_obj.usage.dict(), # litellm calculates usage
+ "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -93,7 +93,7 @@ def log_failure_event(
self._trace.end_span(
outputs={
"message": kwargs["exception"], # the Exception raised
- "usage": response_obj.usage.dict(), # litellm calculates usage
+ "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -109,7 +109,7 @@ async def async_log_stream_event(
"message": kwargs.get(
"complete_streaming_response"
), # the complete streamed response (only set if `completion(..stream=True)`)
- "usage": response_obj.usage.dict(), # litellm calculates usage
+ "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -123,7 +123,7 @@ async def async_log_success_event(
self._trace.end_span(
outputs={
"message": response_obj.choices[0].message.content,
- "usage": response_obj.usage.dict(), # litellm calculates usage
+ "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -147,7 +147,7 @@ async def async_log_failure_event(
self._trace.end_span(
outputs={
"message": kwargs["exception"], # the Exception raised
- "usage": response_obj.usage.dict(), # litellm calculates usage
+ "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
From 969001f932288855bf204317ba9517424e89388e Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Fri, 28 Jun 2024 13:39:55 +0200
Subject: [PATCH 165/268] chore(sdk): bump
---
agenta-cli/pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index a3fc9fb07b..c58cd169ce 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.5"
+version = "0.17.6a"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
From 624827275fab9694f586f30da9bb306d5b956e69 Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Fri, 28 Jun 2024 14:23:06 +0200
Subject: [PATCH 166/268] chore(sdk): AGE-341 format
---
agenta-cli/agenta/sdk/tracing/callbacks.py | 24 ++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/agenta-cli/agenta/sdk/tracing/callbacks.py b/agenta-cli/agenta/sdk/tracing/callbacks.py
index 9a5f931059..c69871a915 100644
--- a/agenta-cli/agenta/sdk/tracing/callbacks.py
+++ b/agenta-cli/agenta/sdk/tracing/callbacks.py
@@ -55,7 +55,9 @@ def log_stream_event(self, kwargs, response_obj, start_time, end_time):
"message": kwargs.get(
"complete_streaming_response"
), # the complete streamed response (only set if `completion(..stream=True)`)
- "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
+ "usage": response_obj.usage.dict()
+ if hasattr(response_obj, "usage")
+ else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -69,7 +71,9 @@ def log_success_event(
self._trace.end_span(
outputs={
"message": response_obj.choices[0].message.content,
- "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
+ "usage": response_obj.usage.dict()
+ if hasattr(response_obj, "usage")
+ else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -93,7 +97,9 @@ def log_failure_event(
self._trace.end_span(
outputs={
"message": kwargs["exception"], # the Exception raised
- "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
+ "usage": response_obj.usage.dict()
+ if hasattr(response_obj, "usage")
+ else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -109,7 +115,9 @@ async def async_log_stream_event(
"message": kwargs.get(
"complete_streaming_response"
), # the complete streamed response (only set if `completion(..stream=True)`)
- "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
+ "usage": response_obj.usage.dict()
+ if hasattr(response_obj, "usage")
+ else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -123,7 +131,9 @@ async def async_log_success_event(
self._trace.end_span(
outputs={
"message": response_obj.choices[0].message.content,
- "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
+ "usage": response_obj.usage.dict()
+ if hasattr(response_obj, "usage")
+ else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
@@ -147,7 +157,9 @@ async def async_log_failure_event(
self._trace.end_span(
outputs={
"message": kwargs["exception"], # the Exception raised
- "usage": response_obj.usage.dict() if hasattr(response_obj, "usage") else None, # litellm calculates usage
+ "usage": response_obj.usage.dict()
+ if hasattr(response_obj, "usage")
+ else None, # litellm calculates usage
"cost": kwargs.get(
"response_cost"
), # litellm calculates response cost
From 0a8167d988b3b97986f53287eb46afa9ea500748 Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Fri, 28 Jun 2024 14:23:56 +0200
Subject: [PATCH 167/268] chore(sdk): AGE-341 bump version
---
agenta-cli/pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index c58cd169ce..a12b403886 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.6a"
+version = "0.17.6a0"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
From 78472f2a3afa5f1b6652c8e59f9848996c44e54b Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Fri, 28 Jun 2024 18:40:59 +0100
Subject: [PATCH 168/268] docs(tool): corrected docs links in github readme
---
README.md | 58 +++++++++++++++++++++++++++++--------------------------
1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/README.md b/README.md
index 4c71c00d76..2c1483f5cd 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,6 @@
-
@@ -55,7 +54,6 @@
-
@@ -64,7 +62,6 @@
-
@@ -96,54 +93,58 @@
# ⭐️ Why Agenta?
-Agenta is an end-to-end LLM developer platform. It provides the tools for **prompt engineering and management**, ⚖️ **evaluation**, **human annotation**, and :rocket: **deployment**. All without imposing any restrictions on your choice of framework, library, or model.
+Agenta is an end-to-end LLM developer platform. It provides the tools for **prompt engineering and management**, ⚖️ **evaluation**, **human annotation**, and :rocket: **deployment**. All without imposing any restrictions on your choice of framework, library, or model.
-Agenta allows developers and product teams to collaborate in building production-grade LLM-powered applications in less time.
+Agenta allows developers and product teams to collaborate in building production-grade LLM-powered applications in less time.
### With Agenta, you can:
-- [🧪 **Experiment** and **compare** prompts](https://docs.agenta.ai/basic_guides/prompt_engineering) on [any LLM workflow](https://docs.agenta.ai/advanced_guides/custom_applications) (chain-of-prompts, Retrieval Augmented Generation (RAG), LLM agents...)
-- ✍️ Collect and [**annotate golden test sets**](https://docs.agenta.ai/basic_guides/test_sets) for evaluation
-- 📈 [**Evaluate** your application](https://docs.agenta.ai/basic_guides/automatic_evaluation) with pre-existing or [**custom evaluators**](https://docs.agenta.ai/advanced_guides/using_custom_evaluators)
-- [🔍 **Annotate** and **A/B test**](https://docs.agenta.aibasic_guides/human_evaluation) your applications with **human feedback**
-- [🤝 **Collaborate with product teams**](https://docs.agenta.ai/basic_guides/team_management) for prompt engineering and evaluation
-- [🚀 **Deploy your application**](https://docs.agenta.ai/basic_guides/deployment) in one-click in the UI, through CLI, or through github workflows.
+- [🧪 **Experiment** and **compare** prompts](https://docs.agenta.ai/prompt_management/prompt_engineering) on [any LLM workflow](https://docs.agenta.ai/prompt_management/custom_applications) (chain-of-prompts, Retrieval Augmented Generation (RAG), LLM agents...)
+- ✍️ Collect and [**annotate golden test sets**](https://docs.agenta.ai/evaluation/test_sets) for evaluation
+- 📈 [**Evaluate** your application](https://docs.agenta.ai/evaluation/automatic_evaluation) with pre-existing or [**custom evaluators**](https://docs.agenta.ai/evaluation/custom_evaluator)
+- [🔍 **Annotate** and **A/B test**](https://docs.agenta.ai/evaluation/human_evaluation) your applications with **human feedback**
+- [🤝 **Collaborate with product teams**](https://docs.agenta.ai/misc/team_management) for prompt engineering and evaluation
+- [🚀 **Deploy your application**](https://docs.agenta.ai/prompt_management/deployment) in one-click in the UI, through CLI, or through github workflows.
### Works with any LLM app workflow
Agenta enables prompt engineering and evaluation on any LLM app architecture:
+
- Chain of prompts
- RAG
- Agents
-- ...
-It works with any framework such as [Langchain](https://langchain.com), [LlamaIndex](https://www.llamaindex.ai/) and any LLM provider (openAI, Cohere, Mistral).
-
-[Jump here to see how to use your own custom application with agenta](/advanced_guides/custom_applications)
+It works with any framework such as [Langchain](https://langchain.com), [LlamaIndex](https://www.llamaindex.ai/) and any LLM provider (openAI, Cohere, Mistral).
# Quick Start
### [Get started for free](https://cloud.agenta.ai?utm_source=github&utm_medium=readme&utm_campaign=github)
-### [Explore the Docs](https://docs.agenta.ai)
-### [Create your first application in one-minute](https://docs.agenta.ai/quickstart/getting-started-ui)
-### [Create an application using Langchain](https://docs.agenta.ai/tutorials/first-app-with-langchain)
+
+### [Explore the Docs](https://docs.agenta.ai/getting_started/introduction)
+
+### [Create your first application in one-minute](https://docs.agenta.ai/getting_started/quick-start)
+
+### [Create an application using Langchain](https://docs.agenta.ai/guides/tutorials/first-app-with-langchain)
+
### [Self-host agenta](https://docs.agenta.ai/self-host/host-locally)
-### [Check the Cookbook](https://docs.agenta.ai/cookbook)
-# Features
+### [Check the Cookbook](https://docs.agenta.ai/guides/evaluation_from_sdk)
+# Features
-| Playground | Evaluation |
-| ------- | ------- |
-| Compare and version prompts for any LLM app, from single prompt to agents. | Define test sets, then evaluate manually or programmatically your different variants. |
-| Human annotation | Deployment |
-| Use Human annotator to A/B test and score your LLM apps. | When you are ready, deploy your LLM applications as APIs in one click. ![](https://github.com/Agenta-AI/agenta/blob/main/docs/images/endpoint.gif) |
+| Playground | Evaluation |
+| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Compare and version prompts for any LLM app, from single prompt to agents. | Define test sets, then evaluate manually or programmatically your different variants. |
+| Human annotation | Deployment |
+| Use Human annotator to A/B test and score your LLM apps. | When you are ready, deploy your LLM applications as APIs in one click. ![](https://github.com/Agenta-AI/agenta/blob/main/docs/images/endpoint.gif) |
# Enterprise Support
+
Contact us here for enterprise support and early access to agenta self-managed enterprise with Kubernetes support.
# Disabling Anonymized Tracking
+
By default, Agenta automatically reports anonymized basic usage statistics. This helps us understand how Agenta is used and track its overall usage and growth. This data does not include any sensitive information.
To disable anonymized telemetry, follow these steps:
@@ -154,6 +155,7 @@ To disable anonymized telemetry, follow these steps:
After making this change, restart Agenta Compose.
# ⭐️ Join Our Team
+
- [Founding Lead Software Engineer Backend](https://agentaai.notion.site/Founding-Lead-Software-Engineer-Backend-d70bfefed6d543778bc4aa38b543a678)
- [Founding Product Engineer Frontend](https://agentaai.notion.site/Founding-Product-Engineer-Frontend-b6d26a3e9b254be6b6c2bfffbf0b53c5)
- [Founding Product Designer](https://agentaai.notion.site/Founding-Product-Designer-96b1e760ff0241fd96632578d533a778)
@@ -164,12 +166,14 @@ We warmly welcome contributions to Agenta. Feel free to submit issues, fork the
We are usually hanging in our Slack. Feel free to [join our Slack and ask us anything](https://join.slack.com/t/agenta-hq/shared_invite/zt-1zsafop5i-Y7~ZySbhRZvKVPV5DO_7IA)
-Check out our [Contributing Guide](https://docs.agenta.ai/contributing/getting-started) for more information.
+Check out our [Contributing Guide](https://docs.agenta.ai/misc/contributing/getting-started) for more information.
## Contributors ✨
+
[![All Contributors](https://img.shields.io/badge/all_contributors-46-orange.svg?style=flat-square)](#contributors-)
+
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
@@ -190,7 +194,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
Pavle Janjusevic 🚇
- Kaosi Ezealigo 🐛 💻
+ Kaosiso Ezealigo 🐛 💻
Alberto Nunes 🐛
Maaz Bin Khawar 💻 👀 🧑🏫
Nehemiah Onyekachukwu Emmanuel 💻 💡 📖
From fca96ea34a988697786831d6b87f40fea6403f25 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Sun, 30 Jun 2024 16:23:17 +0200
Subject: [PATCH 169/268] support skipping documents in case of duplicated
values
---
.../migrations/mongo_to_postgres/utils.py | 68 +++++++++++++------
1 file changed, 46 insertions(+), 22 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index 38ff517f28..04dc8a08e3 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -1,5 +1,6 @@
import os
import asyncio
+import asyncpg
from datetime import datetime, timezone
from tqdm import tqdm
@@ -13,6 +14,7 @@
from sqlalchemy.future import select
from sqlalchemy.exc import NoResultFound
from agenta_backend.models.db_engine import db_engine
+from sqlalchemy.exc import IntegrityError
from agenta_backend.models.db_models import IDsMappingDB
from agenta_backend.models.base import Base
@@ -59,7 +61,7 @@ async def store_mapping(table_name, mongo_id, uuid):
await session.commit()
-async def get_mapped_uuid(table_name, mongo_id, print_result=False):
+async def get_mapped_uuid(table_name, mongo_id):
"""Retrieve the mapped UUID for a given MongoDB ObjectId and table name."""
async with db_engine.get_session() as session:
stmt = select(IDsMappingDB.uuid).filter(
@@ -86,8 +88,12 @@ def generate_uuid():
return uuid.uuid7()
-def update_migration_report(collection_name, total_docs, migrated_docs):
- migration_report[collection_name] = {"total": total_docs, "migrated": migrated_docs}
+def update_migration_report(collection_name, total_docs, migrated_docs, skipped_docs):
+ migration_report[collection_name] = {
+ "total": total_docs,
+ "migrated": migrated_docs,
+ "skipped": skipped_docs,
+ }
def print_migration_report():
@@ -96,7 +102,7 @@ def print_migration_report():
)
# Headers
- headers = ["Table", "Total in MongoDB", "Migrated to PostgreSQL"]
+ headers = ["Table", "Total in MongoDB", "Migrated to PostgreSQL", "Skipped"]
if not migration_report:
print("No data available in the migration report.")
@@ -114,18 +120,25 @@ def print_migration_report():
len(headers[2]),
max(len(str(counts["migrated"])) for counts in migration_report.values()),
)
+ max_skipped_length = max(
+ len(headers[3]),
+ max(len(str(counts.get("skipped", 0))) for counts in migration_report.values()),
+ )
# Set the header and divider with appropriate padding
- table_header = f"| {headers[0].ljust(max_table_length)} | {headers[1].ljust(max_total_length)} | {headers[2].ljust(max_migrated_length)} |"
- table_divider = f"|{'-' * (max_table_length + 2)}|{'-' * (max_total_length + 2)}|{'-' * (max_migrated_length + 2)}|"
+ table_header = f"| {headers[0].ljust(max_table_length)} | {headers[1].ljust(max_total_length)} | {headers[2].ljust(max_migrated_length)} | {headers[3].ljust(max_skipped_length)} |"
+ table_divider = f"|{'-' * (max_table_length + 2)}|{'-' * (max_total_length + 2)}|{'-' * (max_migrated_length + 2)}|{'-' * (max_skipped_length + 2)}|"
print(table_header)
print(table_divider)
for table, counts in migration_report.items():
- table_row = f"| {table.ljust(max_table_length)} | {str(counts['total']).ljust(max_total_length)} | {str(counts['migrated']).ljust(max_migrated_length)} |"
+ skipped = counts.get("skipped", 0)
+ table_row = f"| {table.ljust(max_table_length)} | {str(counts['total']).ljust(max_total_length)} | {str(counts['migrated']).ljust(max_migrated_length)} | {str(skipped).ljust(max_skipped_length)} |"
print(table_row)
+ print(table_divider)
+
async def migrate_collection(
collection_name, model_class, transformation_func, association_model=None
@@ -134,6 +147,7 @@ async def migrate_collection(
print(f"\n")
total_docs = mongo_db[collection_name].count_documents({})
migrated_docs = 0
+ skipped_docs = 0
async with db_engine.get_session() as session:
for skip in tqdm(
@@ -149,18 +163,28 @@ async def migrate_collection(
),
)
for document in batch:
- if association_model:
- (
- transformed_document,
- associated_entities,
- ) = await transformation_func(document)
- session.add(model_class(**transformed_document))
- for assoc_entity in associated_entities:
- session.add(association_model(**assoc_entity))
- else:
- transformed_document = await transformation_func(document)
- session.add(model_class(**transformed_document))
- await session.commit()
- migrated_docs += 1
-
- update_migration_report(collection_name, total_docs, migrated_docs)
+ try:
+ if association_model:
+ (
+ transformed_document,
+ associated_entities,
+ ) = await transformation_func(document)
+ session.add(model_class(**transformed_document))
+ for assoc_entity in associated_entities:
+ session.add(association_model(**assoc_entity))
+ else:
+ transformed_document = await transformation_func(document)
+ session.add(model_class(**transformed_document))
+ await session.commit()
+ migrated_docs += 1
+ except (asyncpg.exceptions.UniqueViolationError, IntegrityError) as e:
+ await session.rollback()
+ print(f"\nSkipping duplicate document in {collection_name}: {e}\n")
+ skipped_docs += 1
+ pass
+ except Exception as e:
+ print(f"Error migrating document in {collection_name}: {e}")
+ print(f"Failing migration for collection: {collection_name}")
+ raise
+
+ update_migration_report(collection_name, total_docs, migrated_docs, skipped_docs)
From 3fa794be21877e16cef71f05e8c1272319cb3242 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Sun, 30 Jun 2024 19:19:46 +0200
Subject: [PATCH 170/268] separate db engine for migration
---
.../migrations/mongo_to_postgres/db_engine.py | 152 ++++++++++++++++++
.../migrations/mongo_to_postgres/utils.py | 2 +-
2 files changed, 153 insertions(+), 1 deletion(-)
create mode 100644 agenta-backend/agenta_backend/migrations/mongo_to_postgres/db_engine.py
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/db_engine.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/db_engine.py
new file mode 100644
index 0000000000..30aa53262d
--- /dev/null
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/db_engine.py
@@ -0,0 +1,152 @@
+import os
+import logging
+from asyncio import current_task
+from typing import AsyncGenerator
+from contextlib import asynccontextmanager
+
+from sqlalchemy.ext.asyncio import (
+ AsyncSession,
+ create_async_engine,
+ async_sessionmaker,
+ async_scoped_session,
+)
+
+from agenta_backend.utils.common import isCloudEE
+
+if isCloudEE():
+ from agenta_backend.commons.observability.models.db import SpanDB
+ from agenta_backend.commons.models.db_models import (
+ APIKeyDB,
+ WorkspaceDB,
+ OrganizationDB,
+ AppDB_ as AppDB,
+ UserDB_ as UserDB,
+ ImageDB_ as ImageDB,
+ TestSetDB_ as TestSetDB,
+ AppVariantDB_ as AppVariantDB,
+ EvaluationDB_ as EvaluationDB,
+ DeploymentDB_ as DeploymentDB,
+ VariantBaseDB_ as VariantBaseDB,
+ AppEnvironmentDB_ as AppEnvironmentDB,
+ AppEnvironmentRevisionDB_ as AppEnvironmentRevisionDB,
+ EvaluatorConfigDB_ as EvaluatorConfigDB,
+ HumanEvaluationDB_ as HumanEvaluationDB,
+ EvaluationScenarioDB_ as EvaluationScenarioDB,
+ HumanEvaluationScenarioDB_ as HumanEvaluationScenarioDB,
+ )
+else:
+ from agenta_backend.models.db_models import (
+ AppDB,
+ UserDB,
+ ImageDB,
+ TestSetDB,
+ EvaluationDB,
+ DeploymentDB,
+ AppVariantDB,
+ VariantBaseDB,
+ AppEnvironmentDB,
+ AppEnvironmentRevisionDB,
+ EvaluatorConfigDB,
+ HumanEvaluationDB,
+ EvaluationScenarioDB,
+ HumanEvaluationScenarioDB,
+ )
+
+from agenta_backend.models.db_models import (
+ TemplateDB,
+ AppVariantRevisionsDB,
+)
+
+models = [
+ AppDB,
+ UserDB,
+ ImageDB,
+ TestSetDB,
+ TemplateDB,
+ AppVariantDB,
+ DeploymentDB,
+ EvaluationDB,
+ VariantBaseDB,
+ AppEnvironmentDB,
+ AppEnvironmentRevisionDB,
+ EvaluatorConfigDB,
+ HumanEvaluationDB,
+ EvaluationScenarioDB,
+ AppVariantRevisionsDB,
+ HumanEvaluationScenarioDB,
+]
+
+if isCloudEE():
+ models.extend([OrganizationDB, WorkspaceDB, APIKeyDB]) # type: ignore
+
+
+# Configure and set logging level
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class DBEngine:
+ """
+ Database engine to initialize SQLAlchemy and return the engine based on mode.
+ """
+
+ def __init__(self) -> None:
+ self.mode = os.environ.get("DATABASE_MODE", "v2")
+ self.db_url = f"{os.environ.get('POSTGRES_URI')}"
+ self.engine = create_async_engine(url=self.db_url)
+ self.async_session_maker = async_sessionmaker(
+ bind=self.engine, class_=AsyncSession, expire_on_commit=False
+ )
+ self.async_session = async_scoped_session(
+ session_factory=self.async_session_maker, scopefunc=current_task
+ )
+
+ async def init_db(self):
+ """
+ Initialize the database based on the mode and create all tables.
+ """
+ async with self.engine.begin() as conn:
+ # Drop all existing tables (if needed)
+ # await conn.run_sync(Base.metadata.drop_all)
+ # Create tables
+ for model in models:
+ await conn.run_sync(model.metadata.create_all)
+ logger.info(f"Using {self.mode} database...")
+
+ async def remove_db(self) -> None:
+ """
+ Remove the database based on the mode.
+ """
+ async with self.engine.begin() as conn:
+ for model in models:
+ await conn.run_sync(model.metadata.drop_all)
+
+ @asynccontextmanager
+ async def get_session(self) -> AsyncGenerator[AsyncSession, None]:
+ session = self.async_session()
+ try:
+ yield session
+ except Exception as e:
+ await session.rollback()
+ raise e
+ finally:
+ await session.close()
+
+ async def close(self):
+ """
+ Closes and dispose all the connections using the engine.
+
+ :raises Exception: if engine is initialized
+ """
+
+ if self.engine is None:
+ raise Exception("DBEngine is not initialized")
+
+ await self.engine.dispose()
+
+ self.engine = None
+ self.async_session_maker = None
+ self.async_session = None
+
+
+db_engine = DBEngine()
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index 04dc8a08e3..90bf2fbc93 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -13,7 +13,7 @@
import uuid_utils.compat as uuid
from sqlalchemy.future import select
from sqlalchemy.exc import NoResultFound
-from agenta_backend.models.db_engine import db_engine
+from agenta_backend.migrations.mongo_to_postgres.db_engine import db_engine
from sqlalchemy.exc import IntegrityError
from agenta_backend.models.db_models import IDsMappingDB
From 3c43f07a8f593f10d46f876d749b1fd2e08cfddd Mon Sep 17 00:00:00 2001
From: Abram
Date: Mon, 1 Jul 2024 18:14:16 +0100
Subject: [PATCH 171/268] minor refactor (backend): remove redundant
UserWorkspaceDB model from cloud
---
agenta-backend/agenta_backend/models/db_engine.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index aeec1046c1..361bff503d 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -21,7 +21,6 @@
OrganizationDB,
InvitationDB,
UserOrganizationDB,
- UserWorkspaceDB,
WorkspaceMemberDB,
AppDB_ as AppDB,
UserDB_ as UserDB,
@@ -81,7 +80,7 @@
]
if isCloudEE():
- models.extend([OrganizationDB, WorkspaceDB, APIKeyDB, InvitationDB, UserOrganizationDB, UserWorkspaceDB, WorkspaceMemberDB]) # type: ignore
+ models.extend([OrganizationDB, WorkspaceDB, APIKeyDB, InvitationDB, UserOrganizationDB, WorkspaceMemberDB]) # type: ignore
# Configure and set logging level
From abc39ab93e9ccc503a82097cff0a2e4c0c811275 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Mon, 1 Jul 2024 20:08:10 +0200
Subject: [PATCH 172/268] small refactoring
---
.../migrations/mongo_to_postgres/mongo_db_engine.py | 13 +++++++++++++
.../migrations/mongo_to_postgres/utils.py | 9 ++-------
2 files changed, 15 insertions(+), 7 deletions(-)
create mode 100644 agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
new file mode 100644
index 0000000000..828a9753b2
--- /dev/null
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
@@ -0,0 +1,13 @@
+import os
+from pymongo import MongoClient
+
+# MongoDB connection
+MONGO_URI = os.environ.get("MONGODB_URI")
+DATABASE_MODE = os.environ.get("DATABASE_MODE")
+mongo_client = MongoClient(MONGO_URI)
+mongo_db_name = f"agenta_{DATABASE_MODE}"
+mongo_db = mongo_client[mongo_db_name]
+
+
+def get_mongo_db():
+ return mongo_db
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index 90bf2fbc93..c4f5a0a0c9 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -4,7 +4,6 @@
from datetime import datetime, timezone
from tqdm import tqdm
-from pymongo import MongoClient
from bson import ObjectId, DBRef
from sqlalchemy import MetaData, Column, String, DateTime, text, create_engine
from sqlalchemy.dialects.postgresql import UUID
@@ -18,15 +17,11 @@
from agenta_backend.models.db_models import IDsMappingDB
from agenta_backend.models.base import Base
+from agenta_backend.migrations.mongo_to_postgres.mongo_db_engine import get_mongo_db
BATCH_SIZE = 1000
-# MongoDB connection
-MONGO_URI = os.environ.get("MONGODB_URI")
-DATABASE_MODE = os.environ.get("DATABASE_MODE")
-mongo_client = MongoClient(MONGO_URI)
-mongo_db_name = f"agenta_{DATABASE_MODE}"
-mongo_db = mongo_client[mongo_db_name]
+mongo_db = get_mongo_db()
migration_report = {}
From e37497361ab04124a31350cbb73398f3121c5f52 Mon Sep 17 00:00:00 2001
From: Abram
Date: Mon, 1 Jul 2024 19:24:38 +0100
Subject: [PATCH 173/268] refactor (docs): moved and renamed migration.mdx to
migration folder under self-host
---
.../{migration.mdx => migration/migration-to-mongodb.mdx} | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
rename docs/self-host/{migration.mdx => migration/migration-to-mongodb.mdx} (98%)
diff --git a/docs/self-host/migration.mdx b/docs/self-host/migration/migration-to-mongodb.mdx
similarity index 98%
rename from docs/self-host/migration.mdx
rename to docs/self-host/migration/migration-to-mongodb.mdx
index 75d272cfd9..d382b85581 100644
--- a/docs/self-host/migration.mdx
+++ b/docs/self-host/migration/migration-to-mongodb.mdx
@@ -1,5 +1,5 @@
---
-title: Migration
+title: Migration to MongoDB (Deprecated)
description: 'This is a step-by-step guide for upgrading to the latest version of Agenta'
---
From f114e42f10fbc1071f33be4a43a89caf4f9957d8 Mon Sep 17 00:00:00 2001
From: Abram
Date: Mon, 1 Jul 2024 19:24:56 +0100
Subject: [PATCH 174/268] feat (docs): created documentation for Postgres
migration
---
.../migration/migration-to-postgres.mdx | 46 +++++++++++++++++++
1 file changed, 46 insertions(+)
create mode 100644 docs/self-host/migration/migration-to-postgres.mdx
diff --git a/docs/self-host/migration/migration-to-postgres.mdx b/docs/self-host/migration/migration-to-postgres.mdx
new file mode 100644
index 0000000000..82c1227a47
--- /dev/null
+++ b/docs/self-host/migration/migration-to-postgres.mdx
@@ -0,0 +1,46 @@
+---
+title: Migration to PostgreSQL
+description: 'This is a step-by-step instructions for migrating Agenta to the newly released PostgreSQL version.'
+---
+
+This guide provides step-by-step instructions for migrating your Agenta instance from MongoDB to the newly released PostgreSQL
+
+> ⚠️ As of version 0.11.0, Agenta is transitioning from MongoDB to PostgreSQL. Users need to migrate their MongoDB databases to this latest version, as this will be the only version receiving feature updates and patches.
+
+**Table of contents:**
+ - [Prepare for Migration](#prepare-for-migration)
+ - [Start the Migration](#start-the-migration)
+ - [Post Migration](#post-migration)
+
+
+### Prepare for Migration
+
+Before starting the migration, ensure that you have backed up your production data.
+
+While the migration will not modify any data in your MongoDB instance, it is highly recommended that you create a [backup](https://www.mongodb.com/docs/manual/tutorial/backup-and-restore-tools/) of your database in the MongoDB instance before running the migration script. This ensures you have a recovery point in case of any issues.
+
+### Start the Migration
+
+1. Start the local instance of Agenta, and ensure that both MongoDB and Postgres instances are active.
+2. Use the following commands to initiate the migration:
+```bash
+docker ps
+```
+The above command will list the running docker containers that you have. Copy the backend container id and execute bash.
+
+```bash
+docker exec -it {backend-container-id} bash
+```
+
+Next, navigate to the `mongo_to_postgres` folder to execute the migration script.
+
+```bash
+cd /app/agenta_backend/migrations/mongo_to_postgres
+python3 migration.py
+```
+
+### Post Migration
+
+After completing the migration, ensure you check the data integrity in PostgreSQL by accessing Agenta on the web and verifying that your data is intact and everything works fine.
+
+In the event that you encounter issues and need to revert the migration, rest assured that your data in the MongoDB instance is still intact. All you need to do to revert is to check out the last commit you were on before the PostgreSQL migration and create a Github [issue](https://github.com/Agenta-AI/agenta/issues/new?assignees=&labels=postgres,bug,Backend&projects=&template=bug_report.md&title=[Bug]+) describing the problem you encountered.
From 9d4b31ce66df3834197dc5528c1fcdc60250241e Mon Sep 17 00:00:00 2001
From: Abram
Date: Mon, 1 Jul 2024 19:30:23 +0100
Subject: [PATCH 175/268] minor refactor (docs): replace tag with
'version'
---
docs/self-host/migration/migration-to-postgres.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/self-host/migration/migration-to-postgres.mdx b/docs/self-host/migration/migration-to-postgres.mdx
index 82c1227a47..2d47e151c3 100644
--- a/docs/self-host/migration/migration-to-postgres.mdx
+++ b/docs/self-host/migration/migration-to-postgres.mdx
@@ -3,7 +3,7 @@ title: Migration to PostgreSQL
description: 'This is a step-by-step instructions for migrating Agenta to the newly released PostgreSQL version.'
---
-This guide provides step-by-step instructions for migrating your Agenta instance from MongoDB to the newly released PostgreSQL
+This guide provides step-by-step instructions for migrating your Agenta instance from MongoDB to the newly released PostgreSQL version.
> ⚠️ As of version 0.11.0, Agenta is transitioning from MongoDB to PostgreSQL. Users need to migrate their MongoDB databases to this latest version, as this will be the only version receiving feature updates and patches.
From 9d5b06f846a6b4769594b162458303970404a0d8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 2 Jul 2024 04:04:59 +0000
Subject: [PATCH 176/268] build(deps-dev): bump setuptools from 70.1.0 to
70.2.0 in /agenta-cli
Bumps [setuptools](https://github.com/pypa/setuptools) from 70.1.0 to 70.2.0.
- [Release notes](https://github.com/pypa/setuptools/releases)
- [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst)
- [Commits](https://github.com/pypa/setuptools/compare/v70.1.0...v70.2.0)
---
updated-dependencies:
- dependency-name: setuptools
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
agenta-cli/poetry.lock | 12 ++++++------
agenta-cli/pyproject.toml | 2 +-
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock
index a090925b39..32ccb16192 100644
--- a/agenta-cli/poetry.lock
+++ b/agenta-cli/poetry.lock
@@ -1290,18 +1290,18 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
[[package]]
name = "setuptools"
-version = "70.1.0"
+version = "70.2.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "setuptools-70.1.0-py3-none-any.whl", hash = "sha256:d9b8b771455a97c8a9f3ab3448ebe0b29b5e105f1228bba41028be116985a267"},
- {file = "setuptools-70.1.0.tar.gz", hash = "sha256:01a1e793faa5bd89abc851fa15d0a0db26f160890c7102cd8dce643e886b47f5"},
+ {file = "setuptools-70.2.0-py3-none-any.whl", hash = "sha256:b8b8060bb426838fbe942479c90296ce976249451118ef566a5a0b7d8b78fb05"},
+ {file = "setuptools-70.2.0.tar.gz", hash = "sha256:bd63e505105011b25c3c11f753f7e3b8465ea739efddaccef8f0efac2137bac1"},
]
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
[[package]]
name = "shellingham"
@@ -1809,4 +1809,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "ec2c2c9036a752dacfcc4584a6c1868e26813b1f6e7578d3ae2796fd119be3a2"
+content-hash = "95365188fc7c2f1f28f7d354fbcff40bac4c3cbb021a78400f71fa68bf31bce1"
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index a3fc9fb07b..3ba8848ef2 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -34,7 +34,7 @@ cachetools = "^5.3.3"
[tool.poetry.dev-dependencies]
pytest = "^8.2"
-setuptools = "^70.1.0"
+setuptools = "^70.2.0"
[build-system]
requires = ["poetry-core"]
From 5af8bb63f66a0b70294c8e580a74405c7adb8b7b Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Tue, 2 Jul 2024 08:36:33 +0200
Subject: [PATCH 177/268] Update pyproject.toml
---
agenta-cli/pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index a12b403886..a3fc9fb07b 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.6a0"
+version = "0.17.5"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
From 120c94be11497b170874a93d3d2009b34b239616 Mon Sep 17 00:00:00 2001
From: ashrafchowdury
Date: Tue, 2 Jul 2024 14:21:32 +0600
Subject: [PATCH 178/268] refactor: removed code duplication from functions
---
.../src/components/ChatInputs/ChatInputs.tsx | 25 ++++++++-----------
1 file changed, 10 insertions(+), 15 deletions(-)
diff --git a/agenta-web/src/components/ChatInputs/ChatInputs.tsx b/agenta-web/src/components/ChatInputs/ChatInputs.tsx
index c4932b7538..c64df839c4 100644
--- a/agenta-web/src/components/ChatInputs/ChatInputs.tsx
+++ b/agenta-web/src/components/ChatInputs/ChatInputs.tsx
@@ -88,39 +88,34 @@ const ChatInputs: React.FC = ({
disableEditRole = true
}
- const handleRoleChange = (index: number, role: ChatRole) => {
- const newMessages = [...messages]
- newMessages[index].role = role
+ const updateMessages = (newMessages: ChatMessage[]) => {
setMessages(newMessages)
if (onChangeRef.current) {
onChangeRef.current(cloneDeep(newMessages))
}
}
+ const handleRoleChange = (index: number, role: ChatRole) => {
+ const newMessages = [...messages]
+ newMessages[index].role = role
+ updateMessages(newMessages)
+ }
+
const handleInputChange = (index: number, event: React.ChangeEvent) => {
const {value} = event.target
const newMessages = [...messages]
newMessages[index].content = value
- setMessages(newMessages)
- if (onChangeRef.current) {
- onChangeRef.current(cloneDeep(newMessages))
- }
+ updateMessages(newMessages)
}
const handleDelete = (index: number) => {
const newMessages = messages.filter((_, i) => i !== index)
- setMessages(newMessages)
- if (onChangeRef.current) {
- onChangeRef.current(cloneDeep(newMessages))
- }
+ updateMessages(newMessages)
}
const handleAdd = () => {
const newMessages = messages.concat([getDefaultNewMessage()])
- setMessages(newMessages)
- if (onChangeRef.current) {
- onChangeRef.current(cloneDeep(newMessages))
- }
+ updateMessages(newMessages)
}
useEffect(() => {
From 573ca2a680c9fa10f6c851272043b45469686103 Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 2 Jul 2024 11:58:48 +0100
Subject: [PATCH 179/268] refactor (backend): replace deprecated legacy query
api .one_or_none() to .first()
---
.../agenta_backend/services/db_manager.py | 100 +++++++++---------
1 file changed, 50 insertions(+), 50 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index ffd1539113..1bdca299ff 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -166,7 +166,7 @@ async def get_image_by_id(image_id: str) -> ImageDB:
result = await session.execute(
select(ImageDB).filter_by(id=uuid.UUID(image_id))
)
- image = result.scalars().one_or_none()
+ image = result.scalars().first()
return image
@@ -187,7 +187,7 @@ async def fetch_app_by_id(app_id: str) -> AppDB:
)
result = await session.execute(base_query)
- app = result.unique().scalars().one_or_none()
+ app = result.unique().scalars().first()
return app
@@ -222,7 +222,7 @@ async def fetch_app_variant_by_id(
)
result = await session.execute(query.filter_by(id=uuid.UUID(app_variant_id)))
- app_variant = result.scalars().one_or_none()
+ app_variant = result.scalars().first()
return app_variant
@@ -242,7 +242,7 @@ async def fetch_app_variant_by_base_id(base_id: str) -> Optional[AppVariantDB]:
result = await session.execute(
select(AppVariantDB).filter_by(base_id=uuid.UUID(base_id))
)
- app_variant = result.scalars().one_or_none()
+ app_variant = result.scalars().first()
return app_variant
@@ -268,7 +268,7 @@ async def fetch_app_variant_by_base_id_and_config_name(
base_id=uuid.UUID(base_id), config_name=config_name
)
)
- app_variant = result.scalars().one_or_none()
+ app_variant = result.scalars().first()
return app_variant
@@ -294,7 +294,7 @@ async def fetch_app_variant_revision_by_variant(
variant_id=uuid.UUID(app_variant_id), revision=revision
)
)
- app_variant_revision = result.scalars().one_or_none()
+ app_variant_revision = result.scalars().first()
if app_variant_revision is None:
raise Exception(
f"app variant revision for app_variant {app_variant_id} and revision {revision} not found"
@@ -320,7 +320,7 @@ async def fetch_base_by_id(base_id: str) -> Optional[VariantBaseDB]:
)
.filter_by(id=uuid.UUID(base_id))
)
- base = result.scalars().one_or_none()
+ base = result.scalars().first()
return base
@@ -343,7 +343,7 @@ async def fetch_app_variant_by_name_and_appid(
variant_name=variant_name, app_id=uuid.UUID(app_id)
)
)
- app_variant = result.scalars().one_or_none()
+ app_variant = result.scalars().first()
return app_variant
@@ -684,7 +684,7 @@ async def get_deployment_by_id(
result = await session.execute(
select(DeploymentDB).filter_by(id=uuid.UUID(deployment_id))
)
- deployment = result.scalars().one_or_none()
+ deployment = result.scalars().first()
return deployment
@@ -702,7 +702,7 @@ async def get_deployment_by_appid(app_id: str) -> DeploymentDB:
result = await session.execute(
select(DeploymentDB).filter_by(app_id=uuid.UUID(app_id))
)
- deployment = result.scalars().one_or_none()
+ deployment = result.scalars().first()
logger.debug(f"deployment: {deployment}")
return deployment
@@ -781,7 +781,7 @@ async def get_user(user_uid: str) -> UserDB:
async with db_engine.get_session() as session:
result = await session.execute(select(UserDB).filter_by(uid=user_uid))
- user = result.scalars().one_or_none()
+ user = result.scalars().first()
if user is None and isCloudEE():
raise Exception("Please login or signup")
@@ -814,7 +814,7 @@ async def get_user_with_id(user_id: str):
async with db_engine.get_session() as session:
result = await session.execute(select(UserDB).filter_by(id=uuid.UUID(user_id)))
- user = result.scalars().one_or_none()
+ user = result.scalars().first()
if user is None:
logger.error("Failed to get user with id")
raise Exception("Error while getting user")
@@ -844,7 +844,7 @@ async def get_user_with_email(email: str):
async with db_engine.get_session() as session:
result = await session.execute(select(UserDB).filter_by(email=email))
- user = result.scalars().one_or_none()
+ user = result.scalars().first()
return user
@@ -893,7 +893,7 @@ async def get_orga_image_instance_by_docker_id(
)
result = await session.execute(query)
- image = result.scalars().one_or_none()
+ image = result.scalars().first()
return image
@@ -931,7 +931,7 @@ async def get_orga_image_instance_by_uri(
)
result = await session.execute(query)
- image = result.scalars().one_or_none()
+ image = result.scalars().first()
return image
@@ -947,7 +947,7 @@ async def get_app_instance_by_id(app_id: str) -> AppDB:
async with db_engine.get_session() as session:
result = await session.execute(select(AppDB).filter_by(id=uuid.UUID(app_id)))
- app = result.scalars().one_or_none()
+ app = result.scalars().first()
return app
@@ -1158,7 +1158,7 @@ async def remove_deployment(deployment_id: str):
result = await session.execute(
select(DeploymentDB).filter_by(id=uuid.UUID(deployment_id))
)
- deployment = result.scalars().one_or_none()
+ deployment = result.scalars().first()
if not deployment:
raise NoResultFound(f"Deployment with {deployment_id} not found")
@@ -1254,7 +1254,7 @@ async def deploy_to_environment(
app_id=app_variant_db.app_id, name=environment_name
)
)
- environment_db = result.scalars().one_or_none()
+ environment_db = result.scalars().first()
if environment_db is None:
raise ValueError(f"Environment {environment_name} not found")
@@ -1298,7 +1298,7 @@ async def fetch_app_environment_by_name_and_appid(
joinedload(AppEnvironmentDB.deployed_app_variant.of_type(AppVariantDB)), # type: ignore
)
result = await session.execute(query)
- app_environment = result.scalars().one_or_none()
+ app_environment = result.scalars().first()
return app_environment
@@ -1318,7 +1318,7 @@ async def fetch_app_variant_revision_by_id(
result = await session.execute(
select(AppVariantRevisionsDB).filter_by(id=uuid.UUID(variant_revision_id))
)
- app_revision = result.scalars().one_or_none()
+ app_revision = result.scalars().first()
return app_revision
@@ -1359,7 +1359,7 @@ async def fetch_app_environment_revision(revision_id: str) -> AppEnvironmentRevi
result = await session.execute(
select(AppEnvironmentRevisionDB).filter_by(id=uuid.UUID(revision_id))
)
- environment_revision = result.scalars().one_or_none()
+ environment_revision = result.scalars().first()
return environment_revision
@@ -1398,7 +1398,7 @@ async def update_app_environment_deployed_variant_revision(
id=uuid.UUID(deployed_variant_revision)
)
)
- app_variant_revision = result.scalars().one_or_none()
+ app_variant_revision = result.scalars().first()
if app_variant_revision is None:
raise Exception(
f"App variant revision {deployed_variant_revision} not found"
@@ -1407,7 +1407,7 @@ async def update_app_environment_deployed_variant_revision(
app_environment_result = await session.execute(
select(AppEnvironmentDB).filter_by(id=uuid.UUID(app_environment_id))
)
- app_environment = app_environment_result.scalars().one_or_none()
+ app_environment = app_environment_result.scalars().first()
app_environment.deployed_app_variant_revision_id = app_variant_revision.id # type: ignore
await session.commit()
@@ -1600,7 +1600,7 @@ async def fetch_app_variant_revision(app_variant: str, revision_number: int):
) # type: ignore
)
result = await session.execute(query)
- app_variant_revisions = result.scalars().one_or_none()
+ app_variant_revisions = result.scalars().first()
return app_variant_revisions
@@ -1645,7 +1645,7 @@ async def remove_image(image: ImageDB):
async with db_engine.get_session() as session:
result = await session.execute(select(ImageDB).filter_by(id=image.id))
- image = result.scalars().one_or_none()
+ image = result.scalars().first()
await session.delete(image)
await session.commit()
@@ -1739,7 +1739,7 @@ async def remove_base_from_db(base_id: str):
result = await session.execute(
select(VariantBaseDB).filter_by(id=uuid.UUID(base_id))
)
- base = result.scalars().one_or_none()
+ base = result.scalars().first()
if not base:
raise NoResultFound(f"Base with id {base_id} not found")
@@ -1764,7 +1764,7 @@ async def remove_app_by_id(app_id: str):
assert app_id is not None, "app_id cannot be None"
async with db_engine.get_session() as session:
result = await session.execute(select(AppDB).filter_by(id=uuid.UUID(app_id)))
- app_db = result.scalars().one_or_none()
+ app_db = result.scalars().first()
if not app_db:
raise NoResultFound(f"App with id {app_id} not found")
@@ -1792,7 +1792,7 @@ async def update_variant_parameters(
result = await session.execute(
select(AppVariantDB).filter_by(id=uuid.UUID(app_variant_id))
)
- app_variant_db = result.scalars().one_or_none()
+ app_variant_db = result.scalars().first()
if not app_variant_db:
raise NoResultFound(f"App variant with id {app_variant_id} not found")
@@ -1836,7 +1836,7 @@ async def get_app_variant_instance_by_id(variant_id: str) -> AppVariantDB:
.options(joinedload(AppVariantDB.base), joinedload(AppVariantDB.app))
.filter_by(id=uuid.UUID(variant_id))
)
- app_variant_db = result.scalars().one_or_none()
+ app_variant_db = result.scalars().first()
return app_variant_db
@@ -1856,7 +1856,7 @@ async def get_app_variant_revision_by_id(
result = await session.execute(
select(AppVariantRevisionsDB).filter_by(id=uuid.UUID(variant_revision_id))
)
- variant_revision_db = result.scalars().one_or_none()
+ variant_revision_db = result.scalars().first()
return variant_revision_db
@@ -1878,7 +1878,7 @@ async def fetch_testset_by_id(testset_id: str) -> Optional[TestSetDB]:
async with db_engine.get_session() as session:
result = await session.execute(select(TestSetDB).filter_by(id=testset_uuid))
- testset = result.scalars().one_or_none()
+ testset = result.scalars().first()
return testset
@@ -1921,7 +1921,7 @@ async def update_testset(testset_id: str, values_to_update: dict) -> None:
result = await session.execute(
select(TestSetDB).filter_by(id=uuid.UUID(testset_id))
)
- testset = result.scalars().one_or_none()
+ testset = result.scalars().first()
# Validate keys in values_to_update and update attributes
valid_keys = [key for key in values_to_update.keys() if hasattr(testset, key)]
@@ -1973,7 +1973,7 @@ async def fetch_evaluation_by_id(evaluation_id: str) -> Optional[EvaluationDB]:
joinedload(EvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
)
result = await session.execute(query)
- evaluation = result.scalars().one_or_none()
+ evaluation = result.scalars().first()
return evaluation
@@ -2145,7 +2145,7 @@ async def fetch_human_evaluation_by_id(
joinedload(HumanEvaluationDB.testset).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
)
result = await session.execute(query)
- evaluation = result.scalars().one_or_none()
+ evaluation = result.scalars().first()
return evaluation
@@ -2164,7 +2164,7 @@ async def update_human_evaluation(evaluation_id: str, values_to_update: dict):
result = await session.execute(
select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
)
- human_evaluation = result.scalars().one_or_none()
+ human_evaluation = result.scalars().first()
if not human_evaluation:
raise NoResultFound(f"Human evaluation with id {evaluation_id} not found")
@@ -2188,7 +2188,7 @@ async def delete_human_evaluation(evaluation_id: str):
result = await session.execute(
select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
)
- evaluation = result.scalars().one_or_none()
+ evaluation = result.scalars().first()
if not evaluation:
raise NoResultFound(f"Human evaluation with id {evaluation_id} not found")
@@ -2250,7 +2250,7 @@ async def update_human_evaluation_scenario(
id=uuid.UUID(evaluation_scenario_id)
)
)
- human_evaluation_scenario = result.scalars().one_or_none()
+ human_evaluation_scenario = result.scalars().first()
if not human_evaluation_scenario:
raise NoResultFound(
f"Human evaluation scenario with id {evaluation_scenario_id} not found"
@@ -2347,7 +2347,7 @@ async def fetch_evaluation_scenario_by_id(
result = await session.execute(
select(EvaluationScenarioDB).filter_by(id=uuid.UUID(evaluation_scenario_id))
)
- evaluation_scenario = result.scalars().one_or_none()
+ evaluation_scenario = result.scalars().first()
return evaluation_scenario
@@ -2368,7 +2368,7 @@ async def fetch_human_evaluation_scenario_by_id(
id=uuid.UUID(evaluation_scenario_id)
)
)
- evaluation_scenario = result.scalars().one_or_none()
+ evaluation_scenario = result.scalars().first()
return evaluation_scenario
@@ -2389,7 +2389,7 @@ async def fetch_human_evaluation_scenario_by_evaluation_id(
evaluation_id=evaluation.id # type: ignore
)
)
- human_eval_scenario = result.scalars().one_or_none()
+ human_eval_scenario = result.scalars().first()
return human_eval_scenario
@@ -2433,7 +2433,7 @@ async def add_template(**kwargs: dict) -> str:
result = await session.execute(
select(TemplateDB).filter_by(tag_id=kwargs["tag_id"])
)
- existing_template = result.scalars().one_or_none()
+ existing_template = result.scalars().first()
if existing_template is None:
db_template = TemplateDB(**kwargs)
@@ -2513,7 +2513,7 @@ async def get_template(template_id: str) -> TemplateDB:
result = await session.execute(
select(TemplateDB).filter_by(id=uuid.UUID(template_id))
)
- template_db = result.scalars().one_or_none()
+ template_db = result.scalars().first()
return template_db
@@ -2570,7 +2570,7 @@ async def update_base(
result = await session.execute(
select(VariantBaseDB).filter_by(id=uuid.UUID(base_id))
)
- base = result.scalars().one_or_none()
+ base = result.scalars().first()
for key, value in kwargs.items():
if hasattr(base, key):
setattr(base, key, value)
@@ -2607,7 +2607,7 @@ async def update_app_variant(
result = await session.execute(
select(AppVariantDB).filter_by(id=uuid.UUID(app_variant_id))
)
- app_variant = result.scalars().one_or_none()
+ app_variant = result.scalars().first()
if not app_variant:
raise NoResultFound(f"App variant with id {app_variant_id} not found")
@@ -2657,7 +2657,7 @@ async def fetch_app_by_name_and_parameters(
query = base_query.join(UserDB).filter(UserDB.uid == user_uid)
result = await session.execute(query)
- app_db = result.unique().scalars().one_or_none()
+ app_db = result.unique().scalars().first()
return app_db
@@ -2939,7 +2939,7 @@ async def fetch_evaluator_config(evaluator_config_id: str):
result = await session.execute(
select(EvaluatorConfigDB).filter_by(id=uuid.UUID(evaluator_config_id))
)
- evaluator_config = result.scalars().one_or_none()
+ evaluator_config = result.scalars().first()
return evaluator_config
@@ -2988,7 +2988,7 @@ async def fetch_evaluator_config_by_appId(
app_id=uuid.UUID(app_id), evaluator_key=evaluator_name
)
)
- evaluator_config = result.scalars().one_or_none()
+ evaluator_config = result.scalars().first()
return evaluator_config
@@ -3039,7 +3039,7 @@ async def update_evaluator_config(
result = await session.execute(
select(EvaluatorConfigDB).filter_by(id=uuid.UUID(evaluator_config_id))
)
- evaluator_config = result.scalars().one_or_none()
+ evaluator_config = result.scalars().first()
if not evaluator_config:
raise NoResultFound(
f"Evaluator config with id {evaluator_config_id} not found"
@@ -3064,7 +3064,7 @@ async def delete_evaluator_config(evaluator_config_id: str) -> bool:
result = await session.execute(
select(EvaluatorConfigDB).filter_by(id=uuid.UUID(evaluator_config_id))
)
- evaluator_config = result.scalars().one_or_none()
+ evaluator_config = result.scalars().first()
if evaluator_config is None:
raise NoResultFound(
f"Evaluator config with id {evaluator_config_id} not found"
@@ -3094,7 +3094,7 @@ async def update_evaluation(
result = await session.execute(
select(EvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
)
- evaluation = result.scalars().one_or_none()
+ evaluation = result.scalars().first()
for key, value in updates.items():
if hasattr(evaluation, key):
setattr(evaluation, key, value)
From 7c23e2406b3392ff6f220ff1069f4d2b7e66c7c9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 2 Jul 2024 14:39:44 +0000
Subject: [PATCH 180/268] build(deps): bump importlib-metadata from 7.2.0 to
8.0.0 in /agenta-cli
Bumps [importlib-metadata](https://github.com/python/importlib_metadata) from 7.2.0 to 8.0.0.
- [Release notes](https://github.com/python/importlib_metadata/releases)
- [Changelog](https://github.com/python/importlib_metadata/blob/main/NEWS.rst)
- [Commits](https://github.com/python/importlib_metadata/compare/v7.2.0...v8.0.0)
---
updated-dependencies:
- dependency-name: importlib-metadata
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
agenta-cli/poetry.lock | 8 ++++----
agenta-cli/pyproject.toml | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock
index 4f77c1fbc4..f50d84ec44 100644
--- a/agenta-cli/poetry.lock
+++ b/agenta-cli/poetry.lock
@@ -465,13 +465,13 @@ files = [
[[package]]
name = "importlib-metadata"
-version = "7.2.1"
+version = "8.0.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-7.2.1-py3-none-any.whl", hash = "sha256:ffef94b0b66046dd8ea2d619b701fe978d9264d38f3998bc4c27ec3b146a87c8"},
- {file = "importlib_metadata-7.2.1.tar.gz", hash = "sha256:509ecb2ab77071db5137c655e24ceb3eee66e7bbc6574165d0d114d9fc4bbe68"},
+ {file = "importlib_metadata-8.0.0-py3-none-any.whl", hash = "sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f"},
+ {file = "importlib_metadata-8.0.0.tar.gz", hash = "sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812"},
]
[package.dependencies]
@@ -1809,4 +1809,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "95365188fc7c2f1f28f7d354fbcff40bac4c3cbb021a78400f71fa68bf31bce1"
+content-hash = "33f91d2f3c40171c653d29d5633e90028ae06d435c8be937d463fb860f7156a4"
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index 3ba8848ef2..9754d64af8 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -25,7 +25,7 @@ questionary = ">=1.10,<3.0"
ipdb = ">=0.13"
python-dotenv = "^1.0.0"
python-multipart = ">=0.0.6,<0.0.10"
-importlib-metadata = ">=6.7,<8.0"
+importlib-metadata = ">=8.0.0,<9.0"
posthog = "^3.1.0"
pydantic = ">=2"
httpx = ">=0.24, <0.28"
From 747b5be39db10bdb76dcff031590c1baedc5550f Mon Sep 17 00:00:00 2001
From: Abram
Date: Tue, 2 Jul 2024 17:36:32 +0100
Subject: [PATCH 181/268] refactor (tools): set db name in postgres_uri to
backend and celery_worker compose services
---
docker-compose.test.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docker-compose.test.yml b/docker-compose.test.yml
index fbacfd81c6..b4b0770b8d 100644
--- a/docker-compose.test.yml
+++ b/docker-compose.test.yml
@@ -15,7 +15,7 @@ services:
build: ./agenta-backend
container_name: agenta-backend-test
environment:
- - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_test
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=${ENVIRONMENT}
- BARE_DOMAIN_NAME=localhost
@@ -111,7 +111,7 @@ services:
command: >
watchmedo auto-restart --directory=./agenta_backend --pattern=*.py --recursive -- celery -A agenta_backend.main.celery_app worker --concurrency=1 --loglevel=INFO
environment:
- - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_test
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=${ENVIRONMENT}
- CELERY_BROKER_URL=amqp://guest@rabbitmq//
From 3a05bcd7659f9ef1f56aeffe99247a1116bd68ce Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 3 Jul 2024 06:41:56 +0100
Subject: [PATCH 182/268] refactor (backend): move initialization of async
sqlalchemy engine to __init__ method
---
.../agenta_backend/models/db_engine.py | 21 +++++++------------
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/db_engine.py b/agenta-backend/agenta_backend/models/db_engine.py
index 361bff503d..f9379a9696 100644
--- a/agenta-backend/agenta_backend/models/db_engine.py
+++ b/agenta-backend/agenta_backend/models/db_engine.py
@@ -95,8 +95,15 @@ class DBEngine:
def __init__(self) -> None:
self.mode = os.environ.get("DATABASE_MODE", "v2")
- self.postgres_uri = os.environ.get("POSTGRES_URI", None)
+ self.postgres_uri = os.environ.get("POSTGRES_URI")
self.mongo_uri = os.environ.get("MONGODB_URI")
+ self.engine = create_async_engine(url=self.postgres_uri) # type: ignore
+ self.async_session_maker = async_sessionmaker(
+ bind=self.engine, class_=AsyncSession, expire_on_commit=False
+ )
+ self.async_session = async_scoped_session(
+ session_factory=self.async_session_maker, scopefunc=current_task
+ )
async def initialize_async_postgres(self):
"""
@@ -106,14 +113,6 @@ async def initialize_async_postgres(self):
if not self.postgres_uri:
raise ValueError("Postgres URI cannot be None.")
- self.engine = create_async_engine(self.postgres_uri)
- self.async_session_maker = async_sessionmaker(
- bind=self.engine, class_=AsyncSession, expire_on_commit=False
- )
- self.async_session = async_scoped_session(
- session_factory=self.async_session_maker, scopefunc=current_task
- )
-
async with self.engine.begin() as conn:
# Drop and create tables if needed
for model in models:
@@ -184,9 +183,5 @@ async def close(self):
await self.engine.dispose()
- self.engine = None
- self.async_session_maker = None
- self.async_session = None
-
db_engine = DBEngine()
From 8dd92a24f83d5184ce1cda5bf613e775b0a3797d Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 3 Jul 2024 06:43:00 +0100
Subject: [PATCH 183/268] minor refactor (tests): initialize db_engine and
ensure that database is dropped before closing pool connection
---
agenta-backend/agenta_backend/tests/conftest.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/tests/conftest.py b/agenta-backend/agenta_backend/tests/conftest.py
index e78d0f2b4c..943fcb6e43 100644
--- a/agenta-backend/agenta_backend/tests/conftest.py
+++ b/agenta-backend/agenta_backend/tests/conftest.py
@@ -16,10 +16,11 @@ def event_loop():
res._close = res.close # type: ignore
# Initialize database and create tables
- res.run_until_complete(DBEngine().init_db())
+ db_engine = DBEngine()
+ res.run_until_complete(db_engine.init_db())
yield res
- res.run_until_complete(DBEngine().close()) # close connections to database
- res.run_until_complete(DBEngine().remove_db()) # drop database
+ res.run_until_complete(db_engine.close()) # close connections to database
+ res.run_until_complete(db_engine.remove_db()) # drop database
res._close() # close event loop # type: ignore
From 31247252b19fb18bfdb1c64ae44550aabd965cf4 Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 3 Jul 2024 06:46:09 +0100
Subject: [PATCH 184/268] minor refactor (tests): ensure that test db is
dropped before closing engine connection
---
agenta-backend/agenta_backend/tests/conftest.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/tests/conftest.py b/agenta-backend/agenta_backend/tests/conftest.py
index 943fcb6e43..c603898e27 100644
--- a/agenta-backend/agenta_backend/tests/conftest.py
+++ b/agenta-backend/agenta_backend/tests/conftest.py
@@ -21,6 +21,6 @@ def event_loop():
yield res
- res.run_until_complete(db_engine.close()) # close connections to database
res.run_until_complete(db_engine.remove_db()) # drop database
+ res.run_until_complete(db_engine.close()) # close connections to database
res._close() # close event loop # type: ignore
From a8c05516b6afb7f017f55a3aa237ca5f295901ee Mon Sep 17 00:00:00 2001
From: Abram
Date: Wed, 3 Jul 2024 07:08:24 +0100
Subject: [PATCH 185/268] minor refactor (tools): revert back to using default
database created
---
docker-compose.test.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docker-compose.test.yml b/docker-compose.test.yml
index b4b0770b8d..fbacfd81c6 100644
--- a/docker-compose.test.yml
+++ b/docker-compose.test.yml
@@ -15,7 +15,7 @@ services:
build: ./agenta-backend
container_name: agenta-backend-test
environment:
- - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_test
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=${ENVIRONMENT}
- BARE_DOMAIN_NAME=localhost
@@ -111,7 +111,7 @@ services:
command: >
watchmedo auto-restart --directory=./agenta_backend --pattern=*.py --recursive -- celery -A agenta_backend.main.celery_app worker --concurrency=1 --loglevel=INFO
environment:
- - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_test
+ - POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432
- REDIS_URL=redis://redis:6379/0
- ENVIRONMENT=${ENVIRONMENT}
- CELERY_BROKER_URL=amqp://guest@rabbitmq//
From b0b86dcf9ee4481ea4d8bfbe335ff1e5adf8bf52 Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Wed, 3 Jul 2024 12:53:33 +0200
Subject: [PATCH 186/268] Fix (AGE-285 and AGE-342) Handle uncaught exceptions
in aggregate_ai_critique() and in evaluate()
---
.../models/api/evaluation_model.py | 1 +
.../services/aggregation_service.py | 47 ++++++++------
.../agenta_backend/tasks/evaluations.py | 64 ++++++++++++-------
3 files changed, 70 insertions(+), 42 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/api/evaluation_model.py b/agenta-backend/agenta_backend/models/api/evaluation_model.py
index 0313022409..dfeac03c7b 100644
--- a/agenta-backend/agenta_backend/models/api/evaluation_model.py
+++ b/agenta-backend/agenta_backend/models/api/evaluation_model.py
@@ -33,6 +33,7 @@ class EvaluationStatusEnum(str, Enum):
EVALUATION_FINISHED = "EVALUATION_FINISHED"
EVALUATION_FINISHED_WITH_ERRORS = "EVALUATION_FINISHED_WITH_ERRORS"
EVALUATION_FAILED = "EVALUATION_FAILED"
+ EVALUATION_AGGREGATION_FAILED = EVALUATION_FAILED #"EVALUATION_AGGREGATION_FAILED" <-- this will break the frontend.
class EvaluationScenarioStatusEnum(str, Enum):
diff --git a/agenta-backend/agenta_backend/services/aggregation_service.py b/agenta-backend/agenta_backend/services/aggregation_service.py
index c4b019b0d9..0d3d2fa3e5 100644
--- a/agenta-backend/agenta_backend/services/aggregation_service.py
+++ b/agenta-backend/agenta_backend/services/aggregation_service.py
@@ -15,26 +15,33 @@ def aggregate_ai_critique(results: List[Result]) -> Result:
Result: aggregated result
"""
- numeric_scores = []
- for result in results:
- # Extract the first number found in the result value
- match = re.search(r"\d+", result.value)
- if match:
- try:
- score = int(match.group())
- numeric_scores.append(score)
- except ValueError:
- # Ignore if the extracted value is not an integer
- continue
-
- # Calculate the average of numeric scores if any are present
- average_value = (
- sum(numeric_scores) / len(numeric_scores) if numeric_scores else None
- )
- return Result(
- type="number",
- value=average_value,
- )
+ try:
+ numeric_scores = []
+ for result in results:
+ # Extract the first number found in the result value
+ match = re.search(r"\d+", result.value)
+ if match:
+ try:
+ score = int(match.group())
+ numeric_scores.append(score)
+ except ValueError:
+ # Ignore if the extracted value is not an integer
+ continue
+
+ # Calculate the average of numeric scores if any are present
+ average_value = (
+ sum(numeric_scores) / len(numeric_scores) if numeric_scores else None
+ )
+ return Result(
+ type="number",
+ value=average_value,
+ )
+ except Exception as exc:
+ return Result(
+ type="error",
+ value=None,
+ error=Error(message="Failed", stacktrace=str(traceback.format_exc())),
+ )
def aggregate_binary(results: List[Result]) -> Result:
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 5a79eb0740..7658475c09 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -320,36 +320,56 @@ def evaluate(
)
self.update_state(state=states.FAILURE)
return
-
- aggregated_results = loop.run_until_complete(
- aggregate_evaluator_results(app, evaluators_aggregated_data)
- )
- loop.run_until_complete(
- update_evaluation_with_aggregated_results(
- new_evaluation_db.id, aggregated_results
+
+ try:
+ aggregated_results = loop.run_until_complete(
+ aggregate_evaluator_results(app, evaluators_aggregated_data)
)
- )
- failed_evaluation_scenarios = loop.run_until_complete(
- check_if_evaluation_contains_failed_evaluation_scenarios(new_evaluation_db.id)
- )
+ loop.run_until_complete(
+ update_evaluation_with_aggregated_results(
+ new_evaluation_db.id, aggregated_results
+ )
+ )
- evaluation_status = Result(
- type="status", value=EvaluationStatusEnum.EVALUATION_FINISHED, error=None
- )
+ failed_evaluation_scenarios = loop.run_until_complete(
+ check_if_evaluation_contains_failed_evaluation_scenarios(new_evaluation_db.id)
+ )
- if failed_evaluation_scenarios:
evaluation_status = Result(
- type="status",
- value=EvaluationStatusEnum.EVALUATION_FINISHED_WITH_ERRORS,
- error=None,
+ type="status", value=EvaluationStatusEnum.EVALUATION_FINISHED, error=None
+ )
+
+ if failed_evaluation_scenarios:
+ evaluation_status = Result(
+ type="status",
+ value=EvaluationStatusEnum.EVALUATION_FINISHED_WITH_ERRORS,
+ error=None,
+ )
+
+ loop.run_until_complete(
+ update_evaluation(
+ evaluation_id=new_evaluation_db.id, updates={"status": evaluation_status}
+ )
)
- loop.run_until_complete(
- update_evaluation(
- evaluation_id=new_evaluation_db.id, updates={"status": evaluation_status}
+ except Exception as e:
+ logger.error(f"An error occurred during evaluation aggregation: {e}")
+ traceback.print_exc()
+ loop.run_until_complete(
+ update_evaluation(
+ evaluation_id,
+ {
+ "status": Result(
+ type="status",
+ value="EVALUATION_AGGREGATION_FAILED",
+ error=Error(message="Evaluation Aggregation Failed", stacktrace=str(e)),
+ )
+ },
+ )
)
- )
+ self.update_state(state=states.FAILURE)
+ return
async def aggregate_evaluator_results(
From 337ba1122e1a0a78559a90f98d9bf2586d36f8a8 Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Wed, 3 Jul 2024 14:21:10 +0200
Subject: [PATCH 187/268] Fix (AGE-381) Evaluations are initially Queued and
only move to Started when the worker actually starts on the task.
---
.../agenta_backend/services/evaluation_service.py | 2 +-
agenta-backend/agenta_backend/tasks/evaluations.py | 13 ++++++++++++-
.../test_variant_evaluators_router.py | 2 +-
3 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/evaluation_service.py b/agenta-backend/agenta_backend/services/evaluation_service.py
index 7afa2ff194..85bc547b32 100644
--- a/agenta-backend/agenta_backend/services/evaluation_service.py
+++ b/agenta-backend/agenta_backend/services/evaluation_service.py
@@ -545,7 +545,7 @@ async def create_new_evaluation(
user=app.user,
testset=testset,
status=Result(
- value=EvaluationStatusEnum.EVALUATION_STARTED, type="status", error=None
+ value=EvaluationStatusEnum.EVALUATION_INITIALIZED, type="status", error=None
),
variant=variant_id,
variant_revision=str(variant_revision.id),
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 5a79eb0740..795033231e 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -94,8 +94,18 @@ def evaluate(
loop = asyncio.get_event_loop()
try:
- # 1. Fetch data from the database
loop.run_until_complete(DBEngine().init_db())
+
+ # 0. Update evaluation status to STARTED
+ loop.run_until_complete(
+ update_evaluation(
+ evaluation_id,
+ { "status": Result(type="status", value="EVALUATION_STARTED") },
+ )
+ )
+ self.update_state(state=states.STARTED)
+
+ # 1. Fetch data from the database
app = loop.run_until_complete(fetch_app_by_id(app_id))
app_variant_db = loop.run_until_complete(fetch_app_variant_by_id(variant_id))
assert (
@@ -351,6 +361,7 @@ def evaluate(
)
)
+ self.update_state(state=states.SUCCESS)
async def aggregate_evaluator_results(
app: AppDB, evaluators_aggregated_data: dict
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
index 9ff8ad60b2..63a92700e7 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
@@ -222,7 +222,7 @@ async def create_evaluation_with_evaluator(evaluator_config_name):
assert response_data["app_id"] == payload["app_id"]
assert (
response_data["status"]["value"]
- == EvaluationStatusEnum.EVALUATION_STARTED.value
+ == EvaluationStatusEnum.EVALUATION_INITIALIZED.value
)
assert response_data is not None
From 33ace668520cee4d5ffb3692a164e66bf2c18e5f Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Wed, 3 Jul 2024 14:43:23 +0200
Subject: [PATCH 188/268] Fix (AGE-380) Improve stacktrace quality in
evaluations. Still not shown in UI.
---
.../services/evaluators_service.py | 29 ++++++++++---------
.../agenta_backend/tasks/evaluations.py | 2 +-
2 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/evaluators_service.py b/agenta-backend/agenta_backend/services/evaluators_service.py
index f3c0d25e54..6b3608b9d9 100644
--- a/agenta-backend/agenta_backend/services/evaluators_service.py
+++ b/agenta-backend/agenta_backend/services/evaluators_service.py
@@ -1,6 +1,7 @@
import json
import logging
import re
+import traceback
from typing import Any, Dict, List, Tuple
import httpx
@@ -79,7 +80,7 @@ def auto_exact_match(
type="error",
value=None,
error=Error(
- message="Error during Auto Exact Match evaluation", stacktrace=str(e)
+ message="Error during Auto Exact Match evaluation", stacktrace=str(traceback.format_exc())
),
)
@@ -103,7 +104,7 @@ def auto_regex_test(
type="error",
value=None,
error=Error(
- message="Error during Auto Regex evaluation", stacktrace=str(e)
+ message="Error during Auto Regex evaluation", stacktrace=str(traceback.format_exc())
),
)
@@ -186,7 +187,7 @@ def auto_webhook_test(
value=None,
error=Error(
message="Error during Auto Webhook evaluation; An HTTP error occurred",
- stacktrace=str(e),
+ stacktrace=str(traceback.format_exc()),
),
)
except Exception as e: # pylint: disable=broad-except
@@ -194,7 +195,7 @@ def auto_webhook_test(
type="error",
value=None,
error=Error(
- message="Error during Auto Webhook evaluation", stacktrace=str(e)
+ message="Error during Auto Webhook evaluation", stacktrace=str(traceback.format_exc())
),
)
@@ -224,7 +225,7 @@ def auto_custom_code_run(
type="error",
value=None,
error=Error(
- message="Error during Auto Custom Code Evaluation", stacktrace=str(e)
+ message="Error during Auto Custom Code Evaluation", stacktrace=str(traceback.format_exc())
),
)
@@ -281,7 +282,7 @@ def auto_ai_critique(
return Result(
type="error",
value=None,
- error=Error(message="Error during Auto AI Critique", stacktrace=str(e)),
+ error=Error(message="Error during Auto AI Critique", stacktrace=str(traceback.format_exc())),
)
@@ -308,7 +309,7 @@ def auto_starts_with(
type="error",
value=None,
error=Error(
- message="Error during Starts With evaluation", stacktrace=str(e)
+ message="Error during Starts With evaluation", stacktrace=str(traceback.format_exc())
),
)
@@ -335,7 +336,7 @@ def auto_ends_with(
return Result(
type="error",
value=None,
- error=Error(message="Error during Ends With evaluation", stacktrace=str(e)),
+ error=Error(message="Error during Ends With evaluation", stacktrace=str(traceback.format_exc())),
)
@@ -361,7 +362,7 @@ def auto_contains(
return Result(
type="error",
value=None,
- error=Error(message="Error during Contains evaluation", stacktrace=str(e)),
+ error=Error(message="Error during Contains evaluation", stacktrace=str(traceback.format_exc())),
)
@@ -391,7 +392,7 @@ def auto_contains_any(
type="error",
value=None,
error=Error(
- message="Error during Contains Any evaluation", stacktrace=str(e)
+ message="Error during Contains Any evaluation", stacktrace=str(traceback.format_exc())
),
)
@@ -422,7 +423,7 @@ def auto_contains_all(
type="error",
value=None,
error=Error(
- message="Error during Contains All evaluation", stacktrace=str(e)
+ message="Error during Contains All evaluation", stacktrace=str(traceback.format_exc())
),
)
@@ -452,7 +453,7 @@ def auto_contains_json(
type="error",
value=None,
error=Error(
- message="Error during Contains JSON evaluation", stacktrace=str(e)
+ message="Error during Contains JSON evaluation", stacktrace=str(traceback.format_exc())
),
)
@@ -511,7 +512,7 @@ def auto_levenshtein_distance(
value=None,
error=Error(
message="Error during Levenshtein threshold evaluation",
- stacktrace=str(e),
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -552,7 +553,7 @@ def auto_similarity_match(
value=None,
error=Error(
message="Error during Auto Similarity Match evaluation",
- stacktrace=str(e),
+ stacktrace=str(traceback.format_exc()),
),
)
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 5a79eb0740..b4cf259211 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -313,7 +313,7 @@ def evaluate(
"status": Result(
type="status",
value="EVALUATION_FAILED",
- error=Error(message="Evaluation Failed", stacktrace=str(e)),
+ error=Error(message="Evaluation Failed !!!", stacktrace=str(traceback.format_exc())),
)
},
)
From 64a47cbd65b2fd7300032067b30afb59cfb291b7 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Wed, 3 Jul 2024 16:13:20 +0100
Subject: [PATCH 189/268] perf(frontend): added new eval status type and
 improved component to handle unknown values
---
.../cellRenderers/cellRenderers.tsx | 60 ++++++++++++-------
.../evaluationResults/EvaluationResults.tsx | 6 +-
agenta-web/src/lib/Types.ts | 1 +
3 files changed, 41 insertions(+), 26 deletions(-)
diff --git a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
index 61a5195dd5..98ca08f23a 100644
--- a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
+++ b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
@@ -156,28 +156,42 @@ export const ResultRenderer = React.memo(
)
export const runningStatuses = [EvaluationStatus.INITIALIZED, EvaluationStatus.STARTED]
-export const statusMapper = (token: GlobalToken) => ({
- [EvaluationStatus.INITIALIZED]: {
- label: "Queued",
- color: token.colorTextSecondary,
- },
- [EvaluationStatus.STARTED]: {
- label: "Running",
- color: token.colorWarning,
- },
- [EvaluationStatus.FINISHED]: {
- label: "Completed",
- color: token.colorSuccess,
- },
- [EvaluationStatus.ERROR]: {
- label: "Failed",
- color: token.colorError,
- },
- [EvaluationStatus.FINISHED_WITH_ERRORS]: {
- label: "Completed with Errors",
- color: token.colorWarning,
- },
-})
+export const statusMapper = (token: GlobalToken) => (status: EvaluationStatus) => {
+ const statusMap = {
+ [EvaluationStatus.INITIALIZED]: {
+ label: "Queued",
+ color: token.colorTextSecondary,
+ },
+ [EvaluationStatus.STARTED]: {
+ label: "Running",
+ color: token.colorWarning,
+ },
+ [EvaluationStatus.FINISHED]: {
+ label: "Completed",
+ color: token.colorSuccess,
+ },
+ [EvaluationStatus.ERROR]: {
+ label: "Failed",
+ color: token.colorError,
+ },
+ [EvaluationStatus.FINISHED_WITH_ERRORS]: {
+ label: "Completed with Errors",
+ color: token.colorWarning,
+ },
+ [EvaluationStatus.AGGREGATION_FAILED]: {
+ label: "Aggregation Failed",
+ color: token.colorError,
+ },
+ }
+
+ return (
+ statusMap[status] || {
+ label: "Unknown",
+ color: token.colorError,
+ }
+ )
+}
+
export const StatusRenderer = React.memo(
(params: ICellRendererParams<_Evaluation>) => {
const classes = useStyles()
@@ -186,7 +200,7 @@ export const StatusRenderer = React.memo(
params.data?.duration || 0,
runningStatuses.includes(params.value),
)
- const {label, color} = statusMapper(token)[params.data?.status.value as EvaluationStatus]
+ const {label, color} = statusMapper(token)(params.data?.status.value as EvaluationStatus)
const errorMsg = params.data?.status.error?.message
return (
diff --git a/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx b/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
index ee243c0eee..96ece51c0a 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
@@ -295,10 +295,10 @@ const EvaluationResults: React.FC = () => {
pinned: "right",
...getFilterParams("text"),
filterValueGetter: (params) =>
- statusMapper(token)[params.data?.status.value as EvaluationStatus].label,
+ statusMapper(token)(params.data?.status.value as EvaluationStatus).label,
cellRenderer: StatusRenderer,
valueGetter: (params) =>
- statusMapper(token)[params.data?.status.value as EvaluationStatus].label,
+ statusMapper(token)(params.data?.status.value as EvaluationStatus).label,
},
{
flex: 1,
@@ -393,7 +393,7 @@ const EvaluationResults: React.FC = () => {
"Avg. Latency": getTypedValue(item.average_latency),
"Total Cost": getTypedValue(item.average_cost),
Created: formatDate24(item.created_at),
- Status: statusMapper(token)[item.status.value as EvaluationStatus].label,
+ Status: statusMapper(token)(item.status.value as EvaluationStatus).label,
})),
colDefs.map((col) => col.headerName!),
)
diff --git a/agenta-web/src/lib/Types.ts b/agenta-web/src/lib/Types.ts
index c742244029..58d5273cef 100644
--- a/agenta-web/src/lib/Types.ts
+++ b/agenta-web/src/lib/Types.ts
@@ -377,6 +377,7 @@ export enum EvaluationStatus {
FINISHED = "EVALUATION_FINISHED",
FINISHED_WITH_ERRORS = "EVALUATION_FINISHED_WITH_ERRORS",
ERROR = "EVALUATION_FAILED",
+ AGGREGATION_FAILED = "EVALUATION_AGGREGATION_FAILED",
}
export enum EvaluationStatusType {
From 784db617be7eb6e50aad38127bee09f4fd76f455 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Wed, 3 Jul 2024 16:16:51 +0100
Subject: [PATCH 190/268] perf(frontend): show error stacktrace in tooltip
---
.../pages/evaluations/cellRenderers/cellRenderers.tsx | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
index 61a5195dd5..3c3a02b3a4 100644
--- a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
+++ b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
@@ -188,6 +188,7 @@ export const StatusRenderer = React.memo(
)
const {label, color} = statusMapper(token)[params.data?.status.value as EvaluationStatus]
const errorMsg = params.data?.status.error?.message
+ const errorStacktrace = params.data?.status.error?.stacktrace
return (
@@ -195,7 +196,7 @@ export const StatusRenderer = React.memo(
{label}
{errorMsg && (
-
+
From 48eb0f7eb7f8c08429147933f097d51f811c38f1 Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Wed, 3 Jul 2024 18:34:10 +0200
Subject: [PATCH 191/268] EVALUATION_AGGREGATION_FAILED now has a different
value than EVALUATION_FAILED
---
agenta-backend/agenta_backend/models/api/evaluation_model.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/models/api/evaluation_model.py b/agenta-backend/agenta_backend/models/api/evaluation_model.py
index dfeac03c7b..3b664283aa 100644
--- a/agenta-backend/agenta_backend/models/api/evaluation_model.py
+++ b/agenta-backend/agenta_backend/models/api/evaluation_model.py
@@ -33,7 +33,7 @@ class EvaluationStatusEnum(str, Enum):
EVALUATION_FINISHED = "EVALUATION_FINISHED"
EVALUATION_FINISHED_WITH_ERRORS = "EVALUATION_FINISHED_WITH_ERRORS"
EVALUATION_FAILED = "EVALUATION_FAILED"
- EVALUATION_AGGREGATION_FAILED = EVALUATION_FAILED #"EVALUATION_AGGREGATION_FAILED" <-- this will break the frontend.
+ EVALUATION_AGGREGATION_FAILED = "EVALUATION_AGGREGATION_FAILED"
class EvaluationScenarioStatusEnum(str, Enum):
From 95b64b1c40e04570d3bdac7d20f9797539b88fb6 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Wed, 3 Jul 2024 17:47:28 +0100
Subject: [PATCH 192/268] fix(backend): run black formatter
---
.../services/evaluators_service.py | 39 +++++++++++++------
.../agenta_backend/tasks/evaluations.py | 17 +++++---
2 files changed, 39 insertions(+), 17 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/evaluators_service.py b/agenta-backend/agenta_backend/services/evaluators_service.py
index 6b3608b9d9..d4f526eafa 100644
--- a/agenta-backend/agenta_backend/services/evaluators_service.py
+++ b/agenta-backend/agenta_backend/services/evaluators_service.py
@@ -80,7 +80,8 @@ def auto_exact_match(
type="error",
value=None,
error=Error(
- message="Error during Auto Exact Match evaluation", stacktrace=str(traceback.format_exc())
+ message="Error during Auto Exact Match evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -104,7 +105,8 @@ def auto_regex_test(
type="error",
value=None,
error=Error(
- message="Error during Auto Regex evaluation", stacktrace=str(traceback.format_exc())
+ message="Error during Auto Regex evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -195,7 +197,8 @@ def auto_webhook_test(
type="error",
value=None,
error=Error(
- message="Error during Auto Webhook evaluation", stacktrace=str(traceback.format_exc())
+ message="Error during Auto Webhook evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -225,7 +228,8 @@ def auto_custom_code_run(
type="error",
value=None,
error=Error(
- message="Error during Auto Custom Code Evaluation", stacktrace=str(traceback.format_exc())
+ message="Error during Auto Custom Code Evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -282,7 +286,10 @@ def auto_ai_critique(
return Result(
type="error",
value=None,
- error=Error(message="Error during Auto AI Critique", stacktrace=str(traceback.format_exc())),
+ error=Error(
+ message="Error during Auto AI Critique",
+ stacktrace=str(traceback.format_exc()),
+ ),
)
@@ -309,7 +316,8 @@ def auto_starts_with(
type="error",
value=None,
error=Error(
- message="Error during Starts With evaluation", stacktrace=str(traceback.format_exc())
+ message="Error during Starts With evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -336,7 +344,10 @@ def auto_ends_with(
return Result(
type="error",
value=None,
- error=Error(message="Error during Ends With evaluation", stacktrace=str(traceback.format_exc())),
+ error=Error(
+ message="Error during Ends With evaluation",
+ stacktrace=str(traceback.format_exc()),
+ ),
)
@@ -362,7 +373,10 @@ def auto_contains(
return Result(
type="error",
value=None,
- error=Error(message="Error during Contains evaluation", stacktrace=str(traceback.format_exc())),
+ error=Error(
+ message="Error during Contains evaluation",
+ stacktrace=str(traceback.format_exc()),
+ ),
)
@@ -392,7 +406,8 @@ def auto_contains_any(
type="error",
value=None,
error=Error(
- message="Error during Contains Any evaluation", stacktrace=str(traceback.format_exc())
+ message="Error during Contains Any evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -423,7 +438,8 @@ def auto_contains_all(
type="error",
value=None,
error=Error(
- message="Error during Contains All evaluation", stacktrace=str(traceback.format_exc())
+ message="Error during Contains All evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -453,7 +469,8 @@ def auto_contains_json(
type="error",
value=None,
error=Error(
- message="Error during Contains JSON evaluation", stacktrace=str(traceback.format_exc())
+ message="Error during Contains JSON evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index b4cf259211..b1fcfd72ff 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -249,12 +249,14 @@ def evaluate(
evaluators_results.append(result_object)
all_correct_answers = [
- CorrectAnswer(
- key=ground_truth_column_name,
- value=data_point[ground_truth_column_name],
+ (
+ CorrectAnswer(
+ key=ground_truth_column_name,
+ value=data_point[ground_truth_column_name],
+ )
+ if ground_truth_column_name in data_point
+ else CorrectAnswer(key=ground_truth_column_name, value="")
)
- if ground_truth_column_name in data_point
- else CorrectAnswer(key=ground_truth_column_name, value="")
for ground_truth_column_name in ground_truth_column_names
]
# 4. We save the result of the eval scenario in the db
@@ -313,7 +315,10 @@ def evaluate(
"status": Result(
type="status",
value="EVALUATION_FAILED",
- error=Error(message="Evaluation Failed !!!", stacktrace=str(traceback.format_exc())),
+ error=Error(
+ message="Evaluation Failed !!!",
+ stacktrace=str(traceback.format_exc()),
+ ),
)
},
)
From 0f1dccc9d951ef8a25670b355332761687f893a5 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Wed, 3 Jul 2024 17:49:39 +0100
Subject: [PATCH 193/268] fix(backend): run black formatter
---
.../agenta_backend/services/llm_apps_service.py | 6 +++---
.../agenta_backend/tasks/evaluations.py | 15 +++++++++------
2 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/llm_apps_service.py b/agenta-backend/agenta_backend/services/llm_apps_service.py
index 322f262de0..348f087efa 100644
--- a/agenta-backend/agenta_backend/services/llm_apps_service.py
+++ b/agenta-backend/agenta_backend/services/llm_apps_service.py
@@ -221,9 +221,9 @@ async def batch_invoke(
"delay_between_batches"
] # Delay between batches (in seconds)
- list_of_app_outputs: List[
- InvokationResult
- ] = [] # Outputs after running all batches
+ list_of_app_outputs: List[InvokationResult] = (
+ []
+ ) # Outputs after running all batches
openapi_parameters = await get_parameters_from_openapi(uri + "/openapi.json")
async def run_batch(start_idx: int):
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 795033231e..d234b55df7 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -100,7 +100,7 @@ def evaluate(
loop.run_until_complete(
update_evaluation(
evaluation_id,
- { "status": Result(type="status", value="EVALUATION_STARTED") },
+ {"status": Result(type="status", value="EVALUATION_STARTED")},
)
)
self.update_state(state=states.STARTED)
@@ -259,12 +259,14 @@ def evaluate(
evaluators_results.append(result_object)
all_correct_answers = [
- CorrectAnswer(
- key=ground_truth_column_name,
- value=data_point[ground_truth_column_name],
+ (
+ CorrectAnswer(
+ key=ground_truth_column_name,
+ value=data_point[ground_truth_column_name],
+ )
+ if ground_truth_column_name in data_point
+ else CorrectAnswer(key=ground_truth_column_name, value="")
)
- if ground_truth_column_name in data_point
- else CorrectAnswer(key=ground_truth_column_name, value="")
for ground_truth_column_name in ground_truth_column_names
]
# 4. We save the result of the eval scenario in the db
@@ -363,6 +365,7 @@ def evaluate(
self.update_state(state=states.SUCCESS)
+
async def aggregate_evaluator_results(
app: AppDB, evaluators_aggregated_data: dict
) -> List[AggregatedResult]:
From dda018ac7216e3c9890cbd2cafdfdac3e0076b7f Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Wed, 3 Jul 2024 17:51:21 +0100
Subject: [PATCH 194/268] fix(backend): run black formatter
---
.../services/llm_apps_service.py | 6 ++---
.../agenta_backend/tasks/evaluations.py | 25 ++++++++++++-------
2 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/llm_apps_service.py b/agenta-backend/agenta_backend/services/llm_apps_service.py
index 322f262de0..348f087efa 100644
--- a/agenta-backend/agenta_backend/services/llm_apps_service.py
+++ b/agenta-backend/agenta_backend/services/llm_apps_service.py
@@ -221,9 +221,9 @@ async def batch_invoke(
"delay_between_batches"
] # Delay between batches (in seconds)
- list_of_app_outputs: List[
- InvokationResult
- ] = [] # Outputs after running all batches
+ list_of_app_outputs: List[InvokationResult] = (
+ []
+ ) # Outputs after running all batches
openapi_parameters = await get_parameters_from_openapi(uri + "/openapi.json")
async def run_batch(start_idx: int):
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 7658475c09..17d3428173 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -249,12 +249,14 @@ def evaluate(
evaluators_results.append(result_object)
all_correct_answers = [
- CorrectAnswer(
- key=ground_truth_column_name,
- value=data_point[ground_truth_column_name],
+ (
+ CorrectAnswer(
+ key=ground_truth_column_name,
+ value=data_point[ground_truth_column_name],
+ )
+ if ground_truth_column_name in data_point
+ else CorrectAnswer(key=ground_truth_column_name, value="")
)
- if ground_truth_column_name in data_point
- else CorrectAnswer(key=ground_truth_column_name, value="")
for ground_truth_column_name in ground_truth_column_names
]
# 4. We save the result of the eval scenario in the db
@@ -320,7 +322,7 @@ def evaluate(
)
self.update_state(state=states.FAILURE)
return
-
+
try:
aggregated_results = loop.run_until_complete(
aggregate_evaluator_results(app, evaluators_aggregated_data)
@@ -333,7 +335,9 @@ def evaluate(
)
failed_evaluation_scenarios = loop.run_until_complete(
- check_if_evaluation_contains_failed_evaluation_scenarios(new_evaluation_db.id)
+ check_if_evaluation_contains_failed_evaluation_scenarios(
+ new_evaluation_db.id
+ )
)
evaluation_status = Result(
@@ -349,7 +353,8 @@ def evaluate(
loop.run_until_complete(
update_evaluation(
- evaluation_id=new_evaluation_db.id, updates={"status": evaluation_status}
+ evaluation_id=new_evaluation_db.id,
+ updates={"status": evaluation_status},
)
)
@@ -363,7 +368,9 @@ def evaluate(
"status": Result(
type="status",
value="EVALUATION_AGGREGATION_FAILED",
- error=Error(message="Evaluation Aggregation Failed", stacktrace=str(e)),
+ error=Error(
+ message="Evaluation Aggregation Failed", stacktrace=str(e)
+ ),
)
},
)
From d1aec70099dce056b2d0f0e9dac3f628659910a5 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Wed, 3 Jul 2024 17:56:23 +0100
Subject: [PATCH 195/268] fix(backend): run black formatter
---
agenta-backend/agenta_backend/services/llm_apps_service.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/llm_apps_service.py b/agenta-backend/agenta_backend/services/llm_apps_service.py
index 348f087efa..322f262de0 100644
--- a/agenta-backend/agenta_backend/services/llm_apps_service.py
+++ b/agenta-backend/agenta_backend/services/llm_apps_service.py
@@ -221,9 +221,9 @@ async def batch_invoke(
"delay_between_batches"
] # Delay between batches (in seconds)
- list_of_app_outputs: List[InvokationResult] = (
- []
- ) # Outputs after running all batches
+ list_of_app_outputs: List[
+ InvokationResult
+ ] = [] # Outputs after running all batches
openapi_parameters = await get_parameters_from_openapi(uri + "/openapi.json")
async def run_batch(start_idx: int):
From f229c863623f9c6f34e251ae96deb82611645e90 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Wed, 3 Jul 2024 17:56:36 +0100
Subject: [PATCH 196/268] fix(backend): run black formatter
---
agenta-backend/agenta_backend/services/llm_apps_service.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/llm_apps_service.py b/agenta-backend/agenta_backend/services/llm_apps_service.py
index 348f087efa..322f262de0 100644
--- a/agenta-backend/agenta_backend/services/llm_apps_service.py
+++ b/agenta-backend/agenta_backend/services/llm_apps_service.py
@@ -221,9 +221,9 @@ async def batch_invoke(
"delay_between_batches"
] # Delay between batches (in seconds)
- list_of_app_outputs: List[InvokationResult] = (
- []
- ) # Outputs after running all batches
+ list_of_app_outputs: List[
+ InvokationResult
+ ] = [] # Outputs after running all batches
openapi_parameters = await get_parameters_from_openapi(uri + "/openapi.json")
async def run_batch(start_idx: int):
From 8b3b08382794d2e6ccd7e2eceae8035ba1b5744c Mon Sep 17 00:00:00 2001
From: aakrem
Date: Wed, 3 Jul 2024 20:40:30 +0200
Subject: [PATCH 197/268] data-migration fixes for human evaluation scenario
(outputs variants ids + both are good votes + both are bad votes)
---
.../migrations/mongo_to_postgres/migration.py | 28 +++++++++++++++----
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
index d2d5958733..7154b382d7 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/migration.py
@@ -1,4 +1,4 @@
-import json
+from bson import ObjectId
import os
import asyncio
from datetime import datetime, timezone
@@ -356,18 +356,36 @@ async def transform_human_evaluation_scenario(scenario):
evaluation_uuid = await get_mapped_uuid(
"human_evaluations", scenario["evaluation"].id
)
- variant_uuid = str(await get_mapped_uuid("app_variants", scenario["vote"]))
+
+ vote_value = scenario.get("vote")
+ if ObjectId.is_valid(vote_value):
+ vote_uuid = str(await get_mapped_uuid("app_variants", vote_value))
+ else:
+ vote_uuid = vote_value
+
scenario_uuid = generate_uuid()
await store_mapping("human_evaluations_scenarios", scenario["_id"], scenario_uuid)
+
+ outputs = []
+ for output in scenario["outputs"]:
+ variant_id = output["variant_id"]
+ variant_uuid = await get_mapped_uuid("app_variants", variant_id)
+ outputs.append(
+ {
+ "variant_id": str(variant_uuid),
+ "variant_output": output["variant_output"],
+ }
+ )
+
return {
"id": scenario_uuid,
"user_id": user_uuid,
"evaluation_id": evaluation_uuid,
"inputs": scenario["inputs"],
- "outputs": scenario["outputs"],
- "vote": variant_uuid,
- "score": scenario.get("score"),
+ "outputs": outputs,
+ "vote": vote_uuid,
+ "score": str(scenario.get("score")),
"correct_answer": scenario.get("correct_answer"),
"created_at": get_datetime(scenario.get("created_at")),
"updated_at": get_datetime(scenario.get("updated_at")),
From a5aade82eaeee59e81ce3e625ceedc17ba7a8ad4 Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Thu, 4 Jul 2024 18:08:49 +0200
Subject: [PATCH 198/268] Fix (AGE-382) Catch None-related issue in
Optional[...] field: EvaluationResult.correct_answers Adds logger.error(e)
for exception in evaluation router
---
agenta-backend/agenta_backend/models/converters.py | 12 ++++++++----
.../agenta_backend/routers/evaluation_router.py | 2 ++
2 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/agenta-backend/agenta_backend/models/converters.py b/agenta-backend/agenta_backend/models/converters.py
index 1f89ffa117..4c66f8e2bf 100644
--- a/agenta-backend/agenta_backend/models/converters.py
+++ b/agenta-backend/agenta_backend/models/converters.py
@@ -254,10 +254,14 @@ def evaluation_scenario_db_to_pydantic(
EvaluationScenarioOutput(**scenario_output.dict())
for scenario_output in evaluation_scenario_db.outputs
],
- correct_answers=[
- CorrectAnswer(**correct_answer.dict())
- for correct_answer in evaluation_scenario_db.correct_answers
- ],
+ correct_answers=(
+ [
+ CorrectAnswer(**correct_answer.dict())
+ for correct_answer in evaluation_scenario_db.correct_answers
+ ]
+ if evaluation_scenario_db.correct_answers is not None
+ else None
+ ),
is_pinned=evaluation_scenario_db.is_pinned or False,
note=evaluation_scenario_db.note or "",
results=evaluation_scenarios_results_to_pydantic(
diff --git a/agenta-backend/agenta_backend/routers/evaluation_router.py b/agenta-backend/agenta_backend/routers/evaluation_router.py
index cb71942398..c820036a3e 100644
--- a/agenta-backend/agenta_backend/routers/evaluation_router.py
+++ b/agenta-backend/agenta_backend/routers/evaluation_router.py
@@ -1,5 +1,6 @@
import secrets
import logging
+import traceback
from typing import Any, List
from fastapi.responses import JSONResponse
@@ -270,6 +271,7 @@ async def fetch_evaluation_scenarios(
return eval_scenarios
except Exception as exc:
+ logger.error(str(traceback.format_exc()))
raise HTTPException(status_code=500, detail=str(exc))
From 59631afa7b7d3822248294a55317d2687542f9f5 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Thu, 4 Jul 2024 21:30:16 +0100
Subject: [PATCH 199/268] improved evaluation status label message
---
.../pages/evaluations/cellRenderers/cellRenderers.tsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
index 98ca08f23a..0cdf5cd7f7 100644
--- a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
+++ b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
@@ -179,7 +179,7 @@ export const statusMapper = (token: GlobalToken) => (status: EvaluationStatus) =
color: token.colorWarning,
},
[EvaluationStatus.AGGREGATION_FAILED]: {
- label: "Aggregation Failed",
+ label: "Result Aggregation Failed",
color: token.colorError,
},
}
From 1dd2e92d46f57b1c9aee601fd3c92de22a1ab4dc Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 5 Jul 2024 09:24:00 +0100
Subject: [PATCH 200/268] fix (backend): remove logic that unpublishes other
variants' environments when a specific variant is deleted
---
agenta-backend/agenta_backend/services/db_manager.py | 7 -------
1 file changed, 7 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 1bdca299ff..5f3cf1864b 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1199,13 +1199,6 @@ async def remove_app_variant_from_db(app_variant_db: AppVariantDB):
app_variant_revisions = await list_app_variant_revisions_by_variant(app_variant_db)
async with db_engine.get_session() as session:
- logger.debug("list_environments_by_variant")
- environments = await list_environments_by_variant(session, app_variant_db)
-
- # Remove the variant from the associated environments
- for environment in environments:
- environment.deployed_app_variant = None
- await session.commit()
# Delete all the revisions associated with the variant
for app_variant_revision in app_variant_revisions:
From 81ea552dc8c826e9149bce63da127a8b732584a2 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 5 Jul 2024 09:25:42 +0100
Subject: [PATCH 201/268] style (backend): format code with black@23.12.0
---
agenta-backend/agenta_backend/services/db_manager.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 5f3cf1864b..92c32d3482 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1199,7 +1199,6 @@ async def remove_app_variant_from_db(app_variant_db: AppVariantDB):
app_variant_revisions = await list_app_variant_revisions_by_variant(app_variant_db)
async with db_engine.get_session() as session:
-
# Delete all the revisions associated with the variant
for app_variant_revision in app_variant_revisions:
await session.delete(app_variant_revision)
From 169b1c07f6844d46982f772a8b08dc85a208d87f Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 5 Jul 2024 11:37:14 +0200
Subject: [PATCH 202/268] adjust db engine and small fixes
---
.../mongo_to_postgres/mongo_db_engine.py | 37 +++++++++++++++----
.../migrations/mongo_to_postgres/utils.py | 10 +++--
2 files changed, 36 insertions(+), 11 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
index 828a9753b2..219451b5e0 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
@@ -1,13 +1,34 @@
import os
from pymongo import MongoClient
+from agenta_backend.utils.common import isCloudEE
-# MongoDB connection
-MONGO_URI = os.environ.get("MONGODB_URI")
-DATABASE_MODE = os.environ.get("DATABASE_MODE")
-mongo_client = MongoClient(MONGO_URI)
-mongo_db_name = f"agenta_{DATABASE_MODE}"
-mongo_db = mongo_client[mongo_db_name]
+MONGO_URI_SRC = os.environ.get("MONGODB_URI")
+MONGO_DATABASE_MODE = os.environ.get("DATABASE_MODE")
+MONGO_DB_NAME_SRC = f"agenta_{MONGO_DATABASE_MODE}"
-def get_mongo_db():
- return mongo_db
+if isCloudEE():
+ MONGO_URI_DEST = os.environ.get("MONGO_URI_DEST", None)
+ MONGO_DEST_DATABASE_MODE = os.environ.get("MONGO_DEST_DATABASE_MODE", None)
+ MONGO_DB_NAME_DEST = (
+ f"agenta_{MONGO_DEST_DATABASE_MODE}" if MONGO_DEST_DATABASE_MODE else None
+ )
+ if not MONGO_URI_DEST and not MONGO_DB_NAME_DEST:
+ mongo_client_dest = MongoClient(MONGO_URI_DEST)
+
+mongo_client_src = MongoClient(MONGO_URI_SRC)
+
+
+def get_mongo_db(mode):
+ if mode.lower() == "src":
+ return mongo_client_src[MONGO_DB_NAME_SRC]
+ elif mode.lower() == "dest":
+ return mongo_client_dest[MONGO_DB_NAME_DEST]
+ else:
+ raise ValueError("Invalid mode. Use 'src' or 'dest'.")
+
+
+mongo_db = get_mongo_db("src")
+
+if isCloudEE():
+ mongo_db_dest = get_mongo_db("dest") if MONGO_DB_NAME_DEST else None
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
index c4f5a0a0c9..3e5d345454 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/utils.py
@@ -13,15 +13,14 @@
from sqlalchemy.future import select
from sqlalchemy.exc import NoResultFound
from agenta_backend.migrations.mongo_to_postgres.db_engine import db_engine
-from sqlalchemy.exc import IntegrityError
+from sqlalchemy.exc import IntegrityError, MultipleResultsFound
from agenta_backend.models.db_models import IDsMappingDB
from agenta_backend.models.base import Base
-from agenta_backend.migrations.mongo_to_postgres.mongo_db_engine import get_mongo_db
+from agenta_backend.migrations.mongo_to_postgres.mongo_db_engine import mongo_db
BATCH_SIZE = 1000
-mongo_db = get_mongo_db()
migration_report = {}
@@ -66,6 +65,11 @@ async def get_mapped_uuid(table_name, mongo_id):
result = await session.execute(stmt)
try:
row = result.one()
+ except MultipleResultsFound:
+ print(
+ f"Multiple mappings found for {table_name} and {mongo_id}. Skipping..."
+ )
+ return None
except NoResultFound:
return None
return row[0]
From 90076bb28a8a489814be03a8a085c99a9071db31 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 5 Jul 2024 11:04:55 +0100
Subject: [PATCH 203/268] minor refactor (backend): remove redundant db_manager
'list_environments_by_variant' function
---
.../agenta_backend/services/db_manager.py | 22 -------------------
1 file changed, 22 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 92c32d3482..f363d51029 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -1596,28 +1596,6 @@ async def fetch_app_variant_revision(app_variant: str, revision_number: int):
return app_variant_revisions
-async def list_environments_by_variant(
- session: AsyncSession,
- app_variant: AppVariantDB,
-):
- """
- Returns a list of environments for a given app variant.
-
- Args:
- session (AsyncSession): the current ongoing session
- app_variant (AppVariantDB): The app variant to retrieve environments for.
-
- Returns:
- List[AppEnvironmentDB]: A list of AppEnvironmentDB objects.
- """
-
- result = await session.execute(
- select(AppEnvironmentDB).filter_by(app_id=app_variant.app.id)
- )
- environments_db = result.scalars().all()
- return environments_db
-
-
async def remove_image(image: ImageDB):
"""
Removes an image from the database.
From 0bfc70be63521ed02e2bdd3393d945cbb9b86a46 Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Fri, 5 Jul 2024 12:40:02 +0200
Subject: [PATCH 204/268] fix(sdk): AGE-272 Propagate func errors up in
@ag.instrument() wrappers
Func errors used to be set as func result. Now they are propagated up by forwarding the exception up (raise e).
---
.../agenta/sdk/decorators/llm_entrypoint.py | 2 ++
agenta-cli/agenta/sdk/decorators/tracing.py | 33 ++++++++++---------
agenta-cli/pyproject.toml | 2 +-
3 files changed, 21 insertions(+), 16 deletions(-)
diff --git a/agenta-cli/agenta/sdk/decorators/llm_entrypoint.py b/agenta-cli/agenta/sdk/decorators/llm_entrypoint.py
index e18ae1bc41..4ee869222b 100644
--- a/agenta-cli/agenta/sdk/decorators/llm_entrypoint.py
+++ b/agenta-cli/agenta/sdk/decorators/llm_entrypoint.py
@@ -83,9 +83,11 @@ async def wrapper(*args, **kwargs) -> Any:
{"config": config_params, "environment": "playground"}
)
+ # Exceptions are all handled inside self.execute_function()
llm_result = await self.execute_function(
func, *args, params=func_params, config_params=config_params
)
+
return llm_result
@functools.wraps(func)
diff --git a/agenta-cli/agenta/sdk/decorators/tracing.py b/agenta-cli/agenta/sdk/decorators/tracing.py
index a158740494..b7571477c8 100644
--- a/agenta-cli/agenta/sdk/decorators/tracing.py
+++ b/agenta-cli/agenta/sdk/decorators/tracing.py
@@ -58,19 +58,20 @@ async def async_wrapper(*args, **kwargs):
try:
result = await func(*args, **kwargs)
self.tracing.update_span_status(span=span, value="OK")
- except Exception as e:
- result = str(e)
- self.tracing.set_span_attribute(
- {"traceback_exception": traceback.format_exc()}
- )
- self.tracing.update_span_status(span=span, value="ERROR")
- finally:
self.tracing.end_span(
outputs=(
{"message": result} if not isinstance(result, dict) else result
)
)
- return result
+ return result
+
+ except Exception as e:
+ self.tracing.set_span_attribute(
+ {"traceback_exception": traceback.format_exc()}
+ )
+ self.tracing.update_span_status(span=span, value="ERROR")
+ self.tracing.end_span(outputs={})
+ raise e
@wraps(func)
def sync_wrapper(*args, **kwargs):
@@ -89,17 +90,19 @@ def sync_wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
self.tracing.update_span_status(span=span, value="OK")
- except Exception as e:
- result = str(e)
- self.tracing.set_span_attribute(
- {"traceback_exception": traceback.format_exc()}
- )
- self.tracing.update_span_status(span=span, value="ERROR")
- finally:
self.tracing.end_span(
outputs=(
{"message": result} if not isinstance(result, dict) else result
)
)
+ return result
+
+ except Exception as e:
+ self.tracing.set_span_attribute(
+ {"traceback_exception": traceback.format_exc()}
+ )
+ self.tracing.update_span_status(span=span, value="ERROR")
+ self.tracing.end_span(outputs={})
+ raise e
return async_wrapper if is_coroutine_function else sync_wrapper
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index 3ba8848ef2..966176c9c1 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.5"
+version = "0.17.6-alpha.1"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
From 32a24862189d2e1ce633402b9adf74ceac5b8010 Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Fri, 5 Jul 2024 13:05:27 +0200
Subject: [PATCH 205/268] fix(sdk): Add func error and stacktrace to result
---
agenta-cli/agenta/sdk/decorators/tracing.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/agenta-cli/agenta/sdk/decorators/tracing.py b/agenta-cli/agenta/sdk/decorators/tracing.py
index b7571477c8..a99e91558c 100644
--- a/agenta-cli/agenta/sdk/decorators/tracing.py
+++ b/agenta-cli/agenta/sdk/decorators/tracing.py
@@ -66,11 +66,15 @@ async def async_wrapper(*args, **kwargs):
return result
except Exception as e:
+ result = {
+ "message": str(e),
+ "stacktrace": traceback.format_exc(),
+ }
self.tracing.set_span_attribute(
{"traceback_exception": traceback.format_exc()}
)
self.tracing.update_span_status(span=span, value="ERROR")
- self.tracing.end_span(outputs={})
+ self.tracing.end_span(outputs=result)
raise e
@wraps(func)
@@ -98,11 +102,15 @@ def sync_wrapper(*args, **kwargs):
return result
except Exception as e:
+ result = {
+ "message": str(e),
+ "stacktrace": traceback.format_exc(),
+ }
self.tracing.set_span_attribute(
{"traceback_exception": traceback.format_exc()}
)
self.tracing.update_span_status(span=span, value="ERROR")
- self.tracing.end_span(outputs={})
+ self.tracing.end_span(outputs=result)
raise e
return async_wrapper if is_coroutine_function else sync_wrapper
From a995950f745b94e711c66bb4578225972461d098 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 5 Jul 2024 13:30:17 +0200
Subject: [PATCH 206/268] enhancements to mongo db engine
---
.../mongo_to_postgres/mongo_db_engine.py | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
index 219451b5e0..b074d9d5a3 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
@@ -2,21 +2,19 @@
from pymongo import MongoClient
from agenta_backend.utils.common import isCloudEE
-MONGO_URI_SRC = os.environ.get("MONGODB_URI")
+MONGO_URI = os.environ.get("MONGODB_URI")
MONGO_DATABASE_MODE = os.environ.get("DATABASE_MODE")
MONGO_DB_NAME_SRC = f"agenta_{MONGO_DATABASE_MODE}"
if isCloudEE():
- MONGO_URI_DEST = os.environ.get("MONGO_URI_DEST", None)
- MONGO_DEST_DATABASE_MODE = os.environ.get("MONGO_DEST_DATABASE_MODE", None)
- MONGO_DB_NAME_DEST = (
- f"agenta_{MONGO_DEST_DATABASE_MODE}" if MONGO_DEST_DATABASE_MODE else None
- )
- if not MONGO_URI_DEST and not MONGO_DB_NAME_DEST:
- mongo_client_dest = MongoClient(MONGO_URI_DEST)
-
-mongo_client_src = MongoClient(MONGO_URI_SRC)
+ MONGO_DB_NAME_DEST = os.environ.get("MONGO_DB_NAME_DEST", None)
+ MONGO_DB_NAME_DEST = f"agenta_{MONGO_DB_NAME_DEST}" if MONGO_DB_NAME_DEST else None
+
+ if MONGO_DB_NAME_DEST:
+ mongo_client_dest = MongoClient(MONGO_URI)
+
+mongo_client_src = MongoClient(MONGO_URI)
def get_mongo_db(mode):
From be92ce3059e9887b7d6fd36f49d8a9f3b0b4e668 Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Fri, 5 Jul 2024 14:03:22 +0200
Subject: [PATCH 207/268] refactor(backend): Review PR comments
Remove redundant celery state updates, update celery configuration, use enum properly.
---
agenta-backend/agenta_backend/celery_config.py | 1 +
agenta-backend/agenta_backend/tasks/evaluations.py | 9 +++++----
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/agenta-backend/agenta_backend/celery_config.py b/agenta-backend/agenta_backend/celery_config.py
index df11dad091..c59e8cffbf 100644
--- a/agenta-backend/agenta_backend/celery_config.py
+++ b/agenta-backend/agenta_backend/celery_config.py
@@ -7,6 +7,7 @@
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_RESULT_SERIALIZER = "json"
CELERY_TIMEZONE = "UTC"
+CELERY_TASK_TRACK_STARTED = True
CELERY_QUEUES = (
Queue(
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index d234b55df7..db183df797 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -100,10 +100,13 @@ def evaluate(
loop.run_until_complete(
update_evaluation(
evaluation_id,
- {"status": Result(type="status", value="EVALUATION_STARTED")},
+ {
+ "status": Result(
+ type="status", value=EvaluationStatusEnum.EVALUATION_STARTED
+ )
+ },
)
)
- self.update_state(state=states.STARTED)
# 1. Fetch data from the database
app = loop.run_until_complete(fetch_app_by_id(app_id))
@@ -363,8 +366,6 @@ def evaluate(
)
)
- self.update_state(state=states.SUCCESS)
-
async def aggregate_evaluator_results(
app: AppDB, evaluators_aggregated_data: dict
From 9ca795505e0b216336e854bac5b78af5f4f268c1 Mon Sep 17 00:00:00 2001
From: Juan Pablo Vega
Date: Fri, 5 Jul 2024 14:22:26 +0200
Subject: [PATCH 208/268] refactor(backend): Fixes comments from PR
Standardizes stacktrace to traceback.format_exc() (in all three files) and message to str(exc) (in aggregation_service.py)
---
.../services/aggregation_service.py | 4 +-
.../services/evaluators_service.py | 46 +++++++++++++------
.../agenta_backend/tasks/evaluations.py | 8 +++-
3 files changed, 40 insertions(+), 18 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/aggregation_service.py b/agenta-backend/agenta_backend/services/aggregation_service.py
index 0d3d2fa3e5..56022b620a 100644
--- a/agenta-backend/agenta_backend/services/aggregation_service.py
+++ b/agenta-backend/agenta_backend/services/aggregation_service.py
@@ -40,7 +40,7 @@ def aggregate_ai_critique(results: List[Result]) -> Result:
return Result(
type="error",
value=None,
- error=Error(message="Failed", stacktrace=str(traceback.format_exc())),
+ error=Error(message=str(exc), stacktrace=str(traceback.format_exc())),
)
@@ -78,7 +78,7 @@ def aggregate_float(results: List[Result]) -> Result:
return Result(
type="error",
value=None,
- error=Error(message="Failed", stacktrace=str(traceback.format_exc())),
+ error=Error(message=str(exc), stacktrace=str(traceback.format_exc())),
)
diff --git a/agenta-backend/agenta_backend/services/evaluators_service.py b/agenta-backend/agenta_backend/services/evaluators_service.py
index f3c0d25e54..d4f526eafa 100644
--- a/agenta-backend/agenta_backend/services/evaluators_service.py
+++ b/agenta-backend/agenta_backend/services/evaluators_service.py
@@ -1,6 +1,7 @@
import json
import logging
import re
+import traceback
from typing import Any, Dict, List, Tuple
import httpx
@@ -79,7 +80,8 @@ def auto_exact_match(
type="error",
value=None,
error=Error(
- message="Error during Auto Exact Match evaluation", stacktrace=str(e)
+ message="Error during Auto Exact Match evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -103,7 +105,8 @@ def auto_regex_test(
type="error",
value=None,
error=Error(
- message="Error during Auto Regex evaluation", stacktrace=str(e)
+ message="Error during Auto Regex evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -186,7 +189,7 @@ def auto_webhook_test(
value=None,
error=Error(
message="Error during Auto Webhook evaluation; An HTTP error occurred",
- stacktrace=str(e),
+ stacktrace=str(traceback.format_exc()),
),
)
except Exception as e: # pylint: disable=broad-except
@@ -194,7 +197,8 @@ def auto_webhook_test(
type="error",
value=None,
error=Error(
- message="Error during Auto Webhook evaluation", stacktrace=str(e)
+ message="Error during Auto Webhook evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -224,7 +228,8 @@ def auto_custom_code_run(
type="error",
value=None,
error=Error(
- message="Error during Auto Custom Code Evaluation", stacktrace=str(e)
+ message="Error during Auto Custom Code Evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -281,7 +286,10 @@ def auto_ai_critique(
return Result(
type="error",
value=None,
- error=Error(message="Error during Auto AI Critique", stacktrace=str(e)),
+ error=Error(
+ message="Error during Auto AI Critique",
+ stacktrace=str(traceback.format_exc()),
+ ),
)
@@ -308,7 +316,8 @@ def auto_starts_with(
type="error",
value=None,
error=Error(
- message="Error during Starts With evaluation", stacktrace=str(e)
+ message="Error during Starts With evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -335,7 +344,10 @@ def auto_ends_with(
return Result(
type="error",
value=None,
- error=Error(message="Error during Ends With evaluation", stacktrace=str(e)),
+ error=Error(
+ message="Error during Ends With evaluation",
+ stacktrace=str(traceback.format_exc()),
+ ),
)
@@ -361,7 +373,10 @@ def auto_contains(
return Result(
type="error",
value=None,
- error=Error(message="Error during Contains evaluation", stacktrace=str(e)),
+ error=Error(
+ message="Error during Contains evaluation",
+ stacktrace=str(traceback.format_exc()),
+ ),
)
@@ -391,7 +406,8 @@ def auto_contains_any(
type="error",
value=None,
error=Error(
- message="Error during Contains Any evaluation", stacktrace=str(e)
+ message="Error during Contains Any evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -422,7 +438,8 @@ def auto_contains_all(
type="error",
value=None,
error=Error(
- message="Error during Contains All evaluation", stacktrace=str(e)
+ message="Error during Contains All evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -452,7 +469,8 @@ def auto_contains_json(
type="error",
value=None,
error=Error(
- message="Error during Contains JSON evaluation", stacktrace=str(e)
+ message="Error during Contains JSON evaluation",
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -511,7 +529,7 @@ def auto_levenshtein_distance(
value=None,
error=Error(
message="Error during Levenshtein threshold evaluation",
- stacktrace=str(e),
+ stacktrace=str(traceback.format_exc()),
),
)
@@ -552,7 +570,7 @@ def auto_similarity_match(
value=None,
error=Error(
message="Error during Auto Similarity Match evaluation",
- stacktrace=str(e),
+ stacktrace=str(traceback.format_exc()),
),
)
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 17d3428173..7f1d28f842 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -315,7 +315,10 @@ def evaluate(
"status": Result(
type="status",
value="EVALUATION_FAILED",
- error=Error(message="Evaluation Failed", stacktrace=str(e)),
+ error=Error(
+ message="Evaluation Failed",
+ stacktrace=str(traceback.format_exc()),
+ ),
)
},
)
@@ -369,7 +372,8 @@ def evaluate(
type="status",
value="EVALUATION_AGGREGATION_FAILED",
error=Error(
- message="Evaluation Aggregation Failed", stacktrace=str(e)
+ message="Evaluation Aggregation Failed",
+ stacktrace=str(traceback.format_exc()),
),
)
},
From da90b93c4661456810784558e3f8a3bdf331d0e1 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 5 Jul 2024 13:27:21 +0100
Subject: [PATCH 209/268] feat (backend): created db_manager
'fetch_corresponding_object_uuid' function
---
.../agenta_backend/services/db_manager.py | 23 +++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index f363d51029..0888785a38 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -63,6 +63,7 @@
)
from agenta_backend.models.db_models import (
TemplateDB,
+ IDsMappingDB,
AppVariantRevisionsDB,
HumanEvaluationVariantDB,
EvaluationScenarioResultDB,
@@ -3094,3 +3095,25 @@ async def check_if_evaluation_contains_failed_evaluation_scenarios(
if not count:
return False
return count > 0
+
+
+async def fetch_corresponding_object_uuid(table_name: str, object_id: str) -> str:
+ """
+ Fetches a corresponding object uuid.
+
+ Args:
+ table_name (str): The table name
+ object_id (str): The object identifier
+
+ Returns:
+ The corresponding object uuid as string.
+ """
+
+ async with db_engine.get_session() as session:
+ result = await session.execute(
+ select(IDsMappingDB)
+ .filter_by(table_name=table_name, objectid=object_id)
+ .options(load_only(IDsMappingDB.uuid)) # type: ignore
+ )
+ object_mapping = result.scalars().first()
+ return str(object_mapping.uuid)
From 450e89932112b57172d13799f2340a9da97acd5f Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Fri, 5 Jul 2024 16:30:20 +0100
Subject: [PATCH 210/268] minor changes
---
.../pages/evaluations/cellRenderers/cellRenderers.tsx | 4 ++--
.../pages/evaluations/evaluationResults/EvaluationResults.tsx | 2 ++
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
index 0e3bc546a5..1ce610bff2 100644
--- a/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
+++ b/agenta-web/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
@@ -195,14 +195,14 @@ export const statusMapper = (token: GlobalToken) => (status: EvaluationStatus) =
},
[EvaluationStatus.AGGREGATION_FAILED]: {
label: "Result Aggregation Failed",
- color: token.colorError,
+ color: token.colorWarning,
},
}
return (
statusMap[status] || {
label: "Unknown",
- color: token.colorError,
+ color: "purple",
}
)
}
diff --git a/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx b/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
index 1ae70b3126..427c743e4b 100644
--- a/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
+++ b/agenta-web/src/components/pages/evaluations/evaluationResults/EvaluationResults.tsx
@@ -500,6 +500,8 @@ const EvaluationResults: React.FC = () => {
return
;(EvaluationStatus.FINISHED === params.data?.status.value ||
EvaluationStatus.FINISHED_WITH_ERRORS ===
+ params.data?.status.value ||
+ EvaluationStatus.AGGREGATION_FAILED ===
params.data?.status.value) &&
router.push(
`/apps/${appId}/evaluations/results/${params.data?.id}`,
From 8b5ecfa130ddd343b9ad46a69ce9b744a1582bf4 Mon Sep 17 00:00:00 2001
From: "allcontributors[bot]"
<46447321+allcontributors[bot]@users.noreply.github.com>
Date: Fri, 5 Jul 2024 15:45:51 +0000
Subject: [PATCH 211/268] docs: update README.md [skip ci]
---
README.md | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/README.md b/README.md
index 2c1483f5cd..69cc8ea1d4 100644
--- a/README.md
+++ b/README.md
@@ -171,9 +171,7 @@ Check out our [Contributing Guide](https://docs.agenta.ai/misc/contributing/gett
## Contributors ✨
-
-[![All Contributors](https://img.shields.io/badge/all_contributors-46-orange.svg?style=flat-square)](#contributors-)
-
+[![All Contributors](https://img.shields.io/badge/all_contributors-47-orange.svg?style=flat-square)](#contributors-)
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
@@ -194,7 +192,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
Pavle Janjusevic 🚇
- Kaosiso Ezealigo 🐛 💻
+ Kaosi Ezealigo 🐛 💻
Alberto Nunes 🐛
Maaz Bin Khawar 💻 👀 🧑🏫
Nehemiah Onyekachukwu Emmanuel 💻 💡 📖
@@ -242,6 +240,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
Youcef Boumar 📖
LucasTrg 💻 🐛
Ashraf Chowdury 🐛 💻
+ jp-agenta 💻 🐛
From 96c478d80565fcd1e763ba047ab2481061cb27e4 Mon Sep 17 00:00:00 2001
From: "allcontributors[bot]"
<46447321+allcontributors[bot]@users.noreply.github.com>
Date: Fri, 5 Jul 2024 15:45:52 +0000
Subject: [PATCH 212/268] docs: update .all-contributorsrc [skip ci]
---
.all-contributorsrc | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/.all-contributorsrc b/.all-contributorsrc
index b8616e3997..5a1ad7781f 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -437,6 +437,16 @@
"bug",
"code"
]
+ },
+ {
+ "login": "jp-agenta",
+ "name": "jp-agenta",
+ "avatar_url": "https://avatars.githubusercontent.com/u/174311389?v=4",
+ "profile": "https://github.com/jp-agenta",
+ "contributions": [
+ "code",
+ "bug"
+ ]
}
],
"contributorsPerLine": 7,
From 3c359427a8a1091eb2ce101258e5a1396f02f810 Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Fri, 5 Jul 2024 17:47:15 +0200
Subject: [PATCH 213/268] Update pyproject.toml
---
agenta-cli/pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index 966176c9c1..80a898978f 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.6-alpha.1"
+version = "0.17.6a1"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
From 2d2ee83141759c5f5caf43b9541f9b67a82998a3 Mon Sep 17 00:00:00 2001
From: Mahmoud Mabrouk
Date: Fri, 5 Jul 2024 18:36:33 +0200
Subject: [PATCH 214/268] Update pyproject.toml
---
agenta-cli/pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index 80a898978f..3ba8848ef2 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.6a1"
+version = "0.17.5"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
From d70c19e0b3d4aec5484fb21974f995c15ad4b424 Mon Sep 17 00:00:00 2001
From: mmabrouk <4510758+mmabrouk@users.noreply.github.com>
Date: Fri, 5 Jul 2024 16:40:22 +0000
Subject: [PATCH 215/268] Bump versions
---
agenta-backend/pyproject.toml | 2 +-
agenta-cli/pyproject.toml | 2 +-
agenta-web/package-lock.json | 4 ++--
agenta-web/package.json | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/pyproject.toml b/agenta-backend/pyproject.toml
index f1adf3fcf3..e8ddd35018 100644
--- a/agenta-backend/pyproject.toml
+++ b/agenta-backend/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta_backend"
-version = "0.17.5"
+version = "0.18.0"
description = ""
authors = ["Mahmoud Mabrouk "]
readme = "README.md"
diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml
index 3ba8848ef2..f11e00502d 100644
--- a/agenta-cli/pyproject.toml
+++ b/agenta-cli/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.17.5"
+version = "0.18.0"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = ["Mahmoud Mabrouk "]
diff --git a/agenta-web/package-lock.json b/agenta-web/package-lock.json
index 76ac9d6c43..441980087d 100644
--- a/agenta-web/package-lock.json
+++ b/agenta-web/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "agenta",
- "version": "0.17.5",
+ "version": "0.18.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "agenta",
- "version": "0.17.5",
+ "version": "0.18.0",
"dependencies": {
"@ant-design/colors": "^7.0.0",
"@ant-design/icons": "^5.3.7",
diff --git a/agenta-web/package.json b/agenta-web/package.json
index 3ad495c93c..8d304bc031 100644
--- a/agenta-web/package.json
+++ b/agenta-web/package.json
@@ -1,6 +1,6 @@
{
"name": "agenta",
- "version": "0.17.5",
+ "version": "0.18.0",
"private": true,
"engines": {
"node": ">=18"
From 233f7ef52cdc9ddd5018f3b57fbb14bbee207183 Mon Sep 17 00:00:00 2001
From: Abram
Date: Fri, 5 Jul 2024 18:25:22 +0100
Subject: [PATCH 216/268] minor refactor (backend): remove options from
'fetch_corresponding_object_uuid' db_manager function
---
agenta-backend/agenta_backend/services/db_manager.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 0888785a38..53b5713eec 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -3111,9 +3111,7 @@ async def fetch_corresponding_object_uuid(table_name: str, object_id: str) -> st
async with db_engine.get_session() as session:
result = await session.execute(
- select(IDsMappingDB)
- .filter_by(table_name=table_name, objectid=object_id)
- .options(load_only(IDsMappingDB.uuid)) # type: ignore
+ select(IDsMappingDB).filter_by(table_name=table_name, objectid=object_id)
)
object_mapping = result.scalars().first()
return str(object_mapping.uuid)
From 46cd3f840ad5c9494669b6d2b6ffc88ab11887e6 Mon Sep 17 00:00:00 2001
From: aakrem
Date: Fri, 5 Jul 2024 20:39:50 +0200
Subject: [PATCH 217/268] rewriting mongo db engine
---
.../mongo_to_postgres/mongo_db_engine.py | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
index b074d9d5a3..04af99a540 100644
--- a/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
+++ b/agenta-backend/agenta_backend/migrations/mongo_to_postgres/mongo_db_engine.py
@@ -3,15 +3,13 @@
from agenta_backend.utils.common import isCloudEE
MONGO_URI = os.environ.get("MONGODB_URI")
-MONGO_DATABASE_MODE = os.environ.get("DATABASE_MODE")
-MONGO_DB_NAME_SRC = f"agenta_{MONGO_DATABASE_MODE}"
+db_src = f"agenta_{os.environ.get('MIGRATION_SRC_MONGO_DB_NAME')}"
if isCloudEE():
- MONGO_DB_NAME_DEST = os.environ.get("MONGO_DB_NAME_DEST", None)
- MONGO_DB_NAME_DEST = f"agenta_{MONGO_DB_NAME_DEST}" if MONGO_DB_NAME_DEST else None
+ db_dest = f"agenta_{os.environ.get('MIGRATION_DEST_MONGO_DB_NAME')}"
- if MONGO_DB_NAME_DEST:
+ if db_dest:
mongo_client_dest = MongoClient(MONGO_URI)
mongo_client_src = MongoClient(MONGO_URI)
@@ -19,9 +17,9 @@
def get_mongo_db(mode):
if mode.lower() == "src":
- return mongo_client_src[MONGO_DB_NAME_SRC]
+ return mongo_client_src[db_src]
elif mode.lower() == "dest":
- return mongo_client_dest[MONGO_DB_NAME_DEST]
+ return mongo_client_dest[db_dest]
else:
raise ValueError("Invalid mode. Use 'src' or 'dest'.")
@@ -29,4 +27,4 @@ def get_mongo_db(mode):
mongo_db = get_mongo_db("src")
if isCloudEE():
- mongo_db_dest = get_mongo_db("dest") if MONGO_DB_NAME_DEST else None
+ mongo_db_dest = get_mongo_db("dest") if db_dest else None
From d55d30fa046377aa55f92ad5fbcc5450daa633e8 Mon Sep 17 00:00:00 2001
From: Abram
Date: Sun, 7 Jul 2024 04:15:01 +0100
Subject: [PATCH 218/268] refactor (backend): update user lookup logic for
backward compatibility in API key migration
---
.../agenta_backend/services/db_manager.py | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index 53b5713eec..aac0f84a04 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -12,7 +12,7 @@
from agenta_backend.models.db_engine import db_engine
from agenta_backend.services.json_importer_helper import get_json
-from sqlalchemy import func
+from sqlalchemy import func, or_
from sqlalchemy.future import select
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
@@ -781,7 +781,18 @@ async def get_user(user_uid: str) -> UserDB:
"""
async with db_engine.get_session() as session:
- result = await session.execute(select(UserDB).filter_by(uid=user_uid))
+ # NOTE: Backward Compatibility
+ # ---------------------------
+ # Previously, the user_id field in the api_keys collection in MongoDB used the
+ # session_id from SuperTokens in Cloud and "0" as the uid in OSS.
+ # During migration, we changed this to use the actual user ID. Therefore, we have two checks:
+ # 1. Check if user_uid is found in the UserDB.uid column.
+ # 2. If not found, check if user_uid is found in the UserDB.id column.
+ result = await session.execute(
+ select(UserDB).where(
+ or_(UserDB.uid == user_uid, UserDB.id == uuid.UUID(user_uid))
+ )
+ )
user = result.scalars().first()
if user is None and isCloudEE():
From fbd6f99cfc1139f2432defffc145bce8004a458a Mon Sep 17 00:00:00 2001
From: Abram
Date: Sun, 7 Jul 2024 05:03:09 +0100
Subject: [PATCH 219/268] refactor (backend): add feature flag for conditional
user UID query in fetch_user function
---
agenta-backend/agenta_backend/services/db_manager.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/db_manager.py b/agenta-backend/agenta_backend/services/db_manager.py
index aac0f84a04..5818a34f52 100644
--- a/agenta-backend/agenta_backend/services/db_manager.py
+++ b/agenta-backend/agenta_backend/services/db_manager.py
@@ -788,11 +788,11 @@ async def get_user(user_uid: str) -> UserDB:
# During migration, we changed this to use the actual user ID. Therefore, we have two checks:
# 1. Check if user_uid is found in the UserDB.uid column.
# 2. If not found, check if user_uid is found in the UserDB.id column.
- result = await session.execute(
- select(UserDB).where(
- or_(UserDB.uid == user_uid, UserDB.id == uuid.UUID(user_uid))
- )
- )
+ conditions = [UserDB.uid == user_uid]
+ if isCloudEE():
+ conditions.append(UserDB.id == uuid.UUID(user_uid))
+
+ result = await session.execute(select(UserDB).where(or_(*conditions)))
user = result.scalars().first()
if user is None and isCloudEE():
From e61da32d7589172ded00878af4618e0c8cc3527b Mon Sep 17 00:00:00 2001
From: Abram
Date: Sun, 7 Jul 2024 05:25:46 +0100
Subject: [PATCH 220/268] minor refactor (backend): set image_id to be nullable
---
agenta-backend/agenta_backend/models/db_models.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/models/db_models.py b/agenta-backend/agenta_backend/models/db_models.py
index 1b1444b503..374d2a80e4 100644
--- a/agenta-backend/agenta_backend/models/db_models.py
+++ b/agenta-backend/agenta_backend/models/db_models.py
@@ -176,7 +176,9 @@ class AppVariantDB(Base):
variant_name = Column(String)
revision = Column(Integer)
image_id = Column(
- UUID(as_uuid=True), ForeignKey("docker_images.id", ondelete="SET NULL")
+ UUID(as_uuid=True),
+ ForeignKey("docker_images.id", ondelete="SET NULL"),
+ nullable=True,
)
user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
From 5801ad3120010b0ffeaadbf4c85e5835f021db09 Mon Sep 17 00:00:00 2001
From: Abram
Date: Sun, 7 Jul 2024 12:57:07 +0100
Subject: [PATCH 221/268] minor refactor (backend): resolve ImportError in
evaluators and aggregation service
---
agenta-backend/agenta_backend/services/aggregation_service.py | 2 +-
agenta-backend/agenta_backend/services/evaluators_service.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/agenta-backend/agenta_backend/services/aggregation_service.py b/agenta-backend/agenta_backend/services/aggregation_service.py
index 56022b620a..9664d5aa3a 100644
--- a/agenta-backend/agenta_backend/services/aggregation_service.py
+++ b/agenta-backend/agenta_backend/services/aggregation_service.py
@@ -2,7 +2,7 @@
import traceback
from typing import List, Optional
-from agenta_backend.models.db_models import InvokationResult, Result, Error
+from agenta_backend.models.shared_models import InvokationResult, Result, Error
def aggregate_ai_critique(results: List[Result]) -> Result:
diff --git a/agenta-backend/agenta_backend/services/evaluators_service.py b/agenta-backend/agenta_backend/services/evaluators_service.py
index a42efd63ec..b5d773e037 100644
--- a/agenta-backend/agenta_backend/services/evaluators_service.py
+++ b/agenta-backend/agenta_backend/services/evaluators_service.py
@@ -1,6 +1,6 @@
+import re
import json
import logging
-import re
import traceback
from typing import Any, Dict
@@ -8,7 +8,7 @@
from openai import OpenAI
from agenta_backend.services.security import sandbox
-from agenta_backend.models.db_models import Error, Result
+from agenta_backend.models.shared_models import Error, Result
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
From 9728818f964eb1c561555e4153da4aecd58bebb6 Mon Sep 17 00:00:00 2001
From: Abram
Date: Sun, 7 Jul 2024 12:59:57 +0100
Subject: [PATCH 222/268] refactor (backend): resolve 'super' object has no
attribute 'coerce' by making use of Pydantic objects in their dict format
---
.../agenta_backend/tasks/evaluations.py | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/agenta-backend/agenta_backend/tasks/evaluations.py b/agenta-backend/agenta_backend/tasks/evaluations.py
index 79cfb9c148..b3feade146 100644
--- a/agenta-backend/agenta_backend/tasks/evaluations.py
+++ b/agenta-backend/agenta_backend/tasks/evaluations.py
@@ -100,7 +100,7 @@ def evaluate(
{
"status": Result(
type="status", value=EvaluationStatusEnum.EVALUATION_STARTED
- )
+ ).model_dump()
},
)
)
@@ -307,9 +307,9 @@ def evaluate(
update_evaluation(
evaluation_id,
{
- "average_latency": average_latency,
- "average_cost": average_cost,
- "total_cost": total_cost,
+ "average_latency": average_latency.model_dump(),
+ "average_cost": average_cost.model_dump(),
+ "total_cost": total_cost.model_dump(),
},
)
)
@@ -328,7 +328,7 @@ def evaluate(
message="Evaluation Failed",
stacktrace=str(traceback.format_exc()),
),
- )
+ ).model_dump()
},
)
)
@@ -337,7 +337,7 @@ def evaluate(
try:
aggregated_results = loop.run_until_complete(
- aggregate_evaluator_results(app, evaluators_aggregated_data)
+ aggregate_evaluator_results(evaluators_aggregated_data)
)
loop.run_until_complete(
@@ -366,7 +366,7 @@ def evaluate(
loop.run_until_complete(
update_evaluation(
evaluation_id=str(new_evaluation_db.id),
- updates={"status": evaluation_status},
+ updates={"status": evaluation_status.model_dump()},
)
)
@@ -393,7 +393,7 @@ def evaluate(
async def aggregate_evaluator_results(
- evaluators_aggregated_data: dict
+ evaluators_aggregated_data: dict,
) -> List[AggregatedResult]:
"""
Aggregate the results of the evaluation evaluator.
From 48de96a3124708d98fbe0eef296fff5f3dfda6b3 Mon Sep 17 00:00:00 2001
From: Abram
Date: Sun, 7 Jul 2024 13:09:42 +0100
Subject: [PATCH 223/268] tests (backend): make use of EVALUATION_INITIALIZED
enum and not EVALUATION_STARTED
---
.../variants_main_router/test_variant_evaluators_router.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
index 8dfa23c326..9836806e83 100644
--- a/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
+++ b/agenta-backend/agenta_backend/tests/variants_main_router/test_variant_evaluators_router.py
@@ -260,7 +260,7 @@ async def create_evaluation_with_evaluator(evaluator_config_name):
assert response_data["app_id"] == payload["app_id"]
assert (
response_data["status"]["value"]
- == EvaluationStatusEnum.EVALUATION_STARTED.value
+ == EvaluationStatusEnum.EVALUATION_INITIALIZED.value
)
assert response_data is not None
From 1455aa59db517e391822733c16192d23587fd1d1 Mon Sep 17 00:00:00 2001
From: Kaosiso Ezealigo
Date: Sun, 7 Jul 2024 23:39:25 +0100
Subject: [PATCH 224/268] fix(frontend): code cleanup
---
.../pages/settings/Secrets/Secrets.tsx | 100 +++++++-----------
1 file changed, 39 insertions(+), 61 deletions(-)
diff --git a/agenta-web/src/components/pages/settings/Secrets/Secrets.tsx b/agenta-web/src/components/pages/settings/Secrets/Secrets.tsx
index 3a4a25e7cb..b28f7dbf68 100644
--- a/agenta-web/src/components/pages/settings/Secrets/Secrets.tsx
+++ b/agenta-web/src/components/pages/settings/Secrets/Secrets.tsx
@@ -4,40 +4,20 @@ import {
removeSingleLlmProviderKey,
getAllProviderLlmKeys,
LlmProvider,
- getApikeys,
} from "@/lib/helpers/llmProviders"
import {Button, Input, Space, Typography, message} from "antd"
import {useState} from "react"
-import {createUseStyles} from "react-jss"
const {Title, Text} = Typography
-const useStyles = createUseStyles({
- title: {
- marginTop: 0,
- },
- container: {
- margin: "0px 0",
- },
- apiContainer: {
- marginBottom: 10,
- },
- input: {
- display: "flex",
- alignItems: "center",
- width: 420,
- },
-})
-
export default function Secrets() {
- const classes = useStyles()
const [llmProviderKeys, setLlmProviderKeys] = useState(getAllProviderLlmKeys())
const [messageAPI, contextHolder] = message.useMessage()
return (
{contextHolder}
-
+
LLM Keys
@@ -49,49 +29,47 @@ export default function Secrets() {