Skip to content

Commit

Permalink
Merge branch 'Agenta-AI:main' into issue-967/-run-all-functionality-not-working-properly
Browse files Browse the repository at this point in the history
  • Loading branch information
bekossy authored Dec 6, 2023
2 parents f195f7e + 1ab7fc5 commit 8264946
Show file tree
Hide file tree
Showing 11 changed files with 93 additions and 108 deletions.
2 changes: 1 addition & 1 deletion agenta-backend/agenta_backend/routers/app_router.py
Original file line number Diff line number Diff line change
Expand Up @@ -352,7 +352,7 @@ async def create_app_and_variant_from_template(

logger.debug("Step 5: Retrieve template from db")
template_db = await db_manager.get_template(payload.template_id)
repo_name = os.environ.get("AGENTA_TEMPLATE_REPO", "agentaai/lambda_templates")
repo_name = os.environ.get("AGENTA_TEMPLATE_REPO", "agentaai/templates_v2")
image_name = f"{repo_name}:{template_db.name}"

logger.debug(
Expand Down
4 changes: 2 additions & 2 deletions agenta-backend/agenta_backend/services/evaluation_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,7 @@ async def prepare_csvdata_and_create_evaluation_scenario(
Args:
csvdata: A list of dictionaries representing the CSV data.
inputs: A list of strings representing the names of the inputs in the variant.
payload_inputs: A list of strings representing the names of the inputs in the variant.
evaluation_type: The type of evaluation
new_evaluation: The instance of EvaluationDB
user: The owner of the evaluation scenario
Expand All @@ -208,7 +208,7 @@ async def prepare_csvdata_and_create_evaluation_scenario(
await engine.delete(new_evaluation)
msg = f"""
Columns in the test set should match the names of the inputs in the variant.
Inputs names in variant are: {inputs} while
Inputs names in variant are: {[variant_input for variant_input in payload_inputs]} while
columns in test set are: {[col for col in datum.keys() if col != 'correct_answer']}
"""
raise HTTPException(
Expand Down
1 change: 1 addition & 0 deletions agenta-web/cypress.config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ export default defineConfig({
baseUrl: "http://localhost",
defaultCommandTimeout: 30000,
requestTimeout: 10000,
specPattern: ["*/e2e/smoke-tests.cy.ts", "*/e2e/app-navigation.cy.ts"],
},
env: {
baseApiURL: "http://localhost/api",
Expand Down
4 changes: 2 additions & 2 deletions agenta-web/src/components/Evaluations/Evaluations.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ import {
useLoadTestsetsList,
fetchCustomEvaluations,
} from "@/lib/services/api"
import {dynamicComponent, getApikeys, isDemo} from "@/lib/helpers/utils"
import {dynamicComponent, getAgentaApiUrl, getApikeys, isDemo} from "@/lib/helpers/utils"
import {useRouter} from "next/router"
import {Variant, Parameter, GenericObject, SingleCustomEvaluation} from "@/lib/Types"
import {EvaluationType} from "@/lib/enums"
Expand Down Expand Up @@ -357,7 +357,7 @@ export default function Evaluations() {
evaluationTypeSettings.regex_pattern = ""
evaluationTypeSettings.regex_should_match = true
} else if (selectedEvaluationType === EvaluationType.auto_webhook_test) {
evaluationTypeSettings.webhook_url = `${process.env.NEXT_PUBLIC_AGENTA_API_URL}/api/evaluations/webhook_example_fake`
evaluationTypeSettings.webhook_url = `${getAgentaApiUrl()}/api/evaluations/webhook_example_fake`
}

const evaluationTableId = await createNewEvaluation({
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ import {createUseStyles} from "react-jss"
import {formatDate} from "@/lib/helpers/dateTimeHelper"
import {useAppTheme} from "../Layout/ThemeContextProvider"
import {getVotesPercentage} from "@/lib/helpers/evaluate"
import {EvaluationTypeLabels, isDemo} from "@/lib/helpers/utils"
import {EvaluationTypeLabels, getAgentaApiUrl, isDemo} from "@/lib/helpers/utils"

interface VariantVotesData {
number_of_votes: number
Expand Down Expand Up @@ -98,13 +98,11 @@ export default function HumanEvaluationResult() {
}
const fetchEvaluations = async () => {
try {
fetchData(
`${process.env.NEXT_PUBLIC_AGENTA_API_URL}/api/evaluations/?app_id=${app_id}`,
)
fetchData(`${getAgentaApiUrl()}/api/evaluations/?app_id=${app_id}`)
.then((response) => {
const fetchPromises = response.map((item: EvaluationResponseType) => {
return fetchData(
`${process.env.NEXT_PUBLIC_AGENTA_API_URL}/api/evaluations/${item.id}/results/`,
`${getAgentaApiUrl()}/api/evaluations/${item.id}/results/`,
)
.then((results) => {
if (item.evaluation_type === EvaluationType.human_a_b_testing) {
Expand Down
10 changes: 10 additions & 0 deletions agenta-web/src/lib/helpers/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -209,3 +209,13 @@ export const safeParse = (str: string, fallback: any = "") => {
return fallback
}
}

/**
 * Resolves the base URL of the Agenta API.
 *
 * Resolution order:
 * 1. The `NEXT_PUBLIC_AGENTA_API_URL` environment variable, when set to a
 *    non-empty value.
 * 2. In the browser, the current page's origin built from protocol and
 *    hostname (NOTE(review): the port is intentionally-or-not omitted here —
 *    confirm this is fine behind the reverse proxy).
 * 3. Otherwise the raw (falsy) environment value, i.e. `undefined` or `""`.
 *
 * @returns the API base URL, or a falsy value when it cannot be determined
 *          (e.g. during server-side rendering with no env var configured).
 */
export const getAgentaApiUrl = () => {
    const configuredUrl = process.env.NEXT_PUBLIC_AGENTA_API_URL

    if (configuredUrl) {
        return configuredUrl
    }

    // Client-side fallback: derive the URL from the page the app is served on.
    if (typeof window !== "undefined") {
        const {protocol, hostname} = window.location
        return `${protocol}//${hostname}`
    }

    return configuredUrl
}
Loading

0 comments on commit 8264946

Please sign in to comment.