Commit

new fern code
aakrem committed Jan 12, 2024
1 parent 109fbc8 commit c159fff
Showing 23 changed files with 1,232 additions and 787 deletions.
40 changes: 26 additions & 14 deletions agenta-cli/agenta/client/backend/__init__.py
@@ -2,37 +2,43 @@

from .types import (
AddVariantFromBaseAndConfigResponse,
AggregatedResult,
App,
AppVariantOutput,
BaseOutput,
BodyImportTestset,
ContainerTemplatesResponse,
CreateAppOutput,
CreateCustomEvaluation,
CustomEvaluationDetail,
CustomEvaluationNames,
CustomEvaluationOutput,
DeleteEvaluation,
DockerEnvVars,
EnvironmentOutput,
Evaluation,
EvaluationScenario,
EvaluationScenarioInput,
EvaluationScenarioOutput,
EvaluationScenarioScore,
EvaluationScenarioUpdateScore,
EvaluationScenarioResult,
EvaluationStatusEnum,
EvaluationType,
EvaluationTypeSettings,
EvaluationWebhook,
Evaluator,
EvaluatorConfig,
Feedback,
GetConfigReponse,
HttpValidationError,
HumanEvaluation,
HumanEvaluationScenario,
HumanEvaluationScenarioInput,
HumanEvaluationScenarioOutput,
HumanEvaluationScenarioScore,
HumanEvaluationScenarioUpdateScore,
Image,
InviteRequest,
ListApiKeysOutput,
LlmRunRateLimit,
NewTestset,
Organization,
OrganizationOutput,
Result,
SimpleEvaluationOutput,
Span,
Template,
@@ -50,37 +56,43 @@

__all__ = [
"AddVariantFromBaseAndConfigResponse",
"AggregatedResult",
"App",
"AppVariantOutput",
"BaseOutput",
"BodyImportTestset",
"ContainerTemplatesResponse",
"CreateAppOutput",
"CreateCustomEvaluation",
"CustomEvaluationDetail",
"CustomEvaluationNames",
"CustomEvaluationOutput",
"DeleteEvaluation",
"DockerEnvVars",
"EnvironmentOutput",
"Evaluation",
"EvaluationScenario",
"EvaluationScenarioInput",
"EvaluationScenarioOutput",
"EvaluationScenarioScore",
"EvaluationScenarioUpdateScore",
"EvaluationScenarioResult",
"EvaluationStatusEnum",
"EvaluationType",
"EvaluationTypeSettings",
"EvaluationWebhook",
"Evaluator",
"EvaluatorConfig",
"Feedback",
"GetConfigReponse",
"HttpValidationError",
"HumanEvaluation",
"HumanEvaluationScenario",
"HumanEvaluationScenarioInput",
"HumanEvaluationScenarioOutput",
"HumanEvaluationScenarioScore",
"HumanEvaluationScenarioUpdateScore",
"Image",
"InviteRequest",
"ListApiKeysOutput",
"LlmRunRateLimit",
"NewTestset",
"Organization",
"OrganizationOutput",
"Result",
"SimpleEvaluationOutput",
"Span",
"Template",
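
Usage sketch (not part of this commit): the regenerated __init__.py re-exports the Fern-generated types from .types and lists them in __all__, so a consumer imports them directly from agenta.client.backend. The names below are taken from the diff above; the assumption that the package is importable as installed is illustrative only.

    # Illustrative only: these names appear in __all__ in the diff above,
    # and the import path follows the file path agenta/client/backend/__init__.py.
    from agenta.client.backend import Evaluation, EvaluatorConfig, Result

    # Introspect the re-exported names without instantiating them, since their
    # constructor fields live in the .types module that is not shown in this diff.
    for exported in (Evaluation, EvaluatorConfig, Result):
        print(exported.__name__, "->", exported.__module__)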