
Commit

fmt
baskaryan committed Dec 24, 2024
1 parent 197c037 commit dd0621a
Showing 2 changed files with 10 additions and 3 deletions.
4 changes: 3 additions & 1 deletion python/docs/create_api_rst.py
@@ -105,7 +105,9 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
         else (
             "enum"
             if issubclass(type_, Enum)
-            else "Pydantic" if issubclass(type_, BaseModel) else "Regular"
+            else "Pydantic"
+            if issubclass(type_, BaseModel)
+            else "Regular"
         )
     )
     # if hasattr(type_, "__slots__"):
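The hunk above is purely mechanical: the formatter reflows the nested conditional expression that classifies a module member as an enum, a Pydantic model, or a regular class. A minimal, hypothetical restatement of that classification logic, for readability only — the function name and surrounding context are assumptions, not code from create_api_rst.py:

```python
from enum import Enum

from pydantic import BaseModel


def classify(type_: type) -> str:
    # Hypothetical sketch: same decision order as the reformatted expression.
    if issubclass(type_, Enum):
        return "enum"
    if issubclass(type_, BaseModel):
        return "Pydantic"
    return "Regular"
```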
9 changes: 7 additions & 2 deletions python/tests/unit_tests/evaluation/test_runner.py
@@ -18,7 +18,7 @@
 import pytest
 from langchain_core.runnables import chain as as_runnable
 
-from langsmith import Client, aevaluate, evaluate
+from langsmith import Client, EvaluationResult, aevaluate, evaluate
 from langsmith import schemas as ls_schemas
 from langsmith.evaluation._runner import _include_attachments
 from langsmith.evaluation.evaluator import (
@@ -978,6 +978,10 @@ def summary_eval_outputs_reference(outputs, reference_outputs):
     return min([len(x["response"]) for x in outputs])
 
 
+def summary_eval_outputs_reference(evaluation_results):
+    return len(evaluation_results)
+
+
 @pytest.mark.parametrize(
     "evaluator",
     [
@@ -1004,7 +1008,8 @@ def test__normalize_summary_evaluator(evaluator: Callable) -> None:
             inputs={"in": "b" * 12},
         )
     ]
-    assert normalized(runs, examples)["score"] == 12
+    evaluation_results = [EvaluationResult(key="foo", score=1)] * 12
+    assert normalized(runs, examples, evaluation_results)["score"] == 12
 
 
 def summary_eval_kwargs(*, runs, examples):
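Taken together, the test changes exercise a summary evaluator that consumes only the per-row evaluation results, and a normalized summary evaluator that is now called with a third positional argument. A rough usage sketch under that assumption — the `_normalize_summary_evaluator` import path and the empty `runs`/`examples` placeholders are guesses for illustration, not taken from this diff:

```python
from langsmith import EvaluationResult
from langsmith.evaluation._runner import _normalize_summary_evaluator  # assumed import path


def count_results(evaluation_results):
    # Like the new fixture above: the score is just the number of per-row results.
    return len(evaluation_results)


normalized = _normalize_summary_evaluator(count_results)
evaluation_results = [EvaluationResult(key="foo", score=1)] * 12

# The test passes real runs/examples; empty lists stand in here because this
# evaluator ignores them. Per the test's assertion, the normalizer wraps the
# bare number into a result dict with a "score" key.
print(normalized([], [], evaluation_results)["score"])  # expected: 12
```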
