From 9bd4417b0f1fa6bbf31fb97110817f9362db3a13 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Fri, 9 Feb 2024 17:49:06 +0100 Subject: [PATCH] feat: Implement `DeepEvalEvaluator` (#346) * feat: Implement `DeepEvalEvaluator` and `DeepEvalMetrics` * Mock OpenAI API keys * Disambiguate module names * Update labeler workflow * Revert accidental changes to uptrain tests * Fix typo * Standardize namespacing and add API doc generation to CI * Lint * `mypy` fixes * Docs build fix --- .github/labeler.yml | 5 + .github/workflows/deepeval.yml | 60 ++++ README.md | 1 + integrations/deepeval/LICENSE.txt | 73 ++++ integrations/deepeval/README.md | 36 ++ integrations/deepeval/example/example.py | 37 ++ integrations/deepeval/pydoc/config.yml | 30 ++ integrations/deepeval/pyproject.toml | 157 +++++++++ .../evaluators/deepeval/__init__.py | 7 + .../evaluators/deepeval/evaluator.py | 150 ++++++++ .../components/evaluators/deepeval/metrics.py | 254 ++++++++++++++ integrations/deepeval/tests/__init__.py | 0 integrations/deepeval/tests/test_evaluator.py | 324 ++++++++++++++++++ integrations/deepeval/tests/test_metrics.py | 11 + 14 files changed, 1145 insertions(+) create mode 100644 .github/workflows/deepeval.yml create mode 100644 integrations/deepeval/LICENSE.txt create mode 100644 integrations/deepeval/README.md create mode 100644 integrations/deepeval/example/example.py create mode 100644 integrations/deepeval/pydoc/config.yml create mode 100644 integrations/deepeval/pyproject.toml create mode 100644 integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/__init__.py create mode 100644 integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/evaluator.py create mode 100644 integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/metrics.py create mode 100644 integrations/deepeval/tests/__init__.py create mode 100644 integrations/deepeval/tests/test_evaluator.py create mode 100644 integrations/deepeval/tests/test_metrics.py diff --git a/.github/labeler.yml b/.github/labeler.yml index 4d060772c..7d2e556c1 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -99,6 +99,11 @@ integration:weaviate: - any-glob-to-any-file: "integrations/weaviate/**/*" - any-glob-to-any-file: ".github/workflows/weaviate.yml" +integration:deepeval: + - changed-files: + - any-glob-to-any-file: "integrations/deepeval/**/*" + - any-glob-to-any-file: ".github/workflows/deepeval.yml" + # Topics topic:CI: - changed-files: diff --git a/.github/workflows/deepeval.yml b/.github/workflows/deepeval.yml new file mode 100644 index 000000000..e2468fa8c --- /dev/null +++ b/.github/workflows/deepeval.yml @@ -0,0 +1,60 @@ +# This workflow comes from https://github.com/ofek/hatch-mypyc +# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml +name: Test / deepeval + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + paths: + - "integrations/deepeval/**" + - ".github/workflows/deepeval.yml" + +defaults: + run: + working-directory: integrations/deepeval + +concurrency: + group: deepeval-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHONUNBUFFERED: "1" + FORCE_COLOR: "1" + +jobs: + run: + name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.9", "3.10"] + + 
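+      # Note: the unit tests mock the OpenAI API key and the integration tests skip themselves
+      # when OPENAI_API_KEY is not set (see tests/test_evaluator.py), so this workflow does not
+      # require an API key secret.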
steps: + - name: Support longpaths + if: matrix.os == 'windows-latest' + working-directory: . + run: git config --system core.longpaths true + + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Hatch + run: pip install --upgrade hatch + + - name: Lint + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run lint:all + + - name: Generate docs + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run docs + + - name: Run tests + run: hatch run cov diff --git a/README.md b/README.md index 39d669322..13ff60c93 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,7 @@ deepset-haystack | [amazon-bedrock-haystack](integrations/amazon-bedrock/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-bedrock-haystack.svg)](https://pypi.org/project/amazon-bedrock-haystack) | [![Test / amazon_bedrock](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml) | | [chroma-haystack](integrations/chroma/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/chroma-haystack.svg)](https://pypi.org/project/chroma-haystack) | [![Test / chroma](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml) | | [cohere-haystack](integrations/cohere/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/cohere-haystack.svg)](https://pypi.org/project/cohere-haystack) | [![Test / cohere](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml) | +| [deepeval-haystack](integrations/deepeval/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/deepeval-haystack.svg)](https://pypi.org/project/deepeval-haystack) | [![Test / deepeval](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/deepeval.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/deepeval.yml) | | [elasticsearch-haystack](integrations/elasticsearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/elasticsearch-haystack.svg)](https://pypi.org/project/elasticsearch-haystack) | [![Test / elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml) | | [google-ai-haystack](integrations/google_ai/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) | [![Test / google-ai](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml) | | [google-vertex-haystack](integrations/google_vertex/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-vertex-haystack.svg)](https://pypi.org/project/google-vertex-haystack) | [![Test / 
google-vertex](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml) | diff --git a/integrations/deepeval/LICENSE.txt b/integrations/deepeval/LICENSE.txt new file mode 100644 index 000000000..137069b82 --- /dev/null +++ b/integrations/deepeval/LICENSE.txt @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/integrations/deepeval/README.md b/integrations/deepeval/README.md new file mode 100644 index 000000000..bc8be4394 --- /dev/null +++ b/integrations/deepeval/README.md @@ -0,0 +1,36 @@ +# deepeval-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/deepeval-haystack.svg)](https://pypi.org/project/deepeval-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/deepeval-haystack.svg)](https://pypi.org/project/deepeval-haystack) + +--- + +**Table of Contents** + +- [deepeval-haystack](#deepeval-haystack) + - [Installation](#installation) + - [Testing](#testing) + - [Examples](#examples) + - [License](#license) + +## Installation + +```console +pip install deepeval-haystack +``` + +For more information about the deepeval evaluation framework, please refer to their [documentation](https://docs.confident-ai.com/docs/evaluation-introduction). + +## Testing + +```console +hatch run test +``` + +## Examples + +You can find a code example showing how to use the Evaluator under the `example/` folder of this repo. + +## License + +`deepeval-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. diff --git a/integrations/deepeval/example/example.py b/integrations/deepeval/example/example.py new file mode 100644 index 000000000..e1265a739 --- /dev/null +++ b/integrations/deepeval/example/example.py @@ -0,0 +1,37 @@ +# A valid OpenAI API key is required to run this example. + +from haystack import Pipeline +from haystack_integrations.components.evaluators.deepeval import DeepEvalEvaluator, DeepEvalMetric + +QUESTIONS = [ + "Which is the most popular global sport?", + "Who created the Python language?", +] +CONTEXTS = [ + [ + "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact.", + "Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people.", + ], + [ + "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language.", + "Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.", + ], +] +RESPONSES = [ + "Football is the most popular sport with around 4 billion followers worldwide", + "Python language was created by Guido van Rossum.", +] + +pipeline = Pipeline() +evaluator = DeepEvalEvaluator( + metric=DeepEvalMetric.FAITHFULNESS, + metric_params={"model": "gpt-4"}, +) +pipeline.add_component("evaluator", evaluator) + +# Each metric expects a specific set of parameters as input. Refer to the +# DeepEvalMetric class' documentation for more details. 
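+# For the FAITHFULNESS metric used in this example, those parameters are
+# `questions: List[str]`, `contexts: List[List[str]]`, and `responses: List[str]`.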
+results = pipeline.run({"evaluator": {"questions": QUESTIONS, "contexts": CONTEXTS, "responses": RESPONSES}}) + +for output in results["evaluator"]["results"]: + print(output) diff --git a/integrations/deepeval/pydoc/config.yml b/integrations/deepeval/pydoc/config.yml new file mode 100644 index 000000000..9ffaa7b80 --- /dev/null +++ b/integrations/deepeval/pydoc/config.yml @@ -0,0 +1,30 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: + [ + "haystack_integrations.components.evaluators.deepeval.evaluator", + "haystack_integrations.components.evaluators.deepeval.metrics", + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer + excerpt: DeepEval integration for Haystack + category_slug: haystack-integrations + title: DeepEval + slug: integrations-deepeval + order: 1 + markdown: + descriptive_class_title: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_deepeval.md diff --git a/integrations/deepeval/pyproject.toml b/integrations/deepeval/pyproject.toml new file mode 100644 index 000000000..692e15270 --- /dev/null +++ b/integrations/deepeval/pyproject.toml @@ -0,0 +1,157 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "deepeval-haystack" +dynamic = ["version"] +description = 'An integration of the DeepEval LLM evaluation framework with Haystack' +readme = "README.md" +requires-python = ">=3.8" +license = "Apache-2.0" +keywords = [] +authors = [{ name = "deepset GmbH", email = "info@deepset.ai" }] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = ["haystack-ai", "deepeval>=0.20"] + +[project.urls] +Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/deepeval" +Documentation = "https://github.com/deepset-ai/haystack-core-integrations/blob/main/integrations/deepeval/README.md" +Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" + +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + +[tool.hatch.version] +source = "vcs" +tag-pattern = 'integrations\/deepeval-v(?P<version>.*)' + +[tool.hatch.version.raw-options] +root = "../.."
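+# Derive the package version from Git tags of the form integrations/deepeval-vX.Y.Z, resolved from the repository root set above.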
+git_describe_command = 'git describe --tags --match="integrations/deepeval-v[0-9]*"' + +[tool.hatch.envs.default] +dependencies = ["coverage[toml]>=6.5", "pytest", "haystack-pydoc-tools"] +[tool.hatch.envs.default.scripts] +test = "pytest {args:tests}" +test-cov = "coverage run -m pytest {args:tests}" +cov-report = ["- coverage combine", "coverage report"] +cov = ["test-cov", "cov-report"] +docs = ["pydoc-markdown pydoc/config.yml"] + +[[tool.hatch.envs.all.matrix]] +python = ["3.8", "3.9", "3.10", "3.11"] + +[tool.hatch.envs.lint] +detached = true +dependencies = ["black>=23.1.0", "mypy>=1.0.0", "ruff>=0.0.243"] +[tool.hatch.envs.lint.scripts] +typing = "mypy --install-types --non-interactive {args:src/}" +style = ["ruff {args:.}", "black --check --diff {args:.}"] +fmt = ["black {args:.}", "ruff --fix {args:.}", "style"] +all = ["style", "typing"] + +[tool.black] +target-version = ["py37"] +line-length = 120 +skip-string-normalization = true + +[tool.ruff] +target-version = "py37" +line-length = 120 +select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "FBT", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + "RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +ignore = [ + # Allow non-abstract empty methods in abstract base classes + "B027", + # Allow boolean positional values in function calls, like `dict.get(... True)` + "FBT003", + # Ignore checks for possible passwords + "S105", + "S106", + "S107", + # Ignore complexity + "C901", + "PLR0911", + "PLR0912", + "PLR0913", + "PLR0915", + # Misc + "S101", + "TID252", +] +unfixable = [ + # Don't touch unused imports + "F401", +] +extend-exclude = ["tests", "example"] + +[tool.ruff.isort] +known-first-party = ["src"] + +[tool.ruff.flake8-tidy-imports] +ban-relative-imports = "all" + +[tool.ruff.per-file-ignores] +# Tests can use magic values, assertions, and relative imports +"tests/**/*" = ["PLR2004", "S101", "TID252"] + +[tool.coverage.run] +source_pkgs = ["src", "tests"] +branch = true +parallel = true + +[tool.coverage.paths] +deepeval_haystack = [ + "src/haystack_integrations", + "*/deepeval-haystack/src/deepeval_haystack", +] +tests = ["tests"] + +[tool.coverage.report] +exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"] + +[[tool.mypy.overrides]] +module = [ + "haystack.*", + "pytest.*", + "deepeval.*", + "numpy", + "grpc", + "haystack_integrations.*", +] +ignore_missing_imports = true diff --git a/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/__init__.py b/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/__init__.py new file mode 100644 index 000000000..1a6784fb9 --- /dev/null +++ b/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/__init__.py @@ -0,0 +1,7 @@ +from .evaluator import DeepEvalEvaluator +from .metrics import DeepEvalMetric + +__all__ = ( + "DeepEvalEvaluator", + "DeepEvalMetric", +) diff --git a/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/evaluator.py b/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/evaluator.py new file mode 100644 index 000000000..03e226d0e --- /dev/null +++ b/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/evaluator.py @@ -0,0 +1,150 @@ +import json +from typing import Any, Callable, Dict, List, Optional, Union + +from haystack import DeserializationError, component, default_from_dict, default_to_dict + +from deepeval.evaluate import 
TestResult, evaluate +from deepeval.metrics import BaseMetric +from deepeval.test_case import LLMTestCase + +from .metrics import ( + METRIC_DESCRIPTORS, + DeepEvalMetric, + InputConverters, +) + + +@component +class DeepEvalEvaluator: + """ + A component that uses the DeepEval framework to evaluate inputs against a specific metric. + + The supported metrics are defined by :class:`DeepEvalMetric`. The inputs of the component + are metric-dependent. + """ + + _backend_metric: BaseMetric + # Wrapped for easy mocking. + _backend_callable: Callable[[List[LLMTestCase], BaseMetric], List[TestResult]] + + def __init__( + self, + metric: Union[str, DeepEvalMetric], + metric_params: Optional[Dict[str, Any]] = None, + ): + """ + Construct a new DeepEval evaluator. + + :param metric: + The metric to use for evaluation. + :param metric_params: + Parameters to pass to the metric's constructor. + """ + self.metric = metric if isinstance(metric, DeepEvalMetric) else DeepEvalMetric.from_str(metric) + self.metric_params = metric_params + self.descriptor = METRIC_DESCRIPTORS[self.metric] + + self._init_backend() + expected_inputs = self.descriptor.input_parameters + component.set_input_types(self, **expected_inputs) + + @component.output_types(results=List[List[Dict[str, Any]]]) + def run(self, **inputs) -> Dict[str, Any]: + """ + Run the DeepEval evaluator. + + Example: + ```python + pipeline = Pipeline() + evaluator = DeepEvalEvaluator( + metric=DeepEvalMetric.ANSWER_RELEVANCY, + metric_params={"model": "gpt-4"}, + ) + pipeline.add_component("evaluator", evaluator) + + # Each metric expects a specific set of parameters as input. Refer to the + # DeepEvalMetric class' documentation for more details. + output = pipeline.run({"evaluator": { + "questions": ["question"], + "contexts": [["context"]], + "responses": ["response"] + }}) + ``` + + :param inputs: + The inputs to evaluate. These are determined by the + metric being calculated. See :class:`DeepEvalMetric` for more + information. + :returns: + A nested list of metric results. Each input can have one or more + results, depending on the metric. Each result is a dictionary + containing the following keys and values: + * `name` - The name of the metric. + * `score` - The score of the metric. + * `explanation` - An optional explanation of the score. + """ + InputConverters.validate_input_parameters(self.metric, self.descriptor.input_parameters, inputs) + converted_inputs: List[LLMTestCase] = list(self.descriptor.input_converter(**inputs)) # type: ignore + + results = self._backend_callable(converted_inputs, self._backend_metric) + converted_results = [[result.to_dict() for result in self.descriptor.output_converter(x)] for x in results] + + return {"results": converted_results} + + def to_dict(self) -> Dict[str, Any]: + """ + Serialize this component to a dictionary. + """ + + def check_serializable(obj: Any): + try: + json.dumps(obj) + return True + except (TypeError, OverflowError): + return False + + if not check_serializable(self.metric_params): + msg = "DeepEval evaluator cannot serialize the metric parameters" + raise DeserializationError(msg) + + return default_to_dict( + self, + metric=self.metric, + metric_params=self.metric_params, + ) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "DeepEvalEvaluator": + """ + Deserialize a component from a dictionary. + + :param data: + The dictionary to deserialize from.
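+        :returns:
+            The deserialized component.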
+ """ + return default_from_dict(cls, data) + + @staticmethod + def _invoke_deepeval(test_cases: List[LLMTestCase], metric: BaseMetric) -> List[TestResult]: + return evaluate(test_cases, [metric]) + + def _init_backend(self): + """ + Initialize the DeepEval backend. + """ + if self.descriptor.init_parameters is not None: + if self.metric_params is None: + msg = f"DeepEval metric '{self.metric}' expected init parameters but got none" + raise ValueError(msg) + elif not all(k in self.descriptor.init_parameters for k in self.metric_params.keys()): + msg = ( + f"Invalid init parameters for DeepEval metric '{self.metric}'. " + f"Expected: {list(self.descriptor.init_parameters.keys())}" + ) + + raise ValueError(msg) + backend_metric_params = dict(self.metric_params) if self.metric_params is not None else {} + + # This shouldn't matter at all as we aren't asserting the outputs, but just in case... + backend_metric_params["threshold"] = 0.0 + self._backend_metric = self.descriptor.backend(**backend_metric_params) + self._backend_callable = DeepEvalEvaluator._invoke_deepeval diff --git a/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/metrics.py b/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/metrics.py new file mode 100644 index 000000000..5ca6922fc --- /dev/null +++ b/integrations/deepeval/src/haystack_integrations/components/evaluators/deepeval/metrics.py @@ -0,0 +1,254 @@ +import dataclasses +import inspect +from dataclasses import dataclass +from enum import Enum +from functools import partial +from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Type + +from deepeval.evaluate import TestResult +from deepeval.metrics import ( # type: ignore + AnswerRelevancyMetric, # type: ignore + BaseMetric, # type: ignore + ContextualPrecisionMetric, # type: ignore + ContextualRecallMetric, # type: ignore + ContextualRelevancyMetric, # type: ignore + FaithfulnessMetric, # type: ignore +) +from deepeval.test_case import LLMTestCase + + +class DeepEvalMetric(Enum): + """ + Metrics supported by DeepEval. + """ + + #: Answer relevancy. + #: Inputs - `questions: List[str], contexts: List[List[str]], responses: List[str]` + ANSWER_RELEVANCY = "answer_relevancy" + + #: Faithfulness. + #: Inputs - `questions: List[str], contexts: List[List[str]], responses: List[str]` + FAITHFULNESS = "faithfulness" + + #: Contextual precision. + #: Inputs - `questions: List[str], contexts: List[List[str]], responses: List[str], ground_truths: List[str]` + #: The ground truth is the expected response. + CONTEXTUAL_PRECISION = "contextual_precision" + + #: Contextual recall. + #: Inputs - `questions: List[str], contexts: List[List[str]], responses: List[str], ground_truths: List[str]` + #: The ground truth is the expected response. + CONTEXTUAL_RECALL = "contextual_recall" + + #: Contextual relevance. + #: Inputs - `questions: List[str], contexts: List[List[str]], responses: List[str]` + CONTEXTUAL_RELEVANCE = "contextual_relevance" + + def __str__(self): + return self.value + + @classmethod + def from_str(cls, string: str) -> "DeepEvalMetric": + """ + Create a metric type from a string. + + :param string: + The string to convert. + :returns: + The metric. + """ + enum_map = {e.value: e for e in DeepEvalMetric} + metric = enum_map.get(string) + if metric is None: + msg = f"Unknown DeepEval metric '{string}'. 
Supported metrics: {list(enum_map.keys())}" + raise ValueError(msg) + return metric + + +@dataclass(frozen=True) +class MetricResult: + """ + Result of a metric evaluation. + + :param name: + The name of the metric. + :param score: + The score of the metric. + :param explanation: + An optional explanation of the metric. + """ + + name: str + score: float + explanation: Optional[str] = None + + def to_dict(self): + return dataclasses.asdict(self) + + +@dataclass(frozen=True) +class MetricDescriptor: + """ + Descriptor for a metric. + + :param metric: + The metric. + :param backend: + The associated DeepEval metric class. + :param input_parameters: + Parameters accepted by the metric. This is used + to set the input types of the evaluator component. + :param input_converter: + Callable that converts input parameters to the DeepEval input format. + :param output_converter: + Callable that converts the DeepEval output format to our output format. + Accepts a single output parameter and returns a list of results derived from it. + :param init_parameters: + Additional parameters that need to be passed to the metric class during initialization. + """ + + metric: DeepEvalMetric + backend: Type[BaseMetric] + input_parameters: Dict[str, Type] + input_converter: Callable[[Any], Iterable[LLMTestCase]] + output_converter: Callable[[TestResult], List[MetricResult]] + init_parameters: Optional[Mapping[str, Type]] = None + + @classmethod + def new( + cls, + metric: DeepEvalMetric, + backend: Type[BaseMetric], + input_converter: Callable[[Any], Iterable[LLMTestCase]], + output_converter: Optional[Callable[[TestResult], List[MetricResult]]] = None, + *, + init_parameters: Optional[Mapping[str, Type]] = None, + ) -> "MetricDescriptor": + input_converter_signature = inspect.signature(input_converter) + input_parameters = {} + for name, param in input_converter_signature.parameters.items(): + if name in ("cls", "self"): + continue + elif param.kind not in (inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD): + continue + input_parameters[name] = param.annotation + + return cls( + metric=metric, + backend=backend, + input_parameters=input_parameters, + input_converter=input_converter, + output_converter=output_converter if output_converter is not None else OutputConverters.default(metric), + init_parameters=init_parameters, + ) + + +class InputConverters: + """ + Converters for input parameters. + + The signature of the converter functions serves as the ground-truth of the + expected input parameters of a given metric. They are also responsible for validating + the input parameters and converting them to the format expected by DeepEval. 
+ """ + + @staticmethod + def _validate_input_elements(**kwargs): + for k, collection in kwargs.items(): + if not isinstance(collection, list): + msg = ( + f"DeepEval evaluator expected input '{k}' to be a collection of type 'list', " + f"got '{type(collection).__name__}' instead" + ) + raise ValueError(msg) + elif not all(isinstance(x, str) for x in collection) and not all(isinstance(x, list) for x in collection): + msg = f"DeepEval evaluator expects inputs to be of type 'str' or 'list' in '{k}'" + raise ValueError(msg) + + same_length = len({len(x) for x in kwargs.values()}) == 1 + if not same_length: + msg = f"Mismatching counts in the following inputs: {({k: len(v) for k, v in kwargs.items()})}" + raise ValueError(msg) + + @staticmethod + def validate_input_parameters(metric: DeepEvalMetric, expected: Dict[str, Any], received: Dict[str, Any]): + for param, _ in expected.items(): + if param not in received: + msg = f"DeepEval evaluator expected input parameter '{param}' for metric '{metric}'" + raise ValueError(msg) + + @staticmethod + def question_context_response( + questions: List[str], contexts: List[List[str]], responses: List[str] + ) -> Iterable[LLMTestCase]: + InputConverters._validate_input_elements(questions=questions, contexts=contexts, responses=responses) + for q, c, r in zip(questions, contexts, responses): # type: ignore + test_case = LLMTestCase(input=q, actual_output=r, retrieval_context=c) + yield test_case + + @staticmethod + def question_context_response_ground_truth( + questions: List[str], contexts: List[List[str]], responses: List[str], ground_truths: List[str] + ) -> Iterable[LLMTestCase]: + InputConverters._validate_input_elements(questions=questions, contexts=contexts, responses=responses) + for q, c, r, gt in zip(questions, contexts, responses, ground_truths): # type: ignore + test_case = LLMTestCase(input=q, actual_output=r, retrieval_context=c, expected_output=gt) + yield test_case + + +class OutputConverters: + """ + Converters for results returned by DeepEval. + + They are responsible for converting the results to our output format. 
+ """ + + @staticmethod + def default( + metric: DeepEvalMetric, + ) -> Callable[[TestResult], List[MetricResult]]: + def inner(output: TestResult, metric: DeepEvalMetric) -> List[MetricResult]: + metric_name = str(metric) + assert len(output.metrics) == 1 + metric_result = output.metrics[0] + out = [MetricResult(name=metric_name, score=metric_result.score, explanation=metric_result.reason)] + if metric_result.score_breakdown is not None: + for k, v in metric_result.score_breakdown.items(): + out.append(MetricResult(name=f"{metric_name}_{k}", score=v)) + return out + + return partial(inner, metric=metric) + + +METRIC_DESCRIPTORS = { + DeepEvalMetric.ANSWER_RELEVANCY: MetricDescriptor.new( + DeepEvalMetric.ANSWER_RELEVANCY, + AnswerRelevancyMetric, + InputConverters.question_context_response, # type: ignore + init_parameters={"model": Optional[str]}, # type: ignore + ), + DeepEvalMetric.FAITHFULNESS: MetricDescriptor.new( + DeepEvalMetric.FAITHFULNESS, + FaithfulnessMetric, + InputConverters.question_context_response, # type: ignore + init_parameters={"model": Optional[str]}, # type: ignore + ), + DeepEvalMetric.CONTEXTUAL_PRECISION: MetricDescriptor.new( + DeepEvalMetric.CONTEXTUAL_PRECISION, + ContextualPrecisionMetric, + InputConverters.question_context_response_ground_truth, # type: ignore + init_parameters={"model": Optional[str]}, # type: ignore + ), + DeepEvalMetric.CONTEXTUAL_RECALL: MetricDescriptor.new( + DeepEvalMetric.CONTEXTUAL_RECALL, + ContextualRecallMetric, + InputConverters.question_context_response_ground_truth, # type: ignore + init_parameters={"model": Optional[str]}, # type: ignore + ), + DeepEvalMetric.CONTEXTUAL_RELEVANCE: MetricDescriptor.new( + DeepEvalMetric.CONTEXTUAL_RELEVANCE, + ContextualRelevancyMetric, + InputConverters.question_context_response, # type: ignore + init_parameters={"model": Optional[str]}, # type: ignore + ), +} diff --git a/integrations/deepeval/tests/__init__.py b/integrations/deepeval/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/integrations/deepeval/tests/test_evaluator.py b/integrations/deepeval/tests/test_evaluator.py new file mode 100644 index 000000000..8534ef687 --- /dev/null +++ b/integrations/deepeval/tests/test_evaluator.py @@ -0,0 +1,324 @@ +import copy +import os +from dataclasses import dataclass +from typing import Dict, Optional +from unittest.mock import patch + +import pytest +from haystack import DeserializationError + +from haystack_integrations.components.evaluators.deepeval import DeepEvalEvaluator, DeepEvalMetric +from deepeval.evaluate import TestResult, BaseMetric + +DEFAULT_QUESTIONS = [ + "Which is the most popular global sport?", + "Who created the Python language?", +] +DEFAULT_CONTEXTS = [ + [ + "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people." + ], + [ + "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects." 
+ ], +] +DEFAULT_RESPONSES = [ + "Football is the most popular sport with around 4 billion followers worldwide", + "Python language was created by Guido van Rossum.", +] +DEFAULT_GROUND_TRUTHS = [ + "Football is the most popular sport with over 4 billion followers worldwide. It's horrible...", + "Python language was created by the seventh son of the seventh son.", +] + + +@dataclass(frozen=True) +class Unserializable: + something: str + + +@dataclass(frozen=True) +class MockResult: + score: float + reason: Optional[str] = None + score_breakdown: Optional[Dict[str, float]] = None + + +# Only returns results for the passed metrics. +class MockBackend: + def __init__(self, metric: DeepEvalMetric) -> None: + self.metric = metric + + def eval(self, test_cases, metric): + assert isinstance(metric, BaseMetric) + + output_map = { + DeepEvalMetric.ANSWER_RELEVANCY: [MockResult(0.5, "1")], + DeepEvalMetric.FAITHFULNESS: [MockResult(0.1, "2")], + DeepEvalMetric.CONTEXTUAL_PRECISION: [MockResult(0.2, "3")], + DeepEvalMetric.CONTEXTUAL_RECALL: [MockResult(35, "4")], + DeepEvalMetric.CONTEXTUAL_RELEVANCE: [MockResult(1.5, "5")], + } + + out = [] + for x in test_cases: + r = TestResult(False, [], x.input, x.actual_output, x.expected_output, x.context, x.retrieval_context) + r.metrics = copy.deepcopy(output_map[self.metric]) + out.append(r) + return out + + +def test_evaluator_metric_init_params(monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") + + eval = DeepEvalEvaluator(DeepEvalMetric.ANSWER_RELEVANCY, metric_params={"model": "gpt-4-32k"}) + assert eval._backend_metric.evaluation_model == "gpt-4-32k" + + with pytest.raises(ValueError, match="Invalid init parameters"): + DeepEvalEvaluator(DeepEvalMetric.FAITHFULNESS, metric_params={"role": "village idiot"}) + + with pytest.raises(ValueError, match="expected init parameters"): + DeepEvalEvaluator(DeepEvalMetric.CONTEXTUAL_RECALL) + + +def test_evaluator_serde(monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") + + init_params = { + "metric": DeepEvalMetric.ANSWER_RELEVANCY, + "metric_params": {"model": "gpt-4-32k"}, + } + eval = DeepEvalEvaluator(**init_params) + serde_data = eval.to_dict() + new_eval = DeepEvalEvaluator.from_dict(serde_data) + + assert eval.metric == new_eval.metric + assert eval.metric_params == new_eval.metric_params + assert type(new_eval._backend_metric) == type(eval._backend_metric) + + with pytest.raises(DeserializationError, match=r"cannot serialize the metric parameters"): + eval.metric_params["model"] = Unserializable("") + eval.to_dict() + + +@pytest.mark.parametrize( + "metric, inputs, params", + [ + ( + DeepEvalMetric.ANSWER_RELEVANCY, + {"questions": [], "contexts": [], "responses": []}, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.FAITHFULNESS, + {"questions": [], "contexts": [], "responses": []}, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_PRECISION, + {"questions": [], "contexts": [], "responses": [], "ground_truths": []}, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_RECALL, + {"questions": [], "contexts": [], "responses": [], "ground_truths": []}, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_RELEVANCE, + {"questions": [], "contexts": [], "responses": []}, + {"model": "gpt-4"}, + ), + ], +) +def test_evaluator_valid_inputs(metric, inputs, params, monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") + + init_params = { + "metric": metric, + "metric_params": params, + } + eval = DeepEvalEvaluator(**init_params) + output = 
eval.run(**inputs) + + +@pytest.mark.parametrize( + "metric, inputs, error_string, params", + [ + ( + DeepEvalMetric.ANSWER_RELEVANCY, + {"questions": [], "contexts": [], "responses": []}, + "expected init parameters but got none", + None, + ), + ( + DeepEvalMetric.ANSWER_RELEVANCY, + {"questions": {}, "contexts": [], "responses": []}, + "to be a collection of type 'list'", + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.ANSWER_RELEVANCY, + {"questions": [], "contexts": [], "responses": []}, + "Invalid init parameters", + {"role": "chatbot"}, + ), + ( + DeepEvalMetric.FAITHFULNESS, + {"questions": [1], "contexts": [2], "responses": [3]}, + "expects inputs to be of type 'str'", + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.FAITHFULNESS, + {"questions": [], "contexts": [[]], "responses": []}, + "Mismatching counts ", + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_RECALL, + {"questions": [], "contexts": [], "responses": []}, + "expected input parameter ", + {"model": "gpt-4"}, + ), + ], +) +def test_evaluator_invalid_inputs(metric, inputs, error_string, params, monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") + + with pytest.raises(ValueError, match=error_string): + init_params = { + "metric": metric, + "metric_params": params, + } + eval = DeepEvalEvaluator(**init_params) + output = eval.run(**inputs) + + +# This test validates the expected outputs of the evaluator. +# Each output is parameterized as a list of tuples, where each tuple is +# (name, score, explanation). The name and explanation are optional. If +# the name is None, then the metric name is used. +@pytest.mark.parametrize( + "metric, inputs, expected_outputs, metric_params", + [ + ( + DeepEvalMetric.ANSWER_RELEVANCY, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + [[(None, 0.5, "1")]] * 2, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.FAITHFULNESS, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + [[(None, 0.1, "2")]] * 2, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_PRECISION, + { + "questions": DEFAULT_QUESTIONS, + "contexts": DEFAULT_CONTEXTS, + "responses": DEFAULT_RESPONSES, + "ground_truths": DEFAULT_GROUND_TRUTHS, + }, + [[(None, 0.2, "3")]] * 2, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_RECALL, + { + "questions": DEFAULT_QUESTIONS, + "contexts": DEFAULT_CONTEXTS, + "responses": DEFAULT_RESPONSES, + "ground_truths": DEFAULT_GROUND_TRUTHS, + }, + [[(None, 35, "4")]] * 2, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_RELEVANCE, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + [[(None, 1.5, "5")]] * 2, + {"model": "gpt-4"}, + ), + ], +) +def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params, monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") + + init_params = { + "metric": metric, + "metric_params": metric_params, + } + eval = DeepEvalEvaluator(**init_params) + eval._backend_callable = lambda testcases, metrics: MockBackend(metric).eval(testcases, metrics) + results = eval.run(**inputs)["results"] + + assert type(results) == type(expected_outputs) + assert len(results) == len(expected_outputs) + + for r, o in zip(results, expected_outputs): + assert len(r) == len(o) + + expected = {(name if name is not None else str(metric), score, exp) for name, score, exp in o} + got = {(x["name"], x["score"], x["explanation"]) for x in r} + assert got == expected + + +# 
This integration test validates the evaluator by running it against the +# OpenAI API. It is parameterized by the metric, the inputs to the evaluator +# and the metric parameters. +@pytest.mark.skipif("OPENAI_API_KEY" not in os.environ, reason="OPENAI_API_KEY not set") +@pytest.mark.parametrize( + "metric, inputs, metric_params", + [ + ( + DeepEvalMetric.ANSWER_RELEVANCY, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.FAITHFULNESS, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_PRECISION, + { + "questions": DEFAULT_QUESTIONS, + "contexts": DEFAULT_CONTEXTS, + "responses": DEFAULT_RESPONSES, + "ground_truths": DEFAULT_GROUND_TRUTHS, + }, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_RECALL, + { + "questions": DEFAULT_QUESTIONS, + "contexts": DEFAULT_CONTEXTS, + "responses": DEFAULT_RESPONSES, + "ground_truths": DEFAULT_GROUND_TRUTHS, + }, + {"model": "gpt-4"}, + ), + ( + DeepEvalMetric.CONTEXTUAL_RELEVANCE, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + {"model": "gpt-4"}, + ), + ], +) +def test_integration_run(metric, inputs, metric_params): + init_params = { + "metric": metric, + "metric_params": metric_params, + } + eval = DeepEvalEvaluator(**init_params) + output = eval.run(**inputs) + + assert type(output) == dict + assert len(output) == 1 + assert "results" in output + assert len(output["results"]) == len(next(iter(inputs.values()))) diff --git a/integrations/deepeval/tests/test_metrics.py b/integrations/deepeval/tests/test_metrics.py new file mode 100644 index 000000000..41df67202 --- /dev/null +++ b/integrations/deepeval/tests/test_metrics.py @@ -0,0 +1,11 @@ +import pytest + +from haystack_integrations.components.evaluators.deepeval import DeepEvalMetric + + +def test_deepeval_metric(): + for e in DeepEvalMetric: + assert e == DeepEvalMetric.from_str(e.value) + + with pytest.raises(ValueError, match="Unknown DeepEval metric"): + DeepEvalMetric.from_str("smugness")
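The example and tests in this patch drive the evaluator through a `Pipeline`, but because `run()`'s input sockets are set dynamically from the metric descriptor, the component can also be invoked directly. A minimal sketch, assuming `deepeval-haystack` is installed and a valid `OPENAI_API_KEY` is exported; the question/context/response strings are illustrative only:

```python
from haystack_integrations.components.evaluators.deepeval import DeepEvalEvaluator, DeepEvalMetric

# ANSWER_RELEVANCY expects `questions`, `contexts` and `responses` as inputs.
evaluator = DeepEvalEvaluator(
    metric=DeepEvalMetric.ANSWER_RELEVANCY,
    metric_params={"model": "gpt-4"},
)
output = evaluator.run(
    questions=["Who created the Python language?"],
    contexts=[["Python was created by Guido van Rossum in the late 1980s."]],
    responses=["Python was created by Guido van Rossum."],
)

# `output["results"]` holds one list per input; each entry is a dict with
# `name`, `score` and an optional `explanation`.
for result in output["results"]:
    print(result)
```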