From ed92810708c9b8b455f699efaea1d2021195a614 Mon Sep 17 00:00:00 2001 From: ZanSara Date: Mon, 22 Jan 2024 10:43:06 +0100 Subject: [PATCH 01/47] chore!: Rename model_path to model in the Llama.cpp integration (#243) * rename model_path to model * fix tests * black --- .../generators/llama_cpp/generator.py | 14 ++++++------- .../llama_cpp/tests/test_generator.py | 20 ++++++++----------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/generator.py b/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/generator.py index 8ae482310..619a61cbe 100644 --- a/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/generator.py +++ b/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/generator.py @@ -17,7 +17,7 @@ class LlamaCppGenerator: Usage example: ```python from llama_cpp_haystack import LlamaCppGenerator - generator = LlamaCppGenerator(model_path="zephyr-7b-beta.Q4_0.gguf", n_ctx=2048, n_batch=512) + generator = LlamaCppGenerator(model="zephyr-7b-beta.Q4_0.gguf", n_ctx=2048, n_batch=512) print(generator.run("Who is the best American actor?", generation_kwargs={"max_tokens": 128})) # {'replies': ['John Cusack'], 'meta': [{"object": "text_completion", ...}]} @@ -26,23 +26,23 @@ class LlamaCppGenerator: def __init__( self, - model_path: str, + model: str, n_ctx: Optional[int] = 0, n_batch: Optional[int] = 512, model_kwargs: Optional[Dict[str, Any]] = None, generation_kwargs: Optional[Dict[str, Any]] = None, ): """ - :param model_path: The path of a quantized model for text generation, + :param model: The path of a quantized model for text generation, for example, "zephyr-7b-beta.Q4_0.gguf". - If the model_path is also specified in the `model_kwargs`, this parameter will be ignored. + If the model path is also specified in the `model_kwargs`, this parameter will be ignored. 
:param n_ctx: The number of tokens in the context. When set to 0, the context will be taken from the model. If the n_ctx is also specified in the `model_kwargs`, this parameter will be ignored. :param n_batch: Prompt processing maximum batch size. Defaults to 512. If the n_batch is also specified in the `model_kwargs`, this parameter will be ignored. :param model_kwargs: Dictionary containing keyword arguments used to initialize the LLM for text generation. These keyword arguments provide fine-grained control over the model loading. - In case of duplication, these kwargs override `model_path`, `n_ctx`, and `n_batch` init parameters. + In case of duplication, these kwargs override `model`, `n_ctx`, and `n_batch` init parameters. See Llama.cpp's [documentation](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__init__) for more information on the available kwargs. :param generation_kwargs: A dictionary containing keyword arguments to customize text generation. 
@@ -56,11 +56,11 @@ def __init__( # check if the huggingface_pipeline_kwargs contain the essential parameters # otherwise, populate them with values from init parameters - model_kwargs.setdefault("model_path", model_path) + model_kwargs.setdefault("model_path", model) model_kwargs.setdefault("n_ctx", n_ctx) model_kwargs.setdefault("n_batch", n_batch) - self.model_path = model_path + self.model_path = model self.n_ctx = n_ctx self.n_batch = n_batch self.model_kwargs = model_kwargs diff --git a/integrations/llama_cpp/tests/test_generator.py b/integrations/llama_cpp/tests/test_generator.py index 0b95c03a4..04b8339e5 100644 --- a/integrations/llama_cpp/tests/test_generator.py +++ b/integrations/llama_cpp/tests/test_generator.py @@ -40,14 +40,14 @@ def generator(self, model_path, capsys): download_file(ggml_model_path, str(model_path / filename), capsys) model_path = str(model_path / filename) - generator = LlamaCppGenerator(model_path=model_path, n_ctx=128, n_batch=128) + generator = LlamaCppGenerator(model=model_path, n_ctx=128, n_batch=128) generator.warm_up() return generator @pytest.fixture def generator_mock(self): mock_model = MagicMock() - generator = LlamaCppGenerator(model_path="test_model.gguf", n_ctx=2048, n_batch=512) + generator = LlamaCppGenerator(model="test_model.gguf", n_ctx=2048, n_batch=512) generator.model = mock_model return generator, mock_model @@ -55,7 +55,7 @@ def test_default_init(self): """ Test default initialization parameters. """ - generator = LlamaCppGenerator(model_path="test_model.gguf") + generator = LlamaCppGenerator(model="test_model.gguf") assert generator.model_path == "test_model.gguf" assert generator.n_ctx == 0 @@ -68,7 +68,7 @@ def test_custom_init(self): Test custom initialization parameters. 
""" generator = LlamaCppGenerator( - model_path="test_model.gguf", + model="test_model.gguf", n_ctx=2048, n_batch=512, ) @@ -84,7 +84,7 @@ def test_ignores_model_path_if_specified_in_model_kwargs(self): Test that model_path is ignored if already specified in model_kwargs. """ generator = LlamaCppGenerator( - model_path="test_model.gguf", + model="test_model.gguf", n_ctx=512, n_batch=512, model_kwargs={"model_path": "other_model.gguf"}, @@ -95,25 +95,21 @@ def test_ignores_n_ctx_if_specified_in_model_kwargs(self): """ Test that n_ctx is ignored if already specified in model_kwargs. """ - generator = LlamaCppGenerator( - model_path="test_model.gguf", n_ctx=512, n_batch=512, model_kwargs={"n_ctx": 1024} - ) + generator = LlamaCppGenerator(model="test_model.gguf", n_ctx=512, n_batch=512, model_kwargs={"n_ctx": 1024}) assert generator.model_kwargs["n_ctx"] == 1024 def test_ignores_n_batch_if_specified_in_model_kwargs(self): """ Test that n_batch is ignored if already specified in model_kwargs. """ - generator = LlamaCppGenerator( - model_path="test_model.gguf", n_ctx=512, n_batch=512, model_kwargs={"n_batch": 1024} - ) + generator = LlamaCppGenerator(model="test_model.gguf", n_ctx=512, n_batch=512, model_kwargs={"n_batch": 1024}) assert generator.model_kwargs["n_batch"] == 1024 def test_raises_error_without_warm_up(self): """ Test that the generator raises an error if warm_up() is not called before running. 
""" - generator = LlamaCppGenerator(model_path="test_model.gguf", n_ctx=512, n_batch=512) + generator = LlamaCppGenerator(model="test_model.gguf", n_ctx=512, n_batch=512) with pytest.raises(RuntimeError): generator.run("What is the capital of China?") From 95480262b1acff8a0697eb6832ade71e491ef132 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Mon, 22 Jan 2024 10:52:17 +0100 Subject: [PATCH 02/47] fix issue templates --- ...tegration-proposal copy.md => breaking-change-proposal.md} | 3 +++ .github/ISSUE_TEMPLATE/new-integration-proposal.md | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) rename .github/ISSUE_TEMPLATE/{new-integration-proposal copy.md => breaking-change-proposal.md} (96%) diff --git a/.github/ISSUE_TEMPLATE/new-integration-proposal copy.md b/.github/ISSUE_TEMPLATE/breaking-change-proposal.md similarity index 96% rename from .github/ISSUE_TEMPLATE/new-integration-proposal copy.md rename to .github/ISSUE_TEMPLATE/breaking-change-proposal.md index 8ead69467..71aa2a5e9 100644 --- a/.github/ISSUE_TEMPLATE/new-integration-proposal copy.md +++ b/.github/ISSUE_TEMPLATE/breaking-change-proposal.md @@ -13,8 +13,11 @@ Briefly explain how the change is breaking and why is needed. 
## Checklist +```[tasklist] +### Tasks - [ ] The change is documented with docstrings and was merged in the `main` branch - [ ] Integration tile on https://github.com/deepset-ai/haystack-integrations was updated - [ ] Docs at https://docs.haystack.deepset.ai/ were updated - [ ] Notebooks on https://github.com/deepset-ai/haystack-cookbook were updated (if needed) - [ ] New package version declares the breaking change and package has been released on PyPI +``` \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/new-integration-proposal.md b/.github/ISSUE_TEMPLATE/new-integration-proposal.md index a40388eef..60e88c555 100644 --- a/.github/ISSUE_TEMPLATE/new-integration-proposal.md +++ b/.github/ISSUE_TEMPLATE/new-integration-proposal.md @@ -20,7 +20,8 @@ Also, if there's any new terminology involved, define it here. ## Checklist If the request is accepted, ensure the following checklist is complete before closing this issue. - +```[tasklist] +### Tasks - [ ] The code is documented with docstrings and was merged in the `main` branch - [ ] Docs are published at https://docs.haystack.deepset.ai/ - [ ] There is a Github workflow running the tests for the integration nightly and at every PR @@ -31,3 +32,4 @@ If the request is accepted, ensure the following checklist is complete before cl - [ ] The integration has been listed in the [Inventory section](https://github.com/deepset-ai/haystack-core-integrations#inventory) of this repo README - [ ] There is an example available to demonstrate the feature - [ ] The feature was announced through social media +``` \ No newline at end of file From f8efcfe3126fd4066e708872386c06716f5415a4 Mon Sep 17 00:00:00 2001 From: Vladimir Blagojevic Date: Mon, 22 Jan 2024 10:59:37 +0100 Subject: [PATCH 03/47] chore: Cohere namespace change (#247) * Reorganize cohere integration project layout, adjust pyproject.toml * Update tests * Fix for tests * Remove workspace file * Remove file * Fix lint issues * More linting fixes * Add 
cohere suffix dirs * Remove idea folder --- integrations/cohere/pyproject.toml | 19 ++++++++++--------- .../components/embedders/cohere/__init__.py | 7 +++++++ .../embedders/cohere}/document_embedder.py | 4 ++-- .../embedders/cohere}/text_embedder.py | 4 ++-- .../components/embedders/cohere}/utils.py | 3 ++- .../components/generators/cohere}/__init__.py | 3 +++ .../generators/cohere/chat}/__init__.py | 3 +++ .../generators/cohere}/chat/chat_generator.py | 0 .../generators/cohere}/generator.py | 3 ++- .../tests/test_cohere_chat_generator.py | 13 ++++++------- .../cohere/tests/test_cohere_generators.py | 11 +++++------ .../cohere/tests/test_document_embedder.py | 7 +++---- .../cohere/tests/test_text_embedder.py | 7 +++---- 13 files changed, 48 insertions(+), 36 deletions(-) create mode 100644 integrations/cohere/src/haystack_integrations/components/embedders/cohere/__init__.py rename integrations/cohere/src/{cohere_haystack/embedders => haystack_integrations/components/embedders/cohere}/document_embedder.py (98%) rename integrations/cohere/src/{cohere_haystack/embedders => haystack_integrations/components/embedders/cohere}/text_embedder.py (98%) rename integrations/cohere/src/{cohere_haystack/embedders => haystack_integrations/components/embedders/cohere}/utils.py (99%) rename integrations/cohere/src/{cohere_haystack => haystack_integrations/components/generators/cohere}/__init__.py (61%) rename integrations/cohere/src/{cohere_haystack/embedders => haystack_integrations/components/generators/cohere/chat}/__init__.py (56%) rename integrations/cohere/src/{cohere_haystack => haystack_integrations/components/generators/cohere}/chat/chat_generator.py (100%) rename integrations/cohere/src/{cohere_haystack => haystack_integrations/components/generators/cohere}/generator.py (99%) diff --git a/integrations/cohere/pyproject.toml b/integrations/cohere/pyproject.toml index ae96f114d..42349d9fb 100644 --- a/integrations/cohere/pyproject.toml +++ 
b/integrations/cohere/pyproject.toml @@ -34,6 +34,9 @@ Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/m Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/cohere" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/cohere-v(?P.*)' @@ -70,7 +73,7 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/cohere_haystack tests}" +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -133,26 +136,23 @@ unfixable = [ ] [tool.ruff.isort] -known-first-party = ["cohere_haystack"] +known-first-party = ["src"] [tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports "tests/**/*" = ["PLR2004", "S101", "TID252"] [tool.coverage.run] -source_pkgs = ["cohere_haystack", "tests"] +source_pkgs = ["src", "tests"] branch = true parallel = true -omit = [ - "src/cohere_haystack/__about__.py", -] [tool.coverage.paths] -cohere_haystack = ["src/cohere_haystack", "*/cohere-haystack/src/cohere_haystack"] -tests = ["tests", "*/cohere-haystack/tests"] +cohere_haystack = ["src/haystack_integrations", "*/cohere/src/haystack_integrations"] +tests = ["tests", "*/cohere/tests"] [tool.coverage.report] exclude_lines = [ @@ -165,6 +165,7 @@ exclude_lines = [ module = [ "cohere.*", "haystack.*", + "haystack_integrations.*", "pytest.*", "numpy.*", ] diff --git a/integrations/cohere/src/haystack_integrations/components/embedders/cohere/__init__.py b/integrations/cohere/src/haystack_integrations/components/embedders/cohere/__init__.py new file mode 100644 index 
000000000..73a863a73 --- /dev/null +++ b/integrations/cohere/src/haystack_integrations/components/embedders/cohere/__init__.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from .document_embedder import CohereDocumentEmbedder +from .text_embedder import CohereTextEmbedder + +__all__ = ["CohereDocumentEmbedder", "CohereTextEmbedder"] diff --git a/integrations/cohere/src/cohere_haystack/embedders/document_embedder.py b/integrations/cohere/src/haystack_integrations/components/embedders/cohere/document_embedder.py similarity index 98% rename from integrations/cohere/src/cohere_haystack/embedders/document_embedder.py rename to integrations/cohere/src/haystack_integrations/components/embedders/cohere/document_embedder.py index 151c4f794..69308ad19 100644 --- a/integrations/cohere/src/cohere_haystack/embedders/document_embedder.py +++ b/integrations/cohere/src/haystack_integrations/components/embedders/cohere/document_embedder.py @@ -5,10 +5,10 @@ import os from typing import Any, Dict, List, Optional -from cohere import COHERE_API_URL, AsyncClient, Client from haystack import Document, component, default_to_dict +from haystack_integrations.components.embedders.cohere.utils import get_async_response, get_response -from cohere_haystack.embedders.utils import get_async_response, get_response +from cohere import COHERE_API_URL, AsyncClient, Client @component diff --git a/integrations/cohere/src/cohere_haystack/embedders/text_embedder.py b/integrations/cohere/src/haystack_integrations/components/embedders/cohere/text_embedder.py similarity index 98% rename from integrations/cohere/src/cohere_haystack/embedders/text_embedder.py rename to integrations/cohere/src/haystack_integrations/components/embedders/cohere/text_embedder.py index bfef97dc3..2fa922004 100644 --- a/integrations/cohere/src/cohere_haystack/embedders/text_embedder.py +++ 
b/integrations/cohere/src/haystack_integrations/components/embedders/cohere/text_embedder.py @@ -5,10 +5,10 @@ import os from typing import Any, Dict, List, Optional -from cohere import COHERE_API_URL, AsyncClient, Client from haystack import component, default_to_dict +from haystack_integrations.components.embedders.cohere.utils import get_async_response, get_response -from cohere_haystack.embedders.utils import get_async_response, get_response +from cohere import COHERE_API_URL, AsyncClient, Client @component diff --git a/integrations/cohere/src/cohere_haystack/embedders/utils.py b/integrations/cohere/src/haystack_integrations/components/embedders/cohere/utils.py similarity index 99% rename from integrations/cohere/src/cohere_haystack/embedders/utils.py rename to integrations/cohere/src/haystack_integrations/components/embedders/cohere/utils.py index 1c1049852..7b9c90730 100644 --- a/integrations/cohere/src/cohere_haystack/embedders/utils.py +++ b/integrations/cohere/src/haystack_integrations/components/embedders/cohere/utils.py @@ -3,9 +3,10 @@ # SPDX-License-Identifier: Apache-2.0 from typing import Any, Dict, List, Tuple -from cohere import AsyncClient, Client, CohereError from tqdm import tqdm +from cohere import AsyncClient, Client, CohereError + async def get_async_response(cohere_async_client: AsyncClient, texts: List[str], model_name, input_type, truncate): all_embeddings: List[List[float]] = [] diff --git a/integrations/cohere/src/cohere_haystack/__init__.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/__init__.py similarity index 61% rename from integrations/cohere/src/cohere_haystack/__init__.py rename to integrations/cohere/src/haystack_integrations/components/generators/cohere/__init__.py index e873bc332..c36f982df 100644 --- a/integrations/cohere/src/cohere_haystack/__init__.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/__init__.py @@ -1,3 +1,6 @@ # SPDX-FileCopyrightText: 
2023-present deepset GmbH # # SPDX-License-Identifier: Apache-2.0 +from .generator import CohereGenerator + +__all__ = ["CohereGenerator"] diff --git a/integrations/cohere/src/cohere_haystack/embedders/__init__.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/__init__.py similarity index 56% rename from integrations/cohere/src/cohere_haystack/embedders/__init__.py rename to integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/__init__.py index e873bc332..dc14c9c1c 100644 --- a/integrations/cohere/src/cohere_haystack/embedders/__init__.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/__init__.py @@ -1,3 +1,6 @@ # SPDX-FileCopyrightText: 2023-present deepset GmbH # # SPDX-License-Identifier: Apache-2.0 +from .chat_generator import CohereChatGenerator + +__all__ = ["CohereChatGenerator"] diff --git a/integrations/cohere/src/cohere_haystack/chat/chat_generator.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py similarity index 100% rename from integrations/cohere/src/cohere_haystack/chat/chat_generator.py rename to integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py diff --git a/integrations/cohere/src/cohere_haystack/generator.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py similarity index 99% rename from integrations/cohere/src/cohere_haystack/generator.py rename to integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py index 9917f17ea..7bca3ed9f 100644 --- a/integrations/cohere/src/cohere_haystack/generator.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py @@ -6,9 +6,10 @@ import sys from typing import Any, Callable, Dict, List, Optional, cast +from haystack import DeserializationError, component, default_from_dict, default_to_dict + from cohere 
import COHERE_API_URL, Client from cohere.responses import Generations -from haystack import DeserializationError, component, default_from_dict, default_to_dict logger = logging.getLogger(__name__) diff --git a/integrations/cohere/tests/test_cohere_chat_generator.py b/integrations/cohere/tests/test_cohere_chat_generator.py index e93db51fd..cc360f5c9 100644 --- a/integrations/cohere/tests/test_cohere_chat_generator.py +++ b/integrations/cohere/tests/test_cohere_chat_generator.py @@ -5,8 +5,7 @@ import pytest from haystack.components.generators.utils import default_streaming_callback from haystack.dataclasses import ChatMessage, ChatRole, StreamingChunk - -from cohere_haystack.chat.chat_generator import CohereChatGenerator +from haystack_integrations.components.generators.cohere.chat import CohereChatGenerator pytestmark = pytest.mark.chat_generators @@ -88,7 +87,7 @@ def test_to_dict_default(self): component = CohereChatGenerator(api_key="test-api-key") data = component.to_dict() assert data == { - "type": "cohere_haystack.chat.chat_generator.CohereChatGenerator", + "type": "haystack_integrations.components.generators.cohere.chat.chat_generator.CohereChatGenerator", "init_parameters": { "model": "command", "streaming_callback": None, @@ -108,7 +107,7 @@ def test_to_dict_with_parameters(self): ) data = component.to_dict() assert data == { - "type": "cohere_haystack.chat.chat_generator.CohereChatGenerator", + "type": "haystack_integrations.components.generators.cohere.chat.chat_generator.CohereChatGenerator", "init_parameters": { "model": "command-nightly", "streaming_callback": "haystack.components.generators.utils.default_streaming_callback", @@ -128,7 +127,7 @@ def test_to_dict_with_lambda_streaming_callback(self): ) data = component.to_dict() assert data == { - "type": "cohere_haystack.chat.chat_generator.CohereChatGenerator", + "type": "haystack_integrations.components.generators.cohere.chat.chat_generator.CohereChatGenerator", "init_parameters": { "model": 
"command", "api_base_url": "test-base-url", @@ -141,7 +140,7 @@ def test_to_dict_with_lambda_streaming_callback(self): def test_from_dict(self, monkeypatch): monkeypatch.setenv("COHERE_API_KEY", "fake-api-key") data = { - "type": "cohere_haystack.chat.chat_generator.CohereChatGenerator", + "type": "haystack_integrations.components.generators.cohere.chat.chat_generator.CohereChatGenerator", "init_parameters": { "model": "command", "api_base_url": "test-base-url", @@ -159,7 +158,7 @@ def test_from_dict(self, monkeypatch): def test_from_dict_fail_wo_env_var(self, monkeypatch): monkeypatch.delenv("COHERE_API_KEY", raising=False) data = { - "type": "cohere_haystack.chat.chat_generator.CohereChatGenerator", + "type": "haystack_integrations.components.generators.cohere.chat.chat_generator.CohereChatGenerator", "init_parameters": { "model": "command", "api_base_url": "test-base-url", diff --git a/integrations/cohere/tests/test_cohere_generators.py b/integrations/cohere/tests/test_cohere_generators.py index f22b38843..e2ce10405 100644 --- a/integrations/cohere/tests/test_cohere_generators.py +++ b/integrations/cohere/tests/test_cohere_generators.py @@ -5,8 +5,7 @@ import pytest from cohere import COHERE_API_URL - -from cohere_haystack.generator import CohereGenerator +from haystack_integrations.components.generators.cohere import CohereGenerator pytestmark = pytest.mark.generators @@ -48,7 +47,7 @@ def test_to_dict_default(self): component = CohereGenerator(api_key="test-api-key") data = component.to_dict() assert data == { - "type": "cohere_haystack.generator.CohereGenerator", + "type": "haystack_integrations.components.generators.cohere.generator.CohereGenerator", "init_parameters": { "model": "command", "streaming_callback": None, @@ -67,7 +66,7 @@ def test_to_dict_with_parameters(self): ) data = component.to_dict() assert data == { - "type": "cohere_haystack.generator.CohereGenerator", + "type": 
"haystack_integrations.components.generators.cohere.generator.CohereGenerator", "init_parameters": { "model": "command-light", "max_tokens": 10, @@ -88,7 +87,7 @@ def test_to_dict_with_lambda_streaming_callback(self): ) data = component.to_dict() assert data == { - "type": "cohere_haystack.generator.CohereGenerator", + "type": "haystack_integrations.components.generators.cohere.generator.CohereGenerator", "init_parameters": { "model": "command", "streaming_callback": "tests.test_cohere_generators.", @@ -101,7 +100,7 @@ def test_to_dict_with_lambda_streaming_callback(self): def test_from_dict(self, monkeypatch): monkeypatch.setenv("COHERE_API_KEY", "test-key") data = { - "type": "cohere_haystack.generator.CohereGenerator", + "type": "haystack_integrations.components.generators.cohere.generator.CohereGenerator", "init_parameters": { "model": "command", "max_tokens": 10, diff --git a/integrations/cohere/tests/test_document_embedder.py b/integrations/cohere/tests/test_document_embedder.py index c9770737e..efe8eb36a 100644 --- a/integrations/cohere/tests/test_document_embedder.py +++ b/integrations/cohere/tests/test_document_embedder.py @@ -6,8 +6,7 @@ import pytest from cohere import COHERE_API_URL from haystack import Document - -from cohere_haystack.embedders.document_embedder import CohereDocumentEmbedder +from haystack_integrations.components.embedders.cohere import CohereDocumentEmbedder pytestmark = pytest.mark.embedders @@ -60,7 +59,7 @@ def test_to_dict(self): embedder_component = CohereDocumentEmbedder(api_key="test-api-key") component_dict = embedder_component.to_dict() assert component_dict == { - "type": "cohere_haystack.embedders.document_embedder.CohereDocumentEmbedder", + "type": "haystack_integrations.components.embedders.cohere.document_embedder.CohereDocumentEmbedder", "init_parameters": { "model": "embed-english-v2.0", "input_type": "search_document", @@ -93,7 +92,7 @@ def test_to_dict_with_custom_init_parameters(self): ) component_dict = 
embedder_component.to_dict() assert component_dict == { - "type": "cohere_haystack.embedders.document_embedder.CohereDocumentEmbedder", + "type": "haystack_integrations.components.embedders.cohere.document_embedder.CohereDocumentEmbedder", "init_parameters": { "model": "embed-multilingual-v2.0", "input_type": "search_query", diff --git a/integrations/cohere/tests/test_text_embedder.py b/integrations/cohere/tests/test_text_embedder.py index 7e91b4812..657d8df83 100644 --- a/integrations/cohere/tests/test_text_embedder.py +++ b/integrations/cohere/tests/test_text_embedder.py @@ -5,8 +5,7 @@ import pytest from cohere import COHERE_API_URL - -from cohere_haystack.embedders.text_embedder import CohereTextEmbedder +from haystack_integrations.components.embedders.cohere import CohereTextEmbedder pytestmark = pytest.mark.embedders @@ -57,7 +56,7 @@ def test_to_dict(self): embedder_component = CohereTextEmbedder(api_key="test-api-key") component_dict = embedder_component.to_dict() assert component_dict == { - "type": "cohere_haystack.embedders.text_embedder.CohereTextEmbedder", + "type": "haystack_integrations.components.embedders.cohere.text_embedder.CohereTextEmbedder", "init_parameters": { "model": "embed-english-v2.0", "input_type": "search_query", @@ -85,7 +84,7 @@ def test_to_dict_with_custom_init_parameters(self): ) component_dict = embedder_component.to_dict() assert component_dict == { - "type": "cohere_haystack.embedders.text_embedder.CohereTextEmbedder", + "type": "haystack_integrations.components.embedders.cohere.text_embedder.CohereTextEmbedder", "init_parameters": { "model": "embed-multilingual-v2.0", "input_type": "classification", From 3bd9afe7380e426c3e513c0e4db773eadb897c8e Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Mon, 22 Jan 2024 16:46:31 +0100 Subject: [PATCH 04/47] change import paths (#253) --- .../instructor_embedders_haystack/__init__.py | 7 ------- .../instructor_embedders/pyproject.toml | 9 ++++++--- 
.../instructor_embedders/__init__.py | 7 +++++++ .../embedding_backend/__init__.py | 0 .../embedding_backend/instructor_backend.py | 0 .../instructor_document_embedder.py | 2 +- .../instructor_text_embedder.py | 2 +- .../tests/test_instructor_backend.py | 16 ++++++++++++---- .../test_instructor_document_embedder.py | 19 +++++++++++-------- .../tests/test_instructor_text_embedder.py | 19 +++++++++++-------- 10 files changed, 49 insertions(+), 32 deletions(-) delete mode 100644 integrations/instructor_embedders/instructor_embedders_haystack/__init__.py create mode 100644 integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/__init__.py rename integrations/instructor_embedders/{instructor_embedders_haystack => src/haystack_integrations/components/embedders/instructor_embedders}/embedding_backend/__init__.py (100%) rename integrations/instructor_embedders/{instructor_embedders_haystack => src/haystack_integrations/components/embedders/instructor_embedders}/embedding_backend/instructor_backend.py (100%) rename integrations/instructor_embedders/{instructor_embedders_haystack => src/haystack_integrations/components/embedders/instructor_embedders}/instructor_document_embedder.py (98%) rename integrations/instructor_embedders/{instructor_embedders_haystack => src/haystack_integrations/components/embedders/instructor_embedders}/instructor_text_embedder.py (97%) diff --git a/integrations/instructor_embedders/instructor_embedders_haystack/__init__.py b/integrations/instructor_embedders/instructor_embedders_haystack/__init__.py deleted file mode 100644 index 88e2e9df2..000000000 --- a/integrations/instructor_embedders/instructor_embedders_haystack/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-FileCopyrightText: 2023-present deepset GmbH -# -# SPDX-License-Identifier: Apache-2.0 -from instructor_embedders_haystack.instructor_document_embedder import InstructorDocumentEmbedder -from 
instructor_embedders_haystack.instructor_text_embedder import InstructorTextEmbedder - -__all__ = ["InstructorDocumentEmbedder", "InstructorTextEmbedder"] diff --git a/integrations/instructor_embedders/pyproject.toml b/integrations/instructor_embedders/pyproject.toml index 63fb9703b..67cbcb7af 100644 --- a/integrations/instructor_embedders/pyproject.toml +++ b/integrations/instructor_embedders/pyproject.toml @@ -54,6 +54,9 @@ Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/m Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/instructor_embedders" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/instructor_embedders-v(?P.*)' @@ -81,7 +84,7 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:instructor_embedders_haystack tests}" +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -99,7 +102,6 @@ all = [ [tool.coverage.run] branch = true parallel = true -omit = ["instructor_embedders/__about__.py"] [tool.coverage.report] exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"] @@ -152,7 +154,7 @@ unfixable = [ known-first-party = ["instructor_embedders"] [tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports @@ -172,6 +174,7 @@ module = [ "instructor_embedders_haystack.*", "InstructorEmbedding.*", "haystack.*", + "haystack_integrations.*", "pytest.*", "numpy.*", ] diff --git a/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/__init__.py 
b/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/__init__.py new file mode 100644 index 000000000..f68f20a81 --- /dev/null +++ b/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/__init__.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from .instructor_document_embedder import InstructorDocumentEmbedder +from .instructor_text_embedder import InstructorTextEmbedder + +__all__ = ["InstructorDocumentEmbedder", "InstructorTextEmbedder"] diff --git a/integrations/instructor_embedders/instructor_embedders_haystack/embedding_backend/__init__.py b/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/embedding_backend/__init__.py similarity index 100% rename from integrations/instructor_embedders/instructor_embedders_haystack/embedding_backend/__init__.py rename to integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/embedding_backend/__init__.py diff --git a/integrations/instructor_embedders/instructor_embedders_haystack/embedding_backend/instructor_backend.py b/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/embedding_backend/instructor_backend.py similarity index 100% rename from integrations/instructor_embedders/instructor_embedders_haystack/embedding_backend/instructor_backend.py rename to integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/embedding_backend/instructor_backend.py diff --git a/integrations/instructor_embedders/instructor_embedders_haystack/instructor_document_embedder.py b/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/instructor_document_embedder.py similarity index 98% rename from 
integrations/instructor_embedders/instructor_embedders_haystack/instructor_document_embedder.py rename to integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/instructor_document_embedder.py index 7a40f43cd..34912a2a3 100644 --- a/integrations/instructor_embedders/instructor_embedders_haystack/instructor_document_embedder.py +++ b/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/instructor_document_embedder.py @@ -5,7 +5,7 @@ from haystack import Document, component, default_from_dict, default_to_dict -from instructor_embedders_haystack.embedding_backend.instructor_backend import _InstructorEmbeddingBackendFactory +from .embedding_backend.instructor_backend import _InstructorEmbeddingBackendFactory @component diff --git a/integrations/instructor_embedders/instructor_embedders_haystack/instructor_text_embedder.py b/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/instructor_text_embedder.py similarity index 97% rename from integrations/instructor_embedders/instructor_embedders_haystack/instructor_text_embedder.py rename to integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/instructor_text_embedder.py index 5a2c66e65..39b8d6a29 100644 --- a/integrations/instructor_embedders/instructor_embedders_haystack/instructor_text_embedder.py +++ b/integrations/instructor_embedders/src/haystack_integrations/components/embedders/instructor_embedders/instructor_text_embedder.py @@ -5,7 +5,7 @@ from haystack import component, default_from_dict, default_to_dict -from instructor_embedders_haystack.embedding_backend.instructor_backend import _InstructorEmbeddingBackendFactory +from .embedding_backend.instructor_backend import _InstructorEmbeddingBackendFactory @component diff --git a/integrations/instructor_embedders/tests/test_instructor_backend.py 
b/integrations/instructor_embedders/tests/test_instructor_backend.py index 27e31317a..f3fd1653a 100644 --- a/integrations/instructor_embedders/tests/test_instructor_backend.py +++ b/integrations/instructor_embedders/tests/test_instructor_backend.py @@ -1,9 +1,13 @@ from unittest.mock import patch -from instructor_embedders_haystack.embedding_backend.instructor_backend import _InstructorEmbeddingBackendFactory +from haystack_integrations.components.embedders.instructor_embedders.embedding_backend.instructor_backend import ( + _InstructorEmbeddingBackendFactory, +) -@patch("instructor_embedders_haystack.embedding_backend.instructor_backend.INSTRUCTOR") +@patch( + "haystack_integrations.components.embedders.instructor_embedders.embedding_backend.instructor_backend.INSTRUCTOR" +) def test_factory_behavior(mock_instructor): # noqa: ARG001 embedding_backend = _InstructorEmbeddingBackendFactory.get_embedding_backend( model_name_or_path="hkunlp/instructor-large", device="cpu" @@ -20,7 +24,9 @@ def test_factory_behavior(mock_instructor): # noqa: ARG001 _InstructorEmbeddingBackendFactory._instances = {} -@patch("instructor_embedders_haystack.embedding_backend.instructor_backend.INSTRUCTOR") +@patch( + "haystack_integrations.components.embedders.instructor_embedders.embedding_backend.instructor_backend.INSTRUCTOR" +) def test_model_initialization(mock_instructor): _InstructorEmbeddingBackendFactory.get_embedding_backend( model_name_or_path="hkunlp/instructor-base", device="cpu", use_auth_token="huggingface_auth_token" @@ -32,7 +38,9 @@ def test_model_initialization(mock_instructor): _InstructorEmbeddingBackendFactory._instances = {} -@patch("instructor_embedders_haystack.embedding_backend.instructor_backend.INSTRUCTOR") +@patch( + "haystack_integrations.components.embedders.instructor_embedders.embedding_backend.instructor_backend.INSTRUCTOR" +) def test_embedding_function_with_kwargs(mock_instructor): # noqa: ARG001 embedding_backend = 
_InstructorEmbeddingBackendFactory.get_embedding_backend( model_name_or_path="hkunlp/instructor-base" diff --git a/integrations/instructor_embedders/tests/test_instructor_document_embedder.py b/integrations/instructor_embedders/tests/test_instructor_document_embedder.py index b1d0d8fe6..4f01c1742 100644 --- a/integrations/instructor_embedders/tests/test_instructor_document_embedder.py +++ b/integrations/instructor_embedders/tests/test_instructor_document_embedder.py @@ -3,8 +3,7 @@ import numpy as np import pytest from haystack import Document - -from instructor_embedders_haystack.instructor_document_embedder import InstructorDocumentEmbedder +from haystack_integrations.components.embedders.instructor_embedders import InstructorDocumentEmbedder class TestInstructorDocumentEmbedder: @@ -55,7 +54,7 @@ def test_to_dict(self): embedder = InstructorDocumentEmbedder(model="hkunlp/instructor-base") embedder_dict = embedder.to_dict() assert embedder_dict == { - "type": "instructor_embedders_haystack.instructor_document_embedder.InstructorDocumentEmbedder", + "type": "haystack_integrations.components.embedders.instructor_embedders.instructor_document_embedder.InstructorDocumentEmbedder", # noqa "init_parameters": { "model": "hkunlp/instructor-base", "device": "cpu", @@ -86,7 +85,7 @@ def test_to_dict_with_custom_init_parameters(self): ) embedder_dict = embedder.to_dict() assert embedder_dict == { - "type": "instructor_embedders_haystack.instructor_document_embedder.InstructorDocumentEmbedder", + "type": "haystack_integrations.components.embedders.instructor_embedders.instructor_document_embedder.InstructorDocumentEmbedder", # noqa "init_parameters": { "model": "hkunlp/instructor-base", "device": "cuda", @@ -105,7 +104,7 @@ def test_from_dict(self): Test deserialization of InstructorDocumentEmbedder from a dictionary, using default initialization parameters. 
""" embedder_dict = { - "type": "instructor_embedders_haystack.instructor_document_embedder.InstructorDocumentEmbedder", + "type": "haystack_integrations.components.embedders.instructor_embedders.instructor_document_embedder.InstructorDocumentEmbedder", # noqa "init_parameters": { "model": "hkunlp/instructor-base", "device": "cpu", @@ -134,7 +133,7 @@ def test_from_dict_with_custom_init_parameters(self): Test deserialization of InstructorDocumentEmbedder from a dictionary, using custom initialization parameters. """ embedder_dict = { - "type": "instructor_embedders_haystack.instructor_document_embedder.InstructorDocumentEmbedder", + "type": "haystack_integrations.components.embedders.instructor_embedders.instructor_document_embedder.InstructorDocumentEmbedder", # noqa "init_parameters": { "model": "hkunlp/instructor-base", "device": "cuda", @@ -158,7 +157,9 @@ def test_from_dict_with_custom_init_parameters(self): assert embedder.meta_fields_to_embed == ["test_field"] assert embedder.embedding_separator == " | " - @patch("instructor_embedders_haystack.instructor_document_embedder._InstructorEmbeddingBackendFactory") + @patch( + "haystack_integrations.components.embedders.instructor_embedders.instructor_document_embedder._InstructorEmbeddingBackendFactory" + ) def test_warmup(self, mocked_factory): """ Test for checking embedder instances after warm-up. @@ -170,7 +171,9 @@ def test_warmup(self, mocked_factory): model_name_or_path="hkunlp/instructor-base", device="cpu", use_auth_token=None ) - @patch("instructor_embedders_haystack.instructor_document_embedder._InstructorEmbeddingBackendFactory") + @patch( + "haystack_integrations.components.embedders.instructor_embedders.instructor_document_embedder._InstructorEmbeddingBackendFactory" + ) def test_warmup_does_not_reload(self, mocked_factory): """ Test for checking backend instances after multiple warm-ups. 
diff --git a/integrations/instructor_embedders/tests/test_instructor_text_embedder.py b/integrations/instructor_embedders/tests/test_instructor_text_embedder.py index bc00f7348..edede9f39 100644 --- a/integrations/instructor_embedders/tests/test_instructor_text_embedder.py +++ b/integrations/instructor_embedders/tests/test_instructor_text_embedder.py @@ -2,8 +2,7 @@ import numpy as np import pytest - -from instructor_embedders_haystack.instructor_text_embedder import InstructorTextEmbedder +from haystack_integrations.components.embedders.instructor_embedders import InstructorTextEmbedder class TestInstructorTextEmbedder: @@ -48,7 +47,7 @@ def test_to_dict(self): embedder = InstructorTextEmbedder(model="hkunlp/instructor-base") embedder_dict = embedder.to_dict() assert embedder_dict == { - "type": "instructor_embedders_haystack.instructor_text_embedder.InstructorTextEmbedder", + "type": "haystack_integrations.components.embedders.instructor_embedders.instructor_text_embedder.InstructorTextEmbedder", # noqa "init_parameters": { "model": "hkunlp/instructor-base", "device": "cpu", @@ -75,7 +74,7 @@ def test_to_dict_with_custom_init_parameters(self): ) embedder_dict = embedder.to_dict() assert embedder_dict == { - "type": "instructor_embedders_haystack.instructor_text_embedder.InstructorTextEmbedder", + "type": "haystack_integrations.components.embedders.instructor_embedders.instructor_text_embedder.InstructorTextEmbedder", # noqa "init_parameters": { "model": "hkunlp/instructor-base", "device": "cuda", @@ -92,7 +91,7 @@ def test_from_dict(self): Test deserialization of InstructorTextEmbedder from a dictionary, using default initialization parameters. 
""" embedder_dict = { - "type": "instructor_embedders_haystack.instructor_text_embedder.InstructorTextEmbedder", + "type": "haystack_integrations.components.embedders.instructor_embedders.instructor_text_embedder.InstructorTextEmbedder", # noqa "init_parameters": { "model": "hkunlp/instructor-base", "device": "cpu", @@ -117,7 +116,7 @@ def test_from_dict_with_custom_init_parameters(self): Test deserialization of InstructorTextEmbedder from a dictionary, using custom initialization parameters. """ embedder_dict = { - "type": "instructor_embedders_haystack.instructor_text_embedder.InstructorTextEmbedder", + "type": "haystack_integrations.components.embedders.instructor_embedders.instructor_text_embedder.InstructorTextEmbedder", # noqa "init_parameters": { "model": "hkunlp/instructor-base", "device": "cuda", @@ -137,7 +136,9 @@ def test_from_dict_with_custom_init_parameters(self): assert embedder.progress_bar is False assert embedder.normalize_embeddings is True - @patch("instructor_embedders_haystack.instructor_text_embedder._InstructorEmbeddingBackendFactory") + @patch( + "haystack_integrations.components.embedders.instructor_embedders.instructor_text_embedder._InstructorEmbeddingBackendFactory" + ) def test_warmup(self, mocked_factory): """ Test for checking embedder instances after warm-up. @@ -149,7 +150,9 @@ def test_warmup(self, mocked_factory): model_name_or_path="hkunlp/instructor-base", device="cpu", use_auth_token=None ) - @patch("instructor_embedders_haystack.instructor_text_embedder._InstructorEmbeddingBackendFactory") + @patch( + "haystack_integrations.components.embedders.instructor_embedders.instructor_text_embedder._InstructorEmbeddingBackendFactory" + ) def test_warmup_does_not_reload(self, mocked_factory): """ Test for checking backend instances after multiple warm-ups. 
From 38eb465a24bba932105ce2bd274fec627e7869d1 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Mon, 22 Jan 2024 17:16:58 +0100 Subject: [PATCH 05/47] change import paths (#254) --- integrations/jina/pyproject.toml | 13 +++++++------ .../components/embedders/jina}/__init__.py | 5 ++--- .../components/embedders/jina}/document_embedder.py | 0 .../components/embedders/jina}/text_embedder.py | 0 integrations/jina/tests/test_document_embedder.py | 7 +++---- integrations/jina/tests/test_text_embedder.py | 7 +++---- 6 files changed, 15 insertions(+), 17 deletions(-) rename integrations/jina/src/{jina_haystack => haystack_integrations/components/embedders/jina}/__init__.py (57%) rename integrations/jina/src/{jina_haystack => haystack_integrations/components/embedders/jina}/document_embedder.py (100%) rename integrations/jina/src/{jina_haystack => haystack_integrations/components/embedders/jina}/text_embedder.py (100%) diff --git a/integrations/jina/pyproject.toml b/integrations/jina/pyproject.toml index 0fa01a7ab..1136db797 100644 --- a/integrations/jina/pyproject.toml +++ b/integrations/jina/pyproject.toml @@ -31,6 +31,9 @@ Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/m Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/jina" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/jina-v(?P.*)' @@ -67,7 +70,7 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/jina_haystack tests}" +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -133,7 +136,7 @@ unfixable = [ known-first-party = ["jina_haystack"] [tool.ruff.flake8-tidy-imports] -ban-relative-imports = 
"all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports @@ -143,12 +146,9 @@ ban-relative-imports = "all" source_pkgs = ["jina_haystack", "tests"] branch = true parallel = true -omit = [ - "src/jina_haystack/__about__.py", -] [tool.coverage.paths] -jina_haystack = ["src/jina_haystack", "*/jina-haystack/src/jina_haystack"] +jina_haystack = ["src"] tests = ["tests", "*/jina-haystack/tests"] [tool.coverage.report] @@ -161,6 +161,7 @@ exclude_lines = [ [[tool.mypy.overrides]] module = [ "haystack.*", + "haystack_integrations.*", "pytest.*" ] ignore_missing_imports = true diff --git a/integrations/jina/src/jina_haystack/__init__.py b/integrations/jina/src/haystack_integrations/components/embedders/jina/__init__.py similarity index 57% rename from integrations/jina/src/jina_haystack/__init__.py rename to integrations/jina/src/haystack_integrations/components/embedders/jina/__init__.py index 581b23df5..c98f63398 100644 --- a/integrations/jina/src/jina_haystack/__init__.py +++ b/integrations/jina/src/haystack_integrations/components/embedders/jina/__init__.py @@ -1,8 +1,7 @@ # SPDX-FileCopyrightText: 2023-present deepset GmbH # # SPDX-License-Identifier: Apache-2.0 - -from jina_haystack.document_embedder import JinaDocumentEmbedder -from jina_haystack.text_embedder import JinaTextEmbedder +from .document_embedder import JinaDocumentEmbedder +from .text_embedder import JinaTextEmbedder __all__ = ["JinaDocumentEmbedder", "JinaTextEmbedder"] diff --git a/integrations/jina/src/jina_haystack/document_embedder.py b/integrations/jina/src/haystack_integrations/components/embedders/jina/document_embedder.py similarity index 100% rename from integrations/jina/src/jina_haystack/document_embedder.py rename to integrations/jina/src/haystack_integrations/components/embedders/jina/document_embedder.py diff --git a/integrations/jina/src/jina_haystack/text_embedder.py 
b/integrations/jina/src/haystack_integrations/components/embedders/jina/text_embedder.py similarity index 100% rename from integrations/jina/src/jina_haystack/text_embedder.py rename to integrations/jina/src/haystack_integrations/components/embedders/jina/text_embedder.py diff --git a/integrations/jina/tests/test_document_embedder.py b/integrations/jina/tests/test_document_embedder.py index 43b6930c5..4dd91860e 100644 --- a/integrations/jina/tests/test_document_embedder.py +++ b/integrations/jina/tests/test_document_embedder.py @@ -7,8 +7,7 @@ import pytest import requests from haystack import Document - -from jina_haystack import JinaDocumentEmbedder +from haystack_integrations.components.embedders.jina import JinaDocumentEmbedder def mock_session_post_response(*args, **kwargs): # noqa: ARG001 @@ -65,7 +64,7 @@ def test_to_dict(self): component = JinaDocumentEmbedder(api_key="fake-api-key") data = component.to_dict() assert data == { - "type": "jina_haystack.document_embedder.JinaDocumentEmbedder", + "type": "haystack_integrations.components.embedders.jina.document_embedder.JinaDocumentEmbedder", "init_parameters": { "model": "jina-embeddings-v2-base-en", "prefix": "", @@ -90,7 +89,7 @@ def test_to_dict_with_custom_init_parameters(self): ) data = component.to_dict() assert data == { - "type": "jina_haystack.document_embedder.JinaDocumentEmbedder", + "type": "haystack_integrations.components.embedders.jina.document_embedder.JinaDocumentEmbedder", "init_parameters": { "model": "model", "prefix": "prefix", diff --git a/integrations/jina/tests/test_text_embedder.py b/integrations/jina/tests/test_text_embedder.py index e2b68603d..a4f6fd934 100644 --- a/integrations/jina/tests/test_text_embedder.py +++ b/integrations/jina/tests/test_text_embedder.py @@ -6,8 +6,7 @@ import pytest import requests - -from jina_haystack import JinaTextEmbedder +from haystack_integrations.components.embedders.jina import JinaTextEmbedder class TestJinaTextEmbedder: @@ -39,7 +38,7 @@ def 
test_to_dict(self): component = JinaTextEmbedder(api_key="fake-api-key") data = component.to_dict() assert data == { - "type": "jina_haystack.text_embedder.JinaTextEmbedder", + "type": "haystack_integrations.components.embedders.jina.text_embedder.JinaTextEmbedder", "init_parameters": { "model": "jina-embeddings-v2-base-en", "prefix": "", @@ -56,7 +55,7 @@ def test_to_dict_with_custom_init_parameters(self): ) data = component.to_dict() assert data == { - "type": "jina_haystack.text_embedder.JinaTextEmbedder", + "type": "haystack_integrations.components.embedders.jina.text_embedder.JinaTextEmbedder", "init_parameters": { "model": "model", "prefix": "prefix", From 430b24a0af8777fa9bb9f65f5defb44ba220cba6 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Mon, 22 Jan 2024 17:28:45 +0100 Subject: [PATCH 06/47] refact!:change import paths (#255) * change import paths * remove leftovers --- integrations/qdrant/pyproject.toml | 17 ++++++++--------- .../components/retrievers/qdrant/__init__.py | 7 +++++++ .../components/retrievers/qdrant}/retriever.py | 3 +-- .../document_stores/qdrant}/__init__.py | 2 +- .../document_stores/qdrant}/converters.py | 0 .../document_stores/qdrant}/document_store.py | 4 ++-- .../document_stores/qdrant}/filters.py | 2 +- integrations/qdrant/tests/test_converters.py | 3 +-- .../qdrant/tests/test_dict_converters.py | 6 +++--- .../qdrant/tests/test_document_store.py | 3 +-- integrations/qdrant/tests/test_filters.py | 3 +-- .../qdrant/tests/test_legacy_filters.py | 3 +-- integrations/qdrant/tests/test_retriever.py | 13 ++++++------- 13 files changed, 33 insertions(+), 33 deletions(-) create mode 100644 integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/__init__.py rename integrations/qdrant/src/{qdrant_haystack => haystack_integrations/components/retrievers/qdrant}/retriever.py (97%) rename integrations/qdrant/src/{qdrant_haystack => haystack_integrations/document_stores/qdrant}/__init__.py (70%) rename 
integrations/qdrant/src/{qdrant_haystack => haystack_integrations/document_stores/qdrant}/converters.py (100%) rename integrations/qdrant/src/{qdrant_haystack => haystack_integrations/document_stores/qdrant}/document_store.py (99%) rename integrations/qdrant/src/{qdrant_haystack => haystack_integrations/document_stores/qdrant}/filters.py (99%) diff --git a/integrations/qdrant/pyproject.toml b/integrations/qdrant/pyproject.toml index d1086fcdf..9c19d144e 100644 --- a/integrations/qdrant/pyproject.toml +++ b/integrations/qdrant/pyproject.toml @@ -35,6 +35,9 @@ Source = "https://github.com/deepset-ai/haystack-core-integrations" Documentation = "https://github.com/deepset-ai/haystack-core-integrations/blob/main/integrations/qdrant/README.md" Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/qdrant-v(?P.*)' @@ -71,7 +74,7 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/qdrant_haystack tests}" +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -136,23 +139,18 @@ unfixable = [ "F401", ] -[tool.ruff.isort] -known-first-party = ["qdrant_haystack"] - [tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports "tests/**/*" = ["PLR2004", "S101", "TID252"] [tool.coverage.run] -source_pkgs = ["qdrant_haystack", "tests"] +source_pkgs = ["src", "tests"] branch = true parallel = true -omit = [ - "src/qdrant_haystack/__about__.py", -] + [tool.coverage.paths] qdrant_haystack = ["src/qdrant_haystack", "*/qdrant-haystack/src/qdrant_haystack"] @@ -168,6 +166,7 @@ exclude_lines = [ [[tool.mypy.overrides]] module 
= [ "haystack.*", + "haystack_integrations.*", "pytest.*", "qdrant_client.*", "numpy", diff --git a/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/__init__.py b/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/__init__.py new file mode 100644 index 000000000..41b59e42d --- /dev/null +++ b/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/__init__.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 + +from .retriever import QdrantEmbeddingRetriever + +__all__ = ("QdrantEmbeddingRetriever",) diff --git a/integrations/qdrant/src/qdrant_haystack/retriever.py b/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/retriever.py similarity index 97% rename from integrations/qdrant/src/qdrant_haystack/retriever.py rename to integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/retriever.py index bf378688c..e59dca3ad 100644 --- a/integrations/qdrant/src/qdrant_haystack/retriever.py +++ b/integrations/qdrant/src/haystack_integrations/components/retrievers/qdrant/retriever.py @@ -1,8 +1,7 @@ from typing import Any, Dict, List, Optional from haystack import Document, component, default_from_dict, default_to_dict - -from qdrant_haystack import QdrantDocumentStore +from haystack_integrations.document_stores.qdrant import QdrantDocumentStore @component diff --git a/integrations/qdrant/src/qdrant_haystack/__init__.py b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/__init__.py similarity index 70% rename from integrations/qdrant/src/qdrant_haystack/__init__.py rename to integrations/qdrant/src/haystack_integrations/document_stores/qdrant/__init__.py index 765ced0ef..dc3def997 100644 --- a/integrations/qdrant/src/qdrant_haystack/__init__.py +++ b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/__init__.py @@ -2,6 +2,6 @@ # # SPDX-License-Identifier: Apache-2.0 
-from qdrant_haystack.document_store import QdrantDocumentStore +from .document_store import QdrantDocumentStore __all__ = ("QdrantDocumentStore",) diff --git a/integrations/qdrant/src/qdrant_haystack/converters.py b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/converters.py similarity index 100% rename from integrations/qdrant/src/qdrant_haystack/converters.py rename to integrations/qdrant/src/haystack_integrations/document_stores/qdrant/converters.py diff --git a/integrations/qdrant/src/qdrant_haystack/document_store.py b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/document_store.py similarity index 99% rename from integrations/qdrant/src/qdrant_haystack/document_store.py rename to integrations/qdrant/src/haystack_integrations/document_stores/qdrant/document_store.py index 4fd724f67..50dd0220c 100644 --- a/integrations/qdrant/src/qdrant_haystack/document_store.py +++ b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/document_store.py @@ -16,8 +16,8 @@ from qdrant_client.http.exceptions import UnexpectedResponse from tqdm import tqdm -from qdrant_haystack.converters import HaystackToQdrant, QdrantToHaystack -from qdrant_haystack.filters import QdrantFilterConverter +from .converters import HaystackToQdrant, QdrantToHaystack +from .filters import QdrantFilterConverter logger = logging.getLogger(__name__) diff --git a/integrations/qdrant/src/qdrant_haystack/filters.py b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py similarity index 99% rename from integrations/qdrant/src/qdrant_haystack/filters.py rename to integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py index cc6b2b6a5..21e29e570 100644 --- a/integrations/qdrant/src/qdrant_haystack/filters.py +++ b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py @@ -4,7 +4,7 @@ from haystack.utils.filters import COMPARISON_OPERATORS, LOGICAL_OPERATORS, FilterError 
from qdrant_client.http import models -from qdrant_haystack.converters import HaystackToQdrant +from .converters import HaystackToQdrant COMPARISON_OPERATORS = COMPARISON_OPERATORS.keys() LOGICAL_OPERATORS = LOGICAL_OPERATORS.keys() diff --git a/integrations/qdrant/tests/test_converters.py b/integrations/qdrant/tests/test_converters.py index dc4866293..0c6c5676a 100644 --- a/integrations/qdrant/tests/test_converters.py +++ b/integrations/qdrant/tests/test_converters.py @@ -1,9 +1,8 @@ import numpy as np import pytest +from haystack_integrations.document_stores.qdrant.converters import HaystackToQdrant, QdrantToHaystack from qdrant_client.http import models as rest -from qdrant_haystack.converters import HaystackToQdrant, QdrantToHaystack - CONTENT_FIELD = "content" NAME_FIELD = "name" EMBEDDING_FIELD = "vector" diff --git a/integrations/qdrant/tests/test_dict_converters.py b/integrations/qdrant/tests/test_dict_converters.py index 1a211655c..1c9eb36e2 100644 --- a/integrations/qdrant/tests/test_dict_converters.py +++ b/integrations/qdrant/tests/test_dict_converters.py @@ -1,11 +1,11 @@ -from qdrant_haystack import QdrantDocumentStore +from haystack_integrations.document_stores.qdrant import QdrantDocumentStore def test_to_dict(): document_store = QdrantDocumentStore(location=":memory:", index="test") expected = { - "type": "qdrant_haystack.document_store.QdrantDocumentStore", + "type": "haystack_integrations.document_stores.qdrant.document_store.QdrantDocumentStore", "init_parameters": { "location": ":memory:", "url": None, @@ -50,7 +50,7 @@ def test_to_dict(): def test_from_dict(): document_store = QdrantDocumentStore.from_dict( { - "type": "qdrant_haystack.document_store.QdrantDocumentStore", + "type": "haystack_integrations.document_stores.qdrant.document_store.QdrantDocumentStore", "init_parameters": { "location": ":memory:", "index": "test", diff --git a/integrations/qdrant/tests/test_document_store.py b/integrations/qdrant/tests/test_document_store.py index 
0118ae0cf..8316ee565 100644 --- a/integrations/qdrant/tests/test_document_store.py +++ b/integrations/qdrant/tests/test_document_store.py @@ -9,8 +9,7 @@ DeleteDocumentsTest, WriteDocumentsTest, ) - -from qdrant_haystack import QdrantDocumentStore +from haystack_integrations.document_stores.qdrant import QdrantDocumentStore class TestQdrantStoreBaseTests(CountDocumentsTest, WriteDocumentsTest, DeleteDocumentsTest): diff --git a/integrations/qdrant/tests/test_filters.py b/integrations/qdrant/tests/test_filters.py index a25f4a672..848d799e4 100644 --- a/integrations/qdrant/tests/test_filters.py +++ b/integrations/qdrant/tests/test_filters.py @@ -4,8 +4,7 @@ from haystack import Document from haystack.testing.document_store import FilterDocumentsTest from haystack.utils.filters import FilterError - -from qdrant_haystack import QdrantDocumentStore +from haystack_integrations.document_stores.qdrant import QdrantDocumentStore class TestQdrantStoreBaseTests(FilterDocumentsTest): diff --git a/integrations/qdrant/tests/test_legacy_filters.py b/integrations/qdrant/tests/test_legacy_filters.py index 957603423..ff01c3971 100644 --- a/integrations/qdrant/tests/test_legacy_filters.py +++ b/integrations/qdrant/tests/test_legacy_filters.py @@ -5,8 +5,7 @@ from haystack.document_stores.types import DocumentStore from haystack.testing.document_store import LegacyFilterDocumentsTest from haystack.utils.filters import FilterError - -from qdrant_haystack import QdrantDocumentStore +from haystack_integrations.document_stores.qdrant import QdrantDocumentStore # The tests below are from haystack.testing.document_store.LegacyFilterDocumentsTest # Updated to include `meta` prefix for filter keys wherever necessary diff --git a/integrations/qdrant/tests/test_retriever.py b/integrations/qdrant/tests/test_retriever.py index ed220c5bc..7521642ff 100644 --- a/integrations/qdrant/tests/test_retriever.py +++ b/integrations/qdrant/tests/test_retriever.py @@ -5,9 +5,8 @@ FilterableDocsFixtureMixin, 
_random_embeddings, ) - -from qdrant_haystack import QdrantDocumentStore -from qdrant_haystack.retriever import QdrantEmbeddingRetriever +from haystack_integrations.components.retrievers.qdrant import QdrantEmbeddingRetriever +from haystack_integrations.document_stores.qdrant import QdrantDocumentStore class TestQdrantRetriever(FilterableDocsFixtureMixin): @@ -24,10 +23,10 @@ def test_to_dict(self): retriever = QdrantEmbeddingRetriever(document_store=document_store) res = retriever.to_dict() assert res == { - "type": "qdrant_haystack.retriever.QdrantEmbeddingRetriever", + "type": "haystack_integrations.components.retrievers.qdrant.retriever.QdrantEmbeddingRetriever", "init_parameters": { "document_store": { - "type": "qdrant_haystack.document_store.QdrantDocumentStore", + "type": "haystack_integrations.document_stores.qdrant.document_store.QdrantDocumentStore", "init_parameters": { "location": ":memory:", "url": None, @@ -74,11 +73,11 @@ def test_to_dict(self): def test_from_dict(self): data = { - "type": "qdrant_haystack.retriever.QdrantEmbeddingRetriever", + "type": "haystack_integrations.components.retrievers.qdrant.retriever.QdrantEmbeddingRetriever", "init_parameters": { "document_store": { "init_parameters": {"location": ":memory:", "index": "test"}, - "type": "qdrant_haystack.document_store.QdrantDocumentStore", + "type": "haystack_integrations.document_stores.qdrant.document_store.QdrantDocumentStore", }, "filters": None, "top_k": 5, From 89e1c2f46aecd4802c80d309620c187243adfed7 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Tue, 23 Jan 2024 10:52:43 +0100 Subject: [PATCH 07/47] change import paths (#256) * change import paths * fix tests * leftover --- integrations/pinecone/pyproject.toml | 18 ++- .../retrievers/pinecone/__init__.py | 3 + .../retrievers/pinecone}/dense_retriever.py | 2 +- .../document_stores/pinecone/__init__.py | 6 + .../pinecone}/document_store.py | 5 +- .../document_stores/pinecone}/errors.py | 0 
.../document_stores/pinecone}/filters.py | 0 .../src/pinecone_haystack/__init__.py | 7 - integrations/pinecone/tests/conftest.py | 2 +- .../pinecone/tests/test_dense_retriever.py | 16 +-- .../pinecone/tests/test_document_store.py | 134 +++++++++--------- integrations/pinecone/tests/test_filters.py | 1 + 12 files changed, 103 insertions(+), 91 deletions(-) create mode 100644 integrations/pinecone/src/haystack_integrations/components/retrievers/pinecone/__init__.py rename integrations/pinecone/src/{pinecone_haystack => haystack_integrations/components/retrievers/pinecone}/dense_retriever.py (96%) create mode 100644 integrations/pinecone/src/haystack_integrations/document_stores/pinecone/__init__.py rename integrations/pinecone/src/{pinecone_haystack => haystack_integrations/document_stores/pinecone}/document_store.py (99%) rename integrations/pinecone/src/{pinecone_haystack => haystack_integrations/document_stores/pinecone}/errors.py (100%) rename integrations/pinecone/src/{pinecone_haystack => haystack_integrations/document_stores/pinecone}/filters.py (100%) delete mode 100644 integrations/pinecone/src/pinecone_haystack/__init__.py diff --git a/integrations/pinecone/pyproject.toml b/integrations/pinecone/pyproject.toml index 5ada5669e..2d73cdf58 100644 --- a/integrations/pinecone/pyproject.toml +++ b/integrations/pinecone/pyproject.toml @@ -34,6 +34,9 @@ Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/m Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/pinecone" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/pinecone-v(?P.*)' @@ -74,7 +77,7 @@ dependencies = [ "numpy", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/pinecone_haystack tests}" +typing = "mypy --install-types 
--non-interactive --explicit-package-bases {args:src/ tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -143,26 +146,26 @@ unfixable = [ ] [tool.ruff.isort] -known-first-party = ["pinecone_haystack"] +known-first-party = ["haystack_integrations"] [tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports "tests/**/*" = ["PLR2004", "S101", "TID252"] [tool.coverage.run] -source_pkgs = ["pinecone_haystack", "tests"] +source_pkgs = ["src", "tests"] branch = true parallel = true omit = [ - "example" + "examples" ] [tool.coverage.paths] -pinecone_haystack = ["src/pinecone_haystack", "*/pinecone_haystack/src/pinecone_haystack"] -tests = ["tests", "*/pinecone_haystack/tests"] +pinecone_haystack = ["src/*"] +tests = ["tests"] [tool.coverage.report] exclude_lines = [ @@ -182,6 +185,7 @@ markers = [ module = [ "pinecone.*", "haystack.*", + "haystack_integrations.*", "pytest.*" ] ignore_missing_imports = true diff --git a/integrations/pinecone/src/haystack_integrations/components/retrievers/pinecone/__init__.py b/integrations/pinecone/src/haystack_integrations/components/retrievers/pinecone/__init__.py new file mode 100644 index 000000000..d73d799d4 --- /dev/null +++ b/integrations/pinecone/src/haystack_integrations/components/retrievers/pinecone/__init__.py @@ -0,0 +1,3 @@ +from .dense_retriever import PineconeDenseRetriever + +__all__ = ["PineconeDenseRetriever"] diff --git a/integrations/pinecone/src/pinecone_haystack/dense_retriever.py b/integrations/pinecone/src/haystack_integrations/components/retrievers/pinecone/dense_retriever.py similarity index 96% rename from integrations/pinecone/src/pinecone_haystack/dense_retriever.py rename to integrations/pinecone/src/haystack_integrations/components/retrievers/pinecone/dense_retriever.py index 3f60f252b..279ef4977 100644 --- 
a/integrations/pinecone/src/pinecone_haystack/dense_retriever.py +++ b/integrations/pinecone/src/haystack_integrations/components/retrievers/pinecone/dense_retriever.py @@ -6,7 +6,7 @@ from haystack import component, default_from_dict, default_to_dict from haystack.dataclasses import Document -from pinecone_haystack.document_store import PineconeDocumentStore +from haystack_integrations.document_stores.pinecone import PineconeDocumentStore @component diff --git a/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/__init__.py b/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/__init__.py new file mode 100644 index 000000000..159a85fae --- /dev/null +++ b/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/__init__.py @@ -0,0 +1,6 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from .document_store import PineconeDocumentStore + +__all__ = ["PineconeDocumentStore"] diff --git a/integrations/pinecone/src/pinecone_haystack/document_store.py b/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/document_store.py similarity index 99% rename from integrations/pinecone/src/pinecone_haystack/document_store.py rename to integrations/pinecone/src/haystack_integrations/document_stores/pinecone/document_store.py index 8fe579611..a755b7e47 100644 --- a/integrations/pinecone/src/pinecone_haystack/document_store.py +++ b/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/document_store.py @@ -8,13 +8,14 @@ from typing import Any, Dict, List, Optional import pandas as pd -import pinecone from haystack import default_to_dict from haystack.dataclasses import Document from haystack.document_stores.types import DuplicatePolicy from haystack.utils.filters import convert -from pinecone_haystack.filters import _normalize_filters +import pinecone + +from .filters import _normalize_filters logger = logging.getLogger(__name__) diff 
--git a/integrations/pinecone/src/pinecone_haystack/errors.py b/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/errors.py similarity index 100% rename from integrations/pinecone/src/pinecone_haystack/errors.py rename to integrations/pinecone/src/haystack_integrations/document_stores/pinecone/errors.py diff --git a/integrations/pinecone/src/pinecone_haystack/filters.py b/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/filters.py similarity index 100% rename from integrations/pinecone/src/pinecone_haystack/filters.py rename to integrations/pinecone/src/haystack_integrations/document_stores/pinecone/filters.py diff --git a/integrations/pinecone/src/pinecone_haystack/__init__.py b/integrations/pinecone/src/pinecone_haystack/__init__.py deleted file mode 100644 index e3ec258d2..000000000 --- a/integrations/pinecone/src/pinecone_haystack/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-FileCopyrightText: 2023-present deepset GmbH -# -# SPDX-License-Identifier: Apache-2.0 -from pinecone_haystack.dense_retriever import PineconeDenseRetriever -from pinecone_haystack.document_store import PineconeDocumentStore - -__all__ = ["PineconeDocumentStore", "PineconeDenseRetriever"] diff --git a/integrations/pinecone/tests/conftest.py b/integrations/pinecone/tests/conftest.py index 3ae642ae7..c7a1342d5 100644 --- a/integrations/pinecone/tests/conftest.py +++ b/integrations/pinecone/tests/conftest.py @@ -3,7 +3,7 @@ import pytest from haystack.document_stores.types import DuplicatePolicy -from pinecone_haystack.document_store import PineconeDocumentStore +from haystack_integrations.document_stores.pinecone import PineconeDocumentStore # This is the approximate time it takes for the documents to be available SLEEP_TIME = 20 diff --git a/integrations/pinecone/tests/test_dense_retriever.py b/integrations/pinecone/tests/test_dense_retriever.py index ceb73b687..e0f6dc375 100644 --- a/integrations/pinecone/tests/test_dense_retriever.py +++ 
b/integrations/pinecone/tests/test_dense_retriever.py @@ -5,8 +5,8 @@ from haystack.dataclasses import Document -from pinecone_haystack.dense_retriever import PineconeDenseRetriever -from pinecone_haystack.document_store import PineconeDocumentStore +from haystack_integrations.components.retrievers.pinecone import PineconeDenseRetriever +from haystack_integrations.document_stores.pinecone import PineconeDocumentStore def test_init_default(): @@ -17,7 +17,7 @@ def test_init_default(): assert retriever.top_k == 10 -@patch("pinecone_haystack.document_store.pinecone") +@patch("haystack_integrations.document_stores.pinecone.document_store.pinecone") def test_to_dict(mock_pinecone): mock_pinecone.Index.return_value.describe_index_stats.return_value = {"dimension": 512} document_store = PineconeDocumentStore( @@ -31,7 +31,7 @@ def test_to_dict(mock_pinecone): retriever = PineconeDenseRetriever(document_store=document_store) res = retriever.to_dict() assert res == { - "type": "pinecone_haystack.dense_retriever.PineconeDenseRetriever", + "type": "haystack_integrations.components.retrievers.pinecone.dense_retriever.PineconeDenseRetriever", "init_parameters": { "document_store": { "init_parameters": { @@ -41,7 +41,7 @@ def test_to_dict(mock_pinecone): "batch_size": 50, "dimension": 512, }, - "type": "pinecone_haystack.document_store.PineconeDocumentStore", + "type": "haystack_integrations.document_stores.pinecone.document_store.PineconeDocumentStore", }, "filters": {}, "top_k": 10, @@ -49,10 +49,10 @@ def test_to_dict(mock_pinecone): } -@patch("pinecone_haystack.document_store.pinecone") +@patch("haystack_integrations.document_stores.pinecone.document_store.pinecone") def test_from_dict(mock_pinecone, monkeypatch): data = { - "type": "pinecone_haystack.dense_retriever.PineconeDenseRetriever", + "type": "haystack_integrations.components.retrievers.pinecone.dense_retriever.PineconeDenseRetriever", "init_parameters": { "document_store": { "init_parameters": { @@ -62,7 +62,7 @@ 
def test_from_dict(mock_pinecone, monkeypatch): "batch_size": 50, "dimension": 512, }, - "type": "pinecone_haystack.document_store.PineconeDocumentStore", + "type": "haystack_integrations.document_stores.pinecone.document_store.PineconeDocumentStore", }, "filters": {}, "top_k": 10, diff --git a/integrations/pinecone/tests/test_document_store.py b/integrations/pinecone/tests/test_document_store.py index 5c9b32698..a856cde86 100644 --- a/integrations/pinecone/tests/test_document_store.py +++ b/integrations/pinecone/tests/test_document_store.py @@ -5,9 +5,75 @@ from haystack import Document from haystack.testing.document_store import CountDocumentsTest, DeleteDocumentsTest, WriteDocumentsTest -from pinecone_haystack.document_store import PineconeDocumentStore - - +from haystack_integrations.document_stores.pinecone import PineconeDocumentStore + + +@patch("haystack_integrations.document_stores.pinecone.document_store.pinecone") +def test_init(mock_pinecone): + mock_pinecone.Index.return_value.describe_index_stats.return_value = {"dimension": 30} + + document_store = PineconeDocumentStore( + api_key="fake-api-key", + environment="gcp-starter", + index="my_index", + namespace="test", + batch_size=50, + dimension=30, + metric="euclidean", + ) + + mock_pinecone.init.assert_called_with(api_key="fake-api-key", environment="gcp-starter") + + assert document_store.environment == "gcp-starter" + assert document_store.index == "my_index" + assert document_store.namespace == "test" + assert document_store.batch_size == 50 + assert document_store.dimension == 30 + assert document_store.index_creation_kwargs == {"metric": "euclidean"} + + +@patch("haystack_integrations.document_stores.pinecone.document_store.pinecone") +def test_init_api_key_in_environment_variable(mock_pinecone, monkeypatch): + monkeypatch.setenv("PINECONE_API_KEY", "fake-api-key") + + PineconeDocumentStore( + environment="gcp-starter", + index="my_index", + namespace="test", + batch_size=50, + dimension=30, + 
metric="euclidean", + ) + + mock_pinecone.init.assert_called_with(api_key="fake-api-key", environment="gcp-starter") + + +@patch("haystack_integrations.document_stores.pinecone.document_store.pinecone") +def test_to_dict(mock_pinecone): + mock_pinecone.Index.return_value.describe_index_stats.return_value = {"dimension": 30} + document_store = PineconeDocumentStore( + api_key="fake-api-key", + environment="gcp-starter", + index="my_index", + namespace="test", + batch_size=50, + dimension=30, + metric="euclidean", + ) + assert document_store.to_dict() == { + "type": "haystack_integrations.document_stores.pinecone.document_store.PineconeDocumentStore", + "init_parameters": { + "environment": "gcp-starter", + "index": "my_index", + "dimension": 30, + "namespace": "test", + "batch_size": 50, + "metric": "euclidean", + }, + } + + +@pytest.mark.integration class TestDocumentStore(CountDocumentsTest, DeleteDocumentsTest, WriteDocumentsTest): def test_write_documents(self, document_store: PineconeDocumentStore): docs = [Document(id="1")] @@ -21,44 +87,6 @@ def test_write_documents_duplicate_fail(self, document_store: PineconeDocumentSt def test_write_documents_duplicate_skip(self, document_store: PineconeDocumentStore): ... 
- @patch("pinecone_haystack.document_store.pinecone") - def test_init(self, mock_pinecone): - mock_pinecone.Index.return_value.describe_index_stats.return_value = {"dimension": 30} - - document_store = PineconeDocumentStore( - api_key="fake-api-key", - environment="gcp-starter", - index="my_index", - namespace="test", - batch_size=50, - dimension=30, - metric="euclidean", - ) - - mock_pinecone.init.assert_called_with(api_key="fake-api-key", environment="gcp-starter") - - assert document_store.environment == "gcp-starter" - assert document_store.index == "my_index" - assert document_store.namespace == "test" - assert document_store.batch_size == 50 - assert document_store.dimension == 30 - assert document_store.index_creation_kwargs == {"metric": "euclidean"} - - @patch("pinecone_haystack.document_store.pinecone") - def test_init_api_key_in_environment_variable(self, mock_pinecone, monkeypatch): - monkeypatch.setenv("PINECONE_API_KEY", "fake-api-key") - - PineconeDocumentStore( - environment="gcp-starter", - index="my_index", - namespace="test", - batch_size=50, - dimension=30, - metric="euclidean", - ) - - mock_pinecone.init.assert_called_with(api_key="fake-api-key", environment="gcp-starter") - def test_init_fails_wo_api_key(self, monkeypatch): api_key = None monkeypatch.delenv("PINECONE_API_KEY", raising=False) @@ -69,30 +97,6 @@ def test_init_fails_wo_api_key(self, monkeypatch): index="my_index", ) - @patch("pinecone_haystack.document_store.pinecone") - def test_to_dict(self, mock_pinecone): - mock_pinecone.Index.return_value.describe_index_stats.return_value = {"dimension": 30} - document_store = PineconeDocumentStore( - api_key="fake-api-key", - environment="gcp-starter", - index="my_index", - namespace="test", - batch_size=50, - dimension=30, - metric="euclidean", - ) - assert document_store.to_dict() == { - "type": "pinecone_haystack.document_store.PineconeDocumentStore", - "init_parameters": { - "environment": "gcp-starter", - "index": "my_index", - 
"dimension": 30, - "namespace": "test", - "batch_size": 50, - "metric": "euclidean", - }, - } - def test_embedding_retrieval(self, document_store: PineconeDocumentStore): query_embedding = [0.1] * 768 most_similar_embedding = [0.8] * 768 diff --git a/integrations/pinecone/tests/test_filters.py b/integrations/pinecone/tests/test_filters.py index 1e6aeb0cd..a38482a26 100644 --- a/integrations/pinecone/tests/test_filters.py +++ b/integrations/pinecone/tests/test_filters.py @@ -7,6 +7,7 @@ ) +@pytest.mark.integration class TestFilters(FilterDocumentsTest): def assert_documents_are_equal(self, received: List[Document], expected: List[Document]): for doc in received: From 5cbb5293311c5a2b4d237c588c0cbd4cd0fcdf1b Mon Sep 17 00:00:00 2001 From: Corentin Date: Tue, 23 Jan 2024 15:37:39 +0100 Subject: [PATCH 08/47] Feat: UnstructuredFileConverter meta field (#242) * Optional meta field for UnstructuredFileConverter with proper tests * black lint * Adding multiple files and meta list test case * Black formatting test * Fixing metadata page number bug. 
Deep copy of dict * Folder of files test * Update integrations/unstructured/src/haystack_integrations/components/converters/unstructured/converter.py Co-authored-by: Stefano Fiorucci * Update integrations/unstructured/src/haystack_integrations/components/converters/unstructured/converter.py Co-authored-by: Stefano Fiorucci * Update integrations/unstructured/src/haystack_integrations/components/converters/unstructured/converter.py Co-authored-by: Stefano Fiorucci * Renaming "name" meta to "file_path" and deepcopy fix * Fix Ruff Complaining --------- Co-authored-by: Stefano Fiorucci --- .../converters/unstructured/converter.py | 34 ++++-- .../tests/samples/sample_pdf2.pdf | Bin 0 -> 21457 bytes .../unstructured/tests/test_converter.py | 97 +++++++++++++++++- 3 files changed, 120 insertions(+), 11 deletions(-) create mode 100644 integrations/unstructured/tests/samples/sample_pdf2.pdf diff --git a/integrations/unstructured/src/haystack_integrations/components/converters/unstructured/converter.py b/integrations/unstructured/src/haystack_integrations/components/converters/unstructured/converter.py index 92348e6cd..bee1d9a7b 100644 --- a/integrations/unstructured/src/haystack_integrations/components/converters/unstructured/converter.py +++ b/integrations/unstructured/src/haystack_integrations/components/converters/unstructured/converter.py @@ -1,6 +1,7 @@ # SPDX-FileCopyrightText: 2023-present deepset GmbH # # SPDX-License-Identifier: Apache-2.0 +import copy import logging import os from collections import defaultdict @@ -8,6 +9,7 @@ from typing import Any, Dict, List, Literal, Optional, Union from haystack import Document, component, default_to_dict +from haystack.components.converters.utils import normalize_metadata from tqdm import tqdm from unstructured.documents.elements import Element # type: ignore[import] @@ -89,12 +91,23 @@ def to_dict(self) -> Dict[str, Any]: ) @component.output_types(documents=List[Document]) - def run(self, paths: Union[List[str], 
List[os.PathLike]]): + def run( + self, + paths: Union[List[str], List[os.PathLike]], + meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None, + ): """ Convert files to Haystack Documents using the Unstructured API (hosted or running locally). :param paths: List of paths to convert. Paths can be files or directories. If a path is a directory, all files in the directory are converted. Subdirectories are ignored. + :param meta: Optional metadata to attach to the Documents. + This value can be either a list of dictionaries or a single dictionary. + If it's a single dictionary, its content is added to the metadata of all produced Documents. + If it's a list, the length of the list must match the number of paths, because the two lists will be zipped. + Please note that if the paths contain directories, the length of the meta list must match + the actual number of files contained. + Defaults to `None`. """ unique_paths = {Path(path) for path in paths} @@ -107,9 +120,10 @@ def run(self, paths: Union[List[str], List[os.PathLike]]): # currently, the files are converted sequentially to gently handle API failures documents = [] + meta_list = normalize_metadata(meta, sources_count=len(all_filepaths)) - for filepath in tqdm( - all_filepaths, desc="Converting files to Haystack Documents", disable=not self.progress_bar + for filepath, metadata in tqdm( + zip(all_filepaths, meta_list), desc="Converting files to Haystack Documents", disable=not self.progress_bar ): elements = self._partition_file_into_elements(filepath=filepath) docs_for_file = self._create_documents( @@ -117,9 +131,9 @@ def run(self, paths: Union[List[str], List[os.PathLike]]): elements=elements, document_creation_mode=self.document_creation_mode, separator=self.separator, + meta=metadata, ) documents.extend(docs_for_file) - return {"documents": documents} def _create_documents( @@ -128,6 +142,7 @@ def _create_documents( elements: List[Element], document_creation_mode: Literal["one-doc-per-file", 
"one-doc-per-page", "one-doc-per-element"], separator: str, + meta: Dict[str, Any], ) -> List[Document]: """ Create Haystack Documents from the elements returned by Unstructured. @@ -136,13 +151,16 @@ def _create_documents( if document_creation_mode == "one-doc-per-file": text = separator.join([str(el) for el in elements]) - docs = [Document(content=text, meta={"name": str(filepath)})] + metadata = copy.deepcopy(meta) + metadata["file_path"] = str(filepath) + docs = [Document(content=text, meta=metadata)] elif document_creation_mode == "one-doc-per-page": texts_per_page: defaultdict[int, str] = defaultdict(str) meta_per_page: defaultdict[int, dict] = defaultdict(dict) for el in elements: - metadata = {"name": str(filepath)} + metadata = copy.deepcopy(meta) + metadata["file_path"] = str(filepath) if hasattr(el, "metadata"): metadata.update(el.metadata.to_dict()) page_number = int(metadata.get("page_number", 1)) @@ -154,14 +172,14 @@ def _create_documents( elif document_creation_mode == "one-doc-per-element": for el in elements: - metadata = {"name": str(filepath)} + metadata = copy.deepcopy(meta) + metadata["file_path"] = str(filepath) if hasattr(el, "metadata"): metadata.update(el.metadata.to_dict()) if hasattr(el, "category"): metadata["category"] = el.category doc = Document(content=str(el), meta=metadata) docs.append(doc) - return docs def _partition_file_into_elements(self, filepath: Path) -> List[Element]: diff --git a/integrations/unstructured/tests/samples/sample_pdf2.pdf b/integrations/unstructured/tests/samples/sample_pdf2.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c304dc004c4b69df370815ba1ab827f3a76ce2aa GIT binary patch literal 21457 zcmdSAV|ZrGwk{l}gN|*dW81cE+qP|Ytd7m@*tXTNZTlp>*1Pt*&sqDd@6Y$+Tr*X3 z)IF+3;i)<6p1H;#mK7AHqN8SnB;MMdn1Q6D#iPZu(Km;rXQrcvG_p4Ql-d4?adJY^ zNE%t2IGW=9QPW7{F+kGrSvnfoTkAO*;n4^hIhz?6$&2tq(g>Sl zBS>y;NC!uIBRwlfSHR5gE{Y0EC?OMVENznr1bD3lLXuvi)6hfr49$3ri;F 
zUJK0tvM>H1-Hxh&zKpr5NN2}gnh1y^z4t(9aDXtXfSsGh#vmdOfr;OG}#FU2@}&VgkMN8 z+dUXGe!8qvc+{t(NbHf?40V;9xT%MZrbYFwviU9qNUUa+Y8rQ1fpvPSHos^3*s-*Z zg&Zw1D?yIC%Cjt6iB3Q)h6 zag7sfsU6?D1x*SSadKkm0GMq-O>w~Tob7Kj+!4%npE`2(tjtk}%Hu~UhRk3jv|CtI zC?PmH3JsGyVfcCv_rBf9W;Qm|Lz$7?iFz4ypd@#Y2oV@&+7iU+ry5Ib2pQ-zx9Nk% zObU+fooRt&>ZAkU6~rdnC-kD3;UQuiyCFM}=c8XiEPuP7Yw zcVKE!ATwT&T0ZVY07yQ%ML_2Ob6bE}Kle!xX@6#0FijAZ&abvm(7qzPkilINw18`V zW?%d-;E;dtmBymv!>01j!(pm_aTDN^Wkmw+69|iii|2iuBq>HC4=BwQkmb4|bi`=> zq80Fz13rnz2=)r-6$qaKG^6)*5#X2aQZ)zG7m@Cf4MA7r3~o$iD$yn|nWr-W6%U|tIH$Ep$m!sA=S5#$4v2&KisZPEe z^rLTws6}q-E9=eci_^}f7^EmmqV6#p!!h_h`WNWW)fB3dmh(76FvF)uQS~b8#^`EP zC)WTk>YZ?4!3=bwZ(-SbH==3MTQgOHG{7!Iw)h_ReBC6ubZsSHLpt||?Q*!#_r!09 z^uhAMS&zZ(?;wqZPzSN_hsFPnM`TH)2#4`y|I2G2d^e+J(?P?woD~aHl7YKY&?%_;x`f` z8F4Hj{v~NP@g`n3UN?aP`LD&+v#O;j&6(;lZGv9nUNj;J49VBY+d~xzWpNfs$H^YV zGKmYxF3CF7*`zx7KH_hMFO5KQUA$cietFf(xx$er-=*jG5BW++@>4rrRv##8~vum7GcSMo=G_DyTLtZyMZtwHqzMr zwu`lkv0FDR57teq`@kTNUJ#iO*%BFZ&A8Vzu|2WOTxF*H-b$AANN!E;nM9CeoCKH@ zQ!1%sGlw*nS!z`(GfQhuVye;BGF>zf4TW&?= zX%W>Fcm8S_SA{`wovKNmblo_jRy?%^3txj~gMtgcj0BA;jZT%^c`mnHc13o6he(Hp z&v54?-f!IqzVxo*-0<8Ey)3=FO#{NxxUwnoJoP+|epZ8DcHx)Y+ZrU*VVPp_Vo^g< z3X8LAin`4c^%Kw*d!{6eWlSvtKdM*M)XIhDbJ%(~2R-xL(jO_HxFS>|GEr7hbSO_L z6{wCWan;DwKvk{kkE)+~e)pzXXc>471e&%5Jj}%RY|UkUCPw)i!rlX*p?I{rG8AY9VUhx!0<9b&+f7wegt(RsuFo zBw`@3C}AIC0!lkHxk@EYrK%zRUjN+uyoj5fJB&Mu8?oKFz08Z|LGKml)%j`ldf{UK zY2jH0gcYP5;s7!qL>NRFoEwTB0s+(sq^Aq8tHmFeW?F|xa~1`N|3$z#h!cVdS_}Ef z0galHv7F?@tx?I<+tmexN?%A&ptnUdW^j5?Bq9n$0>wpmNTg3_PDDz`Lnv8DB3YGE zt!_s+Qauu{kP8JD6WN2v*}mj{dQ~E1`(z7p`;3}F^LyRNZ~LFSB_k7+u{vF9cH}si zxY+A(?_r4DCA*T9H#$u&yC4e*Eg{eG`~4LAQI}c|MlW$7$$_ za&a0&ywfT;+Yuhd*_l6KMRYTmqUKaR1&vl3mp7!e+cGQwQ94quy<%aN;Y z*EOv>-|OdZ;+NZOqZ-$MvFx)%wme-muB$L#PA1Tvy44@q3g40;UWq?mSMySNTphg& zJZ&quD|mLYq+MB0!%i30EXFXVp1!!Y zO}xrnWUlBa|Huy`CT*|n$fMyj^Y_5=LdUw5&kSG`a344~4iZ+Y&)~hRMFpQ#pWe!5 zdC$c0kE7-e6CWKP!Bf%8xCxOUk%EEl7^gXo*#(wjmPF?6sQCVxZMy-R*Ns~N55%%V 
zS%$o(2M|_#w#%)72i)^4XwD_KaIexs(%0;}Uz6(L>bdhP^I^a8naw;TJ=PC>XEL$J zv8i~{-#u?4?^4t1hD{_U;>M9P(%G_Iys!BCgDxY&P(#>N9J!n8ccwEdt1H(={F1A* zFPrR3mfN0Rl#^g=5boGzU!QLn;rfb3u* z^-QyN*X*?${ho;l90B%pM&0i5uD!x_=(YAb1zrNH%`@uG`fB=WaI*b_6jFvftCXkW zb?#xoxTNjoBxgZ)qAt`VCb&_6<~`}LN~Q;NqGWIxVk9zo zW;>{Z?CsZm(o|Wtx65;ziT+UUg~ilyDv$VDaXZ`7{GHXl;la*~i`jW$QTSR}JI|fp zYwokb%|P7YiOhpcQg#%Nizmd(l`Dyr@!tA<;Pvw-8~rDJ{3kD^WBeyEWus&L7g?oa z_&?d|AN@b6D;*vkGaU=_e`ct=9uSThb4hz1KBr3dCE`D6N>8hVO3CMb$SakI%bV2@ zNqnc0_%-s~(9kWeM#^9mFa$ts2IdgJ*#w^r!t8rt>Nu~!v|)y0+8}`>D=i$JSd0uw z0(89$bF+v0G*-8yDmzHhe#^T~LEmIX2BX9HDEjE!`%%@q&w0DYdCPsveH**eYDjc7 ziE|p+V&^6zc$Lw#4+g2hOM4GX2IOG&V^1S{RqNeB*wa2Ky23B)wx>dh-HB$h{^m6{ z=beooFR5(-7Mp0+gV}DVm74~n(yQNmlwT2_CN#aSY+pT2S#0045W#KAVk( zd4FBcp68^xC2CW6ud@nML-a(q`7)ZdvEFZstZWFeXM)1HB~@*+onC=yLlq!Kd`OU|s{v)L9h--stgwZO42N$v(Q;m zMAP5zQA!@TfwYn?Iz2=m>M1v8&hhm|z#uf%b-I5kwRh!Xuly#0;NZrC;P63(Xz?b6 zXi;HX#nXdzbxsIuD?`A!$S^1qhJwg$A2nR+Fz@N88q`<_%jg)lUx8V;HF5@8*w*Zt z*0ejMs&~1Rsg^!$ex0rgB#H>Q^UugpjzuN%|B)jti}{U+E5;>=gSw51bC@Iuw>t2G zs4|2Pc~axy1(-R^&KVNJ81<~LjVN6a%SfTIL+b(c#s&OrDrv1FK##(rxq_2}+haT=W7!x)oUMU^f z_q5nvA;08Y6H-B$W^yZbc?_I2(Ocgu8W#eaq&g>tFa7Y@OCO6a**df0Us|z5Y=Nb< zh7@OY%?tI^A2hibHXfx$2h&o+Qq`bV+LD&?O?G&k_jINA^sV>wwC9ICzd!(@7~+%E z(neH&i>P6qAK!A#z~XsL25}Ol>X)Y+qo*8USBrM4;iSL|cC167sKc2bd4e;3!cXF3 z?q#mi8#GL`D(QH9?>35zdM~rIPHM*=qa8I*)EXJD#k~T^-T{9Ydy&pnVI_Kzx8pNY z@m6w*I>Nc_NfSupVB(pC{B7c-)%@(}Q|6)Jqx@d}W&&2ESGGys!vbB!;?;hbDYYL7 z+>eW~ZavPZGv}#ttx#w&YJMF>b@yFEUAx+I_Nw8#PYp`>KzYOO)?Y#`)85&6Gj^RN zj%RknqmuN3)#)>Cr{s7HrhcmASR*q*AcS;`bmacPBrnXz6z7bDzet0m(v4VPP{Uyar`pR& zUDtZ%p4c@P3Gz^G$6INWM`#ZqmA=?_#kB>xj92Z)jRf^(EHqm22h6LWom$kDRx*)K z9XF)f%TNP(HGb%>zTmIu2PJ6rL$=M}8Nf+;a%>f$DSx}WeDb*BIF#V>Ot^4SMZONS zhn^l3pUDBI5E(kj`Bn^o0eT9k1-vTK#HE5-rebLrM<<({okic| zcE%>i2XP3xDe(T;X27t5i%wIWa|RI~(CA8@C9l_!hLsnGL6?OBiw!gB?ifeP?U?cc z5>*&rGhME<8~Ti|SP4x9fpmu3!-f&RGFHEtS|YCvxZK(%P3cj)WO6R;+_KJQ?qwDb z9<(I14ZB=ICfewZ#4{s9m@cD?x?JH^NlHyUG|FsKq?iPFV@@CvPMbbzkzV57+8?SU 
zYi~ov65mp!vxi_GxJEm8oc7VJk-V&iOFfuyQdB<_uz>Knf_c*pw72v&sSeQX*iPC807XhbovMEGy)*wBKq zAEQoBiOgkkZ~~J*=RzisiEk>A1cY~S9WvP#(P3P`#@H*bZcrfGaON_mcaDF{on)O{ zr6$_9GzR?kTw7gVU*5cRzPY<^S%x`3Jze!+MOo(hS+H@B`b+Y(@J6=^fC-LeDeS6m=dEzFXv=am zBJjsJAFRQwI@B9+_?BmeW88#U*6h|A7qXk0LsWk;jiX3QiHOFT@80?T-?# zw1zb-(wYh1l4sCJNUsT}^un{iT@{TS;@>4+sC1Gk?KE-Y`UvIsCha5fZ588iJiqO3 zJa_3WOqUwH?zenFjpP2W4ASOD$at&c0v6Y=SH-r2-+?&j4EzCIj|gs4h@PkUQGN$g z*2e~+)K2oFs1Nc(shQ$KQ5oh2{dbVEr*U*NAgK*nG$%4CrB@%Bx~J2&p>YON-W%aT2hCD%^MQuyd!xN46if@V@0wPR5{x_Y3}neJcI(SZPX0^UNooK(Zj@UY~%0$Vn8eZSM}$$6Q3Q zc8a@BqUcsluW*oo9exYj-x9EkUGn0Xt5lrk-z2LyiZkS{u1HS&-ks7-x##bfVYPNif=>KAju`{h;)P_v>gpG?AjGL;6z-cZjG zEtNUukqY9YU3iqc8Tueas05(mgyM8-l4~Nr2jU(MJQZ7n+Rm>Ws^p2?$4V6)_`+Zd znTxGizxSd0Ih-Ay@?B@{o7hNNp~bmyTb$z)Ik!xBP9neMsZ3w%L-AD#v3*nU+BA$b!V&h1?6?`(DfYg?moJd8dNMibW&#TO1#w;~XoKjn0 zP8M6M4sMW(9Cp>RJEP)q`77-m8sV5Vy4~a`h0DL2^3jfs!fHAhw?6mx`9;UA)|{M$ zr;I|LJ{Mi@7OjS5<#Z%EHf` zVA0(S^czph2cW$rQrgsyvc_Khs1Ev>jOC>0Bkv&}R?Rb7ltI5!2!2h}iY-cRAF^a)thPzP4U$iY~caMH^COQ=H&o}Xlobwvmk|eLr;Fdr;?qoJ)?%svj zn@&z?-#lMg!Dvw8qLo6Xb6RN|!21pyNP*DLQ2ly*>vrrqrIL2~dRnLMV#puB*1_0L$KRN@~)FRHqV&t-n z6id*akFOYWmR#&EwAtu#jdP(L@TPHo;Gw~hcG*B@JP&Q~@Ab1{#0^S6Sw_821Zr|q z$5*n%i+|R)OfqdFB_<>^Rjimsq4>XP_ReFCiu449?oz0Xa@9HRY%jm$wwqU46cX%l z1oAYsrQ_ZtBdBf%&Fk9yK0f;<;jKIjC~HvIS|bVwD1ljyz>FBW3T3b11$do@@x@Dw zJ95VSJ5FHKu24EU-$h9R>Bn&|Gq0U!56U-ellA?HxhC%ZEWJPH!NOO8FVsL&U4ha4 z8uTHJgbbm>BH#TEu_yHQhIHbXuu8(;)H`{tKbxZfYaW2h*QF6K{BoyHm1~P`R3v8> zGr*)4C#&w8YbBcF&cihe1n!&oX)<_`tNE4dsm}%4y7sfhTR~EHj87!@P`1>M_ZidU z&GiH?QYIFv3-I}!EKJ$E=WxG5O4Dd1k=m2Y=Ou*Hz?jVjhH=Lnc zt{j!rV#24j>yH3i%1|%S*qROH!`vWI^}OI@wMTGB0*H`&;$V_7zjv(AG?9LaxjWaV z5^+gr-{G6b5rV6OgXYyF@++2mKOgwqpl**-&HPM$wDfL1z{Va%uKQ7vM{)jePxI7_ zct-X55p%foQ_sZg7$jGHMvnaJ_l-NUPSYt>04ebuOaS6ILb5hudt@tP zw=`wmEXi>v=>RNa<|9qxQ1ti?(Q(^(J3{aL7n@WLuf1SF8ZPVGruS~jOP%EQ9`P4 z<1lP>vcUb*PHc=Nnz!@{=(-?z6{ls*1vds`4aNn+KzYR+vJw@~8^7G9_z}bjRQkds z7Gs;$)w^5pqCi)}kvPBHNAblU2-ezUj?^zhD>__~*PGLxjZ*Z{VsEQNt 
zBZSvGs5bSs4418lQD^#_Qt!?iDe8l&RoFVO)-~1aPfL@1Jrl9Jb3-hdpY-(5SB8m7 zvU4#P9v}-}u%g5&ZQUcZfetqv>Wg_Ep-Y(v`%)qnwtAL|aZ4$L8 z=6jisqEx=Ib?VXk!3kxD9l6FSY>^ArDRpMCv)T`?)7Ls9nRlh^G^{qK8q($1XIzQq zRes6gT6~w>$}-s~H8kGD>tfrvcYGC&2sZ786}zkXB-z}Lon zIVw z_Icj0&OE^)Nsa160halQdoE)H)lGU}Q-Ktfdo&2sVto@Ew`5f)!R^`bG~#81Q?@0B zX6x$}GF@aY!HTp73L`1HLH$lIcLrWw z1J<|$Kjqn+TZnjShXf|GQ4lx-sI*fC)#nhh=?oB!%i~STyCapN(91fTAsLkQ`pHLC zgV4)p4UmmL@syhX0!KpUJ{}%&Hg;&?x&^(v`T-HEQj4Am1u=2#ZQ9-pY*|in?q*Id zIy$ZAb0``uudAK87bF!M95l2&Pgo${6rGmFPy*Jao|$;AMLFa*Hd%z5Y_I~_m~SWe z;@MMoKq{Mhdr7uRNG^`{l#}=<2NyvwHM}9JIRbx5a8Qn+pzOUs*!q6|fGs{{K$s%k zVC@^@xLqT^R?4q#YIroIF{*epP&=RH*ksTv{|i2skEPB8=YsVQqNEF{H%D|@uZE_i zP@(qUIcJ0!T4sue50%{h)<2IR!sbGDRmARMXn#CbK;`Xa?2|i5i_(mP^Q_&p-n^z8 zY#q)s)$;S=G1Dmmf8;Qt>i1W@djR@1o`gIq-NN4d&_@N3T+wAnwide%rFaL)@8Zz% zVNCf50gC#upCTVQohUI_NiSX!+J6Jp$dRHVV@KWdF-t{A_lxW@3xkJRu>g ze4FiG8OX^6d9_Qf*qSb6^jB8%6(RhEeUu#Pl;M!TL`HcQ9bS{dOmvo3jt>urVF&A& z=}w(dr{x4Bn#(qvP4u;YPTHH=p6j2ccc4@E)>gPzgOo*g(qISckk`n-?0M#G@`iYM3u2p1fr52i{RVM=U*F8d}<#IPL3FPBNjHE-NGK zPtW*FrgL#7m95`9CVFY0KG?XIu3D>Di{NuCp4HaZ7ZTKr&y!eqRvXynPV5g#C12%{ z#mt>p!-kvL%!E>s@-q6}2{Tdt;`SD`^Y;zQ$cLxJ_bqOeT_8PDucB&gzJlI3w(1^R zq$5nV2^t0D@dejKbNu4FQywx((*s?5F4uEE4IHm{)QF!}1g0y%*Jz-px9&%QddB7P zyx-WFUFL+#>iGbm#dCehZ~&*_A3mX1ZanLRu^39fa2tohIY(%fXGFwJTn4SK>wU!& z-hi^%;pwqi*ZnGj3&-ttksgAJ)DGg15O6B(`y(|egl-S*&IY-f?5mywQ5nKBf_1Pd zYdwwL4O6QmVsbK3NY#eh32{K<*5$xE@59yw2Y4{Dw3s!tM|F(E>yKeyPMxD*W)Qmiav8ogWUa?uM{|F-ryn+ASl9Iax^J&XPEl1lQ=KitVFp0 z{B}P;LnT9Ka{$1F<@AumUt4Mla5|-5+|=n7^<6d`3fI*2)i{%qXhBY=MNAdAyX(tZ zZh%%ho4V&P)thsZYy>9}$%hXT#JRjhB|0+`9YcOe5ZLBVaVEL$K-)P{NL0X=j7Myx z8>086$o*9Rf^u`GN=q`1+WVF`kbDXo{)%2jwP;NhNmk0==;9%dbqHft9=umqSSimo zXMPu0ak*w1QB8&rBO;lrPq{?(tC%TGin@qp_J=i%QSfh#o42#*-|5^{M;uaMQR5g8 zi|QKnFg0zUkewhb0+ra+T%`2aC#w0nr}?7v#^f3a=Sjm0jq+ka`q}W;1s+kaT!g== zmoQs4+%&&V1k@oth%ntDm|`(PvBNY&ID$E{9KcVyOe&<5v?o+OmR~#Dt|xS(VAJ23iwD*BG*u1W9Fmsy$Pi zA8jNW76 
zLC?YK1^kpYc@abz9CbgqnKqCcC0K5{?ayJ4FUJfV?3h#4jJ+M-R3z+xj4L1!*J;n<8@#N4KL9=`LRCIRvn%t}yT+TTz-(qoL2KrkBK$l2NE70{2 zQ5Ji*?Kyt|4#Q~#Zhk{i*!?C5a|FD+P~=62zA)lp7?l9*$P+!aMH>VXuEB=%|UH4*487yN9wA<~pw#DYRKPr%?Sx_TGSKzVWeWS|A&?$g*U ztHiaf!1GNtrd70cu@p6cPLD}xYsca(wO#o$Tm#SQ@`dy%kPfws|%&4=0oM^GgTWm&vm&c3@*xAi+*AcChr|-qezs zLP>+Y)PpvlCe#8=r4DZV+Du*0yhjSxYd*MqZs9qu^^7Hqx zQ?Rot*%35zobmc15@jd(u{Cs2glKxroOoWVRgZ zN(x%*@-*Kj4>3^PwJMxa3UIT?Sr<(P+k*Izm-W#i6pFy!cV=K~tit06r+ z8J~UgKaY-abie(TlaQV(do`@cP~i0y+PUcr!Y-D1)-aR&5YP=HP`^5RN?dG{Yp&Gx zV6o;gXy+35KNP3BqlD(6tO!lgF^s&Nk(owJszHgUeSDxA0C3yark;gUo&D*a`x&#b3_he>&*%Wqhf6`igp;Nj2^gx>gExY$q}kEA zd1O|x9+Urj_@zVB#OPF|H+O?Neze5S3vz0$S(IyyRt*)6r6G29bWgHxpF*cBMVw##0^b(zE26=*Y{ zdTT_ih^bHN-c#h9VJ4ir#ylQ?UT3S-HXf-aMQ53O%3WIOBmt%O@N&LYv(n_M9cZ;2 z6NRqQpj+5Cf}zo%ewJfSqN~Sbe|fKHcZ#4`3IK*>eUR-K2L-#kSK0#y^7DHXUw`uh zK%L&sw07BdW#`IrYN^{koX)+xySo%9nBKf}^?ad45>RadS6zZ8eC>dzP&m=*8926c z;F{MCPP#5#hovZ!ZJx9N8OclTAqK!cvui3B)xY zS(LLTwc`&#?$_b#q+mURVm>vx}3^$0NRvm4L8JoN{y zuD38QJYD~hO8J9no8O7F-xU{l1 zja89Pn8`;WZ!A%^dStGAT+XM`%rk9?(qfRg>Rm^L3L2AAZO`6#Z5!7Cg3JZjsPzj!MtO8(=5xBJGQ6;e_GrmT) zX_?WFSE&N83EU<}mCY!~QOl}l`Q0;E^Y@h=qF*(+j}ezK#pzuqhy|)qYybVi8>H*l zOk8dh$3;jf^1H}|$N<64qErEf+XSRe*B82I&57EJJ5pmFVo&Eyu^X1L_nu*!%XakW zUEGT|aAgWiSKGZr+uIkqO@U|J=B*T4EQ2uu4b&WiRyZn3PNHg9nvU8oI;|FC3 z9{vuFIbRz$aDVm5TmltGQCdn8@B5NGmo>MorYfDsnwYGO!rvA=Pw+0SPb@wG4fV;j z@s%f*0L<}tZn9Lvl1U`j&FYC!){FD2#qWWFTB!wf**AIc$k-yCdYXsTay%-cm^Leu zE94}tW88VUO6Mp}TlcV@at|jfqV*Ju<%&|JjMM|#d^sh<&44c(G`d;rJ5+|*wTM3y zcr7q(Rg)jA_+n@DcsbsMPwCb)T7pBs!ATzu?N`Cjy%R3jJ6>2Ij9Wz-N;-#^Do&L8 zlFr?Oy&G)HF7BrVASI(Ni()+xe%;s6SgxdNQiybW>Nb7NuWH77#vrV83{3Wf)oOc? 
zw4f7BiMv8%N8jl_ZyIbascm~JVytlDSq?}2)|3l#@POd8=ID2&UXbswNFJWViX4f( zd_Qjd2;uu?emWKg)cwm1+JTxZmx>ahic(LeKv{8tn3g!aV5#(sx2h-?yhFV;}tcv-Li!7bZ=h;v8VI8;{qg zib$QdujOyQqr3%vA;&KT>jcWwVlTF2^|SeES+bPz19>b#15INs4^--2pB$f5EhU_o zg{i=t#-~nJJ(fo*xyVS5kCF68YkKv>DGb2kW^nrJ4A;GX)fwi%|2Muw^nY_3QgpNZ z@Magbv33+Raxkzrvvstw|8N-kC`;>EeQ1;vrKQ!?C`64cosAsL4D_UpoQyv7mU<=* zcntLasL9W7gh%_~LGNH- zWbKH@#`Mwjv%}8<6+PXDZ@qw?t*DWiiK!zV3mqdQje?_*mGVcuPd9u$Lo;g=JbG40 z8r6SRVWMOCM-@>#)=#U>CMsrzAHMpGj32m^p6fqK^z`%}?LX)8;i7LukN3x4O8(O~ z|1Ym9x_`S8AEur39sewhqP>&R=Mq8E@as7keVY0om*l@KAs*f5BFjV42w8u`1AGqu zV^{|AHdcDpe<2?uGqZPa6fo7Z|Fe>kdjBl^8TFqTIN)i}v#@>`Vx(vO$p3MDBJ@mb zf6^!NS!Q7UD1Y7=81R^wnEyYeKX(3D|I0EnBP$*=D-#|IJv|-^Eh8Q?9UUGM3nL!O zM{l3lXRl0jY{x$Z`_8;T@lYjQf z#PDHmKKkJj_Dt>{*YOi zKKiHoNFOpI>tD0`f8qW)VmjK7?0>pGr}e4(`0=#&K(^pW;_4l%*swMUcGfTC|rJUs!dw^}n^Ss`{?#+xn6ymWWwW=*pRz zC2A2c)pP=qi^_`sP&6mjajz*|lV?&Y$RfW*Fmot@=JHJxjryU?&vgr@#C4|`hnezW zZz+YJP_gfLiRC;!m1W(N;n{h?dW}3jTbxMV-MXM!d{1WYzOrfABK#2H8Ial2%1N*9 zt|2r(D`;1yQmx%H98R}JHKoDxcM!HVi0&iwsiVD}(P{K<%bWjKn)Q^r<=un}Ol3%y zlgC3c&s}%D0@K@jZ@}d`_w8+_<^9Y0xpbixo|z39bt@*9;ATmkxMUPDf-Xz9OxZYh z-sDXJZQ(c9tJS3bhU^;OLAR-x{VIEbjw+w`{WI5}Gq183{>MKFQw(~Cy)%Au!A@%1 z^Dq*o8l0qS=6+{g?)>7%Z}%(gaD4hpUr9pRQ~{TCx@juw7c1X z7`{k-_Ld=(@RAkH|I7#*p1YQ?Gfnm#hrJ2Gtq;y%=C?>c}48z{aM-TFgPhoRj zCExae6|%#bGHCguAyTxj1ExyjsvIje-5^`NY~xK8p~Sme@O#w`HdA~Zws;Mk{W@AL z*Spp9H;X&~b7dT&4SE@d{CKEC@op{KEA?pm!NZpF%%l(RC?vYOhzqpW6~&Dvm>3ft zd-(7oh>2rn@ppFia1P$|d3fm+e4^~~;EXBUP2N?tRWMB~+qhmph)g=*;3iptw-qrM zTBaOk@L7NsmDi*K`8m+d^tP7nwziq>3Ej}!FZ+S(u_W(%lHOZ&TabD4P!XtK7sGUS z!6>XTvk4Hp!8Q>s%d9y3?z_*|yvJt;!XC|2gjwNA01aUVckJd7ws?!dLhbOBfxlpN zatxwR5BPCIg|dy9=o05P%6jt3r2KHS%P-;Qs%~43CBf)qO2lAi%D{mB3AS!HMcovD zeHC!f$L-~KIbCp>(J^ywaqZ;HCmYUw)R2EN#vwIs2j8u)Mva1Z zuil>YB{qCbv%6|l9pYW8TkbXfCYPtOOw{;SEYpozZXUT>uOHzEK{5v+6(Jdf zWD28-aP`2glG4&g3JMAn#!I6{{nRSw*DTey#C*@|(|D~tJf7TieqoaO)Fdr7S~}t+ z2?i^BSRmIi+JuCWrqkeFK|7S?i=PB`bQbhlGzWj-c zWcsh8?^k|7CEPAJ$v<7Ce8b9>fx1`exGS 
zoT#O8W0Q-M;d)%f;UtUWoSyNVECq)la(p>ZLN$R8&DK8aCr~-VBR`X|F}9k(p0@`5 zcqT5e=ny~}Y%L|5RkcU0US~RjhXhK(HR4Hov{%%PMdUhb-b81ryslloGYxzD5qfUP zyq5{t`PDDd1)aWq6nIlY`}+MP$?8TyD_S!i{Dce&H+Hh%wKoau@nF4H#c<^za%vpp z1b(>qMp{@<8wfGYm1BRGzYL1CoBJ) zhT;ch|6HvLjxk-MMz|F}hyRF|!{g%P|KRA${-d_flL zT}fR601!=3I50uPs=@ZUwnIpkezETf?e&FU)m&3~l&0r@(_rShfVNzdQOa~EZN@!x zanEP+epzhD2ql>K^&`bxEW0Fsq^!TN9$({~k?bsja{!W%{DPMjYsa&?QB@-vdtj42 zABzPC+xj4y+9&V4qi95@lKWK<=Jf*j^TXnYx|yU<@rJI_XW&3 zxHpL3qH#m+Lasg2>Qhu&@5$WS!0Q7#mC-pZgg1SJY<>oPxf}$r`Xf%O5Lf%DoPH}1 zK!Vs&ov1~AGk_0QEOsk*aS?dW_cy?~ECo>eP zH3xnH&ECtV^@UAK!n=hmaMJj)paAURHU&Hd2r|2E$b!VnAFzK%eO?#Drm3EngC3rc zGkNrLKH{57-YozN|IHe#I8wJMI8nQ6%}Td&c|Qk~l%9Car?n-88((TsVkFG0q+;A; zuCC^-HnlIqqazx|11)#2q1JFU@%@)pe3@c>DLGlil_rr=R32&SxtI_C^*wKNfM9GI z%T@+&fS_TadTQJ^2691&Eu+K}v7cX+TH*zh$6NNboCBX{P}a{el-2~4IZ|bT z)Gm|E7a}3^U}w)7c(c<5rIB4AYp~6FuR%bz>d7^7hl9hZZRVJh>p@R~&q^^4lV%-< zaCyT3%w0u)CQ16VSDy+LKiK15W+8~lZ`w;W{tT3cS+j*a{EcbP(PM{aMRBdAB^sPi z#&3d&+AK`Klmi^Ex%Jx|&_jDILjKiAe0&orG{Iok&_T?$m#lrmF?sM*K<(5%)vO_< zQ@ho63{M-jTBd1_ju>G|G=f_Iz;- zl8WLLxqtw_+Jzyr_by zfCJ_BvxnH2&;~}*`14S0B7J}oT8QtryCz#Wxr50k5^{mt4KfYA7C0uB3=< zk}>yy!FtsV7;O1^7B>uu=#5R~77Vsvg|9d(#ro-GRo`6jk3y^^n!cTL;NUkhgRc~A zfaax-E%PfN*D8T#{5nUMWR)6<8f_4E{VC3Z+U#-3bz<(m(QKD<3rx7E6lo=^{7S61 zJIE7gX3947q?iWP(?C{HpFJwub+6;nz71J@_Ix8?lhe&cEX(~ZoHM+YT#CQphc$W? 
zUC_Qbw$X}T*-ug2lzIoa)si`?teASlGGM`NgX)?0Tr35VIqg*wB^P>0^ftGmFGen+ zJetA*+1!3>#n~0Ve#Tc6-%~(xMGo&Vkofjl0Ee7=22AO-@*&PCKz;!s(-pU8Gk5CXW1s?=kh5XN_jzsh zj4`oPH2z`<{J6$+sb#= zE=-Qj^-k~FVcKVL*GnRcu1Seq22p$xO_;x0tIWAMk4wcfWQ(G?QVoNz%&$)>Yv^kX z7)H4*t5pdc_X`nj_4QhGU%%MEt;w|yLk0l(9OJC2Kb!hb0Ukq5%E8LIMdPO?;|$J% zjU@SX6{=k!ew7U3@I^||WG!+~bP}N(TgXJ^d)kaH=6kehK z2)a7NnUrRag6KxehtP+qy4E(Ma=S3^q(0>?q&tANvx1H4r95DS+N_ni0LVI-#>DFNxFUwsO2b&?3 ztV~9ElFg|Y(DVLXY&ExvPi@@Y8N`{8u-r{U>qjyj&T1DBxnfqGdGG`wO>0aKLlk;y z^73xYk0U=tC<3Oa3mcv616kQfuhSmvY}la)z@HHyKKp(Vgd?!eFWX zH1jwa&aXZ~+-qcr)~1ly(y+x081|SZ9o#d>%JCXtFV4Vr81O@=>^TOkF| zRN)Ggl1(mN3gsXoQAwV4Ggn<1ejWDaA&x=qM+Wfpc+y$pCN8#%`CS*ndec{PuVVux zv!yL#en(nr=%JqQ>6QK#6vP9U?u9IQju7F4*0p3%BoEWm(FS2{)4iOW{kU9)r5*?z z;=1p$!X0o6CKve1fQi{zi5sO+Rb`{wz9M+xX@ULZ_defTy-v(w*}A3_+FDpkf$&lb z8+_-9jx6QpQO)dgy5tCO_8Uc6L181}5a%u|Q)S|=oVDM3-NX0(2+J3@$2t1?_4ysp z*?>9V|0IF*e~;Y!|D7-W#W?AiSn2*EZ1jHxl>Y6XLx1%DkuPPcEn6>(e)1)xNG(Ai zL-j2of`Bf7Fou?trAzDDDLhy)9CgJA!_kOQlG6to7jL}Ix1p>f60)IMLAN-7`5^Nw zZl@zfrR!+!No=W$2`J8yyCIjN@3^vLhlsn5P8Een}A9 z)^r0;cfF7=02Vx5J-(zMoGt+UG(|gOXfb= z5lmi%n*`EGn;ki-;YpMtfylx0q3zRKEKf#&o*gTi=X}}H7$?Am<5^3}v@Yxib)EKD zCg4vBAg7)-C=$tQ27<^m}}~UJ?@9u6$)q=^XiH13>|82}p-xJ+*lAK$Umk z6L7>@k^DCZz(P!nuP_3?$vl{|Gl3d-?J|E7`%vBvKFJxoNs7OjD-E@>Nu<7x-3CV-0y z$OA2cG081(In;9waHKlnqlv`D5>E4i<0V;>6!ifQ-W%VP&uw~C7&Fd7LW5Bv zBQv8S%hD!?av0}vNY06nLqmj|iuGk@mm(HL4wV#g82aAP?zXtTKfd>`dGG7@Jiqt% z%~G6;=~sU ze3eON1@YsKuh?ftd%U~4Kd6+t-p*aQAJtU;N%u*fqo)V{V{p2|0mZwwqUH6%tY2m= z%{8t6GJIG7!lcV!1XK}x+Cr_fI#K0EhWtXCTbOftWYuu7Yn7!igKA5Gh&u+`CXCuyvd0Y@ zH{OO8XkN3b$g~q-r4`z#PJQ~%!{RCDGL^(Xa|iJ20%X%={^gK-!8P6I?PnCrTK3*7 z8sLc|9kzRdH*TG3P#TRehs+E6ogT2i2LSbd1u}c&? 
z?%o}ebE;7kd7wZ{O)OsfOE=g{mjOyWKC8{g zWFEnW*$FG$Q2qQoRTJSv$E`a*oiUVaafm8Etu$Ect$8if+dyNz6ExW(6^R~xcv|deoqdLGQCBEO8Q>+Ke%BZ7q2(RC$Kxus42-&#>wT$ zm3DzPbR_Nq#Ee&>-h|KYu8rx7nXBc?N0UTq`{fPw4n!6>Iyhc8Y1d(k{ZjDU=X^Fh z=W0alecm{tk#`W3pGd02Ein^0R=W&KI#z=*Sdoe5<_WFu@8HM{mSL4jxkYYN`&@&o zEc^U?EUHi7a@F-qOM7iZYOV6?!M8+ojk1P@+$xC`rE*{sX|a<(2%hJ|BhuUq?Dy=I zd-6y4OSo*zJ_~?zg(#jQMd?JOkvJQGXrIL$jSE^ zPmR+>2gP^hH-qcyk&MUL^kboxlfna`h8;g=69w%*O2Q#GWp&p(kOxdL!U1ivPK6mu zoM*9WxL?Clj9SLe<@=VDW!UpJkzStNS6N3$szZHdDx%)H!?l?7@AU4lTtg)As#P7d zh$*m(3$vld;fh41+Jj6`RIOG@opOBH)o1r=M=Dat-3FZ9$scH?(?Ol0bGQD$CX3T@ zXQg;q5Vmn9DEYnU$8(1+^%K}HBaSzTlbUO$mYxgOMCD;yHRDD1!-?u^r?nfBDWMj6 zVNaHZ{6Q-dpk1Zm?d@?>5)W{oeUOG{W?g0esh5`RKI@1D(V?Q%BFSOe`4{TNwN=ce zZv>arSbB|Z_%`NQG}e~e)XR8P87=%EeAX|}{&(wrjEFV&iZFwV%Rm1xAzgFiQJenC z8(!(Y&Lb~ii>;Z+63$56PeHn(`ypqlweXj%N5W^of;d&dggfDDnzj^{E99VuRQ$XB z*Ai+a-fO2S`6wKwgJ5dzJ|Rb5E!-m3XUj?mVwn(YXR^b!6GHg?Z z`wL}ziUbEuhU$k2<~Qa}(jHTHM;fcMA0oi8BR#!9odQX)}rC)~&C<<2@w0D)Lr*p}cQ#Ja&Q*UDR1R$kXm4te~8O z$?X;~N5*Hm_f!O74>QMlN(k@g%gl5ZQZe!4K|Y21hUaKvH`YR9mn%eQ1%r>Q!qa2* zqu-6b3=|o8_Pw3U;b;OpUFj4iVKn^DT^?f{`@Zl`Tr~MoJtZT@Q0|qUsXoT{5Qm7G z!zUd5rE)lXqho0eaaTum!;xoyNOS?AC2Crk-q6){&YF35da~F4hB3e4@KhgqR_5|s z!gQ7rtWqAl#;UmM+CE5Sc$`rVR5q>@4aB8>DsJYaxHF(vG6T{C>idh>=*|=F{bT1P z9{q9N6S25h>xS;-tOj-T*2EJ}b^7!tE6Ko4)e!sjeRT2tT{+J0dmuG_4?hlbtQS8X zT9wnwC=j^F-?*ziFV@3!|0>S|bhJ)~Yo=WBKK@TLah&6e;Ond&x$2CeNsfbak(q)@ zf^OQC@8j^0uym)#6#_WM2F75__K&M^W0tkAYeOF1lnTS`v5p;F=;PHI+?70~C{<5( z=~I1s`<L}>Z_G61Gb6-_Z6s=$*lR& zV+Km2GtoklinbVZ$$j|^g5|<#zsM{8$JJIHDdQI2F)!0kV$5dabLO8tF~Nj`EVWO* zUcWG%o$~Hg?dJ=j%Q7u7`C>aaxPNaszme%yq%(kuP9%GJlQ!)FT$UN7g;IlKpco`f z4Wk8cgf?U~i1wSLC3KUG-DE!L+(-VmTnkLTRcbIM0X_-J8E%2V zJb%>(m>>e^9yGfEGTDYgp+UJH4O^J{f0qq>MR?LZfO80yya|**GC;j51ItbE#Bz@- zRKjY*HQ-1M7z_i0qtF_dlgj^koJ#>@FECeA1_RRb*9!qfqfiJG)JyOy2G>Aym1ph+ z_4@}yp@2%aVB8yE={B*y$9}!riorDy2w)L6 4 for doc in documents: - assert doc.meta["name"] == str(pdf_path) + assert doc.meta["file_path"] == str(pdf_path) assert "page_number" in 
doc.meta # elements have a category attribute that is saved in the document meta assert "category" in doc.meta + + @pytest.mark.integration + def test_run_one_doc_per_file_with_meta(self, samples_path): + pdf_path = samples_path / "sample_pdf.pdf" + meta = {"custom_meta": "foobar"} + local_converter = UnstructuredFileConverter( + api_url="http://localhost:8000/general/v0/general", document_creation_mode="one-doc-per-file" + ) + + documents = local_converter.run(paths=[pdf_path], meta=meta)["documents"] + + assert len(documents) == 1 + assert documents[0].meta["file_path"] == str(pdf_path) + assert "custom_meta" in documents[0].meta + assert documents[0].meta["custom_meta"] == "foobar" + assert documents[0].meta == {"file_path": str(pdf_path), "custom_meta": "foobar"} + + @pytest.mark.integration + def test_run_one_doc_per_page_with_meta(self, samples_path): + pdf_path = samples_path / "sample_pdf.pdf" + meta = {"custom_meta": "foobar"} + local_converter = UnstructuredFileConverter( + api_url="http://localhost:8000/general/v0/general", document_creation_mode="one-doc-per-page" + ) + + documents = local_converter.run(paths=[pdf_path], meta=meta)["documents"] + + assert len(documents) == 4 + for i, doc in enumerate(documents, start=1): + assert doc.meta["file_path"] == str(pdf_path) + assert doc.meta["page_number"] == i + assert "custom_meta" in doc.meta + assert doc.meta["custom_meta"] == "foobar" + + @pytest.mark.integration + def test_run_one_doc_per_element_with_meta(self, samples_path): + pdf_path = samples_path / "sample_pdf.pdf" + meta = {"custom_meta": "foobar"} + local_converter = UnstructuredFileConverter( + api_url="http://localhost:8000/general/v0/general", document_creation_mode="one-doc-per-element" + ) + + documents = local_converter.run(paths=[pdf_path], meta=meta)["documents"] + + assert len(documents) > 4 + for doc in documents: + assert doc.meta["file_path"] == str(pdf_path) + assert "page_number" in doc.meta + + # elements have a category attribute 
that is saved in the document meta + assert "category" in doc.meta + assert "custom_meta" in doc.meta + assert doc.meta["custom_meta"] == "foobar" + + @pytest.mark.integration + def test_run_one_doc_per_element_with_meta_list_two_files(self, samples_path): + pdf_path = [samples_path / "sample_pdf.pdf", samples_path / "sample_pdf2.pdf"] + meta = [{"custom_meta": "foobar", "common_meta": "common"}, {"other_meta": "barfoo", "common_meta": "common"}] + local_converter = UnstructuredFileConverter( + api_url="http://localhost:8000/general/v0/general", document_creation_mode="one-doc-per-element" + ) + + documents = local_converter.run(paths=pdf_path, meta=meta)["documents"] + + assert len(documents) > 4 + for doc in documents: + assert "file_path" in doc.meta + assert "page_number" in doc.meta + # elements have a category attribute that is saved in the document meta + assert "category" in doc.meta + assert "common_meta" in doc.meta + assert doc.meta["common_meta"] == "common" + + @pytest.mark.integration + def test_run_one_doc_per_element_with_meta_list_folder(self, samples_path): + pdf_path = [samples_path] + meta = [{"custom_meta": "foobar", "common_meta": "common"}, {"other_meta": "barfoo", "common_meta": "common"}] + local_converter = UnstructuredFileConverter( + api_url="http://localhost:8000/general/v0/general", document_creation_mode="one-doc-per-element" + ) + + documents = local_converter.run(paths=pdf_path, meta=meta)["documents"] + + assert len(documents) > 4 + for doc in documents: + assert "file_path" in doc.meta + assert "page_number" in doc.meta + # elements have a category attribute that is saved in the document meta + assert "category" in doc.meta + assert "common_meta" in doc.meta + assert doc.meta["common_meta"] == "common" From 077a6aa93723b030f8793e9f486024f8bdd8ad4e Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Tue, 23 Jan 2024 16:08:30 +0100 Subject: [PATCH 09/47] Pgvector Document Store - minimal implementation (#239) * very first draft * 
setup integration folder and workflow * update readme * making progress! * mypy overrides * making progress on index * drop sqlalchemy in favor of psycopggit add tests/test_document_store.py ! * good improvements! * docstrings * improve definition * small improvements * more test cases * standardize * inner_product * explicit create statement * address feedback * change embedding_similarity_function to vector_function * explicit insert and update statements * remove useless condition * unit tests for conversion functions --- .github/workflows/pgvector.yml | 58 +++ README.md | 3 +- integrations/pgvector/LICENSE.txt | 73 ++++ integrations/pgvector/README.md | 31 ++ integrations/pgvector/pyproject.toml | 178 ++++++++ .../document_stores/pgvector/__init__.py | 6 + .../pgvector/document_store.py | 388 ++++++++++++++++++ integrations/pgvector/tests/__init__.py | 3 + .../pgvector/tests/test_document_store.py | 243 +++++++++++ 9 files changed, 982 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/pgvector.yml create mode 100644 integrations/pgvector/LICENSE.txt create mode 100644 integrations/pgvector/README.md create mode 100644 integrations/pgvector/pyproject.toml create mode 100644 integrations/pgvector/src/haystack_integrations/document_stores/pgvector/__init__.py create mode 100644 integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py create mode 100644 integrations/pgvector/tests/__init__.py create mode 100644 integrations/pgvector/tests/test_document_store.py diff --git a/.github/workflows/pgvector.yml b/.github/workflows/pgvector.yml new file mode 100644 index 000000000..c985b765a --- /dev/null +++ b/.github/workflows/pgvector.yml @@ -0,0 +1,58 @@ +# This workflow comes from https://github.com/ofek/hatch-mypyc +# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml +name: Test / pgvector + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + paths: + - 
"integrations/pgvector/**" + - ".github/workflows/pgvector.yml" + +concurrency: + group: pgvector-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHONUNBUFFERED: "1" + FORCE_COLOR: "1" + +jobs: + run: + name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: ["3.9","3.10","3.11"] + services: + pgvector: + image: ankane/pgvector:latest + env: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: postgres + ports: + - 5432:5432 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Hatch + run: pip install --upgrade hatch + + - name: Lint + working-directory: integrations/pgvector + if: matrix.python-version == '3.9' + run: hatch run lint:all + + - name: Run tests + working-directory: integrations/pgvector + run: hatch run cov diff --git a/README.md b/README.md index d4d34fd7d..ae884862a 100644 --- a/README.md +++ b/README.md @@ -75,6 +75,7 @@ deepset-haystack | [llama-cpp-haystack](integrations/llama_cpp/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/llama-cpp-haystack) | [![Test / llama-cpp](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml) | | [ollama-haystack](integrations/ollama/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/ollama-haystack) | [![Test / 
ollama](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml) | | [opensearch-haystack](integrations/opensearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/opensearch-haystack.svg)](https://pypi.org/project/opensearch-haystack) | [![Test / opensearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml) | -| [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) | +| [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) +| [pgvector-haystack](integrations/pgvector/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pgvector-haystack.svg?color=orange)](https://pypi.org/project/pgvector-haystack) | [![Test / pgvector](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml) | | [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - 
Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | | [unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | diff --git a/integrations/pgvector/LICENSE.txt b/integrations/pgvector/LICENSE.txt new file mode 100644 index 000000000..137069b82 --- /dev/null +++ b/integrations/pgvector/LICENSE.txt @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/integrations/pgvector/README.md b/integrations/pgvector/README.md new file mode 100644 index 000000000..277c732f4 --- /dev/null +++ b/integrations/pgvector/README.md @@ -0,0 +1,31 @@ +# pgvector-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/pgvector-haystack.svg)](https://pypi.org/project/pgvector-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pgvector-haystack.svg)](https://pypi.org/project/pgvector-haystack) + +--- + +**Table of Contents** + +- [pgvector-haystack](#pgvector-haystack) + - [Installation](#installation) + - [Testing](#testing) + - [License](#license) + +## Installation + +```console +pip install pgvector-haystack +``` + +## Testing + +TODO + +```console +hatch run test +``` + +## License + +`pgvector-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. 
diff --git a/integrations/pgvector/pyproject.toml b/integrations/pgvector/pyproject.toml new file mode 100644 index 000000000..b361af8b1 --- /dev/null +++ b/integrations/pgvector/pyproject.toml @@ -0,0 +1,178 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "pgvector-haystack" +dynamic = ["version"] +description = "An integration of pgvector (vector search extension for Postgres) with Haystack" +readme = "README.md" +requires-python = ">=3.8" +license = "Apache-2.0" +keywords = [] +authors = [ + { name = "deepset GmbH", email = "info@deepset.ai" }, +] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = [ + "haystack-ai", + "pgvector", + "psycopg[binary]" +] + +[project.urls] +Source = "https://github.com/deepset-ai/haystack-core-integrations" +Documentation = "https://github.com/deepset-ai/haystack-core-integrations/blob/main/integrations/pgvector/README.md" +Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" + +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + +[tool.hatch.version] +source = "vcs" +tag-pattern = 'integrations\/pgvector-v(?P.*)' + +[tool.hatch.version.raw-options] +root = "../.." 
+git_describe_command = 'git describe --tags --match="integrations/pgvector-v[0-9]*"' + +[tool.hatch.envs.default] +dependencies = [ + "coverage[toml]>=6.5", + "pytest", + "ipython", +] +[tool.hatch.envs.default.scripts] +test = "pytest {args:tests}" +test-cov = "coverage run -m pytest {args:tests}" +cov-report = [ + "- coverage combine", + "coverage report", +] +cov = [ + "test-cov", + "cov-report", +] + +[[tool.hatch.envs.all.matrix]] +python = ["3.8", "3.9", "3.10", "3.11", "3.12"] + +[tool.hatch.envs.lint] +detached = true +dependencies = [ + "black>=23.1.0", + "mypy>=1.0.0", + "ruff>=0.0.243", +] +[tool.hatch.envs.lint.scripts] +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" +style = [ + "ruff {args:.}", + "black --check --diff {args:.}", +] +fmt = [ + "black {args:.}", + "ruff --fix {args:.}", + "style", +] +all = [ + "style", + "typing", +] + +[tool.black] +target-version = ["py37"] +line-length = 120 +skip-string-normalization = true + +[tool.ruff] +target-version = "py37" +line-length = 120 +select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "FBT", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + "RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +ignore = [ + # Allow non-abstract empty methods in abstract base classes + "B027", + # Allow boolean positional values in function calls, like `dict.get(... 
True)` +    "FBT003", +    # Ignore checks for possible passwords +    "S105", "S106", "S107", +    # Ignore complexity +    "C901", "PLR0911", "PLR0912", "PLR0913", "PLR0915", +] +unfixable = [ +    # Don't touch unused imports +    "F401", +] + +[tool.ruff.isort] +known-first-party = ["src"] + +[tool.ruff.flake8-tidy-imports] +ban-relative-imports = "parents" + +[tool.ruff.per-file-ignores] +# Tests can use magic values, assertions, and relative imports +"tests/**/*" = ["PLR2004", "S101", "TID252"] + +[tool.coverage.run] +source_pkgs = ["src", "tests"] +branch = true +parallel = true + + +[tool.coverage.paths] +pgvector_haystack = ["src/haystack_integrations", "*/pgvector-haystack/src"] +tests = ["tests", "*/pgvector-haystack/tests"] + +[tool.coverage.report] +exclude_lines = [ +    "no cov", +    "if __name__ == .__main__.:", +    "if TYPE_CHECKING:", +] + +[[tool.mypy.overrides]] +module = [ +    "haystack.*", +    "haystack_integrations.*", +    "pgvector.*", +    "psycopg.*", +    "pytest.*" +] +ignore_missing_imports = true diff --git a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/__init__.py b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/__init__.py new file mode 100644 index 000000000..613962549 --- /dev/null +++ b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/__init__.py @@ -0,0 +1,6 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from .document_store import PgvectorDocumentStore + +__all__ = ["PgvectorDocumentStore"] diff --git a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py new file mode 100644 index 000000000..bb1915a6f --- /dev/null +++ b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py @@ -0,0 +1,388 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier:
Apache-2.0 +import logging +from typing import Any, Dict, List, Literal, Optional + +from haystack import default_to_dict +from haystack.dataclasses.document import ByteStream, Document +from haystack.document_stores.errors import DocumentStoreError, DuplicateDocumentError +from haystack.document_stores.types import DuplicatePolicy +from psycopg import Error, IntegrityError, connect +from psycopg.abc import Query +from psycopg.cursor import Cursor +from psycopg.rows import dict_row +from psycopg.sql import SQL, Identifier +from psycopg.sql import Literal as SQLLiteral +from psycopg.types.json import Jsonb + +from pgvector.psycopg import register_vector + +logger = logging.getLogger(__name__) + +CREATE_TABLE_STATEMENT = """ +CREATE TABLE IF NOT EXISTS {table_name} ( +id VARCHAR(128) PRIMARY KEY, +embedding VECTOR({embedding_dimension}), +content TEXT, +dataframe JSONB, +blob_data BYTEA, +blob_meta JSONB, +blob_mime_type VARCHAR(255), +meta JSONB) +""" + +INSERT_STATEMENT = """ +INSERT INTO {table_name} +(id, embedding, content, dataframe, blob_data, blob_meta, blob_mime_type, meta) +VALUES (%(id)s, %(embedding)s, %(content)s, %(dataframe)s, %(blob_data)s, %(blob_meta)s, %(blob_mime_type)s, %(meta)s) +""" + +UPDATE_STATEMENT = """ +ON CONFLICT (id) DO UPDATE SET +embedding = EXCLUDED.embedding, +content = EXCLUDED.content, +dataframe = EXCLUDED.dataframe, +blob_data = EXCLUDED.blob_data, +blob_meta = EXCLUDED.blob_meta, +blob_mime_type = EXCLUDED.blob_mime_type, +meta = EXCLUDED.meta +""" + +VECTOR_FUNCTION_TO_POSTGRESQL_OPS = { + "cosine_distance": "vector_cosine_ops", + "inner_product": "vector_ip_ops", + "l2_distance": "vector_l2_ops", +} + +HNSW_INDEX_CREATION_VALID_KWARGS = ["m", "ef_construction"] + +HNSW_INDEX_NAME = "haystack_hnsw_index" + + +class PgvectorDocumentStore: + def __init__( + self, + *, + connection_string: str, + table_name: str = "haystack_documents", + embedding_dimension: int = 768, + vector_function: Literal["cosine_distance", 
"inner_product", "l2_distance"] = "cosine_distance", + recreate_table: bool = False, + search_strategy: Literal["exact_nearest_neighbor", "hnsw"] = "exact_nearest_neighbor", + hnsw_recreate_index_if_exists: bool = False, + hnsw_index_creation_kwargs: Optional[Dict[str, int]] = None, + hnsw_ef_search: Optional[int] = None, + ): + """ + Creates a new PgvectorDocumentStore instance. + It is meant to be connected to a PostgreSQL database with the pgvector extension installed. + A specific table to store Haystack documents will be created if it doesn't exist yet. + + :param connection_string: The connection string to use to connect to the PostgreSQL database. + e.g. "postgresql://USER:PASSWORD@HOST:PORT/DB_NAME" + :param table_name: The name of the table to use to store Haystack documents. Defaults to "haystack_documents". + :param embedding_dimension: The dimension of the embedding. Defaults to 768. + :param vector_function: The similarity function to use when searching for similar embeddings. + Defaults to "cosine_distance". Set it to one of the following values: + :type vector_function: Literal["cosine_distance", "inner_product", "l2_distance"] + :param recreate_table: Whether to recreate the table if it already exists. Defaults to False. + :param search_strategy: The search strategy to use when searching for similar embeddings. + Defaults to "exact_nearest_neighbor". "hnsw" is an approximate nearest neighbor search strategy, + which trades off some accuracy for speed; it is recommended for large numbers of documents. + :type search_strategy: Literal["exact_nearest_neighbor", "hnsw"] + :param hnsw_recreate_index_if_exists: Whether to recreate the HNSW index if it already exists. + Defaults to False. Only used if search_strategy is set to "hnsw". + :param hnsw_index_creation_kwargs: Additional keyword arguments to pass to the HNSW index creation. + Only used if search_strategy is set to "hnsw". 
You can find the list of valid arguments in the + pgvector documentation: https://github.com/pgvector/pgvector?tab=readme-ov-file#hnsw + :param hnsw_ef_search: The ef_search parameter to use at query time. Only used if search_strategy is set to + "hnsw". You can find more information about this parameter in the pgvector documentation: + https://github.com/pgvector/pgvector?tab=readme-ov-file#hnsw + """ + + self.connection_string = connection_string + self.table_name = table_name + self.embedding_dimension = embedding_dimension + self.vector_function = vector_function + self.recreate_table = recreate_table + self.search_strategy = search_strategy + self.hnsw_recreate_index_if_exists = hnsw_recreate_index_if_exists + self.hnsw_index_creation_kwargs = hnsw_index_creation_kwargs or {} + self.hnsw_ef_search = hnsw_ef_search + + connection = connect(connection_string) + connection.autocommit = True + self._connection = connection + + # we create a generic cursor and another one that returns dictionaries + self._cursor = connection.cursor() + self._dict_cursor = connection.cursor(row_factory=dict_row) + + connection.execute("CREATE EXTENSION IF NOT EXISTS vector") + register_vector(connection) + + if recreate_table: + self.delete_table() + self._create_table_if_not_exists() + + if search_strategy == "hnsw": + self._handle_hnsw() + + def to_dict(self) -> Dict[str, Any]: + return default_to_dict( + self, + connection_string=self.connection_string, + table_name=self.table_name, + embedding_dimension=self.embedding_dimension, + vector_function=self.vector_function, + recreate_table=self.recreate_table, + search_strategy=self.search_strategy, + hnsw_recreate_index_if_exists=self.hnsw_recreate_index_if_exists, + hnsw_index_creation_kwargs=self.hnsw_index_creation_kwargs, + hnsw_ef_search=self.hnsw_ef_search, + ) + + def _execute_sql( + self, sql_query: Query, params: Optional[tuple] = None, error_msg: str = "", cursor: Optional[Cursor] = None + ): + """ + Internal method to 
execute SQL statements and handle exceptions. + + :param sql_query: The SQL query to execute. + :param params: The parameters to pass to the SQL query. + :param error_msg: The error message to use if an exception is raised. + :param cursor: The cursor to use to execute the SQL query. Defaults to self._cursor. + """ + + params = params or () + cursor = cursor or self._cursor + + try: + result = cursor.execute(sql_query, params) + except Error as e: + self._connection.rollback() + raise DocumentStoreError(error_msg) from e + return result + + def _create_table_if_not_exists(self): + """ + Creates the table to store Haystack documents if it doesn't exist yet. + """ + + create_sql = SQL(CREATE_TABLE_STATEMENT).format( + table_name=Identifier(self.table_name), embedding_dimension=SQLLiteral(self.embedding_dimension) + ) + + self._execute_sql(create_sql, error_msg="Could not create table in PgvectorDocumentStore") + + def delete_table(self): + """ + Deletes the table used to store Haystack documents. + """ + + delete_sql = SQL("DROP TABLE IF EXISTS {table_name}").format(table_name=Identifier(self.table_name)) + + self._execute_sql(delete_sql, error_msg=f"Could not delete table {self.table_name} in PgvectorDocumentStore") + + def _handle_hnsw(self): + """ + Internal method to handle the HNSW index creation. + It also sets the hnsw.ef_search parameter for queries if it is specified. 
+ """ + + if self.hnsw_ef_search: + sql_set_hnsw_ef_search = SQL("SET hnsw.ef_search = {hnsw_ef_search}").format( + hnsw_ef_search=SQLLiteral(self.hnsw_ef_search) + ) + self._execute_sql(sql_set_hnsw_ef_search, error_msg="Could not set hnsw.ef_search") + + index_esists = bool( + self._execute_sql( + "SELECT 1 FROM pg_indexes WHERE tablename = %s AND indexname = %s", + (self.table_name, HNSW_INDEX_NAME), + "Could not check if HNSW index exists", + ).fetchone() + ) + + if index_esists and not self.hnsw_recreate_index_if_exists: + logger.warning( + "HNSW index already exists and won't be recreated. " + "If you want to recreate it, pass 'hnsw_recreate_index_if_exists=True' to the " + "Document Store constructor" + ) + return + + sql_drop_index = SQL("DROP INDEX IF EXISTS {index_name}").format(index_name=Identifier(HNSW_INDEX_NAME)) + self._execute_sql(sql_drop_index, error_msg="Could not drop HNSW index") + + self._create_hnsw_index() + + def _create_hnsw_index(self): + """ + Internal method to create the HNSW index. 
+ """ + + pg_ops = VECTOR_FUNCTION_TO_POSTGRESQL_OPS[self.vector_function] + actual_hnsw_index_creation_kwargs = { + key: value + for key, value in self.hnsw_index_creation_kwargs.items() + if key in HNSW_INDEX_CREATION_VALID_KWARGS + } + + sql_create_index = SQL("CREATE INDEX {index_name} ON {table_name} USING hnsw (embedding {ops}) ").format( + index_name=Identifier(HNSW_INDEX_NAME), table_name=Identifier(self.table_name), ops=SQL(pg_ops) + ) + + if actual_hnsw_index_creation_kwargs: + actual_hnsw_index_creation_kwargs_str = ", ".join( + f"{key} = {value}" for key, value in actual_hnsw_index_creation_kwargs.items() + ) + sql_add_creation_kwargs = SQL("WITH ({creation_kwargs_str})").format( + creation_kwargs_str=SQL(actual_hnsw_index_creation_kwargs_str) + ) + sql_create_index += sql_add_creation_kwargs + + self._execute_sql(sql_create_index, error_msg="Could not create HNSW index") + + def count_documents(self) -> int: + """ + Returns how many documents are present in the document store. + """ + + sql_count = SQL("SELECT COUNT(*) FROM {table_name}").format(table_name=Identifier(self.table_name)) + + count = self._execute_sql(sql_count, error_msg="Could not count documents in PgvectorDocumentStore").fetchone()[ + 0 + ] + return count + + def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Document]: # noqa: ARG002 + # TODO: implement filters + sql_get_docs = SQL("SELECT * FROM {table_name}").format(table_name=Identifier(self.table_name)) + + result = self._execute_sql( + sql_get_docs, error_msg="Could not filter documents from PgvectorDocumentStore", cursor=self._dict_cursor + ) + + # Fetch all the records + records = result.fetchall() + docs = self._from_pg_to_haystack_documents(records) + return docs + + def write_documents(self, documents: List[Document], policy: DuplicatePolicy = DuplicatePolicy.NONE) -> int: + """ + Writes documents into to PgvectorDocumentStore. 
+ + :param documents: A list of Documents to write to the document store. + :param policy: The duplicate policy to use when writing documents. + :raises DuplicateDocumentError: If a document with the same id already exists in the document store + and the policy is set to DuplicatePolicy.FAIL (or not specified). + :return: The number of documents written to the document store. + """ + + if len(documents) > 0: + if not isinstance(documents[0], Document): + msg = "param 'documents' must contain a list of objects of type Document" + raise ValueError(msg) + + if policy == DuplicatePolicy.NONE: + policy = DuplicatePolicy.FAIL + + db_documents = self._from_haystack_to_pg_documents(documents) + + sql_insert = SQL(INSERT_STATEMENT).format(table_name=Identifier(self.table_name)) + + if policy == DuplicatePolicy.OVERWRITE: + sql_insert += SQL(UPDATE_STATEMENT) + elif policy == DuplicatePolicy.SKIP: + sql_insert += SQL("ON CONFLICT DO NOTHING") + + sql_insert += SQL(" RETURNING id") + + try: + self._cursor.executemany(sql_insert, db_documents, returning=True) + except IntegrityError as ie: + self._connection.rollback() + raise DuplicateDocumentError from ie + except Error as e: + self._connection.rollback() + raise DocumentStoreError from e + + # get the number of the inserted documents, inspired by psycopg3 docs + # https://www.psycopg.org/psycopg3/docs/api/cursors.html#psycopg.Cursor.executemany + written_docs = 0 + while True: + if self._cursor.fetchone(): + written_docs += 1 + if not self._cursor.nextset(): + break + + return written_docs + + def _from_haystack_to_pg_documents(self, documents: List[Document]) -> List[Dict[str, Any]]: + """ + Internal method to convert a list of Haystack Documents to a list of dictionaries that can be used to insert + documents into the PgvectorDocumentStore. 
+ """ + + db_documents = [] + for document in documents: + db_document = {k: v for k, v in document.to_dict(flatten=False).items() if k not in ["score", "blob"]} + + blob = document.blob + db_document["blob_data"] = blob.data if blob else None + db_document["blob_meta"] = Jsonb(blob.meta) if blob and blob.meta else None + db_document["blob_mime_type"] = blob.mime_type if blob and blob.mime_type else None + + db_document["dataframe"] = Jsonb(db_document["dataframe"]) if db_document["dataframe"] else None + db_document["meta"] = Jsonb(db_document["meta"]) + + db_documents.append(db_document) + + return db_documents + + def _from_pg_to_haystack_documents(self, documents: List[Dict[str, Any]]) -> List[Document]: + """ + Internal method to convert a list of dictionaries from pgvector to a list of Haystack Documents. + """ + + haystack_documents = [] + for document in documents: + haystack_dict = dict(document) + blob_data = haystack_dict.pop("blob_data") + blob_meta = haystack_dict.pop("blob_meta") + blob_mime_type = haystack_dict.pop("blob_mime_type") + + # postgresql returns the embedding as a string + # so we need to convert it to a list of floats + if "embedding" in document and document["embedding"]: + haystack_dict["embedding"] = [float(el) for el in document["embedding"].strip("[]").split(",")] + + haystack_document = Document.from_dict(haystack_dict) + + if blob_data: + blob = ByteStream(data=blob_data, meta=blob_meta, mime_type=blob_mime_type) + haystack_document.blob = blob + + haystack_documents.append(haystack_document) + + return haystack_documents + + def delete_documents(self, document_ids: List[str]) -> None: + """ + Deletes all documents with a matching document_ids from the document store. 
+ + :param document_ids: the document ids to delete + """ + + if not document_ids: + return + + document_ids_str = ", ".join(f"'{document_id}'" for document_id in document_ids) + + delete_sql = SQL("DELETE FROM {table_name} WHERE id IN ({document_ids_str})").format( + table_name=Identifier(self.table_name), document_ids_str=SQL(document_ids_str) + ) + + self._execute_sql(delete_sql, error_msg="Could not delete documents from PgvectorDocumentStore") diff --git a/integrations/pgvector/tests/__init__.py b/integrations/pgvector/tests/__init__.py new file mode 100644 index 000000000..e873bc332 --- /dev/null +++ b/integrations/pgvector/tests/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/pgvector/tests/test_document_store.py b/integrations/pgvector/tests/test_document_store.py new file mode 100644 index 000000000..9f3521838 --- /dev/null +++ b/integrations/pgvector/tests/test_document_store.py @@ -0,0 +1,243 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 + +from unittest.mock import patch + +import pytest +from haystack.dataclasses.document import ByteStream, Document +from haystack.document_stores.errors import DuplicateDocumentError +from haystack.document_stores.types import DuplicatePolicy +from haystack.testing.document_store import CountDocumentsTest, DeleteDocumentsTest, WriteDocumentsTest +from haystack_integrations.document_stores.pgvector import PgvectorDocumentStore +from pandas import DataFrame + + +class TestDocumentStore(CountDocumentsTest, WriteDocumentsTest, DeleteDocumentsTest): + @pytest.fixture + def document_store(self, request): + connection_string = "postgresql://postgres:postgres@localhost:5432/postgres" + table_name = f"haystack_{request.node.name}" + embedding_dimension = 768 + vector_function = "cosine_distance" + recreate_table = True + search_strategy = "exact_nearest_neighbor" + + store = 
PgvectorDocumentStore( + connection_string=connection_string, + table_name=table_name, + embedding_dimension=embedding_dimension, + vector_function=vector_function, + recreate_table=recreate_table, + search_strategy=search_strategy, + ) + yield store + + store.delete_table() + + def test_write_documents(self, document_store: PgvectorDocumentStore): + docs = [Document(id="1")] + assert document_store.write_documents(docs) == 1 + with pytest.raises(DuplicateDocumentError): + document_store.write_documents(docs, DuplicatePolicy.FAIL) + + def test_write_blob(self, document_store: PgvectorDocumentStore): + bytestream = ByteStream(b"test", meta={"meta_key": "meta_value"}, mime_type="mime_type") + docs = [Document(id="1", blob=bytestream)] + document_store.write_documents(docs) + + # TODO: update when filters are implemented + retrieved_docs = document_store.filter_documents() + assert retrieved_docs == docs + + def test_write_dataframe(self, document_store: PgvectorDocumentStore): + dataframe = DataFrame({"col1": [1, 2], "col2": [3, 4]}) + docs = [Document(id="1", dataframe=dataframe)] + + document_store.write_documents(docs) + + # TODO: update when filters are implemented + retrieved_docs = document_store.filter_documents() + assert retrieved_docs == docs + + def test_init(self): + document_store = PgvectorDocumentStore( + connection_string="postgresql://postgres:postgres@localhost:5432/postgres", + table_name="my_table", + embedding_dimension=512, + vector_function="l2_distance", + recreate_table=True, + search_strategy="hnsw", + hnsw_recreate_index_if_exists=True, + hnsw_index_creation_kwargs={"m": 32, "ef_construction": 128}, + hnsw_ef_search=50, + ) + + assert document_store.connection_string == "postgresql://postgres:postgres@localhost:5432/postgres" + assert document_store.table_name == "my_table" + assert document_store.embedding_dimension == 512 + assert document_store.vector_function == "l2_distance" + assert document_store.recreate_table + assert 
document_store.search_strategy == "hnsw" + assert document_store.hnsw_recreate_index_if_exists + assert document_store.hnsw_index_creation_kwargs == {"m": 32, "ef_construction": 128} + assert document_store.hnsw_ef_search == 50 + + def test_to_dict(self): + document_store = PgvectorDocumentStore( + connection_string="postgresql://postgres:postgres@localhost:5432/postgres", + table_name="my_table", + embedding_dimension=512, + vector_function="l2_distance", + recreate_table=True, + search_strategy="hnsw", + hnsw_recreate_index_if_exists=True, + hnsw_index_creation_kwargs={"m": 32, "ef_construction": 128}, + hnsw_ef_search=50, + ) + + assert document_store.to_dict() == { + "type": "haystack_integrations.document_stores.pgvector.document_store.PgvectorDocumentStore", + "init_parameters": { + "connection_string": "postgresql://postgres:postgres@localhost:5432/postgres", + "table_name": "my_table", + "embedding_dimension": 512, + "vector_function": "l2_distance", + "recreate_table": True, + "search_strategy": "hnsw", + "hnsw_recreate_index_if_exists": True, + "hnsw_index_creation_kwargs": {"m": 32, "ef_construction": 128}, + "hnsw_ef_search": 50, + }, + } + + def test_from_haystack_to_pg_documents(self): + haystack_docs = [ + Document( + id="1", + content="This is a text", + meta={"meta_key": "meta_value"}, + embedding=[0.1, 0.2, 0.3], + score=0.5, + ), + Document( + id="2", + dataframe=DataFrame({"col1": [1, 2], "col2": [3, 4]}), + meta={"meta_key": "meta_value"}, + embedding=[0.4, 0.5, 0.6], + score=0.6, + ), + Document( + id="3", + blob=ByteStream(b"test", meta={"blob_meta_key": "blob_meta_value"}, mime_type="mime_type"), + meta={"meta_key": "meta_value"}, + embedding=[0.7, 0.8, 0.9], + score=0.7, + ), + ] + + with patch( + "haystack_integrations.document_stores.pgvector.document_store.PgvectorDocumentStore.__init__" + ) as mock_init: + mock_init.return_value = None + ds = PgvectorDocumentStore(connection_string="test") + + pg_docs = 
ds._from_haystack_to_pg_documents(haystack_docs) + + assert pg_docs[0]["id"] == "1" + assert pg_docs[0]["content"] == "This is a text" + assert pg_docs[0]["dataframe"] is None + assert pg_docs[0]["blob_data"] is None + assert pg_docs[0]["blob_meta"] is None + assert pg_docs[0]["blob_mime_type"] is None + assert pg_docs[0]["meta"].obj == {"meta_key": "meta_value"} + assert pg_docs[0]["embedding"] == [0.1, 0.2, 0.3] + assert "score" not in pg_docs[0] + + assert pg_docs[1]["id"] == "2" + assert pg_docs[1]["content"] is None + assert pg_docs[1]["dataframe"].obj == DataFrame({"col1": [1, 2], "col2": [3, 4]}).to_json() + assert pg_docs[1]["blob_data"] is None + assert pg_docs[1]["blob_meta"] is None + assert pg_docs[1]["blob_mime_type"] is None + assert pg_docs[1]["meta"].obj == {"meta_key": "meta_value"} + assert pg_docs[1]["embedding"] == [0.4, 0.5, 0.6] + assert "score" not in pg_docs[1] + + assert pg_docs[2]["id"] == "3" + assert pg_docs[2]["content"] is None + assert pg_docs[2]["dataframe"] is None + assert pg_docs[2]["blob_data"] == b"test" + assert pg_docs[2]["blob_meta"].obj == {"blob_meta_key": "blob_meta_value"} + assert pg_docs[2]["blob_mime_type"] == "mime_type" + assert pg_docs[2]["meta"].obj == {"meta_key": "meta_value"} + assert pg_docs[2]["embedding"] == [0.7, 0.8, 0.9] + assert "score" not in pg_docs[2] + + def test_from_pg_to_haystack_documents(self): + pg_docs = [ + { + "id": "1", + "content": "This is a text", + "dataframe": None, + "blob_data": None, + "blob_meta": None, + "blob_mime_type": None, + "meta": {"meta_key": "meta_value"}, + "embedding": "[0.1, 0.2, 0.3]", + }, + { + "id": "2", + "content": None, + "dataframe": DataFrame({"col1": [1, 2], "col2": [3, 4]}).to_json(), + "blob_data": None, + "blob_meta": None, + "blob_mime_type": None, + "meta": {"meta_key": "meta_value"}, + "embedding": "[0.4, 0.5, 0.6]", + }, + { + "id": "3", + "content": None, + "dataframe": None, + "blob_data": b"test", + "blob_meta": {"blob_meta_key": "blob_meta_value"}, 
+ "blob_mime_type": "mime_type", + "meta": {"meta_key": "meta_value"}, + "embedding": "[0.7, 0.8, 0.9]", + }, + ] + + with patch( + "haystack_integrations.document_stores.pgvector.document_store.PgvectorDocumentStore.__init__" + ) as mock_init: + mock_init.return_value = None + ds = PgvectorDocumentStore(connection_string="test") + + haystack_docs = ds._from_pg_to_haystack_documents(pg_docs) + + assert haystack_docs[0].id == "1" + assert haystack_docs[0].content == "This is a text" + assert haystack_docs[0].dataframe is None + assert haystack_docs[0].blob is None + assert haystack_docs[0].meta == {"meta_key": "meta_value"} + assert haystack_docs[0].embedding == [0.1, 0.2, 0.3] + assert haystack_docs[0].score is None + + assert haystack_docs[1].id == "2" + assert haystack_docs[1].content is None + assert haystack_docs[1].dataframe.equals(DataFrame({"col1": [1, 2], "col2": [3, 4]})) + assert haystack_docs[1].blob is None + assert haystack_docs[1].meta == {"meta_key": "meta_value"} + assert haystack_docs[1].embedding == [0.4, 0.5, 0.6] + assert haystack_docs[1].score is None + + assert haystack_docs[2].id == "3" + assert haystack_docs[2].content is None + assert haystack_docs[2].dataframe is None + assert haystack_docs[2].blob.data == b"test" + assert haystack_docs[2].blob.meta == {"blob_meta_key": "blob_meta_value"} + assert haystack_docs[2].blob.mime_type == "mime_type" + assert haystack_docs[2].meta == {"meta_key": "meta_value"} + assert haystack_docs[2].embedding == [0.7, 0.8, 0.9] + assert haystack_docs[2].score is None From bc32837a2c1ee97a8722c4f41f8be902d14391eb Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Wed, 24 Jan 2024 15:07:49 +0100 Subject: [PATCH 10/47] add CoC --- code_of_conduct.txt | 98 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 code_of_conduct.txt diff --git a/code_of_conduct.txt b/code_of_conduct.txt new file mode 100644 index 000000000..c4814cb22 --- /dev/null +++ 
b/code_of_conduct.txt @@ -0,0 +1,98 @@ +CONTRIBUTOR COVENANT CODE OF CONDUCT +==================================== + +Our Pledge +---------- + +We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for +everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, +gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, +race, caste, color, religion, or sexual identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. + +Our Standards +------------- + +Examples of behavior that contributes to a positive environment for our community include: + - Demonstrating empathy and kindness toward other people + - Being respectful of differing opinions, viewpoints, and experiences + - Giving and gracefully accepting constructive feedback + - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience + - Focusing on what is best not just for us as individuals, but for the overall community + +Examples of unacceptable behavior include: + - The use of sexualized language or imagery, and sexual attention or advances of any kind + - Trolling, insulting or derogatory comments, and personal or political attacks + - Public or private harassment + - Publishing others’ private information, such as a physical or email address, without their explicit permission + - Other conduct which could reasonably be considered inappropriate in a professional setting + +Enforcement Responsibilities +---------------------------- + +Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take +appropriate and fair corrective action in response to any behavior that they deem inappropriate, +threatening, offensive, or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, +issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for +moderation decisions when appropriate. + +Scope +----- + +This Code of Conduct applies within all community spaces, and also applies when an individual is officially +representing the community in public spaces. Examples of representing our community include using an official +e-mail address, posting via an official social media account, or acting as an appointed representative +at an online or offline event. + +Enforcement +----------- + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible +for enforcement at engage@deepset.ai. All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the reporter of any incident. + +Enforcement Guidelines +---------------------- + +Community leaders will follow these Community Impact Guidelines in determining the consequences for any action +they deem in violation of this Code of Conduct: + +1. Correction + Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. + + Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation + and an explanation of why the behavior was inappropriate. A public apology may be requested. + +2. Warning + Community Impact: A violation through a single incident or series of actions. + + Consequence: A warning with consequences for continued behavior. No interaction with the people involved, + including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. 
+ This includes avoiding interactions in community spaces as well as external channels like social media. + Violating these terms may lead to a temporary or permanent ban. + +3. Temporary Ban + Community Impact: A serious violation of community standards, including sustained inappropriate behavior. + + Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified + period of time. No public or private interaction with the people involved, including unsolicited interaction with + those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. + +4. Permanent Ban + Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. + + Consequence: A permanent ban from any sort of public interaction within the community. + +Attribution +----------- + +This Code of Conduct is adapted from the Contributor Covenant, version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by Mozilla’s code of conduct enforcement ladder. + +For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. +Translations are available at https://www.contributor-covenant.org/translations. 
From f0d4709780e888b57ca5af10944d19c6a4ff0441 Mon Sep 17 00:00:00 2001 From: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> Date: Wed, 24 Jan 2024 15:49:48 +0100 Subject: [PATCH 11/47] Support more collection settings when creating a new `WeaviateDocumentStore` (#260) * Add docker-compose.yml * Accept more collection settings when initializing WeaviateDocumentStore * Linting --- integrations/weaviate/docker-compose.yml | 22 ++++++++ .../weaviate/document_store.py | 53 +++++++++++++++---- .../weaviate/tests/test_document_store.py | 52 +++++++++++++++--- 3 files changed, 110 insertions(+), 17 deletions(-) create mode 100644 integrations/weaviate/docker-compose.yml diff --git a/integrations/weaviate/docker-compose.yml b/integrations/weaviate/docker-compose.yml new file mode 100644 index 000000000..c61b0ed57 --- /dev/null +++ b/integrations/weaviate/docker-compose.yml @@ -0,0 +1,22 @@ +version: '3.4' +services: + weaviate: + command: + - --host + - 0.0.0.0 + - --port + - '8080' + - --scheme + - http + image: semitechnologies/weaviate:1.23.2 + ports: + - 8080:8080 + - 50051:50051 + restart: on-failure:0 + environment: + QUERY_DEFAULTS_LIMIT: 25 + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true' + PERSISTENCE_DATA_PATH: '/var/lib/weaviate' + DEFAULT_VECTORIZER_MODULE: 'none' + ENABLE_MODULES: '' + CLUSTER_HOSTNAME: 'node1' diff --git a/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py b/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py index 4c15d707e..da1f357a3 100644 --- a/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py +++ b/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py @@ -6,7 +6,7 @@ from haystack.core.serialization import default_from_dict, default_to_dict from haystack.dataclasses.document import Document -from haystack.document_stores.protocol import DuplicatePolicy +from 
haystack.document_stores.types.policy import DuplicatePolicy import weaviate from weaviate.auth import AuthCredentials @@ -25,6 +25,20 @@ "weaviate.auth.AuthApiKey": weaviate.auth.AuthApiKey, } +# This is the default collection properties for Weaviate. +# It's a list of properties that will be created on the collection. +# These are extremely similar to the Document dataclass, but with a few differences: +# - `id` is renamed to `_original_id` as the `id` field is reserved by Weaviate. +# - `blob` is split into `blob_data` and `blob_mime_type` as it's more efficient to store them separately. +DOCUMENT_COLLECTION_PROPERTIES = [ + {"name": "_original_id", "dataType": ["text"]}, + {"name": "content", "dataType": ["text"]}, + {"name": "dataframe", "dataType": ["text"]}, + {"name": "blob_data", "dataType": ["blob"]}, + {"name": "blob_mime_type", "dataType": ["text"]}, + {"name": "score", "dataType": ["number"]}, +] + class WeaviateDocumentStore: """ @@ -35,7 +49,7 @@ def __init__( self, *, url: Optional[str] = None, - collection_name: str = "default", + collection_settings: Optional[Dict[str, Any]] = None, auth_client_secret: Optional[AuthCredentials] = None, timeout_config: TimeoutType = (10, 60), proxies: Optional[Union[Dict, str]] = None, @@ -49,6 +63,16 @@ def __init__( Create a new instance of WeaviateDocumentStore and connects to the Weaviate instance. :param url: The URL to the weaviate instance, defaults to None. + :param collection_settings: The collection settings to use, defaults to None. + If None it will use a collection named `default` with the following properties: + - _original_id: text + - content: text + - dataframe: text + - blob_data: blob + - blob_mime_type: text + - score: number + See the official `Weaviate documentation`_ + for more information on collections. :param auth_client_secret: Authentication credentials, defaults to None. 
Can be one of the following types depending on the authentication mode: - `weaviate.auth.AuthBearerToken` to use existing access and (optionally, but recommended) refresh tokens @@ -80,8 +104,6 @@ def __init__( :param embedded_options: If set create an embedded Weaviate cluster inside the client, defaults to None. For a full list of options see `weaviate.embedded.EmbeddedOptions`. :param additional_config: Additional and advanced configuration options for weaviate, defaults to None. - :param collection_name: The name of the collection to use, defaults to "default". - If the collection does not exist it will be created. """ self._client = weaviate.Client( url=url, @@ -98,11 +120,22 @@ def __init__( # Test connection, it will raise an exception if it fails. self._client.schema.get() - if not self._client.schema.exists(collection_name): - self._client.schema.create_class({"class": collection_name}) + if collection_settings is None: + collection_settings = { + "class": "Default", + "properties": DOCUMENT_COLLECTION_PROPERTIES, + } + else: + # Set the class if not set + collection_settings["class"] = collection_settings.get("class", "default").capitalize() + # Set the properties if they're not set + collection_settings["properties"] = collection_settings.get("properties", DOCUMENT_COLLECTION_PROPERTIES) + + if not self._client.schema.exists(collection_settings["class"]): + self._client.schema.create_class(collection_settings) self._url = url - self._collection_name = collection_name + self._collection_settings = collection_settings self._auth_client_secret = auth_client_secret self._timeout_config = timeout_config self._proxies = proxies @@ -124,7 +157,7 @@ def to_dict(self) -> Dict[str, Any]: return default_to_dict( self, url=self._url, - collection_name=self._collection_name, + collection_settings=self._collection_settings, auth_client_secret=auth_client_secret, timeout_config=self._timeout_config, proxies=self._proxies, @@ -161,7 +194,9 @@ def filter_documents(self, 
filters: Optional[Dict[str, Any]] = None) -> List[Doc return [] def write_documents( - self, documents: List[Document], policy: DuplicatePolicy = DuplicatePolicy.NONE # noqa: ARG002 + self, + documents: List[Document], # noqa: ARG002 + policy: DuplicatePolicy = DuplicatePolicy.NONE, # noqa: ARG002 ) -> int: return 0 diff --git a/integrations/weaviate/tests/test_document_store.py b/integrations/weaviate/tests/test_document_store.py index 0666151ee..d5b1a2380 100644 --- a/integrations/weaviate/tests/test_document_store.py +++ b/integrations/weaviate/tests/test_document_store.py @@ -1,6 +1,10 @@ from unittest.mock import MagicMock, patch -from haystack_integrations.document_stores.weaviate.document_store import WeaviateDocumentStore +import pytest +from haystack_integrations.document_stores.weaviate.document_store import ( + DOCUMENT_COLLECTION_PROPERTIES, + WeaviateDocumentStore, +) from weaviate.auth import AuthApiKey from weaviate.config import Config from weaviate.embedded import ( @@ -13,6 +17,17 @@ class TestWeaviateDocumentStore: + @pytest.fixture + def document_store(self, request) -> WeaviateDocumentStore: + # Use a different index for each test so we can run them in parallel + collection_settings = {"class": f"{request.node.name}"} + store = WeaviateDocumentStore( + url="http://localhost:8080", + collection_settings=collection_settings, + ) + yield store + store._client.schema.delete_class(collection_settings["class"]) + @patch("haystack_integrations.document_stores.weaviate.document_store.weaviate.Client") def test_init(self, mock_weaviate_client_class): mock_client = MagicMock() @@ -21,7 +36,7 @@ def test_init(self, mock_weaviate_client_class): WeaviateDocumentStore( url="http://localhost:8080", - collection_name="my_collection", + collection_settings={"class": "My_collection"}, auth_client_secret=AuthApiKey("my_api_key"), proxies={"http": "http://proxy:1234"}, additional_headers={"X-HuggingFace-Api-Key": "MY_HUGGINGFACE_KEY"}, @@ -54,14 +69,15 @@ def 
test_init(self, mock_weaviate_client_class): # Verify collection is created mock_client.schema.get.assert_called_once() - mock_client.schema.exists.assert_called_once_with("my_collection") - mock_client.schema.create_class.assert_called_once_with({"class": "my_collection"}) + mock_client.schema.exists.assert_called_once_with("My_collection") + mock_client.schema.create_class.assert_called_once_with( + {"class": "My_collection", "properties": DOCUMENT_COLLECTION_PROPERTIES} + ) @patch("haystack_integrations.document_stores.weaviate.document_store.weaviate") def test_to_dict(self, _mock_weaviate): document_store = WeaviateDocumentStore( url="http://localhost:8080", - collection_name="my_collection", auth_client_secret=AuthApiKey("my_api_key"), proxies={"http": "http://proxy:1234"}, additional_headers={"X-HuggingFace-Api-Key": "MY_HUGGINGFACE_KEY"}, @@ -77,7 +93,17 @@ def test_to_dict(self, _mock_weaviate): "type": "haystack_integrations.document_stores.weaviate.document_store.WeaviateDocumentStore", "init_parameters": { "url": "http://localhost:8080", - "collection_name": "my_collection", + "collection_settings": { + "class": "Default", + "properties": [ + {"name": "_original_id", "dataType": ["text"]}, + {"name": "content", "dataType": ["text"]}, + {"name": "dataframe", "dataType": ["text"]}, + {"name": "blob_data", "dataType": ["blob"]}, + {"name": "blob_mime_type", "dataType": ["text"]}, + {"name": "score", "dataType": ["number"]}, + ], + }, "auth_client_secret": { "type": "weaviate.auth.AuthApiKey", "init_parameters": {"api_key": "my_api_key"}, @@ -113,7 +139,7 @@ def test_from_dict(self, _mock_weaviate): "type": "haystack_integrations.document_stores.weaviate.document_store.WeaviateDocumentStore", "init_parameters": { "url": "http://localhost:8080", - "collection_name": "my_collection", + "collection_settings": None, "auth_client_secret": { "type": "weaviate.auth.AuthApiKey", "init_parameters": {"api_key": "my_api_key"}, @@ -144,7 +170,17 @@ def 
test_from_dict(self, _mock_weaviate): ) assert document_store._url == "http://localhost:8080" - assert document_store._collection_name == "my_collection" + assert document_store._collection_settings == { + "class": "Default", + "properties": [ + {"name": "_original_id", "dataType": ["text"]}, + {"name": "content", "dataType": ["text"]}, + {"name": "dataframe", "dataType": ["text"]}, + {"name": "blob_data", "dataType": ["blob"]}, + {"name": "blob_mime_type", "dataType": ["text"]}, + {"name": "score", "dataType": ["number"]}, + ], + } assert document_store._auth_client_secret == AuthApiKey("my_api_key") assert document_store._timeout_config == (10, 60) assert document_store._proxies == {"http": "http://proxy:1234"} From e39b2d285ddb03a69f39a5cebf9fe4a023887e70 Mon Sep 17 00:00:00 2001 From: Sebastian Husch Lee Date: Thu, 25 Jan 2024 06:39:57 +0000 Subject: [PATCH 12/47] Unpin unstructured (#261) --- integrations/unstructured/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrations/unstructured/pyproject.toml b/integrations/unstructured/pyproject.toml index e199b3c3e..9cc2a0c6a 100644 --- a/integrations/unstructured/pyproject.toml +++ b/integrations/unstructured/pyproject.toml @@ -25,7 +25,7 @@ classifiers = [ ] dependencies = [ "haystack-ai", - "unstructured<0.11.4", # FIXME: investigate why 0.11.4 broke the tests + "unstructured", ] [project.urls] From 7e21bd8ec60bf6e73a513300c0345d461d5b6c8a Mon Sep 17 00:00:00 2001 From: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> Date: Thu, 25 Jan 2024 12:21:22 +0100 Subject: [PATCH 13/47] Implement `count_document` for WeaviateDocumentStore (#267) * Implement count_document for WeaviateDocumentStore * Start container in test workflow * Ditch Windows and Mac on Weaviate CI as Docker images are not provided --- .github/workflows/weaviate.yml | 11 ++++------- .../document_stores/weaviate/document_store.py | 4 +++- integrations/weaviate/tests/test_document_store.py | 7 ++++++- 3 
files changed, 13 insertions(+), 9 deletions(-) diff --git a/.github/workflows/weaviate.yml b/.github/workflows/weaviate.yml index c638773f0..03cbd45a5 100644 --- a/.github/workflows/weaviate.yml +++ b/.github/workflows/weaviate.yml @@ -29,15 +29,10 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, windows-latest, macos-latest] + os: [ubuntu-latest] python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - - name: Support longpaths - if: matrix.os == 'windows-latest' - working-directory: . - run: git config --system core.longpaths true - - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} @@ -49,8 +44,10 @@ jobs: run: pip install --upgrade hatch - name: Lint - if: runner.os == 'Linux' run: hatch run lint:all + - name: Run Weaviate container + run: docker-compose up -d + - name: Run tests run: hatch run cov diff --git a/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py b/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py index da1f357a3..4a9f6626d 100644 --- a/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py +++ b/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py @@ -188,7 +188,9 @@ def from_dict(cls, data: Dict[str, Any]) -> "WeaviateDocumentStore": ) def count_documents(self) -> int: - return 0 + collection_name = self._collection_settings["class"] + res = self._client.query.aggregate(collection_name).with_meta_count().do() + return res.get("data", {}).get("Aggregate", {}).get(collection_name, [{}])[0].get("meta", {}).get("count", 0) def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Document]: # noqa: ARG002 return [] diff --git a/integrations/weaviate/tests/test_document_store.py b/integrations/weaviate/tests/test_document_store.py index d5b1a2380..f2822b4f5 100644 --- a/integrations/weaviate/tests/test_document_store.py +++ 
b/integrations/weaviate/tests/test_document_store.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from haystack.testing.document_store import CountDocumentsTest from haystack_integrations.document_stores.weaviate.document_store import ( DOCUMENT_COLLECTION_PROPERTIES, WeaviateDocumentStore, @@ -16,7 +17,7 @@ ) -class TestWeaviateDocumentStore: +class TestWeaviateDocumentStore(CountDocumentsTest): @pytest.fixture def document_store(self, request) -> WeaviateDocumentStore: # Use a different index for each test so we can run them in parallel @@ -197,3 +198,7 @@ def test_from_dict(self, _mock_weaviate): assert document_store._additional_config.grpc_port_experimental == 12345 assert document_store._additional_config.connection_config.session_pool_connections == 20 assert document_store._additional_config.connection_config.session_pool_maxsize == 20 + + def test_count_not_empty(self, document_store): + # Skipped for the time being as we don't support writing documents + pass From fa7281197034193933df773bd02dc6d6d1ee4800 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Thu, 25 Jan 2024 15:26:15 +0100 Subject: [PATCH 14/47] refact!: adjust import paths (#268) * adjust import paths * fix coverage --- integrations/google_ai/pyproject.toml | 18 +++++++++--------- .../src/google_ai_haystack/__init__.py | 3 --- .../google_ai_haystack/generators/__init__.py | 3 --- .../generators/google_ai/__init__.py | 7 +++++++ .../generators/google_ai}/chat/gemini.py | 4 ++-- .../components/generators/google_ai}/gemini.py | 4 ++-- .../tests/generators/chat/test_chat_gemini.py | 14 ++++++++------ .../google_ai/tests/generators/test_gemini.py | 12 ++++++------ 8 files changed, 34 insertions(+), 31 deletions(-) delete mode 100644 integrations/google_ai/src/google_ai_haystack/__init__.py delete mode 100644 integrations/google_ai/src/google_ai_haystack/generators/__init__.py create mode 100644 
integrations/google_ai/src/haystack_integrations/components/generators/google_ai/__init__.py rename integrations/google_ai/src/{google_ai_haystack/generators => haystack_integrations/components/generators/google_ai}/chat/gemini.py (98%) rename integrations/google_ai/src/{google_ai_haystack/generators => haystack_integrations/components/generators/google_ai}/gemini.py (97%) diff --git a/integrations/google_ai/pyproject.toml b/integrations/google_ai/pyproject.toml index 91fcd655b..1127dc6bf 100644 --- a/integrations/google_ai/pyproject.toml +++ b/integrations/google_ai/pyproject.toml @@ -34,6 +34,9 @@ Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/m Issues = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/google_ai_haystack/issues" Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/google_ai_haystack" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/google_ai-v(?P.*)' @@ -70,7 +73,7 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/google_ai_haystack tests}" +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -136,26 +139,22 @@ unfixable = [ ] [tool.ruff.isort] -known-first-party = ["google_ai_haystack"] +known-first-party = ["haystack_integrations"] [tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports "tests/**/*" = ["PLR2004", "S101", "TID252"] [tool.coverage.run] -source_pkgs = ["google_ai_haystack", "tests"] branch = true parallel = true -omit = [ - "src/google_ai_haystack/__about__.py", -] [tool.coverage.paths] -google_ai_haystack = 
["src/google_ai_haystack", "*/google_ai_haystack/src/google_ai_haystack"] -tests = ["tests", "*/google_ai_haystack/tests"] +google_ai_haystack = ["src"] +tests = ["tests"] [tool.coverage.report] exclude_lines = [ @@ -167,6 +166,7 @@ exclude_lines = [ module = [ "google.*", "haystack.*", + "haystack_integrations.*", "pytest.*", "numpy.*", ] diff --git a/integrations/google_ai/src/google_ai_haystack/__init__.py b/integrations/google_ai/src/google_ai_haystack/__init__.py deleted file mode 100644 index e873bc332..000000000 --- a/integrations/google_ai/src/google_ai_haystack/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: 2023-present deepset GmbH -# -# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/google_ai/src/google_ai_haystack/generators/__init__.py b/integrations/google_ai/src/google_ai_haystack/generators/__init__.py deleted file mode 100644 index e873bc332..000000000 --- a/integrations/google_ai/src/google_ai_haystack/generators/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: 2023-present deepset GmbH -# -# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/google_ai/src/haystack_integrations/components/generators/google_ai/__init__.py b/integrations/google_ai/src/haystack_integrations/components/generators/google_ai/__init__.py new file mode 100644 index 000000000..2b77c813f --- /dev/null +++ b/integrations/google_ai/src/haystack_integrations/components/generators/google_ai/__init__.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from .chat.gemini import GoogleAIGeminiChatGenerator +from .gemini import GoogleAIGeminiGenerator + +__all__ = ["GoogleAIGeminiGenerator", "GoogleAIGeminiChatGenerator"] diff --git a/integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py b/integrations/google_ai/src/haystack_integrations/components/generators/google_ai/chat/gemini.py similarity index 98% rename from 
integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py rename to integrations/google_ai/src/haystack_integrations/components/generators/google_ai/chat/gemini.py index 9bf33d8d3..030505860 100644 --- a/integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py +++ b/integrations/google_ai/src/haystack_integrations/components/generators/google_ai/chat/gemini.py @@ -21,7 +21,7 @@ class GoogleAIGeminiChatGenerator: Sample usage: ```python from haystack.dataclasses.chat_message import ChatMessage - from google_ai_haystack.generators.chat.gemini import GoogleAIGeminiChatGenerator + from haystack_integrations.components.generators.google_ai import GoogleAIGeminiChatGenerator gemini_chat = GoogleAIGeminiChatGenerator(model="gemini-pro", api_key="") @@ -43,7 +43,7 @@ class GoogleAIGeminiChatGenerator: from haystack.dataclasses.chat_message import ChatMessage from google.ai.generativelanguage import FunctionDeclaration, Tool - from google_ai_haystack.generators.chat.gemini import GoogleAIGeminiChatGenerator + from haystack_integrations.components.generators.google_ai import GoogleAIGeminiChatGenerator # Example function to get the current weather def get_current_weather(location: str, unit: str = "celsius") -> str: diff --git a/integrations/google_ai/src/google_ai_haystack/generators/gemini.py b/integrations/google_ai/src/haystack_integrations/components/generators/google_ai/gemini.py similarity index 97% rename from integrations/google_ai/src/google_ai_haystack/generators/gemini.py rename to integrations/google_ai/src/haystack_integrations/components/generators/google_ai/gemini.py index bd4ab5150..bd4f1d5e6 100644 --- a/integrations/google_ai/src/google_ai_haystack/generators/gemini.py +++ b/integrations/google_ai/src/haystack_integrations/components/generators/google_ai/gemini.py @@ -20,7 +20,7 @@ class GoogleAIGeminiGenerator: Sample usage: ```python - from google_ai_haystack.generators.gemini import GoogleAIGeminiGenerator + from 
haystack_integrations.components.generators.google_ai import GoogleAIGeminiGenerator gemini = GoogleAIGeminiGenerator(model="gemini-pro", api_key="") res = gemini.run(parts = ["What is the most interesting thing you know?"]) @@ -32,7 +32,7 @@ class GoogleAIGeminiGenerator: ```python import requests from haystack.dataclasses.byte_stream import ByteStream - from google_ai_haystack.generators.gemini import GoogleAIGeminiGenerator + from haystack_integrations.components.generators.google_ai import GoogleAIGeminiGenerator BASE_URL = ( "https://raw.githubusercontent.com/deepset-ai/haystack-core-integrations" diff --git a/integrations/google_ai/tests/generators/chat/test_chat_gemini.py b/integrations/google_ai/tests/generators/chat/test_chat_gemini.py index 16a2af236..7b2b80088 100644 --- a/integrations/google_ai/tests/generators/chat/test_chat_gemini.py +++ b/integrations/google_ai/tests/generators/chat/test_chat_gemini.py @@ -7,7 +7,7 @@ from google.generativeai.types import HarmBlockThreshold, HarmCategory from haystack.dataclasses.chat_message import ChatMessage -from google_ai_haystack.generators.chat.gemini import GoogleAIGeminiChatGenerator +from haystack_integrations.components.generators.google_ai import GoogleAIGeminiChatGenerator def test_init(): @@ -40,7 +40,9 @@ def test_init(): ) tool = Tool(function_declarations=[get_current_weather_func]) - with patch("google_ai_haystack.generators.chat.gemini.genai.configure") as mock_genai_configure: + with patch( + "haystack_integrations.components.generators.google_ai.chat.gemini.genai.configure" + ) as mock_genai_configure: gemini = GoogleAIGeminiChatGenerator( generation_config=generation_config, safety_settings=safety_settings, @@ -85,14 +87,14 @@ def test_to_dict(): tool = Tool(function_declarations=[get_current_weather_func]) - with patch("google_ai_haystack.generators.chat.gemini.genai.configure"): + with patch("haystack_integrations.components.generators.google_ai.chat.gemini.genai.configure"): gemini = 
GoogleAIGeminiChatGenerator( generation_config=generation_config, safety_settings=safety_settings, tools=[tool], ) assert gemini.to_dict() == { - "type": "google_ai_haystack.generators.chat.gemini.GoogleAIGeminiChatGenerator", + "type": "haystack_integrations.components.generators.google_ai.chat.gemini.GoogleAIGeminiChatGenerator", "init_parameters": { "model": "gemini-pro-vision", "generation_config": { @@ -114,10 +116,10 @@ def test_to_dict(): def test_from_dict(): - with patch("google_ai_haystack.generators.chat.gemini.genai.configure"): + with patch("haystack_integrations.components.generators.google_ai.chat.gemini.genai.configure"): gemini = GoogleAIGeminiChatGenerator.from_dict( { - "type": "google_ai_haystack.generators.chat.gemini.GoogleAIGeminiChatGenerator", + "type": "haystack_integrations.components.generators.google_ai.chat.gemini.GoogleAIGeminiChatGenerator", "init_parameters": { "model": "gemini-pro-vision", "generation_config": { diff --git a/integrations/google_ai/tests/generators/test_gemini.py b/integrations/google_ai/tests/generators/test_gemini.py index c01c8b158..9ef818144 100644 --- a/integrations/google_ai/tests/generators/test_gemini.py +++ b/integrations/google_ai/tests/generators/test_gemini.py @@ -6,7 +6,7 @@ from google.generativeai import GenerationConfig, GenerativeModel from google.generativeai.types import HarmBlockThreshold, HarmCategory -from google_ai_haystack.generators.gemini import GoogleAIGeminiGenerator +from haystack_integrations.components.generators.google_ai import GoogleAIGeminiGenerator def test_init(): @@ -39,7 +39,7 @@ def test_init(): ) tool = Tool(function_declarations=[get_current_weather_func]) - with patch("google_ai_haystack.generators.gemini.genai.configure") as mock_genai_configure: + with patch("haystack_integrations.components.generators.google_ai.gemini.genai.configure") as mock_genai_configure: gemini = GoogleAIGeminiGenerator( generation_config=generation_config, safety_settings=safety_settings, @@ 
-84,14 +84,14 @@ def test_to_dict(): tool = Tool(function_declarations=[get_current_weather_func]) - with patch("google_ai_haystack.generators.gemini.genai.configure"): + with patch("haystack_integrations.components.generators.google_ai.gemini.genai.configure"): gemini = GoogleAIGeminiGenerator( generation_config=generation_config, safety_settings=safety_settings, tools=[tool], ) assert gemini.to_dict() == { - "type": "google_ai_haystack.generators.gemini.GoogleAIGeminiGenerator", + "type": "haystack_integrations.components.generators.google_ai.gemini.GoogleAIGeminiGenerator", "init_parameters": { "model": "gemini-pro-vision", "generation_config": { @@ -113,10 +113,10 @@ def test_to_dict(): def test_from_dict(): - with patch("google_ai_haystack.generators.gemini.genai.configure"): + with patch("haystack_integrations.components.generators.google_ai.gemini.genai.configure"): gemini = GoogleAIGeminiGenerator.from_dict( { - "type": "google_ai_haystack.generators.gemini.GoogleAIGeminiGenerator", + "type": "haystack_integrations.components.generators.google_ai.gemini.GoogleAIGeminiGenerator", "init_parameters": { "model": "gemini-pro-vision", "generation_config": { From 8db28ee54513fe015f24b22d9a3159857db2e327 Mon Sep 17 00:00:00 2001 From: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:12:15 +0100 Subject: [PATCH 15/47] Add methods to convert from Document to Weaviate data object and viceversa (#269) * Add methods to convert from Document to Weaviate data object and viceversa * Add tests --- .../weaviate/document_store.py | 47 +++++++++++++++ integrations/weaviate/tests/conftest.py | 8 +++ .../weaviate/tests/test_document_store.py | 54 ++++++++++++++++++ .../weaviate/tests/test_files/robot1.jpg | Bin 0 -> 165554 bytes 4 files changed, 109 insertions(+) create mode 100644 integrations/weaviate/tests/conftest.py create mode 100644 integrations/weaviate/tests/test_files/robot1.jpg diff --git 
a/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py b/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py index 4a9f6626d..7fe24ab20 100644 --- a/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py +++ b/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py @@ -1,6 +1,7 @@ # SPDX-FileCopyrightText: 2023-present deepset GmbH # # SPDX-License-Identifier: Apache-2.0 +import base64 from dataclasses import asdict from typing import Any, Dict, List, Optional, Tuple, Union @@ -192,6 +193,52 @@ def count_documents(self) -> int: res = self._client.query.aggregate(collection_name).with_meta_count().do() return res.get("data", {}).get("Aggregate", {}).get(collection_name, [{}])[0].get("meta", {}).get("count", 0) + def _to_data_object(self, document: Document) -> Dict[str, Any]: + """ + Convert a Document to a Weviate data object ready to be saved. + """ + data = document.to_dict(flatten=False) + # Weaviate forces a UUID as an id. + # We don't know if the id of our Document is a UUID or not, so we save it on a different field + # and let Weaviate a UUID that we're going to ignore completely. 
+ data["_original_id"] = data.pop("id") + if (blob := data.pop("blob")) is not None: + # Weaviate wants the blob data as a base64 encoded string + # See the official docs for more information: + # https://weaviate.io/developers/weaviate/config-refs/datatypes#datatype-blob + data["blob_data"] = base64.b64encode(bytes(blob.pop("data"))).decode() + data["blob_mime_type"] = blob.pop("mime_type") + # The embedding vector is stored separately from the rest of the data + del data["embedding"] + + # Weaviate doesn't like empty objects, let's delete meta if it's empty + if data["meta"] == {}: + del data["meta"] + + return data + + def _to_document(self, data: Dict[str, Any]) -> Document: + """ + Convert a data object read from Weaviate into a Document. + """ + data["id"] = data.pop("_original_id") + data["embedding"] = data["_additional"].pop("vector") if data["_additional"].get("vector") else None + + if (blob_data := data.get("blob_data")) is not None: + data["blob"] = { + "data": base64.b64decode(blob_data), + "mime_type": data.get("blob_mime_type"), + } + # We always delete these fields as they're not part of the Document dataclass + data.pop("blob_data") + data.pop("blob_mime_type") + + # We don't need these fields anymore, this usually only contains the uuid + # used by Weaviate to identify the object and the embedding vector that we already extracted. 
+ del data["_additional"] + + return Document.from_dict(data) + def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Document]: # noqa: ARG002 return [] diff --git a/integrations/weaviate/tests/conftest.py b/integrations/weaviate/tests/conftest.py new file mode 100644 index 000000000..ed1002409 --- /dev/null +++ b/integrations/weaviate/tests/conftest.py @@ -0,0 +1,8 @@ +from pathlib import Path + +import pytest + + +@pytest.fixture() +def test_files_path(): + return Path(__file__).parent / "test_files" diff --git a/integrations/weaviate/tests/test_document_store.py b/integrations/weaviate/tests/test_document_store.py index f2822b4f5..e988eb297 100644 --- a/integrations/weaviate/tests/test_document_store.py +++ b/integrations/weaviate/tests/test_document_store.py @@ -1,6 +1,9 @@ +import base64 from unittest.mock import MagicMock, patch import pytest +from haystack.dataclasses.byte_stream import ByteStream +from haystack.dataclasses.document import Document from haystack.testing.document_store import CountDocumentsTest from haystack_integrations.document_stores.weaviate.document_store import ( DOCUMENT_COLLECTION_PROPERTIES, @@ -202,3 +205,54 @@ def test_from_dict(self, _mock_weaviate): def test_count_not_empty(self, document_store): # Skipped for the time being as we don't support writing documents pass + + def test_to_data_object(self, document_store, test_files_path): + doc = Document(content="test doc") + data = document_store._to_data_object(doc) + assert data == { + "_original_id": doc.id, + "content": doc.content, + "dataframe": None, + "score": None, + } + + image = ByteStream.from_file_path(test_files_path / "robot1.jpg", mime_type="image/jpeg") + doc = Document( + content="test doc", + blob=image, + embedding=[1, 2, 3], + meta={"key": "value"}, + ) + data = document_store._to_data_object(doc) + assert data == { + "_original_id": doc.id, + "content": doc.content, + "blob_data": base64.b64encode(image.data).decode(), + 
"blob_mime_type": "image/jpeg", + "dataframe": None, + "score": None, + "meta": {"key": "value"}, + } + + def test_to_document(self, document_store, test_files_path): + image = ByteStream.from_file_path(test_files_path / "robot1.jpg", mime_type="image/jpeg") + data = { + "_additional": { + "vector": [1, 2, 3], + }, + "_original_id": "123", + "content": "some content", + "blob_data": base64.b64encode(image.data).decode(), + "blob_mime_type": "image/jpeg", + "dataframe": None, + "score": None, + "meta": {"key": "value"}, + } + + doc = document_store._to_document(data) + assert doc.id == "123" + assert doc.content == "some content" + assert doc.blob == image + assert doc.embedding == [1, 2, 3] + assert doc.score is None + assert doc.meta == {"key": "value"} diff --git a/integrations/weaviate/tests/test_files/robot1.jpg b/integrations/weaviate/tests/test_files/robot1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3962db1b7a3657f5c04b91828efcbf0b78d80d2 GIT binary patch literal 165554 zcmb5Uby!?a%rLsZmI9>|cXzj9i@UQp6nA&;Qrc3ixVyV7R@|kyyA&_7ELJEiP@uHj z?eBf>_dWN|JLfz{CYelFtf0+vysto@p7^9 zGO@C;{v(3&{P}Y%3@idHECNY zxBs`GprRr9eU1c@Ap!r&|Ahhm13s+-u+dNesJLjj007FZrFt;-Gie5CfjNPGb$}KS z_!1k~k4H<0D*!+RU=!A0Nz(!Z0GR+@VoqFubU%s!fEVCM1c-eGKw@YCtbrH;*v|j} zASy2*0Er5CNiWTgf>axTKplV*0sxSHiUgo%(uDv>F_KZrtkE$5($4_WDCMl5NU;Ed zV69a%8 zolY7T_a&MJHttC9Kk=*qjsVhN0D5-Bv%>6`g3`&-sYp_V1d7$w`Dvm*MjH0dKtUI; zHkV_@uFWmVR;gs_*5vR20Hg+9qM=v*Bk^DPPl$iApsDsFrPBsr$dm(8QBe5GgHhsX z7XfkvVxs0ULfCKoyUmVI?t5z><}3@3?oBa8$x8qn;zN^0lNJC(0ccSQpW{XT*XRFSfC91j!N^Ap z6}n(7=453;FUKJcpa7o=y)rtjC8{l|F9xz#%&1^hPQ-&_6a3+=*hw{a`P;*pYWV4J z2Zu|b;lkiiOM$H586w0NgPb%1gA_S9j^vi%bj$$20%>M8I<4dX+aU@z(pZrG@)8LG zFbnVkfXG*1KZ-HHk(4fk6ito*ImofW06BbD^`w#Sju|n1+2^Yja3gU+=Q%H3x(H&{|)=dnI=C;B!;w zdF=(5lfy?TkHD}-7r?OoM+T*koHUpmK#sJfp|40a_ansvph+Vk5ws{Fq`~OfFR_NO z)&HUXSDZos*64`g^T@rUAnLHL1TrG)2Jc}Pg=$IUITjRgyMO1ey!KjiSB zwMkh*wr5M?KjkE$$Q1rBq5scK`k$Wv#ly5n^k7_GIbC3 
zn{_*j1=BrBXh#%Y4{f7H*ybh`QqCuBrP5me%LA(AKa)gygQK-2@`luZzw}~cKMEiA zKgUtmplimC4dYd}Mk<0CfUW+ISw22qEDxCXm%Sg=vm0d&LNmq%{AaiRV!fIT6;{K( z^h1gQ?FxcX2$GyJpQgD00# z@YNH*{&F<&LH~FAIjDAa)>!=cf_Q~Gq|a{eeJi|>FoEscD5p!8+^|p1q4~zw0Qhaz z_npx))BHo%^|~vGdQI+PoQ{9?`cGy3|E|E7fH}-Ulwi70d`Bz1FTk%z3#PUFR8ALC ze&RisGS5p&7fDWv3_O`g!`@_wNlD2dDJXveXi#M>2eC}-FC>V~p9hSR&uUDsAa&Dr zNu5J}bG5s=O0~HbV5h_~=XF^ZcWpPk0AB73?~3bo>8tt_s6$*qt+ji#DwmUIV`>4K zhy8xsQNQkeQNRD=sC%=A3&Nwh1izVv2|a*R@0Wv*5>$tw}LP9*bRk5=QqomCsUk(2^&xHK((?R1>pnqnL2OhZ`lnE#dF+k)$UYod}+ z;5O?%w|6sl@t~{rfDe|xU6P$OeC76$Bp|0=uIE53v$s!5>Lb_p)dcDad!vh6PfNcs zdU7m4Fu>_$aW)3m>=R->-I3r^_k4dvN+RcM4{`|L`Pq z2W@;2ztD6*|H$Kh+pBm^{=;T)%ONUsLAwKd9zfG8(Rj5TvpBN%N^zr+F)hUiS9zqSB3N2gI*cJH$EV;Di%kRjvp<`I=)#M&&s6*h>6agJbD!E zE_#qC#_gAdRu495a+pg7eo19YSk^v7Zl}n>@niL+x7$k`M2tgvHGxy2N)}ZBD$Q>jq0RVfThEqJoGA^cYWl z2XJm~UDn!BJC*XO(zC6O6Fq2m;lfpzBO6(JDL4BIBG)VwS8aSd_q9?6Phxrb#9#uN z*CnGFZMY2x|HVoDatB98;79~BU1%v#wg;HCTQ4Ly|2gL-xvL^`5Z1Gr30o}t$nnwL zlt(GXLA^P~2HwNMli#tuIP$9@Fi^Zr?afX~0HgLQ_q)l0jF<4103K6kNC5X4MF&~y z&s%Sw&d#9TU#7nE)SKT@TN$C?4#uh$H8}5YYD4baADe1#uTQO^#(DdnKfZJ?VKVdD z+&J8B+Ab(a_Xy0F2+ZsKNL)K>lOdJ3bek=%sWsdf|IsvXd-QpoDQ#lng72I(a+Uxv zWAX>n##p0AW67uTW%<0~GH(y}^cFAiU zuYcBM0DiYsr?xo3uRwT&=%}6xI(6siHOI<>uU>z6f9h3Nr;+&e=Whtu0=)I?wO4wL zfwz~HY3-I-oxraG)#6S>W?CaKR9d9nogR+JZ_Pa&ID#MogSPH`%3Pbj)ka0bFj5Sh zn`XTwG75goD!CL?3s3)&&`fXYm_5DXtDP-^1v;#7Za9CzMKPuqq6Mh4q5|*$wam=X zP9k!8o63-DeUhjXW_mCqA8`Z5S5<*GBSp<7Q=>=Th|WOsfRpi=L8~32&6sXI)yp>a3N=!i+k!F5Lq(m^UYC*JcE$`*$AqaZ#piL!XjP`WqguxF_ zMFY2qLA`xUne7)V_xY=+vkn#vQHzqz2Y!Kvvgw?P<(Y&iSjxrxxUo^z%B4BpAtpT>MAo@!w$kWWk7h05sX4W zy|28;$Tus@Vji0tFyF@Vyj+5ZylD+GW zq-gfuOi?2`LB+ZRcFsQT(!;=WsY=A{ks8sBSH7QRAms%1c1#UKlVI89+yvXHL zPK?&baql{Pzm49YI=qZJtM?mru-zNVjhteO4T)EF@IMX`*8Zc!#a{Gy+_6lYD!F3v znff50UI^RRCm~NZgun%jW&%ll-c)jA3b2$&Go`mVk$(6&fif9>slp&YJ_Ku2e?sQD zrH(0OehFVt`V33Q^pVSm3n=JURTR8qOG+&6xE?GqxnE@S22K%>wL81@}NE`LlSka51}I!3$1rjcJbK;!Z{VAH{Ia!76ppY6#)DTm@v75{g779k(?B} 
zbRJ(uIn~mi@oJfZf=*k-ei#42cJhmAV(S7*1W&-+?)r9SSq z?6Ve8A)jNlj>+5(+}Kl0L(^vXDw|=%ZLgtj(%GX!mB!6#0r@CA@Ig3%Xa2;w{>^-x zz!u#~+Bm}@qv@=45R986+9c+{Q-!8fk9?`06jlODlterD)I1AUfndMi zEv8MTkPD%OamPiF%98eb#&CQ~yd->`Iyu^r)QD`x;t^KjA#_?d<*SDb9`c=o&Yw%C z4+)rSuuiKZNM`ftum@M&FCMdelak|ZO;@nLex913e?O-Wff>F&7JUpwe#r9irT86KRx8u zK5#oR(T+T$K**O%g4xC6R%b}pJAuRd_&N>0$+-6E51&5;NW91DDjIF(=&h-aO*Je~ zp*-@VGtDDEV&ES$Fd;6%l-8fl<9}7oSc31)>CDfF$CynT!rLg%%&MvyrKnA(m=jhk z$+KZ-PGib7(z0gWYdpQSI5N_i?;~jDvo=4oJo-MrK5vmmX|pKUD^SfCH4J(W35u#% z^?P(b`OQ^8@TzTc@3+lq%;6uRUuu)?@p;2rG2SZfMhwm)w1BvJT9e3k1DgB$7rCH-!rtyA{34B zj7stq_)ao)%eS+>SYXL>VT4jC0yGyx!shzd&MWU8T#l_huYQ3DK8S9KOM%4< zFI{Ka4&l`q#|?pCvHbU@l7@M<`zIqob%C==E1vI)@I1JN7@1M!Q&4zQt8){px*P7z zkE<4%%)PtcK~9ezC^hoq-~H4MJDyLl;Gp%MmS@sfSs|(8T~S5U>bV~5ZIt(W%0L`YG9FWDQM6$7U~dlp=ro^qjHvR z(ANYHeb-5CD1KbH^0!@uolZ3J{z$N^&g#o4P`I~4P=9QjxUr6@-@h4|Zoa*{Xsc)1 z=Ki@qY2mt?p(g4zd32hfuv9lKkH2mG4NZE1T%FQZaSTLQg5AEVIt@Og+N@PQ(?_fo znCFQ21qwT$*Q*rIN=g0(A@UZD>};b26ek&8IB_A~567uhHQVzYE|0*w#7M8# zd_Hw=jqgm4G`Aa9RXGtcX~%_0=gLRn8pwO)RIre`w+i)>1C^t*>DS3!adEWe=*RIe zR}LCRTXW5s&(5-F+#)i~%i56;4Y9vj!q?znDy;p<^ z=qaAG*ebV%Xgw!WE!2=D6-s>*lBhbA&G=nSB#8+VfZ@i^AA|WdSl)J?{0AIva{TZC`M*AsULY#AE*ki1JXT{~>czrCn1-lZU4Y z{z^V&B`j)rB}r?0E8^=%#}XCK=kI^#3kBq#F1QR=hxbmD;eYqqe>=Vu6W=rsD!jBy0aGxYJO;F!#&%qbZ?3Kv=N!#B=XR3>=_YU)=heGi zeYSAgyZz#|0@*Paez&`ZZH@ard-oH0I3P#)}P5UsEo~}cgC+$XzWnN{VMcQ;VT7| ztTi?jHt7;b^p(^bE>L{LSvKU`rRXMga$L~)6Tmg#5nhU53#kA)~ zZkbzXs@&BZeea=cefY@mBM?3};*bI56sR>}Et{ejz>m_2B9Erl3Qwj#n22F=<4;La zTye-NZ3%ax=dvTBrJ&cqbBpe>q5cSV>F&9dGuLqFbLVJ36{e^iKbbu3PG~yL%Z)GK zhfmq9sju_|xbAnSTVV1b+H6kqKI9}E3A1{?hm>y-snf!6$>lfg+R$s}$KpKln$Tz7 z`}fdLG%Qkf;t=+x8A3RkP6Y3zET%4I>dS*8L@CeLs<+-2 zcr3CAS4}2}fotxMS62&^3KU*X+7n3bD2kb>cM5+dn=I$ZiK3&GGfo>yQ}v?OaJFF8 zor%vgojlz*o%Nfgh+92aJ8_V2s6MMutWXMX8}%Pm{NNDB^G(!(@u9a|^|UR%E-YF% zh7L0lMnMR4IJ|B^EK|S!2JyQVFe?z;P-yr#BN?^dBi{1o`Dmb9OIoj3t~{@2iJ+@- zG8z$PDY+-5f$t0F=*BS)dUn-SV&$}D*0DGN9L|=n*xI0*Z{6=N6(_2}vBI#N 
zywwBWYzf2IU+*EylV_d`1)*R_@AdHEQOEDMJxuTO4Nb}=P<+L%A{ph9qZOi^UM>}n zl*;2z9`2~!2dKNQ4WGl#h}K}R{YjB)@$&>4;=fM-lDV4^EW3K23B+%*CfD0;ob7V8 zldkk^7bOw(NInz&;NaN=1NT7q{jA({ZHx7rmB~vJ<4399pIeaCYm2Nun)5OHOou0> zdu)n~d-UMW64~Nq!ej*_tQbrgKD01ZeZA5#5GIjjgELcnd-_ap`>a5(pnS=!>z*O> z>k;UKrV@*K&8J(wT_ui=y6%FOQ63A1oSEgppiHASjr(6dZ{2?Wri@=(T;9ECT3bvo zbobGYo1Vxb(8k2G(Iy&Kcc+uee$VmMcZ5deu;&9$(Qs1^wNF`2iSdnHzH4A+tBVMY z#pexi*LC;7ilq`6jkHQRjS zai4Q4Rjyxg%zd4KZH2aXZ}!Hd@Ywl?w$BfpdUwK;%46F7VDDZDE^hPe-#~J&@3-pg zzqRToR7n0R`MHgNR3=v%5h^q4+78Le=h`v}@euuZL~iHa_R1|en2CY5#NSAeapt9S zaQW19YQ>jp5YT&;sRePCxuu#-W$Lt+$IIKL1=v-Nw9T5vC%mpNuP=CaK}GgbC;O6E z=Hc$$j^_URHk_=J1qUr1JbZS(VRNwXGh$Wl zR8vgVR`sv}t)v6a_(!ue+n^hqHWG!5kqxs z|M8c$dSH`LK<{Pms!p<32flNztuQgL#CC~{UbP_ey4Om?JZCb%wQC-7e#qJJhlt%k zpcgOCm~~AMW1{#hU5Lf*CS|GIP#!89S?!p%C|VYlJq`1R^l(i+R#YO^I_yfP?*|&G zx2adZ-F@^LtdVc9BU2n3QL!^UWg;ChQB8f*G{bG{$3zZuNj2sL0!6@n&zw0@6LV#q zL{epxtNNF!6AJV}J|M%Ls&W$DIKxl}hVp=$yX|E2-{2W4vu%=`nx+l5da>-O{5oR4 zj?ULTdZU{vDP?Z&Lc@}=Mr2)4U*c*w`vaLV9xqA}>k#k-L5t7a`DLr(7PW<9MR8M3 zMY+p??+(=^TYU=E56YT~`mF@FXUQD6B2^gFZvJ|z+QlV;%{w_{iuxb(uBrWvqF{&< z0sSAlCq1j7x80(hoS!93*x@?bg7oXt;9^v~D1}86tKv9i0#;9jVdWM;TD0k{auvUh zf(W~OQY>ZaNvAJjFHZk(`nyLAS925j-lf>`&8YBE@4H1#dtxQ@ZI6QDLvGoF0?m)= zUhdr{e~>>(5k zEq+nB>y+&Yf#e>(vUsKWBS@)vcg0`EXBo=7lx13GCnE~ONX0TB+{hN=`_w%iU5ZaY zT4aUARin40$UbiK@`aChO{&gC(sK&VIC7WBOR-$_bq9qt^NV5a(YJ2b)IL2e^Uy^} zDv+;6rcV1)&-j5DSmUM%&)yC_W(i=C4wnm+f_~Wc!0# z;On~{{N?nCrnr@ZOcu<^Ly^Csvk!NHQZD-v1zbm=_33LW9C=oJVs!dxlL}q~ZpCEP z%ZlDB)}^?)W{on8#ktUlc{?HXa-AfTTopEe!pb|&5{~LGjgs%?Jq=Ag=3`=L96o`` z>Fl<4q>fsdW%v%dSPE+W_s?t;3~Iy=&w9A;FBAsr7=>#p&_Sz~*=o#t$ze!NY>yil z#?QUVc1Mdsrpf$CTWyc#S~Pu24{mG<_j;;4FVe=zjAPEfk{@sMmtK4B;~fdy*KW-8 zpvM9%4p@A z!bM@lBNRWZ^rGOHW9f`Ae3px?$FkLA-p($+(;0eeL{{IhrtVgyqHBwzvs9Wh4JBu) zk||HFF~e$bUY^~V=o34FP&9*aJK*m=(3Bo$kH<`p&L7@)!j3OH@0Spcqo1X!$FC)0 z&r=KBwPIPa1pUSPDD7RSjVAj1T(FZ$*kv0BDDMftvL$#{{`kfsX{5LVOURJO zb#$fXs%#p}>qKe1?{;^+n5n54$3MSZHxqR2O;Y`_;E2U&!XzQ|NPMNqJ=$tEuomvy 
zmiMNnB)>(BgHWZ`wX_wF{efeeIb$ZP;rqCGvTl56q{cjdKURg1G>4K|Q*`cxPjpqK zoM5sSc~R~>HPz|%lq1+fu3#Q!!BaJbw8Xmv%_fOFA0{F$@eiXf(vMEBL?3@i+MjC| ztlWFpZRFC!RxvCE`8lK542@i^5pU=QbZAE_HS=bhh;lWI8YhDUIY+C}%YRVnQzu_w zn$6C)1;sS*%-!6n-CwLy+aP0F{BlWELZMBU-LvAQ`62HqwXhw z$D0BUB}o?z(8Wmek38+S?hKT-u1CF74Kp`n$J?hl7K-@rh#U?klT$VKmkbm@sFQui z3l&^{Wo7?GdR2l*kYa@Lm~*_F551x5w<1gLw#(n*HIN=kgZdV6666Q(=D&-3!El;S$t*#m^)J~ zjm_-RhVmZ z){Y3e(#zp9v2Ryojk6R4NplJbTbD$_EW6)V}jZ{%D)K`o>i+BDn}*Or&FmMJih50qym03 zyycNTv$7}Yppo`vJ5d%BzQY@F?_W3`@)-=ZBP-6!pNs5^hXs}7a^^?1A zoaa#((Kn06Un3hBCmJ!Ch;C&hpw8ha*M=5RVZK&9>_`iSwb!tgDEerM1xR-PniFrU zJI--}+04yTvzzFV9Xt&m4J!E>>mF9^&;4xw)CA$SO)MdwhDP zIvmzq+l5XpCz77#J zkfqM$9tWZBbvKjOTlT`QMdOUEi5&G)7Ze?A#}w}g*91GT{Kh6hz%(m~~`^5)e$AV|Qy^sMvbY zaQDq=zU$q}HR4o^YdpU*A*<8GrBOCljm$Jyee!cz6kS^Z>5j`KEvqr2+@?WVEMI? zxbqLzO{0+^dwqqz?BhG-qAXe?P9|5Z*x}Btmz?>w&JhMJKEFXJvvs{cRB9Wi5nEj8 z3a$EL)Dqo4L?P~9!_$T44r;P>7|XEe_tEH$$kbGKeR1<#zrXGVV`T~H_f9O{8`OiypEmx16@#lyB*-5!NfIOcRr#*lFDgqrmavdW65DBUB3~p zz>gf8!m+!q+KO;mC<&wJvmFPfEYnIeo58aD~53g*cMl~J|`VZj-hNE z>-ANg{JzwT5EW0ZtB)xum-uveHhp%6FoB)!m}l7u;%ntFVae(Smqse@f`~Mhb>adh z&7jJAswHMcx~bM!_(pof`lbWL1|v>e@wH+8dD-W2)BG)J3fCqoPE%>srp<$36+?!S z38s31TE7`c^J2M>l}XcDa-6>J6jsw(<6lmu>)DC1n}B;WNN+(JICkUKuwEcLdtyw5 zldx()*&FJpI11)ANlfCk)nRZh5{X_K8;Wp+=~=@}8ruZ3H(jA;c7#Txv)Po%Mn|8W` z^lzgP%MXr5hZpB2l+)Se%{y#q)ldaD9Bu{)J&@^;6$x(`+`M$3pB}EEV#wZLFo_je zPN7`BG$aD8iB$zZOU#W{o7XQ@FpKq;AvGtXjxP3$J!yd^lQ63itFjx&7Uw0V=5erU z7a5YL?$cwZ=~L_ziVln!F|aRY8~!|YB+-wa}DFY-X0M-|_SGLili6ht*q4YmB!@j8CX&Fdb+#I~E_A|21O2=CH2%WGeF06!5{B7AG zroP?ej8XJeuPMV0?qLLu(A=c-hH;159_eSSv{KB{jP|HzGJZV)nb`grVnTctAtp;jPIl}cgHM1sWTu?(EWmxLB3Ds;qp|{_ zzng4gi@U6-xE|n*{CG0jgD0k^e=zaNDYV$3%Pfa(XtY$1y2TKO;t8;-rt~|y%{$EU ztDe{!Q~6T2vodCs3b-cYqhzuWp5?2D9lT|(2`M6?XysB33VygOho}>gV^}v6Ct6WyN#i=d$h$gaZ-AI`V`=`9d=?VlOs79@(Y@)W61jYv1o#&P!WI@!u~hb9lNsem+CB3LT;vQb0o#-{AwWMNwlQkiD< z2!!+R)?BW7BlFlW+uU5y>X8>eQ}7nz&# zm9w7aWhS9q@QldH=N2QKA8L3Q%iZruSmy{x{+{;qO{Cj1X*tre(xYlN(!1M=jjNyi 
zP&OUwtuK+h$uGJ3sw<`Wvb%YjD9L{C)7WLCk7+TP%(uj|$(SZVJqvQFxE(CiW2a|j z*6}iO3}T=9{vPD-T(R3G>C*63mCzCdu$Eaz$Z)hy6?4eq+?LX4)R#5yr#32S&-$Wq z$;}Oi$&U|50bQpJ_5RWq*A+i98>Sb41=chal>?kBnkjV#2T!8VFV{41)u`h0Fa@6g zw`I3afCODFT(%NXqy5-j$NdpncRl@IRoWAWj2h#u3^{~L^(prlq!GQr#*UTyz-jxm zOJ^5mQ`OH9^3kuMts=Bpa)??J06{?JzlLk63~+N6X0xe zsa-cu%GTw(RwQ(;p*G>#KW0GWi@Ut7{H{E4fwo@Csy}g{MyLPQ< zGWIle>WozqhFFmn1{Er_w$R0xwlk{+InFR>)YYC3h~T`ih6e{72(rCd4rj-~VWFaw zB+o!5pn6=s6S6LYMn+>a>docg8KpLghl|H-{`7393=`3lBQnEItjxZUG2mn4$TM$L z#1dG#vsf{w=cKGVR#vFh-654q{e2loY)z{9!?2aFtPBaQ8(nh4or|T*s3SO25hToJ~jL--Qw={bj zrN$l(W#>cI?)3Tph$a?XuPa%VR)q_iKYMQRj3 zB%VMq#4IwlOTtB@ESmly%N7<}dZ_1XCy*AxJ56AN)8gUq8S1!E(a34`>@H48o^PBX zB}+*WzE`fPLhcw(w3pQOS-V%&WKMm2S%u*w_tum-)m{!LTNoi?h*zF1yB8$t2-_VL zJV>aDTHEKUPmYbouc$hNMk~H1E7et?k_}`FtE$Mfhbv_8QZ-U>*3>=$lKxK9eDOcz zw*H@U+J}Vi|D5SY-0jrD(<;w-2k7FZoU1B2C~|EWR|7>%qwz3opplGK_NwXV5)Fo_=)7$|&okKb7IJEfLb(Q^P=k}Q zgcOb171g(>UeHpJwETJXF*7mI^4hUd`$T1P;aSq-f*}oLW-0WjIHC?jRN@!Qsl+6% zxzO9za$rk@FCt7ru{cAJ@E0TJE>5z<`|yL6Yi@rjYXRAJJ}2v-41YFbM4oJJ+&&Cb zLO>2XpvhowaG4pGs||8*@*vtw91)fW|p>QW~TI*bIfr4{(}t(oyZ@WS2UdThjOYvCL@ZV zrP+7CoNPApz7Ex4p<}Ys7*VbB(y|rbf4!mvl-Au>{$bM7ZALn@MKbeoJ_gKYD>ym0eR!i|hRsf;jeHE@YQ3dyM>wDJS`uJG;%cP_y z>j^NBHNyzwlOq+PvE1xnXPDwp(CXr67_v-2y;30~?CwB+v+W&*;~2}a*Ciw;)|o%w zV60pO{mCN8gOjH_O+@+<%3OI@b}o6cKcy0NJk_8@fE^^!BQ+|)to+$C=__VL(1?BP zr@c$hkPo!ozE#jNzufpX1Hb5D(cs}!KXYo(U7AsEB*t=<#&YjoAc6yd7nkAak~XiyMN(!xZT8`LZRQc#QaLf09I` zTidgGefhht!r&x-r!Nr^&q^498z(~R?W@zJ#F_rFWE#*V$h0ZN_ysuOc+bc!eP*lr zqWiu3mm~olW_%-VOKw}ovN#_Fb&Y&Y!uh`^qN*4y`b+o=-AbvR!~%SJy50=_{s}Br z{;W@cnOF+JwmU)XC_*N$*z+u5`NmLX!G^9lkeJfVL4BQLY2dy5?w()N=_$V?gK^>B z>*%7K%r9i0bF2a(HV$lI!OoQA32>G zuGn0&y5k}W@8UO$^)j~DYrbDp%gR1zYf~mG44l|-c8g!en(AaP=q!abmxymxi45D~ zjS~SEC{q)={g_2E?)yG|Z>*_yoXN!o^V|7tg=<)W7!IbVs2x&bVoeul4AOG4SjPDR zy-4Koi9lM*gJv|psX0l`34mdyMW0?LaZB?!L-`5cbq} zCOrd&^{Gev#mY)e^(0WhP^1vbSPoNI)~5iUu>hB4_K%L}V=dyJoqSp=srE0~%OT~% 
zl!I+Neypjn*c~(Na9=`kCN0Iu`#n*J|K=#mfJUWioX;%qP<-)oD<&hTG=5+G-Gdmu49v%c)6vo~V_nM?B5X1M#;BaOKZN_b}6T#6Xh19efVe`(Hp ztg7${BSrOnsQEhlm-4FP2GnZ!&WD?RhLr;P(N|-2fKs;A>;R|J$bw1z%{U!53dPnF zpnv0X34G8>%s;-FG&ayNRgxmZsYaJsMF$hCKlC?57`fM$+zXqWJcu?96B@CEkGDKL z0Z>;J)!A8KxYv~lf|)UiU1^$hGOT0lj>Q!jm~VR6(`H%nL=_#Zky zCU(7CeX-vB=lT`oMZ3u`LHDPDJvpJyRFuCl@f(>Aq)u%num0i{6{`EJ@h|G*x!`EH4fV zj^;7dz>ktp2uRiHu$(?JQ??9P%K_#g{H%UCN2`;hcCgT-&D&7}6asjkf7O^pF}+ndg|q~;Bis!a~Q_qyb;(Wby4@O>UZmR}rf8<&&n*YnzQ zz1O%ZtcpcsT^>J5DP|<@+q(FEN;Jm!D+k}%SjWBmk_ZWLZ4Sbc>*)|wR^x=bzR!L6 z>uW|v>dITo2`S@I8S%+vuJ%RxMXiR zM9YgHnH5IH;P;SPygQ7dppQM=badJpaxa{<+zhS29LdgR4`4A>=F-7nbT?NX3kxz^ zx_}64%dVSQJiZOjjM)vDxgJgo-O^qbn;da87V$cX?F4LKTC>h&sxl613KY#cM%m`3 zEqSGu>WjCByx>!t2TIcEQ>2V;e0G2F-s2AGpDwTtr9}X)jK+SRg3d+RJtg0eiuWg7 zE%AfLmp@raRf-(%e9#qt#tTShVG2+xoX?oU2Gz(rx3k-;?c;Ry5G7T}Q!~8LLpP9t7gt_t_Md~reoWefhAookc9}($5k#NJ;DX${qmA_64 z{3VDE9F*Pu`Qr~gUGrRzD1K~C9UE0ir4N1-$c+#mKO1a~;z+n~v#4VPmRH`fGUY;k z-Qn(FOfIFDHPVOX(JqR7>MJ6Ju@Fe9ozoqv0OhbrR~JTcYpJqw-mI6{!EtQOa03=b&8@vgfE+P$ zViV4`NXSVgcu0CCEhqR9_s^G6!c<>DzCki{CbdmYw)F{+QJNn}T6_Jiw^v|b3f^V> zM6q?+#_c}PM=zImSe|mbqYd*piRo(|Jim`WPvfW6XLMixy`pBomXRW4BXSi_EbWWj z6Nd0f53y%ND}`+uGt&euE_TnadUjN0fY#o2xb+`l+xkA}20d!tyM*_hm+3uoj*J^R zW}FfD12q(#iX+^oHv zDMG|}Pui@n?S)Yi;uRP5qR21d&4{4DH!6V+C2fNFr2|4goQC*3U1|L^oP9#udya~` zgA)rqyj@M?*8$aaUvgn%@^i-s`e>FvnT3^++N%fMy7k0rqxgSR^&gVx_N@D~%Qc4S z<%Vp=V8vS9y`v#>4s?F2*JDy+X0*b=Y?r(xsXEl) z7k|dlNKs=jUUQUUM$p-}xAPO$^SIm_o2)zxR+@?gW<@h#6j%C_<3|)H9#b=JDf@qz zNC$UDJD!%X!phT1?HFr5l|pl++1L)4e7Aqi$gAhplAu=7Yr?vmK1yzDEFoP^v77QB zG#n)2sXXTyX!oFx|3@_HL!Nw}MzKZdeEobCp646gpF*1bhe}^h$+YCSXbDqmpiYq5 zz;A{pz|a&eIwD3NgM~c3tHnwD-Ga&L`(cB}YlpD19Y42RL$376cYB=%oV86UST8y) z-&7FpHig#CC465K?4CJpG^t(Av#zPFU7ml9yE;Ekj_&%#){FB0Vd=ZW+3vrvyIVzT zQ%Y->*lN_Qz4r`)Mq=-Z9iF!KUa?0}d&f?c#}=DdL2POlwUy`1?|t9TH~;0ja($C4 z-*fJB-{;&Xlu=lMWW^BikN#{wEpzPetf~}!=00~Baw4)L`&TD%xh$MI%jkdbBO|i@ z&8}AWuBvfWjX0MLJj|)7*+d0QgM=?V*&SuJOur=*S 
z#CB6`IO`7Pay(t9kV1z1;FiFJk}3j66er89Q>lCCvC2EBZ#%%5J?bc5oOpdzs58na zR9wjS0$qY`dS`=Vc5;H@g^<^{xQ}^7d#bT^lMD3qn)v+Nr%8f#T+~!cjIUYs zC$V_#zXPkhck4Ue@ExeTjQ1}IA(YSpU+GVU1Vn9|T`g9QT?6vorK|VPAF(XA_hE12 z%J61C5N@KJTS&rH?dgbE`|`QhH%!iS?OVj)fOy*=pOC%pz}vNkp0Cvpwb-0?e2fge zuq-QG>i!bx1{p-54#Mik)0GxX4{qDjOibFdcxV9BQ6Ab7O*LCiJJ6;K*V$>r^cm_E zUk%iIW?r_b!d{iAkD~LxdkwC-CFMjYpIPttxkENk^KfE}j8dojd^5#W2vXJ&dx>+Y z=BcbatPAFlSkMFl!IjxY4b3aarc~1dR>P#gv35!_niM!!P?8 z*qlWARK>BMY^2XlvNy^y^5d>pQpz23p8a+b+knhykGODichUAn!cICCjpZ`l79~=} z@XabiT^yj5Q7*F239tETz& z^*WQ8Bb5(maG>>zaHsQl5CjSA$f?Y+8bU1(Z`4D^HiKQX{|p_64L_XiW~L0YcW&Qh z-6FM?`Dkp>=QWx=<9Ol!Opr!_bqUB+Z2Yj4>(fTEPciY7QosGKoIM^@KQ?_mRb#*n|fSwVfaMT4Qp{(xlA*} zPAL1egZ-Mg|Knk=% za?U`6=D{OfK(AVWjq7{L(LtR&1Mz7P>H#ase4?$gY z@PJc7H&c0yQ-H(I-r8GjwZRga@^q`-f7^^C9Mfqf*b^#>byc@;LQPo}sH%G#lV>V- z{fGF5pskyZ1~f?F5RXfv%J}OiDJ)Y{QNM%UY0hfS`t9OUglLQ3#JQBw2+7~7%*$?! zOnJqbXs--b7H&4}JAHd`a@b{Txfb7;u)SM@6WZ5>8SHcyVTN+3QW6dBga3zhR^cW%%B>|S{3eH2QS~|%3o#c&1B7tKTnzMB)SJA;_oI3p z5sAr$i1KnFd9~}j@Gt>d+w)NT8afIxZ?a6E7tlD(5QJs)Hs@`)nVdHaj9WTIaN%ALM$3Xkg%-E{eN)SfdB3-6mI|CRlT9C zzzd%CA2J=zdPZ*06>j!r;9QEIkNjF5oX1;@L7{Fkcj~=*hy3<`_r=pw*XCNv8*$+q zA%ZahBCUcFp@*b}4j*8ev!xNP1+PCfp?MLrLPLN(6qG~Wl`s+D7*xg;#GRLNsA?mm zMYq^p>Up20cGjq0g;a&CtkgSmkcn9e5PmFBY4nEprJVIU57P?qlJ`B&oWE_1qggH_ z%S3&wJWzIofssx__9x9)$e!1%Re)a>uMd{mjdx}*xD(ou)KPz(@@T(f@mBlgOrCA& zg?PE!2D}f0S61h&ZMM~nuCpkj^~?(YM(%(LxZ>TaxM***Tzb{&p9~PsRyXxld zt7lp05jF~PMb!eR7CFTxBJ))mx1oo6n^YkiDYiRvc&GUUKYg}r*1kCxz}*oaCRWg~ zOzTZ!w*z_yuam8?yc|Dylz`O+(0|cBDSJxBJ79*X+Tc45U_Rdt!d|*;8&EG9Puzrc zk^YpktgLZ;##9Vw&fMM4fbRqYM4PcLqCgHf|1T#eD5`2Vgz-v*QRpGy;LSiWfXnhk zZ8`KsSH=B774vysEa*`Za{}(9)JRD6xc3BMKOEMiZ?sWDguQXa+Uc(#G*7Mfk`H=@e&ft;* zhVsNEApd)P=}WRD_NZQ)ji#K2UU1#47vD|;-nBB_C{=@PaB$?(2Mg0{6AXMSHSZfW zIyzc-!Cl>Zn5Y|Cs2|w-N8}`eU27C%OzHhOJrp^jh`%=%p1eB`p2we6p64~IkA;Ve z6kKn;cfZVya>z9E*y7hCk|5twP>{l@zwfcgW!jx$F+8C*GR)&&@MGEz#cy5Sou)Oa zH0iiGw%))QT;4PldIep>{yqP0Vq!Auy|;(nMZ 
zSzyd={_OeP>H6ew(N)n^;f2z{I6`6HQ2;+Z{da-G)|?lCK-f;G-inzR$v$Q<9m5F+ z%`MnJ4olKV+2G@_ymT8}lalQz__6ljZ`2v7v_rY+gG{k2nYIp3{-5kH0~EjWjSk2D zdR+k6h-p5yr0{8?0$KOt#->zbsJFQj%;&_pY4RE*N#Kr_hY0al^aLm{H=vs)4sKE9 z-{nf1`#TZn6EO5gd*)q=C7#g`#=M(^KpkeM%4bpAT4})G=4K;1BTYd#7tUUd6P*T; zzkL+PLdr9A3Z92H=x|L~paSi!w&9}LU}iN}zw-H&OxEJv*3`|=@3ck(m~4N1u?v=> zl!l`!V!d=l!4v0PQ+2kWam8QFz2E>P95<01*Q(3NpZYjq&k57~m!HdS{=4U!H5<41 zGi6ys;~iS1o@HMfc~;hI&@5liW7M#qf+b|9Z)%JXeJrI0w;#q}D0?b*KHiNiUGZu~ z+c9k|CZw2Y`d17;B`U#%L!Y2AKDID_e14KSoaMAcF(={856xzlHWbG#Wk5b#0_4%6c6sXaZX|DjbqKx6C%HgFCC@NOr;9O zRRn`s96oscWmF!DJS^*4m$Fuh`d%_<8?13H2A7#&{(Vt+uDw)bFwZ@<{_WbYxll6CII86T@f$dS(#pSt&#l< zRrQXoN79Ms=?PZc-e<%p0wGV}&C;MQJRv6J(TPU)ptQuaTS^Uff2R7B^weGm(W+O( z4pB;Pip3Jl0ek%1nSyWEt zgCtBL6oXVw62bpuXPF}sKjqh zOy}f7o>1y9=0Khhf18Wh=b1}Pss-d80lIzT-TYlyL$~3zySL4nJ|A6X9N`y_4_VXgchHH93P`S+(ha3C! z^o$1E??S&rpXiBevk}{v(ZBnD&5ANrzZWO@6&*8)7H3vTAuLuB94~XE=t7z~6YKa`?y^DL z+RIe6_tah|k+`v*)o*I^!kn`O`{Fc=+UHSFBs*SMqW(&3mS6~Vr$ss?%OG4qNZq(N zG9cC>P>`+`f9zumQrihz>j9+OM+*o0EXh~F8Cv=Cr?BU5Ae%Q4{Zmad-kQ(Ciyr(a zNoer&{*<_U?409cY)mzWY|nD?+Gqx8lvyN$#L}pn6WsiO5iN>Eqe-Q$ff6co@^*ZJ z>#{x?hA$1eHsdAktON@-O8`HE_DQG4c4!@63BFp52ME%2z1ZlaOF22|hB2%`i|0ix zL8nZEgY&_sR-7*`-hbs5eqrscdtO^|e}q_&M+pp|M%Cy4oY6=pN@oNQj-nD}1_H`{ zez#1Jl@ryEZ_SZphG@3q6j+mjmIx3F3CBtzhivhvwpk~T)lkyV@U?T3Id8H=SxYt9 zT(_=e4ug$WcL#!Usa(wMad(7rF7N}QQagVr&HVUjaAE&*pFaHx8P(K@H-|*m+|r|= zuFW%dE{H61hyMHzMkFe)Crk_+U7`_zB_SciNyvXGDQ;ocMlZX0E?mX3* zz3l(H*j#J{%_nIL#I?e6Z(E^HrGH(9A>K&zL`i(%eyVklu~coW$R zL?CkD{7IE#v!xm)C*tU@8K}c((mGka*!BipT%a~ve&A!`}-O`^E z0V-1x;+CQ(hn4e?>fF6x{x@#(M>U(uvsvLUHN|Uv2dG5I2XhVCTVQ^TUco|@h)l78DZ}ASDxM;y{O~+I zf+|tG$N6){3CWoaBA@<^!aAe58n z^c$=8_`|zxc;NwOoilHbZ`pu{M`oSjYo&@ew^;I+Rq5$u=L*4N=kyF6>051>;=DrR;b1G2I#F^1V0&otCMH>Wx)%1n|f2U&NU z2JNm47H1XHDMb|x>68W01MQkkc55X=n8+?{uAhI-*Et4dx4 zOzLO!suWFZ-$Yw;!V=#RFPp%_D47wXx-n-^MAmGHe zEvTZ5mC~P!VcX2E-iX+nD)Db-mGCL1IoY^_jiKO*16Tl{K--C3W)T~4&&vEVgdU;^^pwhwG%rR 
zSVn8sIPC-|;k`@^wKLzjNM6jD6_mthe11$M(MoFflIAEKJ-~XiEgKGbumlL9RUqIs z^Uu#+t$8erXeMq4Uo|$Bxa5soL|0lVqwpZm458*(kVF4;)9lH~A=|nqsvp{i$?!b< z5!XHBs@v1E*le2o&eOLw`8j=tlJB~f8KAf8kdugoO^B!IrHhx;oVE~WCI7s8i=|m> zKuv`Ns_FuMdmM{FAI*5ArruFkS5^*UszWn!hbWA_^jruU-R02aWCq!C06=<10t$t} z2=S(AS+g>$z8DLUpRMOZcpCv9rNqXNgm!!;z8hRnnKn`4(IsnFK*AAL2AF_|Ry8MK zr)LE)ulafuj_@?WqANk*%F0SiRnccyw~o@g;fY?UM-e{J?)%Q%7~IotxQ(n9e12C+QKw^kZ}9pmAHThE24*cI~SFnu*< zRm}-}F9-ygY0q<>F)zWp`7D}}KdcNK)kHc$Q22Ou&yS<&1~^+xgt0s!T_UjgRyqq% zz2nKhTF{P&=u4~5%g@sfp52&0jelGibR53Vxs^T8Xlueb6L58~$ayJFGWxjIH9h;jR;Gt(f09X+ zm;BT7!$JPC$EiD{)L|vUB6qA<6X_xhk*vZ=F6aR&Q!xjHC{CE42@l2-XHGS10ty_h zsMwaTgJ_1$%%7X%T>{N!ARcubG}Qd4=vCMM?)?t;S2nUDF)R}FA$%Qke<5yA@$VmZ zGmG1D0YKR=oEOhM+eVp#rj87yTqA@S| zSrsiz@SkZQyYR(Ka)L&mSN&EGZW^jABn_Rb@qGfcqzWaqnui1?Ve%^kH##9d>RN(Mq^5= zo|>9)@9PqEcf)=bA8pm=1FDKq#$J;==_xuW@=+C9y8uqbYhcC)poEWJGWxLcfBMbd zZ;fJJZ1alK4$RZM^--FlPPD2?LM)xJ+FJu7%Xf3+f4vqQZvVSiUViYVG1y4vD9`@q zTBN3WjyKbiQ-uf~ZaKuZQC(+R!D(y6u{I5T^;USmQjy{V*WkLIu8LN~LIPi0B5w2n z*jKCJyHg?V^D|wODdD?(iCe9wR9_~Hv9G=;u&&I$T{Qb$7A-O2@hoH0epS$^Pkx;{ zBQn|f#cq;vM1k)Qfd5{2iatwnhQZnSaQnG2)K+^KWej+tO-zf0xd>r7O(ro_-!EYB zS*@9C+V@r&KCQenkCTy5R(~%%NA$)^>vFW^Ts;B$tRtRc&f&4Q^!InoulTBw(9|mq zV|Em>DZU8taf-s`?1}S_RtXvpKj|7a=re%Oc?+?mCo62_V?>#h1mPxbXGIORsQDX!P_O5BJ;31t-^4ajA%TIQYpR%La3ZHT=j82XPbLk;&YBr*hR!9?bPz^eeh}Ik`24)4nGORy-gDB|YxwZ_gSG ze^I(Y!~QA;dmkamEYPJPJ#{r7d ztD8|cAv9>e{j4#S3w>^#7_T)0pfT322Udf*7{#<4Q!$}pC{CJ6b|fZMefxI&jKj!^ zyzy62Z4c7KWcqJ2P_o3FH&p^|`O9>F>MZyA=-J5Uqb9z(JU3?Fu z<_?f|ot_NbdV#NUoU>?vn^mW%ngAmr7%|@{15fpSCg#R%b;tU-zyEV@$9UG%ECAoU zOoqt0@#Nsxr>C~{RWp3mHO0dN*}%hHoG2f<#&OI!%}%pDZ-I!MIWWxRb}-|`z}PG1 zr`!chl_p&89eNK~%ate5>!iCGl@hL%JJszIk~>MRMK05bEKo{&A5?lEDi&jEVJQjN zl*%a_pO&=v26NL5e@oZl_D%IG4P?ajNxdYu}(3s!cM{JG^JCQ#?|<1e$I{qWKO^XY_|3WCC4=-*Ym*9qfe;XCD88%)7=Hp-XTg zRmJ9RaD?gF*c2rqmEWN*p&-Bra{_X%fpPZAI$EYVQB()(CnC#FTxu3@;JV4{578WN z*?!w%tUM3fR3k}l{U8~5#a%@PQ2=9M{adZX>Q7Xh&h%1h%jHknY6RcD0Q 
zny1aa9R3!Hg6SWOa)pgZzD+JC-MBw!!pXL>pU=TOo(u%$Y45?{uiDapm8sh8wbq;m zO+3)z;bt_tx`1Qy+DfazWxBzM`=)l&{}Z}wj_CO8ssBz>=5W=Iu?fJbyqJWkD#9LZ z=)QH^zm&x{LIC(6$l$efujH$jJiuUMiY5k{dD@1dG}gDj^^X6IZxI>4QGiie1Q$ty zv$KQ%1$Gejf4UeO&&5w`V?q%;`0;frH#pRLOF3s z5;1THneR9Oc=yd46rwx`4OK!Yyan=IPr}EyzBLs)-kQ}Fy~A9UN72{R_Mts)kw$@6 z_JieJ!G_e#6mgZn;_T>%3H3!hb|<<5z>7V zdTI^D{mloOA_IZPuQs?0fkG7ZqwHM1ey<*Xct^~9B9h$REU#|m@+P<#r-5QxU#dc; zC3iF$3R7FizM|V_=cHB9|D=Jylp+;^@YZ@hP)t*OPeHhy2Y`?X2cTU)vwn=Ppqqcdn0QMg5A z7f;w8EZIDLG(BU^b8;Z#6p1W-${dx^niTzu!HdjQ&y8C0L9SRM0AnMo&(ZswE*rOE zG>08gp%u4@6EpxqCY4{Tz_l~4MR1~faE5l?K*J7diXmqNG5`qo9x6f0Ugg+;{>Mx;_!!=ZB$?jcsWGR<;Lc-d+l+sUQs)3XSxECE=XQ zpJIy+i~vgP@59-!SemDNa*}6wY^y_@k+2qOSYklwGxaHY=d^midT%k?xvDOV?c-?I6aMU|Nwl)Kz~iZfHrA+}pT3b}>aS5& zWy+gB6p>ciNT9?6jO&oo#yQ^L6vH0+K`0WGjGxe4WgD5|E`*raHICn?x>nWV;XrUU zP(b9$#hYeI4xHC`+TJ{=UoWj1l?$IQ3N=u-t(@I~CUJlXaUN&x-JIkWBg$$21JSi% z(B@eARlanW0RRy4OaK*aeLMyh?8pJoBn7SMHEO<<@j1F*KeF*B)FKzyA$-H=nr4`f ziI-n8IWw!pY;RHG`?&307TPW4?Mk&yPkaCa$_9c3+3XTIV8D9b`t_ht`25cB@7$04 z=$69JA3pi@F|Nd%%9eRrU&V8o1{Wfq3A_&JfA#5+Xu?x8Z>GOkAyr}Ju-QW`%1c_k1C{NHkesaMIfaCAtc{lLGpZmzi z^;I~FRJD;*xj2GFZjwbZ_q!(2h&=m+{EQ6RRxKxo+bA00qr_@cq1PZ&0|pjk?NWiI zrkyZFbdfb-qJxdRoBmgQNxEl9nA~JthttE&7p8q;+JXJ@yL;>?A41y<-b!m3G@UzF zJdx~e5+GhDtcVF4Jgh;YD-LxaR}RR-jA@+cE=o^Rs)&p#${w=S`{*C#zsDs~g#$DS zpPe%uomy1@?;QP9*?u6X?@@3I#}a|xBdMUncD}g!cgtjbC!alguZEp-uo8zk5{Nu&k@yA=1M#xWfs(1Ps&aeuUJFp&+ciR*)6!S zSLe7C@1t^rU6DWsJI!};K)0ggqyUst)&8$&u>QLv_L=RCV!cZruAVOj;oOoHNOTP{ zHb;tO|FjP6t4t-s_BuVzKfxAULSsVn%(DDlZzgkj63a)ytX?bwO`gMXyZL4zN|H4| zIEihi7buVqVaafx_@qxgAtTtMGgx0cIJ6`D+ezRYl&jKtolJp)C*ZB?6qtCxx$HW$ zyrlbuk(I~Anpt>d@m{XA`;gHl_OqHv7nwSp8hyKyGyB_F_$yH5P4y6qiKnrZ6Rmjx zY;cR(d?UXwiAmL(_a( zg(AG011s@;zLJT#pl{(%0PQYm;{jtDSy4 zZ;$bk=uHMaL~(s6CPEXVT=<$gl4z@ltT8C0cynHRXIp=36asX!-|n?D!O zJ1*IJpj4g+6JBg|34w2s_q2c@KC>r$q!h9phc##JvBSJI>u^Bn)FBrS6JulOSyXTG z8Gr@1-vot0f6fg2!#IE6k})m((RkFe8P-A|{MM!F)OACxx~W|izrWCGzkhA60@2K7 
z6n4F>teYG-qU!1COV@iICDcy^eBR2z(kz1eOdxnZG`7a1XJFHlb(X;7O1eG&-`W}YLLkAxjynS46F5HCnijEjs7;7??g-ZMy5$puQ4jSumFVm zIW|DPUQ>HXLrGzr7C-*2Gv$~Zp0}=?2MHmYcB(6Dnwrcx80U)S+f@DGj$hy|F7;+AaPbJuGYH-qEk?qwP zZZI@w{hn=y*AUd!_@rJYkKhTwK~C81MK}-d{cknc|JI(|zei5S{6O%n9EE`1N8N{? zJgmR`UmF0z+p_onyNAhkm2$dCjvXu&;k$`{V|V^nqsuOS{X@#^`J%7!(G(r+gOV%B zyQxDShRh4V$rFON|JOH@>WOf6Loj((Iiwwa)aRix7UxCS4w#Fi#QBO-Kl>hTzEr-Y}*?p%ei-7E< zA`)K{tI?OWe2?Cicm%W3koYcPdWgJ~kR;ZQhMPhkbBl_QF9U6DLKwQcfjY|6+^Ly4?Pg#Fj_r+&y3byRn{qTgh8_SQT^vt zy~pAA;~(fT(aMmoC1q$98*rrz{_pyd87ZEpG)0#j{p9RJ?yu{98jivTme0CK$<>dk z!o@vPB&`utr-QAUsv@P0d?QM#;$`~v`gQtsKvIpg=rlfyR`>tzd2o|x@JWw!#jOwL zu{`TC8qIH+B95j1Lp4PYMm*S@LZy8OO2D;HzZxhry9Nwjhq0#-^-4sBt>}5K-hIbC z0W!f}m@Tpl{h_OcB5}?w`7h*k^mYdtEI!0Aj2;Kk@*lkIDHVL`TTsH%GVx83uc-5z ze_ZCbmWA^wM?R*;7eVqW@aAKxp{Ev!9L%8oVvVI>@JXUgRJ*` z@9zmrjp$n{;@~!{Qt#yUaO#O{L6H6%ZRJCEQJThYB-3fwg| zo(}=-g5;Wb+QQ${$IyJV_`Ok%CQQ!G=!p6ay_hPe8g$KUvgpenICF=fYj6oH(54)w6 zo!f5y%xJH+&OG5&Nb%E_g4fyq8>I3_;>hP&XO{pSl`pB>H(JKTRJlKkOEgwlQJoJy zK!2?E9cxfTIP%A}J^M8E-#yRGb-lNcZy{f{ZZ}vy<|wvA?YeV=R^1O}o;+kGk6hv7 zqx6;HYlE#;E>9_xcI$ccqyb)LkZ=?}WK>GydsJp{2Z?-OXzjPWD0fCj&Z5m@iA0qu zwIN>9fp1+*5r^d`@zttG{d%~3tg0Sj zS|6jP_sw}@v2EA#3CM@$>~*c6emNc8N_O_iZT5$_Hjxb*)0c(ORXyQ)ETfD=&BO+g z=F!fTR`I-K{4G9obyFf+E0IrMk7&Pk3KEf>;g|RmD9&P7OwRHm;(m*p?T5|+j@4gd zrLc#9QgPD$0YYrn=iPl$Tx;hmB}UY0srHz2BwYL?L}Kx|$DpZF(_jA1UH2$)4`uG= zsH3;m?2csaYjwJB%G(}FfBe~5Orly(xRs0O{??kOrLY&@hcGn!iQkx3S9Nd1NE}H8 z7s6X4`e}~pqaK-dC`_$S*~Ctvrp%_^&>snmN`E3UdRftCxY&Yhd=L<>Uu|Ub<%3Lw z`%h$}-ZeK5Yv~yg>=D5*AyBm=!Bi1J=s94LlL$`)&}R+|P0>^k$33d*N%N1N{8ma$ zvECE@e9^Z%%9VXEOtoE2T)c)jE!NM{iB@{>c->ukMKaJQ$2#rimtsMnnpsMJ%v#Xa z@dziwrxh*5UM-y~8q1Jz5j~e|vxFWm{RPO<6b*>$;Tbt@zv{v1s`K z^erRT*qelNd>&{!p`Ko38xPobiJcYfi1 zG*#ZIS>73MT1im^VR3t^*W!7$sgLaZcf>DitR?m1)H8DS5iM|C%ZSQjokXg#8vWW` z5@RN95)T1Lr$`vJ2tb6eB<4`&;Uh2g76i$Z{or~V8?&_aArsZQN2@!3+0pr6*wZ0* z_s*=;Tjt#Fi{*sQ(8+&J~@js8$Dw0;BS$#fTWt?)<#@c{i*%h%P7$g`ZR-?K

Z=2uG3hS5*I2UqntbtWufHD^8Lf6=hq-uiM5m@Z+8#Yx zuQ-c<)AF-<)|<9i9~I~kCi#!Qm_Ey8=B~BT-(2`6dE*n_Golzt$Vb+`)Emf?n9`(L zADX}N6!mQqeJ8;W{=QOtwsM5-y|QS9=o=EC2p2iQlIVX}`Fi*HWZM4qN#Wu_HYcY^`*g1yd;#v#Zdd+m39+sV$C zX&q;IM??qt4K_*rnr;UT!4s_OlU760kcp)8C|s0f1dr2K142i2BSLuanns`>6n!4( zJ3{b)PrI_*6}@4y`eA*T$uO^)H!u~RP2*6vC7bI1X2_5RM@U}|Z|JpV|3(ko;gSVy z-2F(mfqndm*uQxVaO|Xv)OTb@|F4;l)e9yg1-GPgH} zQs;6Q8TBftr)St=Jj{AhotW|_iOzHA$5Bl1<#kPYun|>?sZqU+u6${HQo#f*#(A@zNGP^yd%#% z7gA(<`YE^=cV6O8s~liPahpekBRc#!C$hNlZp$iKYEogeF1Go{r|=qEaUMN_ z{|S#dSj|gR`Y0hhz5!mG5rwuJ2Py0@%4lvVjF}6JRL6YQ={W!6q0^n}08q){!JWGm zlGxqKh&20-^bFGsZ9B$?^caX;3;L^?HrKVpG?fcmeJU&Fq*l4&mJnFqt7JScH~w) zEHE_Bv+oRK-P`Y{mtvhulHh+|#O2#&X^(p*$3hlDe=5QhV}*N3&r4jWV5G<&o40P$ zn_tZNKJJDgT7DTC#^4k3*9^^WOjq4EFS-BKY9h=seb*o01cn!``_#z}=Do}uw0d1P z5q-TAZ;#QAVN|35_*2lI4j<)^Pl}DV1Lp2Hw_)$B8|-rh>{UZ7275#IgBj#IK0g(T z>8DwI)6*Xfx*UC*NHk;>S&?51z9lt!89SIq%&~k5-dp{dC{6x?O||Scma7g{Gng1g z>m0@eTkZG06IZGK6&;@b&* zfPgL7P>Xr%d)|g)q{tk?#_LK#w=Nw$S5wfqZN9*u;p``RF^-~%zSM7fKi3dLSeInZ zbpC!WWwj}+uUyEWAT~|;vUhYaKUm>5KJt=31f?t?JgYERwr{U>?p?kQgq)EeudB}4 zy~oOJ0YAb@*1FG)4x;F9>2ImpgrWAXDc+=x?h{^mLiNtuC|c6XiT&@C^tdvma~Rq0(Gai<@fuzP@tyn^yG zQ1|Axj-g-uKKM*09`~S^0fO9d*?+&BjYUApr&QW1p94Yrel{MSQL3 zncFE(##%ydN$Er;qQZwJmATjg1Pq7#DJHDL+PGz;>v;|RDQ+X3^)hXwiTp$~pX5l$ zaMnxh#`kqf_8cu9nXL+Cl4oWZ=-4;(zw246#LUty>P2#l#j&z$T6R3`gW}Xj(pltR z=F4)VVmqc4Bu&G9KlS%-FGdm>5@!*+|m}Ic3y06LV zmo)JCuxFd;o=jEoF!chx+8!plj?`Y>Us=Zn97N?1M6amGT-Or44Uc{otvH6rV*)fu zZ6%uH;w@&x1_M9g^1QlGpXM;indaL%gt2{{dLAl9LH{|N?vG-4Dm68gvkP$$@6joz z?_3&*lOmH$4?Afd$0;XSCL}7-slS6*RLq-W9Qkdu)FufOFIh;F6w@XXB%<8d7uakYlkV)&$u;VfN-Xgzn*VK{=8QB0&uV<3S z(AlQh`qik^F;~lo=nF?fMTRK9Lf6_UnBVqX&XPGPjHkC=(M;sV82401uFPP1x#k*d zee;DP=Bxnf%&2o^XN|B3_8y*h;VTOYCLup3ir@3a1W+;vn`;ht zI%@WXn7wuug#r!gXpoZ@^9#;C##=v7PSlD+{Pi4@gs!l(66 zR~CC1&kVo4Fu8XMwzi;j2R+z}wi2!kS^41npYA;os)8`BQTA0{OY(P#sYZ%yZs`GA zDQ;#QA*EB$Ewb$QTEyUWu?7)u>o&(NWQy8krH7NvKH_1wSQt+l-Rojh#GTt6R4s_9U#Mj&mncHc$sVH*?BTd^++N!2dH8|1zgoaGkIs3LC@a#Q^wtRq`f^Bo=yHaW9;M^-6sM>X 
z)Z#&r*Gh$yoJ=h-PJN`58Ea{E$P~p;)SEuFd}#NN^&qZN{$*lDV`<{>E*#N|@q|;U z!8~w`Zh!PqzDevMA-j&YCw=0TdLg-ZB)FtIX8~ zR18z{X6u1^avk)2qMU<)&Z^gUW2IP79jk@0b4-jsB{?!SUv@8(uScwF+g{Zk6dReM zDX*5NC`DT=#i^N$M5cwYTDp9G6ftsaB^GvBv<_aAVHW8N|6Y2w1 zf_>?OM-QJoc=qVw(}$1lKPFI>4<3>;3zAXDy=4*7`{Y6S0t{HU*E_J|7<`{_`yv`TsplpU4tCO)mgqWE0b_*|!K}2;9L5Rr7**>DS*) zBaR8J;n83b<9!Y|&E7}t2kcrC=>>OhCd(*SePn;VKuZfScf)j6MYr4DEuH0g{9RNZ zhJP{0Jq5s?i97wBe?WQ86Cufj8I}kZGqG*4divI`s5Mv=8d_Z8%>`r~kW~5HUA(AP zsxS5H?~M5f4G3G1Lp4>9*?idi(f+1d7D=|QcgJu4L-=T zxH%-sX3?ioFI9-BwVnL$-2PwwA6L|Un9uTzpP_5TSd-e7*|a|{Doh-F;>Eg?!#skUXKw(2dWpVlZ^O|I&0r@aKpTd#<)y)xx-VVdp7O;Z?EzWEk5I=I1wEC#sw+ z;e3T}c$4I~Pf3(GKB`V&AsIU-bO_-=-z{XJMR4MEPxEt}oZ`tAi_Hc@es>*JMAbw_ zpRzgOX5_e7w`FYtn0MJ1WiMpD6f+4PU(AK0dE-F1a(&RvGc8Jp80i6`n~j!>cPieL zx&@*+Vnb*7Q1N1z-(bIo6A>NvRU5OoTcubPzS=G3{gExo4|LI*zo|oXC%vGzdn#!h zznHj7k08Ba3?D00n9v3eJrq-dqoxyW3awO)(a2^|bXn7qldRq6EaP~au8JHn@hJy2#2ZmH z)RJt+lC;1-MKImW)SZ+--PSvJQek@4IBy5}c$6e@-g{Q=Z}g{|OeQz5md%#wq9L!8 zmQ44o`Krxt38`?NX~ppx?IzJv=71#GOeZv+DiTr6F_qR+mh(axtfn}ws_JqoQvO_l z=?u3Ehcq1mu-#G763y;!Cv+zo#&=UxX@a156^HAhIE0p&6 zP-uiGr+!mm<`-2?N$&2NrLM(S@i4wFOL?s)vJLooXn@dRd^0z8R+1UGUA-mMw8jQr zBS|L?w&jDm(L_3#F(1^Jd9-1YBW4y@hddo0t9$fWXZ)r!8BdV6RpLT%h+a$Y#LdLV zC~>kM7JHES^;0v>?k96Z*=BzZi>l6>m*P#Hl|@e-;sUuU&QRT4u-LT z-H_+H@ma>p_E5%{x!)-cLG~^SB9rTN z!OW5i&wYqGY}rFIAV#9MHat(P7B4ZmND$ zIfT&RBHCjYl#~XY8;9u${V;6orb+c^!lZXy$l(xZ0tcoJ3Fq@*!EH}PFNJD}arI`y z3sqk`c`VGQ_rUxd+&~Io&A+;7aLvhqWoelH(kDKOO#n9!4ZiPHm9|fgwN>Zh}s|6pG6|*hWs81%)4V&c8LV)MJ`yNbiRg|HqkM4IcLIJ8 zLJf%dJM&Rd-*4nuf+VL6#jI|;)>w{^IwIFrTB)?4+~KE@Ss~j}sJf|rPbCqMvq_`o zP}})-8Gyp3jj1a?{{W?jX7favG$v&ed$<~GI+a<*^p|~q;4SFh59Ed_x8E9n{GocU zN{ZjtP=qf}V{*IW93^i!yV3M^u>Szzbr)VMYv|8A?fH7Ci)n$y(Id8P6dlvDHd%y_ z@WqBy(FjeNtl21eES?*WSt^Q*F1M81WMxI;Kt30wbLOQKWPfEh0X-AE6S$cQEXd~7 zEYc^MBs@qiHvLt{_i_9Tf^gGS(Pb0NH483-3CpUgC`zsVL+*UQ7PB>H_}&t6~2DD$eI_jb(Mk{{V{JzxOe2p43A zS0zjPGThCYoUqEq=XcmG*D^P=6UI-=)TAm>U>HrbP+bqY>vU*3z{H_6L$ 
z(OsKEn#((m^RI!;{iIy7?4gG@gl1CI)BV?sCE^|&)(NpaFVcNy5FHSUn?Itu{ScqK zbqv3CJ2|I6n)GP=&QsW_e6&y_>A>Vh2eM;&;eDxGDZ&2$N|S_&-nTV*g(nHDv3KF< zc`3B+rg}UZ5OYaA?3Z=Aa;@jI_ec5}DR_tdOWwcgi`_F2{6?#Y_mY?=tb%h}6~F0O zCz~<6m+;Snnj3VSlf6N&BQ!n}`c~uN9_CAiW`7F*0F)cA;jiose+MkZpx#Tug!2uW z%T}5+glkX;3;vveUAB8b3;NIxM_^2fB;d$`AQv z^-)LWq9H;*7F;4YBJY%YE&KBdPu^!WIA_UnIC`V&57{$xx58|^%88hYmARFBps2;X z#;QXB8B6N7W#+e^^i+FLWKTt%*_&Aj8BK2)naBm< zc$5{AJ}mNDe7_qpus;RjLX`Ox;e5t#?1N9kVyC5?iw(XN*G1y<;_kBiCv|>q8FfUx zl8Qx7J&6_SVba9tq%xDyNui%rSY`b|Kt6EK=Lnh1IYkW~?omXPIEJHLQ<^pX(Gi(A zPU3GR;(9-&HWx5gh3K(Ncbjr*Y2I`=fB88Ui@$Vr{{Zl>M}zRR<~@QX%0u>01Aav( zQ5xMj+|RSBoFAf-s8vwSzTyW6kz`f^KI(IGKT-v`9^-N8`{`ng}JB~||b zMOK#iDqu*uhe{QAy6PWyIxE_g5cP#MMb2!Bwu8IVrXbQ{=tF z{K_Tl2sR$_&6$Amn)UW=z$oWEnm@B&!@lEQW14Fya<-r;!yAs33L|%PQDmLDr^SR6ats#3DW0* z9T1BM$-qqy4ES+{+Op=z_fk%)T(@(<{RemGz3=+rU1fY|j_D1#RK`eOoz-&70vW#< zNBD@DozZ1yqms_jS-c#x{K{cSku4^h49aJc0CM*G*XFXZlWEtICL48<_4yjO-zC96 zHM-n~m_TI*sS3knmb3}sHk_wa54(w@xvbE}UrL9d47IYn)4r z^Ay;5yQxlne;G=ZzHqYPRw`NpnFm9l0##1V(m_DB;Z3Cpv8vKruBRfov-VBP!LQ>8pwv$C}(bMrq9_Rm49Ga(%?tiy8UcEzvdhCXZeOE_MnRgwP_j z*e! 
z)tbX{@uBX#@iY3Ykw$1|{{R(y%=yu4s+j0|*$Ql6cbh}mL*xY1)V0C6e|xW5`I&pR ze^RNI944;|(ZM{v~{3Y502OTcS6b8GJ#`YMG>} z=Y1CJb3f9zZnz5dgDIIPNL^HW3SCRtxdyLS#-wyZuE(n?ZolIEK}SlXnwP}{rkv7$ z6MCp|4+H3W6%HJx4!8o)OjhqBCDnLu6NKvaLou5j!tQHEMCYOIoCvO<*o0l^r}aYR z+Xso!%XP#reA#v@d(uGc7U6r(S}c&M&@b_0{etK<&@8a^yScC8VsP1mnwh$nU8{Zo zlDRijUGTLRR;G>Uvg%Pq>}KHRwOaO9TuQG@a~x9qZkIL=Rj$tza(=z!=d<(;fGc@u= zulJ3iS6}*z^=pUvQ1qA%p}(4v0Tx9Qgl?<74@YxZBG_5{kdvhGViss)ENxmW^ki$W z?6z&m(Rfg3%}%8U$K3|a)S%J33~0Pq)PUdo)uhp3$YB%k($FWo-7_^@`q4Wi5@)GV zVnrET(I7C(sS3DBflfP;%=U) z(A|{7KUR!-FAgPPpRyXmb)2$AwvPE$NAb4OTi&ie&7+q671xOu)j=AaSBD9?Vd%lC z_+K0B)$_!1@jW*BLjM4DGxX()+voUFX0r28VULKd+BJ>QX3$kdKRc60GJ|^H*7GT$ zpm|{s4r-TViqB$huQi$03IkiA2$Xx_54wqjc5`kH58jZXn@xFpDk~==h0vDF&*Gge zDw^iBx!f&K{Uy9cj_whkx`}bsFhc{wbtgKgP>Xb&)1fy}@SR8h0A~LH=$y!`ltFcuiJvRP*6b(^{{Sk@5!s6W0J=x|KjnCE2X}H_t9~}I z3Ov?ccT&TotN2{0#Ap-0C{Gz&^7^77!ws5*-Az*ydufE^SN`H{cV+4UbI_j?ig0Q2 zE5w7p3#N+8D{0w(6X^r=0*);1pNDWp4eLZgtxBN7o)lWDVO2$SJSu{mu&GoYz)SFQ zQQ((xe?<}m_>9_l6!=k{n=rW0skuYTpsV^J5VzI*AW;c8I(6)iHI>R*U|^mf**Y9e z7Fhg4h5VPQ%Ne~2-)>*jRNKE7=g=&S;B8#x;Xx8^y>Oqzfp)5xW;cC*18tL@ljfom z5^jAK?A&{*LT}M<{>z6B5Udviev90y!LR98`z|G#Dod;s(*EtYM9L7ZHnPfXJ5hi&F3{2(jjQ+I(M$AjhNiD*N8@8s%%(H^-eYFrpFQe z7FRSo=BM3yqn;n!DI4gk^<2V}c$Qagy}T~uvU))5d81WuyeJ}k%J3ln0GRVQQ5s!K zh8~zWl=x4sqwtPffX||~S?cvz&H63)#!9b&h zK`e0PI}+oG5vx#Ax+;gdP!d2ddUc6Yh0r zS^W|FzYsbhlE}HoD~}{&w2afz&Qj4+g%aDT>as*=cvECov2Vn4mQP1yheb8h3Fx3tqbyZVtygHR8 zj)&aw*?9p|4jQ+#$b-LYcdMXJHrFz0-Y`LxwxI*Cz-M8qy(^mwRt9B!T z_vL%QR{K`-S0zWS*R@j$t@v_0+PzF~CxyQV%b2Q|Bq=|5EWzHfNy-=LABn8CcU5-p zw_F|B6RoDH=C^oxEbyhvy3EblTp^o^d{SB2BJuGz{g8_agHg!`VN6C-++rHaxyTQR z14Vr*yh!IHFaH42Fq^vu`9=eqx4#BVO1S8^8abt* zRfg}P;X4;sRgwdGXR2vurAX=u_#!V~=-ZWCxr2nMBJzStCsbVCi(^AdPfh-^B`h> zq-Zc5S8s1Ta*gNf(HA^} zHZA6}{VRc=RXI7mirk9uw<9#J4BeC;788KY$sB!TFnpB#9y8&es*%eSbJa>L*zQjY zFp~bbLLZm$l>8f%7WJAZ0obPI`BBSNCBrha>f5i|YdeK>!NiFUXv9A0zjQR@5i_i? 
zgg*mx=W?=ddaTo=?3&i|PCiQ@=TzJ>yi6)VYR(&7D{YJ6yw?2}4qnB2w|I>CU3DY6 z?L(I9mi0ysQ=S*V2S~T7^(VqTETtK;pTsPrn%+WR+*kIlDWjzDD|_U3?4V2J6DX<4 zO;24^g|u0m+H^){rwRp`+&{h}(c#?Pcw%`W(2>nb7pq|Um7T?#7_9m06d;@u~PQ^dE008}f4 zc_(Fbx)jn>`6CQlR5t{hWo2b#8ZZ6W^_+Pe3VHtkwo^Yqz=8DGlu4=P7)#;R32BCJ zi{VBqkF9bNpc*2Z+?*ZJzTK_lF>|`$0(t0$@KY1|E(d)Vxhjl^5T0ovmo$}+kW~Gb z`%!zxH2$SXlkOK2eu{Yk>{kO@-OGupp%c`ivlQv{#GZ+hx^$6w_>LpK1eN8pf5uzc z99*M*f`{n}>X=UgekTwboW}~Z$fGS442}iPvhiYnm8j1cerONtj%|?x>{VKdG(~zVrSYiG%O%}$g*a`T^(!<(nCa0>;_{e3FZEJQ z_RS{~*8$NL`kjM!N7LB4JSd^2StLUD%>MvNq)2PlMnC0SuMho8*28mpimlg$WZ5r0 zk2ELAJ|wE_Wz9I@)UQA8xeJs5x(Vs?<^51(M+$yb#K$k}{;I%}sD2`3$W=#i{{Rtm zS*P?yvgo9SBq)ESX5>(!x3rR{JV3$7l>~J_1TwM_^IpHvj$UkBa_$$sYWJ*O@xN6^ zH|vYeN@pT-+Ix$4rFBw*8vzihh(9QvBmUB9=j^Nf!w2;zj;pG7B^QvAyXDz@f9YG+ z>czQ|T^lT7K7ALY{-hM$(-naZ6wmNJszGOb;XrFCKTTt9WdRaoU?^um}> ztA%AyaB@8c&z{4_F@ux5%7D;iCM>hYpMt&@{2Vm`Ak<~XJv9lSrx~Jg34kcysTx<< z1rwl59Tokfyic+iwE=`RA;jY;sb(MrSH6AMynjlkg6z`NrUjlQN#SOtZ^ZD~@-b2* zPxURx&g|;14Z|c~-8zV?)p31Ri-dUd0XcQT~ZSXu^A zKf`dUtL)zkD{a|Z2KlLc?aH$143kuD@kb0*H5Oez zbpHDz>x4-SaGcedG&cv6m0-m3eAT0#%Adq`qB+Z@X&0v{;aoUZ3bq&8X3IY_@QPI- zy2_AAUgUjOy9gY#3Nz0o3I71$jILg<(jk-9i~WlD(0hx#7b)Z`MHBUG(+G*m=gE7+C3c@B zBE~Ov->FmZnJ+Y4S{q#uKzn9)*elV;o&>v^ahC|4QFv4E#eXv4A61pXL}$%JVZ7z^ zh7*mVrf_)bZRxj=15u#%Qd^iyIi+Q8jJ8PuT+C7vmj>V`To!u2aZaCdtU0?8) z`~$lu1SNeJjL&r7qh;ixE1`hyjPQCW?2H-$r?sMQba8ryK2{=|InScsFP5`XXAX-l za@Mp~TBhZDyvlMM(2WS?jy~(%JoaAU%P9q4^S%L4<1Kkahnufqk!H=bRU>BMSv+_^z23JmM zCvU@F@WOpjQ@=D^ZPPD36y0?fGnZFDxO;SttsZ%Hqz;ozSN5kRdrz@s{-nc^@RG%(MjhXQ| zAT-Ld1swEE2zOIDsE_eP=fNd(*qgaQQ9Xl^*eIjerhk+sc1IT}e4%>U^+Ept;#cuH z`%7%et_iOn3-YRcFzA8&s7*Y(L{SEFGyro_mY7FCblEGY34^Z1u(sXyVm5QaA^AdY zWI8A1jzHx*E9O1aVlR~m1sAEr9+O#_7?+c2E+yhkr8}zg+da|Qa&YFE)#Vl+hM3Pq zB>w;<&{VDoWx3GoPingU;k2n^2Vvv@Rck&#uL~Z_i|=4sQww=IQ;@Vmg_rL}d4@(4xOTRdR!x zR(9$EL6H28k zzY*Rl@(4Jzv5mJ?3NEgh?1|NCx}qFCT=$2|)itJ|*y2$n#!Sw-tTA<@4pLTQNH 
zklVuzFd?*1LuwHG#mWTqN5pL8CORt>tdre*xt-JUeEBjmQ(`nu=mQ!V;!QcY*8erj&fcy56G-YHcONy{qfByzGxst`=ZZ;PU9M+;hnl(6v{ zC(LsT!JQ?Z8@eY$ak8qPDkn)Fl~E2(q4pNozBR%h=ueVh={zKAz1aFJxtiXJu7$MiW>!iC~vIxv(G?Tg1u0%M|yLcH*pY}HvWZC__)WnGKx z>xIUo8L>e)lz67eg zDdz}fB7@lsDgxK)sQ&<}k(L~l9racoN<#fqx?vaff%FLAaB?_6CtW>__G#^Ewc559 z!q{5}v;&@YOoyvNDWjr$zGtZMKQTaRS`;b7${H*+QD5{~InhZyv#RW=^iP$tpuEu< zCLXEBMZJVJ_52JDy5Cs6~e1kt`$`FcEYnAlN z%`S)^5dF%A-z0oNGor4jbg9OiB}vH1H9U2|Cjp`XQ-mz<8^*)gTMO*M{b-&|C|BeD ziRb>|PJ6u9yhp-!4m0LcGI6=m3VxLo=J^8MpVdkuoZhQ^9H!0|D?WzUaH}7p)6H2? z&I^bh>Z-G{zQabO;Z!GtPL5Sh3H|UL4m-q4@K@QMHrS%m8e?@d9)4MGfUf2yh-1|eNW?~D83=IRiaEH71}qN&)H)%}ga zt}+`n*`^UsME?NF&=Z4oARr;#$&np9>#B;;%=xDuH3^Y7vpBw7kaH|lgyw?>bBxqL zG?oQ}&Ps+|&;`WJd8ROGQ->m?E$wZy{&g29oq{54YfCcK27IiNggKPY&0JqDN=Q0( zL&XA=Ss@IJZ7Hge`k?bdH3%Vy+9lLzxmkS`*&&U2t?aMvoeZ-hz?yeuHkkz{V(F5o z`gI8J{jQzrnNDGD!(eyJ6hy)~DHiNqSyfYDTLJxWt`_z;!n;~wTv$z}qCJChS!vA} ze4MH%(3>xqQF=u9L#(U95O2cKl@rLQedmqSlt*)NASvl!t>&9aAZe9Dom79+ueZXj zjTJ^0!iuZ>%XQLg-J-6%)Jd;JYqg*3yiGn3m`=6AT+R;5r4+K&)d?gnP92xb;%NUqMQ2T6G8f;xkYRbb6Hm$X6D}^zTQpW3anW;{? 
zsJhieCz_&-)k2jTToe1RNg4o}@B4cLtAhHJe=3IATB=AlSrrXyMyDQbSKGeL)%ITq z=u<=8m4Qe)erd+CD0rhhs3~Q``>6*|T8oJcwXPA{6g$CtuPYTc9?AYF%RLvVq4|!9 zo{Q+GGoIn%MZn@w*m9|qf+_JO!a%?dO&A{7j@bDO9-D=+rrl^1=Ca5uY};bW*jF0i zWo2byvwS0u%vNvBgx}(bt``(cixkG5Yj#d>_>^}1%r!v%PwS1-B&*3ws*L?7B~&G= zN^W+wm6sB8oawsMP4<-7rY@1H9wzhUs@(_4P2MNfOT&5p05TJX@2cQ(m#uGQ@1qYT zPf9=LYWMKZaHYBRRff~%r4|$A$*4XP9>+JDx;8#)pyM-xaQmw2o!x$@$%C7duU5I@cbCC+vsjGupPz74ZK6eVycc zDygZ3oGFcPwg-}&v{M&XDtU?oJZ;0yn-QGJNYJTD$n#K+vMHaf_&3YVMlp^}6I|0nTqdEQC*62p(4j0@ z=H$`rB`;%Ke3AYn#1T2QWrGp#hcAg`=DImcl9xMdc_O5VP3^HoRA{%9n)-FB;c4<- zvKb2KxsJ-?s4LaQ`k@V;XPI7j?%>{dOs__NLKPQ_V6qFF;a_8AWN_lfm>T0{YT=@; z%`vtz7*r{!vJ{X_!XGT9L;X%&5z2Bz8m~sVd&J6Yg@mxWhI*sfA3R+yN?$8OP{P|f zDr{@BrXcw((jC=I3Jg1{qIDJ6J<;ofkO|n+2M)Uu_(G5vZk16q=}_`0V?6}wOiBrt!K$uSy@}~gg(VWpEqDvqrWi83|AU1#U9Dp*BK}Nq~|J(%Z0&m?}hd%Fs2vAtAzK$ zpR%z((Oo&S;$6`f3S#`0@ZRAl$V@83chOk=6T%Cka`IJO6!5w))SN4{Ye7URvbnB* zU5CtkL}68;l+ngy(NJMocU)LptEpGXd`oq2p30FGM%_x+&0SShQBk{BMnwG=3aeJ_ z5SVESO=e!`&R$A#H`f*@T{B=iri>|rmy)g_yoAvDq4iZ%I9*?6m{n3(b5#=9G}5@w zRM(Ze3Qgu$3s<{htRK3pCxtxD`Y(Ju*SuluQOIWA_)Vt{6~cg(NkDQ{Q!0B7dDHho zP3n29r_H5N-bWe)_h!{hAypu1Q^oGUV!jkn&n8t}`Wau85k5(}W)CA9vvoTB0R|m6N+5 zyzrVInaFoqVMC6%*;@@V?cGtWa|hKIatO-vz|RL|c`lWtn~5(2124K25_P1cF#O4+ zVld)&)qBbGa+GL3Y+kA$Cziz|q~jx^rd2$<$*8pP=(BZJO)iO+Lz1723NIWge-o${ zc56prFh1)07|7oi1vQ zwj~b8_rUT{2T7@`Y|69eEG>c5Y)Y%H&*nwWksOec&G7118TzAtH50NJyiHF4epGc* zlA5Ji(?lewx1EzJpN&D$9nF(lt1CSr^5p8ScwVX9g0)qqiK?4kLVZN$ zW(t%u=8QiJHE34re(AlDrsH>7nG9-B4I7|K8R2wSb!Dc^$y36%RmND|b=Z>ZxJd7} zZI9I4A)RxDdIz~4svcq6q9aM;bXS%pX`_=sRn=Kr*jaK4q~TqnfFBkNpT)g|wqCm_ zr=JbuN`=IYMAY*Ix~A+ME^6wvTdf~Wx2o(udX-FAB|<8m_pzrkjTl;TZryCDjRrQ^ zsh-7gps!V;sZ z18J_l#vwe=EuWg{i~FN_^uRYY2~HE>rBGuBMAj*3jrOnXXjO0^aVdyk{glf@*6UiP z_;M%`(zhWno@IK;PolcMz~psPRN97R!sH8U$usp;c8sZ_gY!)~T=3TiD5VLhrnDpa zy_NC47Yg;*D3w5agld5Z=%&2X@a&EmVc)9BmwQsYI%Oj2Q=e2QWRE;u-wQ5E_}3WS z)U}?(L(Hw$Dg+FIpE9}wGN=&>WbatvZ*3`6a>q7rs8R(B%xh&SVPdSPRP)o9u%^XT 
zI3}h7^Ak~-!qFYSvUIIA0$LRQ`x53;`>7)%K$)2n-wOqGs@6*TdYn5g*2t>iLyU=8 znwx1Sxwf_q@348{R6(5zBSkCnR#R!%wi@heuBxK)nx@BPI#&#<)jKCuH$*Qh^I=&@ z{{X(uxJg-C3o0tl$xW*2*wE}(3t>yCl7(>QxPrdL)$t>uJrN{)L-$^v{{YCtgdf#J z{B&7#Yf)LbC#Wse9&1c!s=BHw`w8lp>3|KVR6*4i zyXFi308#G^8S7OE!{Kr@UDcJ8^>Xr3cBtod4VR5CP{YNdmKWHSHhQX@Zm6n z1I(w=nw9?ms?+5|Ik;JT({I8tnsL>JMHbd;F%s^xyL|OS#U8Wag~Y{0t>r=% znQ*F-nA@nAo)+1mU+;y`-Xpdb*&3~J-A!n^FodSPA`{x;V~$`g$gX^>*9yz}sb!Of zh<=c*kEBB$Ms_QSv*B64b&xnT9d$i4>BA}j67&|H}x|+J)7*S~@OewQmWwB+rq}DoXc&W5z z?1_7Fw@d~aDPeU@i{NaxU6Y&ENJG{0>axUqk20kaCyPoM*?PKBK|I;8;@>nNA2~l&8V{f0DKF+QVPy>PD84inA)Z` z_H{Ua!mB}33?D^ICxpV&9$QZR&>6Bo27YPN;n7SAbuv!zZ=x-om#(vD+$fB$tt94n zaNU}_$=RAj&X!avZFpBxEa3Qx%FlM+xkDpBJyvL>y!9$(8(03@OUThk{i=wEr$tp! zg%z-%#zC}O%~~o=ah0K5Eh@!HrP-Gfuhnkb4;RTgUtC1F;?v0E(Ny8QGcFo-Q~>9Br-KRZ(QcBt zsX_)Gr z&G+ev9VDU8l$4Nbe9F@cdkrv^R=ab?%F5dmqP`W|nF-NP{{TyLIy3HC&-Y%kGg6-*lhY*bMiu9#OItD37_HJWk@ zjjE0wN>f}z=(nXVg-;g{IEuYh@Y7?W{{VGes_h8_TrG{TuEv;AOJtRjd&i0ED}!At;f{K{y& zcEY#~j+hK4G+h_!P{h6(t17i>uBA1u6mrp3pGCSHgxp#cBa|feyetSBb#l0< zt5rRl;i|vYd@72Q3*I?6k^^q#o9M5xMO~^ZY}Ha~n42v&?5Kw|CA;>*s@Q3PbQvD& zc>Nawq#&rk7Zke%H&o+wJ&jdW_B5K-YGz(*`l;6*xK(q)i&ac3k0PWbzhy=P=8WbP z&z8*DFEv&(y!LB`>S_8ZYO92{tM*jd;w$~uS5S1qm{)B33*n|caJI#jv6R;pm6XOj zk({!sDvT|Kahb#FSk;uHLlM3-Qicllv7&Gd`pb0t`Mb^E+4wH zm7Fl8)Pe=yMCs_JZH&=h6xHgw<4G#3Y}?vb@HAC+6vW|uj(Z!b`&Pn%Qg~fUb*ryD z3cfN@n(t*?B&JoiEUAZFE>@~_n%4?x%Lz1*x`G@^<2?HrLE?U zTD+{zH7ecJ2v!~N-g@|oBf~ujRtmOKTcufB8f;8$QC(SoTsrLv`zt9~P0lA~a>k0K zZz?GTYz%^$)k_*FMVZxh zz*TC~9!TM)6jnt{>9jw6o_JRKSK6AP3Y!(e>rZ1;I+S5-`9C0(2C z$!R4(sq8MLB}^%?IjM|ofX4VyUme_Pv{qJA*|MfKR#wAx4#rR*m<<26FMs;dQSDUFKkT~pcL8Bfh$YS`AenegMpud!9athy-!jKZ2L ziAYn~bXLOZimLk+Ds$ddvZB)qdvDC9Z?jYE1@ND=|HJ?(5CH)I0s;a70|WyB0RaF2 z009vIAu&NwVQ~@ zL9$)u=Z?3Yxjgsu{(13}{{V;leElYC{C;&lzuonpuw~Dmwo>&Det*Z@`x(i+>c4-c z_w>+bx;~%qzBT55j`^cs@VUXa>Foai;mI^+{{YY1{#$;xFZe%CA^qgd$*1e~o%_#} zf3K&>bKCQO!_Xh)bCWpmclF=$fAG1+d>ZoW>H2q<`oBx{{Ds!8=-={ty^e_1MoBsem=j#uP 
z{yX|Vcj>eD$8Glj-{JJWc-qi=00{f2aQc0D<)Q;XlWHO!?RE{{X}0 zA6-9B{rWXOoY&;T{Cy^Qtk3%9`|tcrK9av2AN)-FcgN6t)Assvtq^qUkiwi&#yXuor?3Gfci5y z=D*^8cb)b>Q~Yn!a`WC#r2Vy@uD-o^{{Xz_w_RcNpGQ9*?>|j_I(qsUyxpAj^|Q z{{Z`O?q>Nr=jdjjIS%nC8|9UrPO|9NwzJ^t?VFNxpb+_(+P+myt|0|6C8sjwW)}&w zZmZK~@!}jE6Vr(ol6`*s@88j%;r#x7y?>o2-XB|<_@9sQG1tEJ%auL) zoMAfFe3z{BnEel3@tuB|r-SFu)BdyT)eQXk{{S2Q6Z~cp4XdN~#@elXyZOoh5WL=U ztIT<69Qj@RelZEPM&e;yiM_jznTSKLOEtb>j0W3dm(%cfh**(cP9Yz!{%=2=*BW(y z!OcfzTJHR&arXZJu4?X|Kjpl+_&?9$A5LGR{{Y3A_&fTO^!ijYv*=;<{1bZ4HU9uF z);Y%a^Pl;e&yz1s_nNOR{-4hNkJRICaeu`6YWC0Y-&y&dJI~{r^z7uwemCPkjywBj z!F$e`@ttUIf8b}_E=?AFFZg_(r_%obfpmeR%h&6a!Y`M6_`^^l9X!{p6;j^Km-xVR z#6C}qslTutWwevKl{r3fY9uwow*W5akOum~AcJyCT}^UsX9)1?K7Mk2KMel>2mS9p z6IsvD{{ZLLN7r8$IW;d{5B^_2;Nku{l%G%IZ`1hK*Y&!=&ugEo=P&KEJUjTy@MiPw zJ?F+b)o!1zhdX&Qts$QX;q>I!$N6|XeKwy@is#1j{=W170Dj*|^!fRpLTVr5@xP=$ z&(iJsBQ3_Kx!<6{hR}gHSI#45m9^&i$W=!7!JbE%knwM4AEpt)J=|c|oaKIZb&Tx1 z>jY2(;1%cNtk&yRaKxs+z~i&){{X9-{U^qG(S|wmPX7SQUGFw)o1Zi2->>q2^Y;9I zhs5!l&xp)(j_iKFsQo`Lf8pJJp5L#`*BSk3&0jgL_ttN8i;%x*lRiCte8}dm^{G7l zes}wSf%M8>^K{Wpx8`f3xD&b8}1`~CCv-&}nUUq4-N=k4!|C-EM0?jQU$ z?f3Qn0F$q$C!g|m%;%r4#`pe5`SbALU1L;Yci8o#g!bh{bkYfcWyK91kA=m|p=3?{ z@h#)E+(-JrC^~%HgGC-U-bjpw?m0o~zw6#<3Pu-SpIP16z&DSKoPm$);}38mHVj>P zJkx&|A96*Gbht@!mLumJ8zEijM(%_}u90gS5>i;D62?eTxl_5#kqOz{BWxJf8fG@k zW`4i@_xUexvpwOjt|k4QZ=yFJ3{OmgO!xgYr0b zibNq0l?g@3ZD1K|IN`BHIePOhu0Bb|80<>f0cW*K#4!WeS$n+yzzdYX=1BT@>{}Zx zR>~aj*F0H)w^MYFnaMGAk#+a4fS+Q zcWybAkq*%4{PJMW-1z#rj(S%Z-O<^L1s(d>n?|Q92i{oW&W?r& z!k9!AAfN0NXsBr`?=yvW;}5Yx&be_q3} zc$+^+TI!o6s8~@LbmkTn$qb>8h;p=ofT4xoJ4r*l4oqsnZc^V%L*~u8@gmKa#_xtr zH;!TR(=|*qt+hAivn|f)V=l(l`3?>rh(ikp7>Wz}e$+18p0w&e#a9%WrTSY*%Ey9m z;*0F1_@CPRw;DY8mKOtV3X;~>B(uWyQ5;?bL0~)3(roq&jnZ+G9WHrncW9Hgx3h(x zrh$H~G$Lrd15U*q9lsM>*{C~QI2_{D<3*2x1-DK@S&iWA8$R}6J+Sk4G^itQf~Wym z_eX){wh#Hxi6naAz9JK5Vqx_8eIVyL%!D2t_$6Qh{*w*u1JiD3j<7>`=dytUuMI+; zm(2|(Nb&Opwkyz#wE?-WBlJ`AGcMRD`+rzx_o%kEc*C*kp^9oX#pfd5jvGkYOjx(c 
zot3U2ts#@Wp1om-yB!UD=H1+Rv-FraSWaE{mz8%d3IUu=zVSZ%JBF)~z;g`bMPb}W zLvT-Zcsw0lt4ZUo>{fEP)`dgl&qiY^wcY+d$r#8OgOG&VL8*9ylYS%(qD+R?l7!co zJQJ#wjzTgN)<7n&F@v!j_P)<~McNH;5&4eHUp5Q(HUUDE@#v$j5?%8s@B&!uHaoVM zfbL#baUwBU_(a~FvIzrw6u;tLK)EAh*ROEq7HE~VG>I#QO|M;XRk49~-^7;pBU8sE zue9ag{+r#{kC7EkyUmF#)oO7K*+~ku1J295Xfk{XR>8)b&Pm*OctU#L@Wlg#S3;ZW z>2FT{K1vdPCdD<|_vYft_n~ADBFn6WD_KsgaBr>6VseRJ&>F#d3=$0GvB8q8z>tA$ zM%mTu5Dpwwm4F{hZPX~4H7AZmHTIQJ5?v5LM%4(mv3eHL9ulirIfzPahIj^fsst&- z*7_4ioX9^3;lf1evQ0O+)V@p?G_9KnQFKRW~4wXf+*Pn{%*QmR?2n1+~(Ys z2Pe6dN)EPpeC<5oR)5CG6T7Y1+sO6~gXw4C!?3EIJkPR}R?7SGI%{S~ z*l+CFV)0`24J(tGgJ*bJ|dX5qoP zU7s!g14tCdGIwJOUa*NDasOaRC~V9MnViow{e5!k{nE`(?6V^!YSnFzgS&$5$0WNU z1(2ZRW#^C_S37KnSz*od_u*tlwn*u($^HEXdkYf8JjeL2jld;qbf|BDtwhQnZDvn=zCzK zj^0`hntpdk;;*G<_A091rQb#NO8Tmg z_G*m&?!|%%*x&_p?fiHLBp7Kp&Olda%)ndOF8YPAD){vOH3eyfHbjSum1qR{{YHxWMd9;zKbzdoMlwiE{Eq{nSdl+-l0B6 z#pB1ki+z%OSRIht5Xd5`fjLWPn2klnju8^fh@)ti?3Lo!C8OmTcF-Sa`!YL;+*O=6 z$$+_l8m@;=BNRQ55EndUm4(A&eQ1GE6#OpS9Yxd#p`br<%ni9;RR=`QIE{4qnF1=^ zC`&Yn1>@!ePO~ycl8&RY8uM&~1XkDsS4Z?UxvXCMOX`V}VTl&}o)I+Fm=4J>qvJzI z|7-`oF-CegB4xQ&V|&(=j1gYKFoyoX7A}I(Uv&Nx71r8G5fK=6{I==)DdT{RwIm|8DR5o_Ffj6HzIgs%pc~MpDq??uVlcC81Z*cbm zTih5SM5B>_h{ylqT5{oqjg$=ts&;TIW_ym8>!JFFu}eFJpmL~B(hc54J_%c2Hz)t5 zGiN6Wh>s|jG4u~PTnKY~RH2u98FiGGBlqD6G05#An#q{HT%)JAriwiq8bba3Ben?b z)EZN~)eaMRA4RF+ERvczV(d%UzQNhMha~~0bw6jpU%V|( zzKhCC7LY%qw)Xr~yZI37ho>2?F+yLKCm-}0rOQ}L97B{19_J8jc!OWQCoHqK_ zEQeNwzL-HuM0;Q4PeLdObriGMu8GZ9AG0xL%NQJGg{$y9fc*pmf&6&)XGze%Z8DG2 zCB`Iqo3r7byFCs(IF}*hP9-DfCJ2np7<7dNqN5|-r*;~il=P87cDo$Pj+v)neICKg z_L9~kQMDC$rboCywj9Hy;)e+?GyU5h*=Ukj+Z@Z>WJPg1#>|M$QBgF|Y*Ux5*I!m( z?I0rJ+TEx4YERw9@`4V(s$vDADV~sJ!~Y_~Q5WK30!+Y8e74ow^StRHEJegho*7g} zkhA#AV_Hx;y*(KpXBtX`QR4kV5u>U=|DQ%lGV-SuQ&Z|4)U!B@LfOGD_+^<>A`Q8< z&%hlIE(qIkMh4EzJXXLwp^?RfN~f+eX8}JjJ>a~cRDivyVAT$74=vb_;)hw+^YR>O z2d+Nrdj)~uUf4ocbn^hQ+$PE; zDSls(-i&%2i^|B*pJKcoN(dUJO;(aA{Y~L!Ojs}0n0KJGSMxg(ai~I6F=SOXfSNc& zujpkbLDuqarAssSQI{=H9oNE~LLiKNc{|j{K_+vb!^Rk7 
ztG=z>PE0D(KlfK-uhxvLsG;$m<>h;lfD%y@t`PqIl~b9mM4$8yy=;Qs?Y(b5J6+*c z>g(1IXCdX!Fsv#Zt5T2ueshh0HTd){dHnXLMBuyGae$45WKk1+{OU&5;QM5XZjC!h zuL@bTFOI%Tv2jZ5RkLaPz_Fq#>l}KV&Q7)Xm6dIvgZByk;eUTPI48575Q+=lH(UFh zQQ6ESekAQ#u*R6pkd+`R2kqq#L4O>Ax|D5YS7P8rE7~g9{%M%vw4{X4jY~drVmB4g{?Grs89?gk9)%A;5Sw>>`-78<&N}33-$&fap(f86*pqk z`tSFsGdvOZW-^9NVo~6&pbT==ULxM-@+Yo!3mdNL#r0opWyf}8ZSRY>k~l+;eEmm@ z?bwn?nQE+S9$Di7#&_MrQrv$|e%)Z^t;qZo#I@FKTwC!z)qO&Clk~uP%wgwEirVVr zo#BIzozNILhu@h%3&xv|SXL3dMI%J&{ziP9MQwcR!J8KC!c||B?QOX*m#sHz#%0F} zkIedQ_un~mwb~x=VWr9V&-)g)k2`^NmjD~5{3=8TKQT4$?jCwcJ`60e04X^~d5Q4t zh}N5Z0sd^5=^HXwKKiu&EKJF}162(nL)zZ-#m9iNgPIim(?hmeh%6`rmHNXENlfgS zflN#KI61}P5Q@#K<*+7v8#j!RWJKNb+{=px%{6y1+7~b#kl4IpP%#$B4qx=+CEXdj z?1%8tts(_nSi(v;evItP6*EB}Zv&>^=Wv7Ou<=zOln4z7<$)}&8CScuY&!IXXsHg#hNRSxThHS${&(gw+;NO z-*#6pev)`Bu3J|6j8b^~*_^7c0TOCk+~qSlUFe+HXHq>jnrlJ86G!=9R;oqM3#c_p z|C^mR&QKs9HN%)B0f*t(-`B61mzqpRNIeTU8AChPXQA<(ii!=Bwu89pi~W)nPchCx z$!Lw6PorLa(=Cuntem=mJ7exZ|LHQvtOCe1&Yiwvy+-~ z`}?TJ5i6XWB;e$C5W-@xolJ61@}f}~o<2&kREklM^(CdXBV1}b7Et{;53~{fVA($X5beM z(3iMKm|`$HI+m7|G1lkpt#&t7)Jw?A5$Jv)PY-*ocz-%~VJ zS7?N9Z0II@YTis(#S=yOz{{2}{nr-cH_Pv&^&fH%{$_Lwt)Q#e= zipcx>Utf6jM|`^JZhC*R-rxK7rr^14tCPioDf8_h!#)fA0y{|o$8l7-yb?lb?jnJX zW8h*ec0OF08_+|U+=r0edx&L7)8JjIiFQtl4IPahVS9zI1dj1aRFdcloF}< zP{(u5a#bpdRYFi7Ha+sj3)$i1CLnhU1wxcj7lW|hQ9D?#8)ONi%ZC2}q>>HK%g03F zQx0?0pZuX4z1snd#o65)inuL5QiIdGFyYB(?Cicgv>dA`;4Oac%L@opb~RK=4=Mfy zFJw~n%FxtdlRUua?eVivi+X*-#Fo$WFQr66`%kT?f`P<^RVP@-l;o;NYr4*3rd4hoVb|J<=! 
zunq?~tnV-o$7$l68w_T3N?Yr4r(iT1%3KepjKx{di+`}1;z4MoU4kMn$A8h zIDN9=;!@Pn;P-rO)l;F>dTZ}pmX5Z*_X4}d$OlIdi^!@p7aJ%`*I zw=n(QFs-T^?-17JM&Iwe#ACGP1-&G7w(HWP6zydqgOgvIm|BrCg^hx!kA?nF9wqiK zjwn>B_BBU-s8;Yo>U&oY^6*?HXWnFOp=QH)(OfFXBDzNDQ1bG!Rc%Lvbg`8`J>?dO zr_t!f^=l`8b_ib$d-ya$qL=%r zn`e!NpekDYxXbu(AE%2L+8Bum?wQ=$SNRk~Ce?l(BFC-l&obJGci4m^d0YaX+lL=x z?u}6xDiwo$WR;J4D~!~qL0#K43&h?^$I0S*#9R|D%{{s;ajTGwfBeCgaCi{b*9v9o2)v z189th_AO=i_r50_p10>k8UWr>y;X7n{GZPOPk_xwrY|M~x=ISF*l!N!~#R#DjfELrCM8c(;T-Hx19rcVdP6V$#mn&Le zLmuGt*~EXC+Eqr5)IAOlOku@$njW!vy6J_p4%#CakrFs67hsz9aMW>bz=#pVg~4p{ z|3P!V__?rRFMxpXaj`eqS764gRP^=j1^q}$!;2b5t@2NSa2)Cqcg?HrF&;k}bR% z7_Zq$xGMaeEA59`MlkRb`_N~eBif8mu6Hv?AV&cf#&uFVaI#T?9d8^2+$b>034!Ig|`vY&dy+DR@{ zBo_`R^mdgTG9=v0JD2VG@%4O0Y}i-kwX_)DL)P~`6__byppe29w!6D5HeYUnej_5w z{%$5&UFS1hZJ{L1xK#YC=v3?8(ugf3$#c%`>+=9QQ4VdoOU=gpIKm+H7+(uVe0OSJ97TsUI1XwSFrB8gpFt+g$=-W z5+~AW{>0Y0Z+O3yvMrrP5VwrCAARJHJ{&Ex1l#Q`5m9k*d&Bm&&|^gW9Bwb_5f>az zz)2uaC&?~J$DQ75I-8>+z&I}B*UV{xSamhzl_?)|{b8!mAb5B()qdX+$`|iX2mYsHSY4ce?JQo5R0$;9V^j zgA=Da57wb|7H>ToES5dY2hjPgJoEspB-f_8s18!_Z*>WhhvZ}+g1r{<1TwY=1EwiEPG>gd_)m(E%$9luQ3Q3Vg zyi7eaQ-i1&n!NFitWpmtkD{5GDRGrDXhT?1Y!!<|&juLbgc8GfZPFUJ5v@j?K)(ZKZs=d2_7#tDp}kXUn;FWy8AOfMx6fWbbJ4Wd;Ex zWEp-)_Q2cps}1j!{>|S0nS4R`{N>DzPNJ6Ju@`6-h{B>K{G2wEC0|8RU`h zZ1JhR`(gN-KKK4lvcU>6>PxURP}ks-PH}Sp=-|R8m^8t@jvhFSH`k_LRl5-deM}?QfRMXDf=Umq^TMXv3wahkBm9 zWzk<6-jAlohaOmwaaNN)*Z+`6eX;9QM#D;^U651^*K4c7lpZ`}F>FRS*!kcVWs9bB z+5O9z+R~I+0~4^!QMV$-+8W`;GL8$Wk>MlFxkaKf7cT>wx}?n#ZCT zRY2CQo-6~?RTWq9uX@ZY{_cr2Azt1F^2E$27rjbJ1aje*=H+&<$X)4kyv7q6=o+$V zpq1L}z=gah#mVn21e>+vSwHWdZfvK47CW*;0=`+F{9_G`On-CuprHy1{#)t2nia?j5h%Cz z0e6o$igMa9va28DUfJEn;TXlUP!wbr*F3bfWeRVwmd7Q^Kq#QTvC$C9N+GU@-3|jg zg;UT7BAGIXM!3}0_6#Y`{Nf#96et;Yl~F(rV27G??evpFc8B6FE_3%=_x(`Y39 z#I&U64e1{T1*3A!^yZxWr{B~Qes7|JF;FrEGL9m^FQPwA&Q2!eQF!S=kI`-tS+B8p zNR(MU#j}h~=lhca^7ND`(Z~~9wf6O<%uZC!GZ2EeJ&kJ1nqMeLz0fe3F&Wr^&RcYj z8~XQQQZkx%ZZ^Mrdm9&&c|WqlKJy@d~oTFHS!BP<%DU@Ufkg*b%~U 
z=d3v8Jk(B?UGmTK2t-3#)*^i`aJA>1sQbx(3*N`@aAS6N$#Z~Wxs6au;gpfkz`w4Z zWzJnoz|D0p&s%SIhoE=3^pMjJtulleDSvWGHs6JbMz$m8Hk#giFLb9UBQKcW7v7Al zJN4de&;Mq;?C1G}o!mUrEQe*S5!gG6W2vR?X3#4hoss3mX8T4S#CVEVz{jDas)~l% z%05R|kbhB#`^CMcelh44Djr?oRTM(u5=+Y{3=!@XYPn}VJfmbYU})$(S(|2w*L2IL zcJ(D)Qv6F2&?B|slkk*cV%9XXx$lV{q&R{{G|x4c$#11f{RgO^71Q>YcNby01)F(h zxn^eG4L1r&B>6DQ9!f_CD3_+tR%}VAXZELE)oBqpV;rI?TuPufMYuiOkW|?8!;2JR zGB>XnsY>yyfc381T3W%OB*C;$)0ICtYTHVJ*B?~)Oj-l9U)+k$Xm6G}5N~O@vqx(5 zf{EYE`lcvyE!LEJAf(v4#(P$`OxEo6HdX+8OwFZQ;`q!n8Dpsv_*8+%TKu@K!H98> zQWM|;u{@wz#qQ!XPL|`}%o2QW)#SXLK{?hmQ)@W@yqUPjRZ%Uay?6N-@h?T~t_Ppo`&8pqGOvc2ZxjU$1 zaXnO=B|!=o;33W!A98}7Uehp-c%MY*$aC#5qQO+Wlp7I)Riue=Bjy$+Wai&ax+2y( zZYP~6($nzt`%haFw_LW-H_DeZ#J0 zgl3*{_Yp6@bUhady_aPUR}-x6%2&B)e|LGi*8L%SUW4fx|R6ydxK^G2$pR=C-aP`P^Xx{VMAFdhy0g5*ek{i!C%Ufve zys7+Eu#u(<_6nl0m!<*SzEZ|C;^4+9y&NJtu$sXulIWFcSVoik9sj+zhXJue`_pKY zJlb(>0c#z`m$%T;PbVJ-xtpO5Vd2DwhRKj-4=XL z)+`+H)DKklbV3o zhs)ic*6=xw@@YT`mv`IuqL&^CNNmvnrTo9l9(r%Ejw!@@PYeiJE`v;}bi1uZ)+iVc z)uhbK^W7-=D%C|so969oXIV0%sHQE7@k@OnOXm!n)jel!MoBGjyr ze)ga9qC!Ja6t}AUYL?EYrTedBXleE1Zo=OHE*IY2dM-EIe6iDsdNK9ZET1%iiB-Im zF1B{JNWTg}_ahBfNmD15mGcfgdKj7bR^zwE`{v^d-kVZS)CfYFGS07;E{Z05Ioo4t5iGGc{tL0iGVYmtaqszzH-=T;#tE-1h4AA=3t@M!0Pi}AU}^^@Vfv_Pn& z9HU~g!6y@4k?0k2+9aMhYGp>GBP`-38kKeo$Wc2a1^ls$Dqimyq>lF@6<~ES_2TA0 znH^j<{0$B*VSCH&Kxt9-42-Ms&naBx@icNHVTfh&6Z^=oL(c~%INv=L35-(92Cvs8 zRlXz*7T*0sR@fsw|NbPVN(h#7(>X(oeeWpA$=C!mD+HN8;Nh?!v8PB9xc2bAi{nnF zFY*pOz4cdlha)=K(~U(KZTC*HJ5!to75#8ASc$ihN1cIhtm6%|e%<_$S>l}aKtuSp$w;<4_K>Xtv>Zj=)b}{`YuMU8 za}#3tLE`6x$1P~v@6;byKO6?CV6T4MC9uvw;Ka1YfiA;KHa!(8|-zm^=V0pB*0(ulIZ3ZJ12>Y{r&4`{qpH81l@L9UhP9j5o42500!mXT6da?E84-ai&;>q;#T3qtRpAe?4CdNdo^&I;yB8zPkN9Y18g|2URVhBd ztk9?v`<=5y3uLi|{)D@)_CGCSJT0_yKlo&85G&A+xwb`#<0-0?3%#g?&tIQGLLR#) zK?1|3!=ThJWDqRqZzAK(0^Wyq+@#wMq>s)r$sKB2HWXcvAhZ$j$76oPTAIZAI2!`~ zKB%;ec<;b5;m$tK`Bwn=7h{N-xp?jE7t}+4Um06Vr1t9A#=MdtxxRg#=~VfYN&}mR zN@<@j`><2J@N!&rt%RfJ-ue8)w}ZLuvR@CpwD&7jt~Uz|xuER4zuVs(7g?aXFuZ$w 
zao+mK_3=VUH-DiG(WSY4L$j1mR{6X^-@M@*Yg=h;{Ns-_eI(5Il&W<$gp;Pgl3i(CtAVdy^NA$+NW;t(nh| z(i^At9J7*cJBe7*CFmJoKJOKGBc1%k8{i#zo9*HywD{1Fkw zZo7dFQ+h%4ngvtxv1ik?>@Zt;X*;0v(z8wPoVpt6g9PqRemY-DL|Va!WbzCMq_MbT`odOhhB9Q#UX zDLl|}9qJ!s5M2Q@8B|t?;J?s%jbH<%x08>x7WVw&>Gd!G(dME4L)NpW#>pm_%(fFh zk<0X2@2|8(Hm5A(NX8^uteC%}bx7Nc!xfvrD>Gh0kBluiMS4Z%9k_K@!Ai-o>q%B; ziol)X&9p~PtrC*fk?Z-N6B4$UOG^F#rOURvhNXrJ_$pQzFFYSrcD5uL55V3^yOvs7 z!UALsyvn~IWJZSN*#F|m;P@O{*^?jQT;TN)d^)F;6+K=C-0aZLRet^LOX&uwzvm$P z&vk2AogZb9Np<5W_b@>X$dA|5zU&53OcTs^}w2A zTFMm|Rig&#Qw5xNei#3+IOeG(6eTd!r@oF(jr%13+5qoZCLD0E{4diT5^DJ`MpGWx z6jEB-kMmEp zS0>PoyQg3CRzL+o!VcEq6!M<`K=4@M zctI2hXU6Kn{Ju|f;d!5_#dOpNIzOO9L8MbYmONIl+rYFyk$Ok;m zj9G#w)t(kz{6^u>H~k9hA7!G^ZI4?0{yCkT7Q7lq z^G9H~5!N{`{}j@ug$#K|aVQe!8Nvp&paEh=a(T8EuRWK&!mG zyy;v*?}GxwMIaG*?r^s8bGXgiqW1ae0E_l4!vqi7vRsLZLKC585hc5UO3e0emni*Z z*yQk}==g2j64)(+E9!hsrph5R{fJwmE*|Y2if4Y={B}v%o-s(%d|jBA1eRR|B%V0> z(rfnhjd9nzNq@RjLt=k{2{)^EZZx(mHVM7}+&f-31n*f{K^vK1GPI^Dyc@0qoWf(R z>5IW=XDXp0YuyE%6iRipB*!Ikq4T{3_;`Y7>Il^>_fo&b%?eT#^zdXk*QJ*!_E|zDJtvyL%FY)q2dos^h0tO*sa46;rbrcGzo@IJzvu zZ1x7(?PvKW*j`%Qkd}VW&X6_*NU(XDP>?Jejc%xE^yA3Paal2d4#1T=6#H+ zH2kyl&0#g?D|cc(r&WdDK2x2Wx&rqs!C^~*&&ld_hqQU(BNx<1q#VTVS%P)?b)dr( zADPu#(?`Jz76JQV=jIt7rxLrW5#i`8pQBJYh)>>)_MrTjU%(i=LjD4gOFr%$o?(nu zmk|QXLps`M*4g^dh1#SaUJN-aU!?qy133{$kR;;i$PrQEUS@pX zsGYiPFjO>0qZKa3WHDNcwc>x~5HFwZd%E&Cw7DuHp3zKlmTK*=x?AKF)25kk;w*kV zHTAsfRytmY@X%kj=?bu_ykrlS z5%KDP`v#)p>_&;*Jx9$4npFXBS;+js_Hdp@x?KRZP2=2Q<4Q!ORnyax$-doZwY#1u zJS{OZJ8-PjB1*_i>F5HGlYhy%%h46O^w8tUFd>=VRh%`NsO54d(}g8e|io$ zr#NsXl1@Y{oaPJpyVN?Q7&;Qe>qR$phW0*jC9(f@SGn2_!VjpDPV2ty>98QlPF(&r z<9zlm?RH{h4B#9_R8dDzre&zM<7Bp3>#NO-toM*kLvm1Hj-(AX1j*#`Of|iz13%H_Nu0w(I%WjkF3aViF z?)5VqP{j3ym?PB1rQKyDUQ0LiluEIo-_<|r+`3N^DFApR;J~S?lFg|KKvYoq7=??dS1}{P6jU?$mR~wGV%g4?mZG zRNy@`&SveW)C=pQ*@1TiFPR z^Q(zmG#H-@9Z?HgX~!NRYm^sj#8gkN@g5$#m@5HvaXz7Gg=Dm>Xo6B|?QVbdkNA`O zjAfqlgwc%)NwfDdonlBDrSzIjck^P){0+M zbn@7>6Xk7MKL^EH{DnOp$U%+M9>xpRh=)N98P1-@cajw}J0qh+(Dxm%+ic)FgHFS% 
z4Fhq8cLbkkp^OxBO%CnpJ={0ENqa-{=rWfzKI#IB%?-x1h4Wk+FII?}S5Q+XP(M_% zA0Yj_^^f^N2tTrL8R`i;?f3qcPBwMBDA6p!c;GKC{3fFVf?cVf0~|K4$mmaQcHVY$ zu2c2q4W#kbgqYl~b-EtrBrE;NOfG$SuTq%L%N9T& zk6)B1%3Ft+{is=&{C%HbNcVgpxDE}VVY0&&@BE7E=gU68Zv~N25>W|=f@=J}XLv_T zLO1o0l(Fp%b$(m3M18dGVAf$jjNn-MumFD}rnaZ*-l|BMll3NRz{&XSrt{08W3#-` zcj+=667Bvn+t?fFA${b(S6i7g?evmW-su-FSW)!e zJki`?D4}C*wX)$Q&4&|0!MA^p=?h9K{ zIB@LYK0jqj4ltuLE@I^9wPk0=kzWDwJ#V&nv1i{1PYj}^PYN}Ik-Aq^n|Tu6l$mp! z<;atOO8+CG7)n~4aSbgjq5{>SwQxx?A>zfKR|x*rt*ShmY|#s^KrxuJ>W2W{TKPdD z@+Zr)3%i|M`ETrcRu#uhle0;wgFiBszxa#X$q14OJLT{2{>!tP-5$qMmAg62*|F(p zi`rbB``%LI^{Bu`y{(g**l1zkx^ClqZxPAhH`*kF2+`9pqzhaU|IdK0V~Zt zx)p(TkC)B#E&KQ_{-G=61ZVn?``Zb>VpufIG#@b5Da2b}HV>{GwhuZRCi!QH(qT(G zQHOV|da+T|%30b?3N0W?RK#0-U4&a6*I4wz$?pyg{{FVqSMV)<|H{5`eqcua`{j)^ zlHj&uLp;55fgtJ8HanFt27;twSY(sQVXLjOK($1FZHdgywF;g}`$^6rZY-B{#srmr(FZmYnU#}d6FaJ7y8abP5UZapL< zrB&78sZ-_T^T%Rtf3*IRg+#>wX*=Y@t^hlmZ)P3YOQVAy@AT!Vy%N2=;;Q>Mg8#(@ z|BBAcQOv>U`4h)nXB-Tqb>c4BU8Ffo>Y7I!i#&HU;^~doZ%(*+y<5_GC{41rz=_e~ zvi}r5TMF+Zp7`m=2hIjc2-7ox3x8*~kbjF~s~WQ%?a;{Vs&|shN$QD87(?gjbJlYfahp5;j=wP9IIIsO=CU5wX zNi}>|RzLcg!zV0vK_i(@!hFNOc|&w2BKWamMwRsdwFpG_7IJc}{UP>s~i+QqRv zUyLsA1~Z4BaV=O6W8b0&GH#rY zB(5*QI`TC$y1_F#ZO@W8>JdS|xv00LqZyi2_8%U9`42ENc%QFwWLVj= z^mI-IV;N(k5+tl7Tp5Hh=xsD!X*spG@=;Cp^^Jzf6-djyrZ&76Q*<@RM+qax)ExOZD%t>v$%Ka9y6 z32i-UtyK5x1~l%XrXhcGQ8@QK!x0m!eB#jH2HqaLGBB*5T zx=3|}ZAZz$^d0vXo5wsA1I`tdZ)M%MSkK}%c+6_y0wz{?k8z!_e&RA34ZJz^BM_nI0Cip48&D%%Zz zTBul^7`uIfV%t&1XD+Gt1Rj|xbtq>jvPyl zl-RGe@>l~t2e>d&lYe(1(lP8XV!_Is#sBQ)v5|eCQSr?(i${zn?^?b_=Dho)uQ2cV zb?NT;KtgF}K*5vG(Y1j#BCSuoC4AfhXpd`E6V6o+nc&b_$>QbJqaeQZ|7xTjj(2p z;gdJVeBw){7$yb$2{YKJFOi*amY`|7JXQT`F`+)-Z9M6y6;;A}9UPm;mm@nA%`*s? 
zLAL1#>%zSPmr6+Ii3qoz0Pfx77{DNXCQNdzCW)FV52aoi64fJb&r@riNJ=Wxy=p&qpyvt{M zyF)_!(v!{ED0Jx&SN<=Z+Lb8*R?azA%$+C0zjcJN9npG@&vZWjGxt9~E+T0YA%5Cd z)~JqPKJI4RluBZ&BTFNi@{_&-0peEvjb81E1lD@BN1l$-0i3A5^WoZd$40 zo38FCSMOjO^z>wAS>O}V3C`ZqZfAIt-oc403uX8E`J`Qnk*=bqj~^>1h&~-VV`^8d za%Bi$13W?X%IGqy`e>(mL0^FQoUC1~rugk#d;4Bf)|EqT3}UlEX*sp%3VoPK(;CIr zKd9Mrx?=L)gLwY@1pk#p%b+(E_Cs%5HfUONA+NupQ?Y#4@NYV+A@nM(tfU(zEj3mT z!`%}S!|q-Cvh`R{g;OQH5RAFmijerq_qijY=)>QifF!;IDLpS>c%SrD0$lQ?@&^ zBV>;xD*25}P{8u2f|II3Q9lb3i1LG9)FONvT^R(i1J^_IDe8y>Z4sB@T?x zH96jECB^TKvXhfcJ92}6r$0FN9ylDLB3~Ipw|+4K^}?UFZ;e-v69joj!TWm3bacDO zQ5|-9@qkJS{{R_byU$(U<2G@Eo(*d^Jx(t_d=H>q3=#4?`{8%q>1< zY+lpw7->nYJo5KU5d;)bd^_qO;xdFqW!^{x0WQ(NJehH731K*E!;S|vL{B>VT;7q&ZE#%|ms}GV974QQ z0v;xYZ#XcjAQDybbaRO6jBPWp>@Y<@f}vk3oV^G?g5-@OXj>0`j`9%)v5(!#@ZkQN!-_`14=&d8i4>e528+i8 z#;UEIIkt^!mw9$ueD#WhHQmQ%ky3R+@NKMamC8O z7W#6ygqJOqcIQl|Th7*T!Nzxc-;cfxiILJfF1VPD;k~tva(QPH1}d%fnA)7f zS{z_Gsjqm6w)ns_9f6aec4q+bL4Q%fp zK+Rwln)^&nWH6^bF1o@Q^WN@t0M^XCqq1AhB2ZD_W{pfj1Z`h2zc(Ao!A7DQiQLC5 zO**U|{J7yEKpedR+l5B;0)lhx$ZHP*-D7A8PiZtgz5F?9QMnG{@B0*1@_Ki&oBVGSVZ1HZNig@)}#6z5wj zhN8}4rRE2hzj>`xWo($j;J32r-m$BzU2lQBWDrr=6dEDUb5Vdw`-Z;7aGUxV+|kPZ zTwYpP%k~Wiu3qrjF8~iPeEZ-m%f?E>4Tvwj>()LcbdTA0m;KIjX9jO3a2)(C%2nQ- zVJbJBc)~9G+@0ozh81!XjeeMuTznhn6UAKR{RA>Skmi^V${+E7b zE|>wF(2ssunigN%12<1c^@edByB}91Pz8l_;`(jAU(*$-w#qf>%f^H{y2pS;@^JR* z=GVt#9P17MUy^>#InmZrbKh9U8`Kl!-X`>A1;^0ce7Mq(t^=ROEN#%!n>xaRxk`qb z_UQGAb*u0A#7;TJ^LhJ*8VL5qKU}_!3N~H#>nb27<`%tPoKVMKf71rTcdI$p@r|(L z`{Er{gATjI+dvHDxC#cOoMSaXMClw^gao73OK$fe!-T~LlwLhzr~@w>);LFa-f508 zX>9S3>)D#%7pj@1)t>vyZ+@KKs-HaK>XUwX#HP77?;D2s+;xKmcj4v4*8}Lwu;XzT zc|*#(dOrDCX*e@|ef$Pqr@isMMu_s>Z@T;QDI zt0fnYId`rc;Y;6v=jU3U{>}ZzO)%bU{QBIo<_Ov|_GZEO1Zjm9A`1_ChxAFx!wp+QHUa+CpeFXo>Ppc)ZKq< z&|;IuzD%M#6tei`&B?dFoC%~CA!7TEMB?Vnn>D94@=gAj3h`X8z31mPIVkGnmVCdA ziyJgJ&w^uuw4?i8uo(bh#BXH3IP{{(&EsMk9S%DIa7YoZ7Vs1(hn|^+-p7fbHId#(Z~J{AH;En2QE~f$5|+uNtzj%v|Sc-LpoIoT0`G~W02&So^8nBv%j^d=?}F)RDzmcRgkCjpG5P;!rx z!gxB#+<|;>n;Nfn*v!SC#hU4K%zc=()Wu 
zPEfc5_r<8mUU$Ym2B?$aGj>;AIL*5cxAUByXC@5N@L^jAMy6FMeZ74TN%uc|Saa^o zIHB;oVySW`CPx5yvU%QYVLDak{a_cPbH~PB3OAXKjTubR>s^1mx@o8H+lt~!iPg|z z@lv>~yB`nE00fm2azG}a0e~E^9@CcxUTW?0lE`)y>0CgH&3VU=yv}ADL;3;baEV_` zNy8*`p1yIbREwnz23#~C=WE@;IRw~3PZ(C%f_f~!dZDpK@U_hG(0N9TX^*OjY zLW!_DpDqZLAiR^`r)A3?gjD4vHO21^m@Ih0MNv0UY~q^4>_v6G_s&Xd6*iY%Tn<}R z1~HjbcD&W(#bET^_Kh5G3da0C+OV zcG=v628_6` z0`OtgQgd02!k1^lVaCz)XT=_W-fOXP28}Z70XJ0X#8Vxefdd-?D~o?G+F&YWkkL3F zo^w&7y(+XHFvJW15ViK)5THOSuotXi1%UpTQL#dt=rCRexMyE{LCLACrhUmROv|>jihT z!{;4e$BaOo13O%(xHF%Unbrrlcxkw0nZqs4f(pbS-V9G&QNN4 zhOnn}GMm;L`(UQm&3VbH;bWz4y4DUDa~p{byyAB+eEy#}K6UEl+B#r-;*nPLdj9j{ ze*4c+7cKXzLjlt3nR~S#F0I2S!XukV_x&z$>SwfoHIjOfl= z`CQP8@pGU7hDNKdsr_O-FWb%s_fBRS<;k#AAY)UIz0N)5n$-dkN-tWDZ-f!R)}dbs zM=Z761r7+F5V){ZRCsWCfZ@m_R?(GNo9q~kH>@Fz)DvdeT-g9IHGddF1$+xD;}i%1 z!VN#v!g=vUPiMT^3n}&Y;XB7x29u`R^y0u$Q4{0T?;QkA2yrgp;Wr;#mm97;zHWg2 z0~q@alx-=XU%kZv`;8}j6uqq+~llmYC!_}`qUzKWcbib{>=$N9J7j^ph!vXgy&LJ0ALM1<3n|o61c| zjZ75O-Knh)oQ(z9qK$dh2zYePG3gXUdL}9&fTBe)_?Vl~g0U!C@~e2ksIP9FNM&(X zkX3dj4%kuQ(f2aO1=JDV5qUSrRObSKMvnS4xVK+ymhU+=3fwCVsD@~#VBxYn%z+(> zKPC=El9l;#0f<+8B4Q~?Lwh#NzsLr>y(965BMHsGsG)ccaDk$={{Xmw*a6Vf?~$e+ zT?PmY9I37`ND<;DzWBX2Q%6_>g6mx0c#&hCGQ>c<9Ai?T@*Lv4F7Uxpfn$rOs1W^d zGhu~j{Z(Qa{mBTe_XuRy??CdeAln%H5t9?afa1GLZKRja0x4y8swT`!oc*?`K zujeeQ(atM04z3F66BcpazWHxHHDh*d$QeI#NZ?d@s6g%eX1W_qAiB`)eYw~I zjAJ(J>3&zUB2+Z!Du3=tWp*bF^ZxO1$}OWO@r(;eU*CfQCIGW;>f=lV`9gt;`Y~$I zVT^}s+60@3(xdDBsFK0)vc#8(bM4#g|-ByKcf6yZbm zb{Ks~$!tFPX-{Yu=cUOigo2TNZXIIV$hfJgsWnm71;CGl-glOE=p2;zaAW5+_?!r; zA~(-2R{_z%p{`KSR^$&gW_P^Fk0E-|=Nluhj&VIM+L-9l`1^O9G*`KZjR|&+Es0mj z`(-vo{jpbSAFJyIsHZE|K!_cPdFLu2y>f3U%A3aCGv=5D219e-oIwm1?UNvEfA7zX z5vhhj-;m?&ivvkJoZ+`{?oaz!Cn0=aSnbt3+>oO+Cmdnk*U!#fFCX{3ys-`&g< zRL$?b(VD}{$DO{gs|RD>ez8F~L&lpoUqJp|NtOZ0fQnmnS2PG(mkR{u}e>l6k0wm&@;-Ll1wt-UjrD zZ|4(%E1 zxSx!>C)>q1Ms>%?IP-b)l3Y#$;^ru*61~)7Ll0|WELd-WqZ!&dPC5OKrPbouY)#V=5X`l&Z$>EK27&{SeB&%>wTCD zf>EYhG0J9(`3wq*CMqDTX7GA1BfO!@t+;NT^8WyNqXg&G$hLL!Gj_U-W5!EWJXb7R 
zXBEaZXDa!^H&?X(0Joe{HA+RqQHLNuj7SX@PJev6U%4@w6dW@+)voRcs7*<3GTP%1 zN^ZEw?r^3i?Y%y7ZCrWb4*ophw43i-eloLqJz{9B)$#ge9bRx}9r3Tz=Q`4lKlz-! z6Com3ldrxvku}avyL&RG!gDxqA7DIdSsB2ViIz5}$DcSeZ^})~c#h_`yddAjf1D&O zH}S@qjx{YU?iFNjG4~zz*HMy*V zo_%T0nx1h3jXv>d8|8BAL#(Udb>j+R53$I`^Jm6D~v=)^aiq`#3aSKv(Vj)rWKYZK`^Mgfd zn!0n6D|NmuAPpZM#t6GU)Bc~%A4Qtm>Sc})P296ro!nCCX~EadPSM?P zY52r^9NC;yE>dq(2K&ne`L~eG6o|asKIGuVT@J@}{NxmJ7;?loMUaTxxFYWv7SyQ_rt?8v}1Z3C(F{`qkg;x;6N`N7j}s5g&UD${9f zH{{L&O*iqBg-DH=dji^#;q!a=#C}p}{ccz*Wi5YP$%w6+(0K zlnB*1J8&weP@;Lhb7HB^&(jE4uSBrMYT+IHz6@<3q0Fq_{{UN*7=fzoejbMkD->3wp2!;y(zcvaVXFVF0PDAXsSDoj8u28-`-aa`tq0n=`F<$#n|O^ zd&;{f$5=2#Yxd50%E|`0#-@-AENC73$pXi{Z#K;s7NM>T1+b5%<2Q%z3lVC@6J#ZlJTt_;cUq%Gmm=BC-}tn_r_O@eMWe3=CITg=P8sI zeC8zVeI`$Y$R1O}fvqXx+zOGk-@`nEaIDtd~D{BLIf?^<`_#<4jF@;7u zIgrC#E>$ykeN4b;LIoT9Vy63W`q%OYM&0R|F;K|6^Z+L(ZA3pM1cDHGa6fdsxL9+5y$$6sixKq6*F%W%Z zVQ%+lSWxXaA>Qx;l}$tb;}p?fCK5mcm!kkRecYFnZuieR#$6)q!UK0*;vLR$@O=KTN&IcD&++bVSzP zF`x>ZI@YJ(FAqlFbFYpvg$nX=dYN(!uOnT3^Kd-@baO8+u5gMVq^S~XUHWlYqpk#h zpNAPVZ%!29143}*NfGXbyZG-l0Ex*p-tx;ph1QH3q-NX6<2MmmNz3t@^%I-JjN0#w z_k86Ag9dE9PvdS;r_1|h%ASLl;Prq2h-uqxeT+H)IM4vVGA_k* zFTdw5JXAH$1}p+G0zicPYXKV9CobJ_GBT6p{jeFbe1F{J@;|;1{CwOu#V)t~W3r-G z0-v`Sf5>861VLZ%grxkzIE+w;A56NZdSJJ5_Z;A1d-5u?<4lD!vT6&d5-FBg2f7 z)4B)OgBfzF5Sv{?ORJr2v>ZyUF;u1O^_+!L0pZLYAKl||5Y`*MH|gWsTHs3tW+ z0nz2~7{UcZbk*W|m=&$&`pux0DJ?C{irHo^^!}f|ccnUM51gsz88o`$xoNj~Q)#~Z zeXtn!=i@ost!wG*95!!W?tB5SSaAq0uM-^t-r2$3#dNxm!{kk%Z}Ws*9VqS4m}=BQ zmgsGMW)LN528m+6R|*hlFxG|sIp+%NJOuPvaDLX5^LPWmPs@}lx|pqgxUdq4l%6t> zV;09?nl|WkXB^FOmrZEB=*T4$>UEk7MeS$K73_Q1KXk&8eQ)_pK8N7LuBNczEzXnh z@ccMBSD;UyPumM8B84x+=NkgyL%E3BI+48mV3k2CQRZr7jUuoecdx!fh3fJAY{TFc z=W+PL2U>q~&JZmQ7oRla0LA1vFKFs~Oi~A>ujkVEanN?_K~|wdytQow9>W3YPZ5{|D|0Cud`oY38`=Oea#kLNEzm$M4zpI!RM+8=4DoA~pK zpiMvZ#kzJIdi!LKo_PCUE^mje@w|)0k2yRuRG-!kim!CfN_+0(qmvz!9CgR6n&#qq zUnFo7W5!Lu-{|7KYf#>tjDR2ac`7hop79~!#MH3w7hQ1gE2~MYk5%_3zl^sS6|T9} z#E9!caFd*{81j7MKqieh&QYWUypUVKL>u@PW}!#QUPeB84J|o72p|V2U73 
zKBvaAg`lN8c7`GtbX6~hy8FOw4aIbSMl4#CRN>#B7_FmIm`{0OB1BV@&}H4vN9a+CEL?Mst4HI^QeSaF$No z2tf_%a^u?0q8YtG9z<`cF}5QEvVw!lJ>oP7_S0V&lE{@%*8I4SMH>@u^^IGOxhbwS zi6XJvtMfePB%o9*Lj24DAn8Eg7}JPoE4%*lS{6j?o{v5-s4xH<4w~-dodsB3>&ugR zH4yo6DM0I2N33lHRaZg&@a@Mq7qa$Td{W}Q@2AcV`3sfk=1Sz^l2NU#v?PmBiWq8!&$TYn4 zAFs|E1W>@X@bIt+-(#~1^k-slwu7x4-SfsE$~GoLwf5xp3Liaaowo!OIp1hA;|WQ!oANwj&}~1}mSa_UIp3dH z=xsNfl!=fOhd129bsB%h4y1(hCMrA$%^%YX*7e)0uTn|AdBhG@m3TJ{HaEQi;^Cf@qrDRe0*NE?uQ)rCEE?5x zhVen!trTjLT(Ef&*I@G-SDCP+>C?Mr{{P~PsK(226qs|j z#@64yXdAC!ZNv$^q&tBR<<43I%Q;_o>@Cn8P9_fk=T3{+oO7Vp0s7?S-IUi2d>e z0dDf%)29?+?^qjp-vz);(Dj@;ZhX#m7@L%57!N2PZRFmN@wj6}p>?hNk2h}WKM&_dC2tHtfJ{Wj_?DB^I!U5$<@feZJ3=$ttat~7Ias}N8@;F zpB-;G_;|rnQ0~rsU^9@7*6|W*s9t&VkPvmM>`m_xm00RL@75qjrG#xx@g(lWc()P2 zQh`kYNB3|A7C&`-;XVPj5%Z2PW}ax@7)x4Oi1^kdt^;vDd?}(+pxZX!)e;XhVr3Ey zD877RnyO?-l5!qHoM@qfyzij$cMh)0xgtgdjq_S^DEoTqn)JyYa9z ze7FkWX&*QV9N^2(Zb(je9W0#1_&#(2Bnmlhg8#m14#GFg#HrL;Lf6h-j=SB+m?K;PyN^tK2 z1+K9s>Gv@m715isUpUe3bGHY^*SA<%(DG^n;|1(n;MP15o#@_FZR)$<+bZdd@b4w& z>+ysawb1&?cSpxRd=hC{&BY|7wBs^5FQe~|1$G~N zK&!#e9x_!lP;S9sVR?lQxJ6GcNNi1-*7Q#qwu{IOUs}dOUYh_FCJL556m;^5%rno7Wf`L28eI z{;;&KEf=lXmv=(&4j>{L805O9rnlzd<92;$W^qS@iM}!3or!(3kAn~b4XSWpQ0i4e zx3x?dXae=4=RB}-oNh@RAByq6(-4p5XM<7o&)*+wyz5Wf3p5ES4X%5az@*7-iK>WW z)oZW-eD1l?A+SX}Q@@iGZ%qmO^FGx4278<6vvsb_yI7tsdUuW*B6ojW5HjzlSSyAhtQ}1e<>xCQIp8<;!uU(SQxRXk;}hlc zjcm5hjNfhUXMlIa)@V={<vG(EYW3ZB|d*lK&ACo9~NZTXf{oBo-t?$7HP#MS3oQW zB4>Kh|%_n}RO+_{20GL9=}?kBoSPd%QVe6(xrQ*FEFJ z7)4n=-g8wo$*(V1%e;rka!gRq2(Oh)GvtD=es%GH?GR6NxuXL4@tm|_!Pop_Zl5^; z=SdE6;_RJZGzacut8TNk;PM}~Sz~aASIT7YgwlUr@Rw=l(})1 zQB%JLNEbj23*&tbeI*K)YZZ+?opuoJm$hj>`U{FP3UJ1C8RL7e9kf>CmY~?3`mg!`u*}PG%t;0 z)F2wy?U$f{%Yhy>`N}5??qD}brXSBLT(emtVr|EXIa!HN1R&PzI@?&)=R{>0*W4tpv4)u zV5Q{!y<+=(YXsrSF13QymQ^m2Il8qM%s4)MX8K4*beDLbFLy^FGPgho2LkZ>GnPU^ z2F?`bW4CW;8kl5`(bDsooc31m8vFkMyw;F*7jPc2KvdW2K4LMFPVnZm#Cikaiv7y4h&)*h&uZ_z)+p2059i^mZ>>d ze;wdKH1;31{9@`cB}4e84#gg6C*u<`fzXfVC~H-g^6kD{YzvHM=z0s`(i%zAlVyje 
z@?pF~ZK3Ul?u3O-uR1AF=#f#ssQ^CsQK>XD_+uEq84zT{8o)s~45k zGFu9T=U#p|&0QC9?+d2Ri;>)SB>r*_EU`QI#Y2m|{9(4u-S70lSL0hSH>{Vi{(RsZ zHJsf~cySvtrmgAc0V)d#axU0<{j!r`;B;UoBU`+rZyucFeB3stZrl21w%OPBa&NWu z*@r|=I_n*Tp@HU|{{XCX>gz8e@cdzWFIvhH-FSEY@q1`4{yZ5+!?~IpK=aluX`N)P zYjt@1_(60uegexWcm=7!OBI)j83$?r*Kkot@h(zH|G0v{K5w93Q zbQ0{-^VxzmkAtjHBU&N7XoHUw<3oWV(35w&}#t9Iw^sRxv#&R=0Kc06Ty;}x(*MFG{iIq$NRvSxADK64wMPZ zJE|f-tf>-c8jJ~WgyXS;KWV=oXuh$jHJYiR_g#9 z-j2*f{{SZs-!$vK5_A5rEI=U9$Q%PVM|hOkI-mD=&ks~Nz?YYN9GF`~SYfo3--k4T zxa{&wb=`yCoEGx^{C~U&yXD8jr8Ro-f}>r&#v;y9XU)j>D5aqAUJJ~^MMku+K6%M< z7ghpeIvcJg{A|l43s+r%@sWgR%29PM^PbOKbbIRuAX%nukm1haH}%JaHHtepRaY2O zt?o(uVH|+xhrCC%V|2Cn!l0qf$|sG<_QO`y=@T}{cp<&L&0<6X^F%(`Z)Fx&e~gQB zp#3o4q#^G$^r-cx9Vj~h`{1Tia1L>FW}+CEY}D27tk^mnL2*F6b6gZhOBvxg$2o}2 z^Fvv#ZNDbo@tqNj!x7q%0guz*_bucez=s-F8AXr2&K_a-xU?siDnD2``jF= zy}q&OExA=jHw+MV-l@E|yRG6qOsAdjW94qK3PS$? z-@XTs0PlbcEBoR`^#1_U0^1Gf%L#00 z`NwSwF3Mhg@r}BhcHD4DxH_Y+86YWNT)cQN%MT(vO78W%hD9iJef;FKi%?#tSp5$F z0On(|avJ&mv5xecW=j)Bjhm8IB{dbRJ%SLE6wC5qi zax2T?WIU-dGhh*w*{u!&W&uM$<@ew*gOA1o z_6=dF8kE=c(Uc%h3uZmf*~`8K5LlZRB^9YVjpH{)u(Y`8Uw{J>xNU6v^Nj3+S{Ez; zO9xTPWf2Ml5a!^jsj!8$cDOVt90IBL%9M3FTlq0WO9|?J92hHm{YjD1FB{|YjgjTT zN8GqY(k~{=S3|f8_&&Umn}heo)e)&lVTw^iiuHKHb;`tk9$X0=JNx;~5O;r^sDo~& zj(frcUX%yM4W`=1ts2S@Y?WkIt{8~HJ49r--VzYYzLe*jqd2yD$`?eljSg3qZ`QIv z$Is9D#YzXCIMGE8CpoidM;hru_Vwckce}I!+c=X`z8n(T?YI46yq8__;=bNT zC$LA2@#`vUL3o;SZ@GXCs(ErtQ=j?3UGdlVmK+|F{oq}pUJYEaly^9BDpYlR;?4ju zSCF9VQw17$^S9#-G+Gbe8!nrwWmA!z9pPSQ_3h1pVBn+D1obd&0y^8QH^py*7#$4A z3cx%5@D_2$E(&c2SUNZh$+R)7p#_Cl0H3+H95O0UybH;OlWFF0JH^N_Z!5fH)G7Pv z`N%C!wtsmH00{A^?>Eavplfc4E_TdmLuGlw5MCdlj*Fu2CfU4DE;in@;cKgO1XJ(U z5CU0L>h+zGNyaCdFF1e+ zRO#Y*%@OkU>BfO8(s!}m3XzJUt1*!b{z|jQ@&5}82>~i%kw;YSu34wCZ zDXn2;zb1GKzTDG}`MB8C=`u|h-N%y6+(jihO8L7QYyz`niFAadi<{j`;4J6(TiXfnAk$2ub=N7Ehc%E|ljN=p_ zzia}9>f=0&ow%(aZXR@8j|RLBF$#$)Jg+y_Jgxn19>WE-d*dg^^N%ZL=n;P-n}|9z z2uJNE5TOZO-^R1XUEMUlJ}{s);NdTQ@r{5KgKP(E(0%#IvMNTeo#ons@8-Bb+9#eI 
zgr?dXnwvZAx9yaiH_usFsNG5RTTV8Db=~}7M{i=Dw+xKVHdFjkAKFoDnvk@8^$ncX4k3F zc)^D4(XquMq~%+jh?)XzHjk^$YSftsLo|Tw$gW=iEBEv#&?BZRCHX@&><5CKz zgIUbLNstI}sKz`nt$fZia>T1ASwVpIq%L>TafiV47}tDGhc`6n_>I63b=|!4p3!Ua z*Xi@~i)6Md`sRS|ZR+d!#3H85_^@577q2Tb~ zj5ZY+tp~ij;}AA&&Suw?wYq8OV5bn zLlw}}F!B-@v9U8oE5`>Ir41%hOB>HODZ$S%E_Vi8qqxUO!<`a_h+*9rDbB>R2|^L@#+U9)JeVUo7GJr%-A?uskBsE6#GC}ew^5)D3;zI^JCgXG z@*ZG!#KQ`&fZlBO09oE_r~s$rkunQW*xW*7rHm&_(~FS>87}_-Oc25@_S0TYW)pQK zL?jUQi{O=?#;kk%9v`?{q{rPG1TXK@-n; z0MWCPc_}1$Mcx7IoQ$U#162-8K)k-Slug06T;uDT-|dl=elmo$*0EWl8vg*UT1}>? zXv%7h<|aQ1zHkx1>XT+a1FRItft?Y-J?>+5jTrWTyFKRSU2i90?8Mt4N?1;Ebtu|& zZ>)+ttrwF=>6hSFZ2fWB8M$$KlR-aRrI6m8Z_)bY0RbcT>kFXin(H;Pa4jnFgaM;i za9~Sb(x*9suAu&0AxRzvHS%?u*c4bz{EkpA$Py2=@s=oFRXXEVxT1{=3D*8OHJW!@ zv+vF@#{q6M(oWon`MaO@dB4|tnaWGzrn0=*GP_l^@eG5=&eLCGAkjo&{bqxgT4#)6 zL1#mWKg#l8*1PMS21scc-#HdJ5sog}bKdhwJZtsd4%=S&nB8X)E}QS@9Z@B-OTMx4 z9|X5>#kBb@fDG3a-xx_CQ*x=jeBox=F2L`CPI7qg*F#)!gMlyv+A#t0myf_?Y1IJlu)vFMfY%tlHS~OA(fs94Ri5SnAg~?s z7(Klo$uT?wmYh4|1<|YJ*{1_$$8P;%Vvy1u@o_3?TX;C}l3k5$`E#GCl$z~g?TCE3 z$5tT?Z`T0_aI^NU~;y`ESZbUOv*f2_6T;5dilQ#m%A*s9sDtE3a zf!oT83Atu5op|@5Ft`$k>u7V0J$z-s5fNLh;%^PE1WQ}v3$VpSR=nU*7Icbu7r{8h zX4KULUjG2scpD8s4?6Gl&CtZFH{MhhwR95}54eQD&8N&<7PX~E+H;#+t9BRKxT_7= zU$KK%Wv8zO110fki^>DnIeyO*4k7c31>ol84kaEGKa2tj0uP^GrX0E3HJas`Zv6cr z4`4NT9pXl)@Ojq%0DNx*dpULcn1*aZ!QVNv05tg3*@Wl;X1lmeI?=BUGjNe3XYqiF z5=Z&SkkAwEKj$hG@1*N0a4PkQ!2{zzN!#8=)!z;$O5U8&lznrC z#c1ilL9@an%Xx>cKIS{x`Y$7q?b7ZX;iI=G2}H&EMBLisy-n(V&S^MKkk$_kiQU#( z?M|fs0Imf&HM>822cU0wP>fy^hh)jIIc>Zy2H@Ur@@Aq3RB(`ANrWsYBKUWZBZeXu zaOXH%dx?*1Ten`xGQALhcKOG%QYifBUwHt86=9&<{U%bU=bw!I0L4H$zh{4KU;^u% z^y>l3_|G2Oo@ZI8r93|I6Zsen2Mjq4Je$62Qf&)?%7j4@{^4_e23L)7O4Q^`O0M)1w1MG;Nyq`)!r?dJ0a{t7)6$-(1yg;Qj?Gy zt{Se2;ROPX?gjxgSSFW8E+k96p6<1YR*mwXwh$Ev+VMHTWJxF$Pb%ooGC@t|%8#>b z+7L6Mo|76Q$nn`QO5WET^5<|0j=i?~&3-l)I_33+<72^`2wh@wq605j1z;e%Hd14v zNmeI=^uwiY?*93?`#H09a4&2YK4vljMr+@k=Dn!B7a4#FmAEs1I2a1iJ`Qqn-3H$5|D;ZyFZEu+2`B^8DO4t_z5V 
z?YOyImd^ZUl)`!OpO*1|oUIC%v#(fd+vVTeCE;t-;}&`+TxRD#)AN)sDK4JhIU1M; zc8~nv_^f_#2E;m!@fJxuY)_q?OOV9kwgB$#L@6}N7Syl$}5U6+5HDS(Ze}+aF&h z0-EGJ`~LtqC93Mjrz`|$skNHk0PT*8d7QtM!4*yd>P$!pgK0@P z$vBiDH!zN7_DA^hl(7yt`Nf3q(+s;MeP7oh@Gv&fz5r9e-J;Xd35VjG81J zoZOY$u%9k^AFo+xJVu^$kCP!)WH@eJxpp1E^Xnx*_Q7F>%v1p(M%N5iEc?)PhL+kX z_pStFcNG<0Cx13#%F&=u@C)k?3Y($u9OP^W?Oj2f_||E}RSn0iv8*fe-a7~y`>v0W z=LS$!V!W@PzB)i`mA9iLypDu%wW|pjj-q)my97vI6Gz(xakTG`I2yQcv*SivrhD^- z-B0H|a%Z=FWsKht`{Y}#0bHjIyFNT;0{G+T=vQ3jd4K77$yp~VdAc!!*zW!f=ePjS z?N!aDy>qlD7^=bJifI1;SfXu=b>?8wcFXbOER)fBJ9OhTs%=}L^5Bu+5h>2cz04?3 zD0W4ZaIS-{cduA!h<*0_%vBhn!S4d>SVvxShH2L>vx;+bt$w^7vD`eIOk~&v4-eZk z@Osf-oCsZ;jwR5%{r)pY6@9us450A)Ls`hkUjG1$Z4NJMJ z`OO=)bHvK0Ht@LzYHg{M<7#T_6_f+1H=LJ>oJS{y_3r>X8ek;b?bDR#wa($yw}rq* zMd7&y;7*@u`{7cJOXu;IqJQtxAle|smAbIo&Q-7~c9(U6ryIpXcWZh%O>nfu^{qfrULRWMpXg>Ic6obq<3+r#S~i%td| z_4dsOpx@HGV5qZVE}mRBD1l2=2H}9A#u42y?hgZz8+3l2vUWw|dcYb&a*iA!LtMCS zQ9O$eMk4Mg^^z?wTkjTVA~O!t$X*Pv)ZbG6F_BcaUa^T3-AqmCT->(dHw5!K$|l`B zvz_yhStRd|#!B5zbHS$$``acF)*N02Wwc=5fSw!awYu~V#FRTv9J zHQz7BMdS=vH99aUs`oxJ+EA!FmRXGjL`qURhw9@H1ZW3mpSEhmH?%)rj6<+iGkQ5W z_|4QKB=gM1Y6`%*SFEavND`aocyKX_^&20I7_8SLT0R53Ss|cl)q#od9p0R<^r-v| zGgd?c`6R+RgL`@VV25@A>+zNyn%u@(uDENg*5ytEmSJ1$=M!%~);&AXepg?n387m2 zelQDQHe##+rRR9~2UAX0gqKQlg5_D{0PJ^;?ttj;2yq_U`eRc}If2$j?)~?0=6dDt z0ceeJ&PY|WTHVAJ0vs*tHsgWO{oxMXaD%;>JtS!Qb`ON(DI>9eIZ#yXS+@@*pnoPi zM#HX-37wlr-w;&kV@V%)KoCG$mfrCDQ2~YHQO8=y5z;^~%?5Mo;-uiJ-e;MH1&76= zZ9L}Xw_Ww? z3o3UfCNr4pU+XD?9c9wicqUde;d;V%8Y*u6@ecn06Nl8xIyuu{#%->8x*M0|M$^~( z!R0tHrVE#N)rb?U5_)*ip2;r;v6|&)9T^4XfM{5}_TV)ZPG27x1GIYA@tTbub^idq zFlSWlc`ydW*Bu{hvq`T!-1k}N{Cs7a8?DYTRoZ^r>lHRLD=%WL8P);a*1Ku%r{oHnsk zRAqawi*eFUq8!eL2&tm3YPH+0Q~sG(l+_{d5nQ{`cbF^R3C9jolyn4SFd+{dwdbrb zI_&twf~j0lxw$Lp;_l-W;7dmFtUe|nEc7`RfgOnJeDR7}Pdno&)popRIKt`%5P=?q z#I)3Pm%>wfT4j18)%~z#rqh{<-6@jIjeLFaIxB|yeeJ*w6q~|1-s(K%Lka5b!0F?8 zFS^QPgu@A-KujSEAOXRn4?CzF9IiD3M^FS6v@FIHDk5mqg)VHTB1cRiF-a8VryIi? 
z?Y@S|ox4K6UNKji0zlY1rf$F)*D?h??->${S|>&ZpS8c=0S<D{l+6SYQJr#CkvzY9y0y^2nX3yM0e8Y*z6EaG<8aOguEEdr4jvo2^kLcsX>$C#$) zN!a}5`lr6KCT@^rV|qnL>;@>ONE-vm?d`{whstH~9<{u;jq9I`tE;%3{NVQLc5KBY zYJ6&9Zgc+f5cjBcnrmwDUFRhuFZG)oq1k3N@=hT15wVA=6U|4S9B!<9A1$CT#>1>BR-($h!Mw zzGqnr5*m0Q>^+WO5`;Ft6o=-dHA-pzVEp;p73WOaA~kV%;J?1jT8n z2g=u2t6DQ-uWkW|Y6GW+dcg~HbMuJ}BzW`ln-c5H=kbd;zV$E-TMkE`jEk!;KCtLp zt!KU7rV#8Wj2-V%y!gVQH{ZMn)(Lsv#zLwm2_{*$_%S7*P0PG7jT>DP+W!FNGn?w6L$m>07|0M?;aAQys}dlhE3)IeP)?Zr zn}7s)RBaEA@GE-bUbly)3EQ#qKAaSPs@cuc#POFbkl2oFyD}ksF4qtWbWv2{Q(ER2 z-Aik|`pxbduG!XhJKb`D<;c5PKG|}7?*{S&a>}wvz4w|{^_)g8KbPt9P1)0n0eF7d zw$8Tsz=}Gzjr?PIV#}xmt`idI$BoyV0T#|JQP`LRDh~$-$DB!_ZAX85fa75eQhP92 zMGnJRwOd(LX32eIhDZZ#PE-K(E?sPnHNAYd4RDp-L27~EInR{c*I6OIFbi*W zi}u{SxQVq(#6LLi0T^FzZ%9+wVjjP6(cYv8-i&DM&VUiWb!Q#*{R4|i@`+olbs^PXN#_kp>M*Jp` zJWSm7ONuV9kED8jOEquCEmvXd>i&-7gdeFzFXRe?%n#tO;KIXHx;B)fpxs-UJiQ0iIVtyeHRiOpOc@AETlZVr>y4r#HdqL9pZGjPO>Y-^NwnV>i)Q< zrF4{U>Gs2or&ihW*bl-7RT3l17+h?{{T3_ z0$!G%hbJHrqXs{78PFjj1Mpxj+Ve^IjwFB)rtbUe3lQsC^Mf{^^cO0Sig%~uAy7Vv zKTHiY5<{m193XLfz!pBgj9AFJ*W)UvBb7>%HO!K=|%|yk?H439QhsFCorcp>=ip9(>r>7+y%2qyg;heneSa=Vv~AY)Jb3FiOpf{1x_xz!WNt2+ zY`!ZdUy9)b8?k)M3n>;i5^A>*tOC;-7fLB{j99EQ{GzA~%H4}{k760`x= zH=oxSBRS8NZ|LI*Rgit$s>++VTR25OS2*sgPMp||zk~SCZfr5m7@IS8VxrL5!QM+i z0lMY;;p#${jDJsyfK8+3>m|_f{{U+~K;NA9pKRil*8c#Rl;S47_@G5wybmyS!~Xzp z4ud)pxx9t-HvD2mX-zpTz6ZuQnDqrM(~Jzz~M&96ti;%$5Y zxHk7@!<^Klk9eqET@N`FYBvC;hvsV)(WLJzrtpsCKTqc&Hi}~rwd!$zItuCM#%#LZ z>w|l5&av95y>E=M(3<1=z!_Hg8sKTP?XI(jCW*Z!5OVx4Si5Xsbl7m{-U$(Ci(7QV z_wNzJGCtU3AlkQ$3&Wc$8eYcrp4^YN?OX_kmgoVc!#l{H)G`bW{D>cp@V5#L+d)`$ zFkmVohP-euoHeCIRoi-f;RY&fKsZW2lL&~~e1Fpj$sGrgr|s(u)SD)!lQ+oH8_a*L zWD@pJ+FPTea$!-bqL6ylFbaqxWO)1FX(r&aMKD?f+V#Hg&OJrh9aGKp;AoqpX*Wrh zqcjCcb;c@trwU)r50u$qyhCta{pN&6U88?FZuvD1Oi<~5H09_R6K>{NpT@K6jW^!*&SrgWfeTo0H26#2O^RXG(O!;FBHuuq-g(X@@W{qadQgj`Xz zv3COC-+mAEj1Ym>8p^dvwa1V{{TFgUMbeP-WpikVqzeNNv&fl2{=>s z$h@?(W(V~C@P16Z-@V2@OWPd2Mje_Af(i;kn!)C^^Log&Q+OPN(^mu-g>17bX_YAP 
ziw;MB7>QMyCN-w3!@-mqG3R~aw|l!M_nJBH-+4~g%kLd7_uvVC&IJ)ydfMAG|yE)o;UwAb25|7=)aS%<4G$QCXmVl62 zOI^u`5wM21ll@?V$fWDLoK_C3-lTtfiL($Fhurk#KwWpQgU)mdL_ZlOny5m%7iX_r zWE2&&9TdNOJ;9BSk9aJKEexDu67GuKJ}?jwx+n4d;@v_vHHRzU#T1Cv`~lPj@3nmwVFtY)m~K zb8^!Kd~X^Dm48fRj7O1;`#9noJJTQ2H5&MUL+{QFZ~)QNU0dDjB&Mnr{j$=9Xa;Zi zzo+Kk%6RjY zYNN^h^@}K|YFqioVFf^(8_8?enTg3<8n zIAC?YXPn$Dj^b^=*aErMjqsZHylkkQPCCdcnqLoDsGwa`tAOd(PX7RGU2?ki-|G$U ze421wS`7=EwS(>bObT*a%jd{rM(SRl`e9)#aI7)lXgP9HM)-G%NDrNNge*Nye%U8# zmR>Ntvh*DP09inQM4pf}tVDT6yYA~OAw&!43Wh8VtyZi!0W)T+XYRP-0>Bq;6IDZD z-~c(zmLXZePSdYhRHa%IRN=lchKeWR4i5(ytvnVznFX8RoqNP&O&{0WtA{i*LfsxO z?BGyUrlJRKy5|m)+I!`mv1G7Q%eQz&)s%edW{j}j00Bq=w({33bcXlngN*=4Ix5Zp zIahdMTMf|m_j7kgAN0UCv~T(!mXwLyZ>6QmTc*fL<@vc5_+Jxg(T$8g}(NF!v;Df&wcygA3#237X@LtmwXq7v7 z{{YOOP0lBb9TlEm7%-bBX20Vi){i>YY&0D58_3qt=H1{fx%bh4l}%1u*}8q+@*R!j z8vDgK2yXIc*kaRRd}k}5@q6=}NET@0D3lk^?qJiJUB|quvg+0Q<+TR+EPmKliZV6U zQq_de>A`R)>_5L55UHZ?nK3lV`pvK~_H6)W&W7mCK&>1akOaERWLPb#^c=WhJ;5G# zrf*j_YhsF|Vfn~0_kW|j2PIf-*}dG6wl*&f{oi?XDOfZ- z%ZMzL4zro}%8`ORwe4QqqQGp6I`fD$(kZoZNTYa%Mgy7WO()J$(taPdEp~^X{qO^7 zaq4))t7$e*?*ndRf&7@r!oK%@_zLciOxE*+M(*Xh6P?|DF)Ow&MxUGlU31SrY&Ev& z(i+Z%yXe-u;vYcGO{O1|F#=8yCdNaB0)FVY%0-0otitsf0NS)49IV9s5D$P{H^iQ= zIMS*RRGtg3#ub6EiLPF7#KcfMY?su>WuQlXYv{+N%s_^B4b*=c-Zsf=a=a4^;9-6j zc(@-Y9x$rruXm3)gx&J5P=8z^TNP5y43mXj0*B`)BAKdzXT~FNipIyEd9xHLL5R+F zWToH8WgQUqpLvKHY*-6^=0H#juI9CYU49*$IJ`-As$Ui3esNYuRro zl$ax0bvJ_)0D12iH^~=V;IInt0c&u;MY$bTGWx+G>>Zbo{d2^C>`+gS_njf3D1P}S z)wM&3jX4}^Igt1cGTu-KxBAPscitfF1)7+M(^vv)^XC$p;OHmkDhXc;)fhjrf`^a-r@SFIU9Lmm~bM*p| zKN;%4<>k3u(s>2Ns)oI2aV-@NfPaj5mC>^$*|xR)@JFwm{qs^P_A=rw6rG1(86qpk z#l$9pk8N;AObh4N+ZgQ+hH;XWroRS317Ysw>p>~JBUM${adVCx&v%GnSrWe$D(mH$ zD}aN@_W{W;Ig@A%ht5<)fD)D2U13ih4#jmb-9qR+o0G;}-Qm-MJjBg{Y=h3Z95t1} zD3ZYQ;}xNFhznuIt}z7#AJO-4fb2QAK#of7m^gGO?Nk@k-X&4?B@9@_l`Rq7`OcBm zbYFnpaG?vq&sg9Hf(2;DRN~-R1UNO%jCpGcbpv(aF$4qc7uHlaX08~4=7vm2e1G-n|?}&O< zAXM#F8<~-UKA(}tq!5BCj+fs{l;3L?&i1${gSB5AX0<*uVLUp0VpBT0=5>zf+!xkf 
z+YlMNRqELQS)PQTzHk!Vog>5V#1@s!n+0 z2co`rtc0M<9RMZW=Q&hUf39-oK1>}gTdCvT3MAh}Z|RcqhPSQW_karneBLsqp!dYU z!rzm;^i~CRa<$)mLXY zkZN;!38~g{t3;ik+t6WDfY_EB*8Td#B1cQEHeeRm40cV7!bJfJcnkQ#8y35TJ~0ls zY^Uh^zzqYgBfm?MRtuz-*H;rUFwhB%}F%aYTtNJ6$Om& z_s&d#EPLhN4N3}<8W4PQlOw`v6abR1_ zr++vt*BjBt-vJqK8k~7vb zEzvx5Vv-?Klb={HP%j&IaoEdEA%zkEayG!A+Ju*xj9{t?uY1#kMmmSMYkqz+SEW8D zFB9~+(qulzTIbFkM2GHGp0%E1>kN|UA3JNBtNMRVZre*ab-2#^$0X1LsJDNN4U$Y< zQ=Ma=ZwtfDgScTxP?vBe9*mDsC6YK;szLT`lN1>roa*()a`}J+_U7K8Cl2zok%oi3 z<8?JRhXbNIn_Dd^io9M)?w$R=jS%v5aSL zldWvH?XM!b-uz()z9c`aDr3Sux^e(W7mnslqRA5eF@3~Oz~-S@z0(|rET4^-?&oCp z*^~=>!<}Mm?mRu`EH{h8q2crW;lOdtc*UcMuIG8R0Zl5(e^`Kc4>wHxvOuW3q0alB zFc*`VxBk{8c8GF1{CLRe*|T@nHljzEa(6B$0EU6*;mNH%zC=N;;jVJlPL zd-t7`2X3wjbuyY&roh{qE9rS>oR^L377;gbbtm5b*$|4~5A%+T4YfJ_F+yFeHHw-F zoKM)w?B00&J~Oab^fq~MD=B^Gzz{$kZl(FbBU<0chOOx=JdK=1FYbjx5Es9oSB2P4hwyYqwwtTfvI zqI)qo_A-@i0~qk1(c6;QD6Cmb0|3vH(BJ{9vthR8hedJF^B!reDXh?XXFTGC>50bc zmTCnx<*W?qE@Pf}4}dX%0TQA?{=8w1NnLFTuUQ^8i*Ad*)4Z61ClVV%{9x=0wCEv; zUP-~%-y{)NK@Z<0r;fSa1-e4tAFdQ|qR$_UF2N4O<6d(>ocv5zA|_C(Q;&BSkWFap z_krkNpXVB`&t!Fj6&f?44)9T>7nJV+ctv%8tQ|9HsBd36saSh?&00wAbAk}Ij<#%m z8-^m4H@enWQ#nVBTTSpoMi@b}v8P%&n(>@%EfgNKTZ)iKoCHhZ!h|&nr;CRquWWxD zbAtsyVWdm6dfp<714HkuB2!(6S|5ZXhamE^X8NZigXobK^a!RxH$b!p3w*;V&O~%I z3^_|HW7ZmkQWLK@N%DWjLYlJvaMJNLeP?ew3`nmtttz>^niyAdJZEXG>lGwpt3;zG z%g$;v0^t_#ZgNspINj6u#3TZ+<0n#=?g4Nu7}mcz^K65nvm}u6hbnk*P|^K z7nDtXv5Zg++Z+_lAHR$o86-9O&_`S`#F&GoHu&IE4<@7_pY2akDS*>}(FiyVRLU$zByD9d=lT*p!y zfwv9{#?k6WoF|pF4ZPK!esB>tghUJ@O~5v)_)vkHa1_pAXgC99&}p)T%>h<^`(sKx z?45eSEL0o-(?K|Ks0(a(*&~WYQVY)b{agw~P+QFYt;=i#7jkNuGfcEolKC+K;4osk zfZ=$s6uVhMtn%Xwx5NY6<-`uf7glY2;#3Hs!_(&@-mIYM#RZ}8c*^q>I`!i;Cd2cB zpl-^Uw={3Yu}#{XW$0)tUOnPo3wM944K~i>%a!bR-k*ab-~rV*AKu_b(@#mfU~Nxc zG9w1LRo~+`=0}ei94vgC`t^op1Zkz#6{1Sg`~#_)@V#hR&akXL&=1D3bW;>k;Yh~> z*iU@qWjIYD&u$A$>!Fk*^6>4|)hvFFO%NlsJ})@&DS#vw*zjRH>Zx1&YbZO+(|pH& z4n2WVG<4B1J|o*n9oLKq8%{Ulv(K#X9P!8LiJYKvMxUl3ri)nVp3DJ*ML^)|4JGkD zEx9O*wQTTW8>%MyUs)2J#pi_j3Mn1vjq&RWksfxF 
zH6okdL~D5$`S*h$6tTqi`NIbc?Y~%93X!4>=MTjoP%pO@9m;TS+!;s*Y@VDHd7ZZ} zfOR;oD3VEi4Z#lGb-2un%l9w=&H}8jd=H!* z0R%y+Cm(p?SlWtfjn7x0kZgTHEN0TzHi0} z3ZN7*734#JO1@4Ebk!pGXVgVxW;w7M5&`# z`>B9%JzCC|4bymS0SPk9s_|E_*B!1TA3Iqp8 z-_s)E)pO311CwWIiLxRkFIjyO!1z-VbY>ZZXh&`kZLzj<-b9*RvyZco@*gb41$DXy zG%kz6mmsj62OsMv3h1qV40KqzgQMK+&DxJaqwkFrEgdKxvinr@De?MZmWN6T{k>-l zETXJu{NNm-N(#OlF&V@aJ`6xbTFn=X@f#Zh*M71WQ7n3LK><^-oZ|)Ba+mnQLJ97q zpFT5G_!E2h@tpP#Zn51$3IuKYW}9iuq;ISY(Kj$7Dcu{T{;+GbIKAg2uR28iG8+_} zi>!+7&HA#|1kaOFsknqGfw2e2!s(-Dymh2P^G5H}#mo%_fQ zis*(t01)#&-rQ**z3sn@SF8(a38Z>7n_2Dn^N@sm#4vdy#+_rT z3LIZPS1SZobh`V@TM!Q$`Ni6(t;PS4MNPyn7Ijnj-FA97#_huSp2sEwTD zMmZL*C^{Eq$0{EKA!6zB4&RQ01YuBzzrG~zU;9{4B4{*i{CU9GifdU`tQS+M=N)K@ zphc{>F{w2;dG+TKBu9Wv=lgsNAx*#6#aq$+{USBS5A_U#sG18(B z-w5R*=r{QP0C^fv=v{Be&KJTj1k!xulEUo|cv1yUv`v50IJRwNzkl_W5R-Iy_`vYB zdfTrZXCnYZf1> z<0Xp&i0kcg@L;>&C!8?UZ;gI(lE#mL@Wk#J3d-g$lN9UBkM-t%DNT+SaqY(!8T@Nt6f z1GceoLr`7-{c@fFXzx3S1Ir!Tpy>kD>P#r`o}Kxuy1*NLG| zT<;pR?(2H^#T%V+@o=`lHzx6kw5_N8-Xyd`W5Rk&8inD$gJC>kMPNFH+2;}qs-rH$ zw;iUm#!_RjqkOj!pcA4@PPN_sacZR8T|V4Ah_y^62ZTzV!^Sk)oh!B1IK;eDNxU@g zyE%R1b34>e(%|-+A`slG&OVTxtMluF-foPv)tqcHP6GpBYtNi|Qbcu)g7P1D;v^$1 zRQ8;Gu@s0LbN>KqAdG?Y-}8X;3AfD3Fc+EJ_m29PbH~mxNgOZ()O=+oAv6oz#+yRG zM*Z=0Z9LU}9P630PM2JKVxsJhsFD8wcqkw((&>9d@77pAM_+G)2A(h;B9~iw$0XHO zQ;&>~B=JXk{xJq6Cg%SDTt72*PX0_{fKLEDa&H0fmc)LB5>-`UL-T_=6eU}?S>`%= z+4GgVQ^We^zyJ|Nzm70N6FLUZr0W2n1G`*&h4z|E=Tt7d&x}RbuYEf25~jp@e|%F! 
z8U)+_0GU`tzCz&eY?`=sV65BDGdhohoGMb@wLj+`1Q&=|^On%QCcj*isB8xO_mxXl zgi-x5KnMie`e70x5J!hvGJ{l#I1PxLZJ6;ESJusa@W6$k({DaF%_ZDdo5QP#Qlg>q zclX4)fDbaMI&rIF?xO4C5m9d998a8w)6emh2y7V}JI-u&0*9BJx)Lkod&RE2DaP}4 z8K+3`o0ydY?*9PJD@a)$v_lFr7z4)n!L`}ovUz{^5pwuaCHS}+TMPlYbf=dsqNkBz zuJdh&TqXNp2YXHIlO$oL4LQM%kUuT}AkGr$@rKu6aB(o*(0Nz+$f_WkCf<3O6Lr^< zAnmI!96o*GhQyAlZx#X+UW|(w8WxZ59X0_v{WENnv;+Q(w@2yFPz_uGVqR;ic*dOp zHO{v_Fm~WU4oA1v8YE4gpA*gm!if8G`Nuj&hkmuxP2v#)MCaooQ=Hw4_R9eie5v{M zgi0wyb`SG02LLKxT+fWFbP}wO&C9|fKcZW&DUWcH-2!@qTXj-eHjuGny&FaYVwA1uBp3# zvIG2hag)i=8e3I-oMq%CI88sCP(nH!$ET)Vd}Y`c=%Mz(L=kkm>*uUBAsMfm_RFCY zQ-rR!)&Q(opbO2pFB;pT4};m1=VG*-jo3*&9&rP6xbKgguv&1Pzdvk*b|<4E z5QK>p>NzAtUc-NP5fG4?U5CyCu>Q^OSd>@V!As{50Tyr-{NM%91gxKf5K*=ijNhEH z&^xL>oF2Ucmu_m=Iz$@zG1AKPc7I=d50I_1ZR+5mDr$(|DS&ebaC*Xv06E#e{KANH zD-4~s@^kuP3kQxp_mUDqYVj}zkakT@crY}?bFcZv1+@{7(e`skW!F+-?`nB-jSJW; zxVi{0E(?OgTz7+QgG>U^eJ-X&+}8@2*74?@eB)w<>ra0e0i-!Ry9c)d)U>YjI_n0Z ziJ)C^=c5tY@u%HcsO}T;MaQ9 zZ|#WGb+I{n0R8e~!B4uFVvngDdJrI;^VU)eppV7D7jktSxPVY|*B>|u)Zbm0;+;os zu{NE6`?%PR*GAKsnzLbf?->BLbNgnt(LL#Z^BuUik=Nmg@u!4qJU?{E~**Ova6w@Ud-Fai)>hsU3I=LNpbZfv0^ zE0k>uI6IzmLb2AyxxSS6B=YA5jU;DZ%ZHS`9&-6F8Xwkl<>raoH~M2?1$n;b84m4K z>QVvS^@SVp-u~GwsywaZAC%Tg1L67enlKb0!&%3d9#_07zW2ny79Ir`BY+=lA722D z6ho;Tf(V3RVw`C)N_r4OA3}oxt@^-piD7&m@!$xWNPXM}(0S8Owkm5|C~M~oJ8~2% z&NYZ4K(8TxoJbMjJF~w=ExULlsm5Fe9pBp-+vGiFi!Mtk2TzZ-ARbaK7Q~1gwV)5< z3&m9i8)l_xkXiDU;9z3KX#5Xu7;QGd<@GVDK$}IEeYlDQIl4XX5OSzNr#l1Z3Qv=1 zW8-}}aVB^Qt_qNaIC$XhxmW-o0~6L+0Pnx+902aFh;rVs;)d_60ik#au6ga{paYrxV0rDq)?ZNa%zuvxemnpLxa@NtA--lnl$Hce5#oB=mW@^yxXV*_Wrp_yI3 zI?6Y%A3m{FIC`5t*=d0!rqyRScdbGx+?W#%fF;rAD*?6T)}~$8XmDk#2kdJ!yMR16 z#L-8?x9`>iIxPPH+nVXsbetw|O8RUMk2oty@|=Hv7_QrX9Jj^_c}_U<-T<%=)#rWd z7za#cIruWBkn!EmMJuSseB2NsVYA~J(JwPegbgo{afVe?jFTGEnfnIx2~lhF1U6SPJmUTrjd7O=m%Z0X1vWP|8pVxNmsyLL%Kxr;^daSnWfR9e!~IQ6x91hR}n7LgYM zSP1~`!$%z8T;2HGGIXW!(fnWv4FanEzVef3=-5n%QC+}K(@4SQ0Cg9g0v%!4Qf2#jy?8R3>!FoM>;5aC|r}k?sB_%=A_UA4cM$x9^+v}VZE50D2V%tW@ 
zozca45Dkx#PmC6$H^%zrVcXdR=_#9XO@!xJ%~x_0^bA{!8nN%runxg`-n`+_1$kZN zR*FHv6^f2NdcgcU_nM`xU(@%?4HQcD2Hc>#w64dwi^@p&+HZ&6EEX%XU%!uxi@0l@ zA0XhfNO!r;EP1akwUDqb$uI_zq2CjXP)IG|CB$&3XgPgix1sa=dBA~HEU90*VR#4; z6{aKr0pyr?lC=x^V0H?l!@b}w4b7vV_;59faQT0%ceO{+>j>C~zx3lAiim_J^!0!_ zPz6W`@ZKB_?wI$#J}`z#ni_6T&L4&)7mj@6cxfW8=UT%M2wFA8UNBvaNL++i@cDlj zuyl8M_grfp82dVMwTQaY`{vBb@@wDU8FdX_0N3%1 zor7uF+{qPb<(2XF$FKNIX?xW8%~Tcjsg!{yX5PEXK_2!QuMy2ViF)m`yFY)unjuu0f~`eEMRZMWJeds?=S?5` z_c9fz%@b+gvnEC=Kwf2-)3pjf#P%Vm83^<-`6yE3KS0UE+BM*?)Q~Vl%4tWaRIuK80+xl1PF7u zAEqN!gF)Zl2f*BDJn=AQhvBEk&NdNy1AnJ^@>5WG`F!Kc6QXdxxtu~!6GP+MFm%$w zfNpovJ~LJ~p?f#X%0VCY;{q%vzP>SsrjfEb<9*@|2E-36E<6VWd*`p5_7X>P z_U9r1l$$&IFp;EZB&tt2~Dp){?I09~#7ML;~GU<@b!UzQ#RS zhzbUg8_C1bF+dR-**7xcmLt7M#N#|*9HyS17ytkSf*#Fp6i}=sx|<9e2x)ZAA@PQl z7;Q-T(}Fb!3iNK@dAP+#5E7&?+#R?%Es(h2|&LGQj5lgj?z7zrJw zj~LBEcecaj{xKC$O|`W=p7E_aPKm~$ms*YcWLH3QzCJN;D0}`hKnl)7uNuG_1a6mZ zBo|CgzqiitVIX+d2m8eeu(WkDdF40_{ft=1Pa{*~;}|g|!;0Fp$$5vq{O1HxLbH~* z5VfN{&1(#5qOIrW9F?t9yJ=5+87hFZJKsH>;E#z%0yLr0_~!zQyc~wD;nA<#41pjZ znzx&PLIYJpTOIu6BI&w3bBNL%ZssswcMqBbC05AFd3ti$Qn(vspm8i@XcH z3#qA)aw%?^*0qaT3A^v_gqo#oyD$#IO~LEu9V~@CFCV5q$LaUY4=c@I`*<^j@L${8 zj_3)fZT#S^3%*zS#2Z_>9`Gkf^0S8l$PMKF@CnKTMr-2$FHzgcjS{2_zWir7zz4?p zJ!DX*HbmAX6j*5uf5uM)Ym;{9OaUQYHQM=K7;Qv^e4G7n77);(981MrbMLGM!;dt# z(bhdG4^4IR=NwJm0pur~b>=dAXm~y1V$-&5z7cXunmKuUccUH zA$;-sVDcxi>(23P-Q{@uW6NR0?{D*nY%Z-2-xU!}>@4^1Ae20NADrz7+UGT6#;v}x z>S;+$za}>kTV8(O)@-X8W5!CI`3;!#)@)n@!PdQNubiIf&E3C_aM~@tCqFr|7W(OY z+|;Z#;J^gqQLl_ckrtzO;(5g^QyTR6m{<~mC>uV0-)yuB9SCrIxG@Ck3aj_Qs-ZE0 zss}!=jHM&XL{#&F%>n%%`Ny<^^XK2=D^Mp3IUcctDWD?n(cjwwb&ZQE_@44x2c!`L z%Ad9{CJNTZngi75F3_cm)9z!@b}znk=D>8b_ZTx!+$O86OaWAm^|*TDFGBb$=chN> zsQ|9MWgMeRUO)2$P#0ZUojKn%Edx8^Ksz zgMKIPjBo)7HOE*BbQ`U6fvN)Wy*T&>QX0iI6GEd4;{fCjcdS{w9!H$)!gk$Na+boGUp>_B3iDiMwt>H^gl-obN2?D%D{hUj76d#G= zV)=A<{ACc{^&L!M2&r2>8}j7HmhF4L)=pJem>qjZjeuupqn{{UWbH4b3l*-(F6 z0i)rg{<#BIc_;0R+Cuohue=Bnz3)>I87aFi8>~WTP46Xa1mSU2o*?oV3$;rO1CR!f 
zS*{p{k}tD6cA36k5@XCZ5f_6002oS$&P&1T))BlQpEp>6kzg^eo0jOP0FH_003r~m zXz%@Cl7xUjplJ^}lL6pxN)-WTYHtFP4aM0Wi{9}*&WPfUU~5QOgTy1r$oiQVg@&ua zJ@VxMXxO8xJ@tq(E%z`0ih@=59+43H{<*yfEDIlJ zW>O3lRzCjU7%rI`2B)k2VJ+xHdGiO(R0F9|fAbkoI}Tv|u|%jjB=DNt$*$eJADi9{ ziXwl*gSbwiziRlx1>Z4x_?VQ%4tI)xEZtogq=8DIfjpithLPOgSHE~RF52M6v*a}4 zOL(;L@sq%LHXS~FX3*?YU&qEk#sctYxT>U4jT>gX?qeMR1wHZl^PEQXqkZw~5@IE4 z?uhFJ^3^LH>qj}L(u4{G+#PSuIk@akzYayxN02>i!(Gs%>kWV*<;Nj-jeNN^E1-p( zYXb>aZ_h!JMU9_5nXklR{m{*>BB|zL3h8H-YakPLpYxk8!u8IuKqEQb(iwC&^7sDp zLA`QGU-g9Uu+-hk8UxNrNQY!2KC;~HC|h8K7VMZ9@*7Z`;H%2VSmXeU>XkDL{x7W5b3FhDC(4NDu+`_&j7xiZ z9vRidMU1OPFGswcFrew97JJj1R*yZ+`@nk!;~(F~1vLgu@jNaO%%_%_u?ET|55@%0 zta^3##C8zYwI8>v;UwPLUyRdA1gM>Vc}|e;5LHL7wkR4EyGL$QjUt-6%^N6i9sQ<4 zb}I(ydmMp6deKkM8LPA!(o>!;1Er(Aqo?-9R{_vFOtV3@P%l*4VJ!fg@LuPC4n{5U zXtn6+$p~_)ym&7+D1{aAe;B$cdOvp?a0c|h#w$P`3CHo2fo~25wI12L-_8Zv2pXLI?jm-kwKo3%yrC=R@8=K!p~T?%ap3Fs z`(vh29h>70@DB>_8lceL_teG!2CcgI@M{Qd9SJ?3haJN}j_NJ|-nbzWETg?lNa0cp z2UZVjh9<3F9&=zsdj9~guSP_9# zcHAq_SP_aPOxQc5Llw~kVZLw9CjFMWaW;x3z}tDBIbPK6l#J(je*?EzEO-XnrsW4A z&lyN-ZFIkWl62Giv`;~sPis2ls_NlH*9W%K>ugA|sVHpAlpGoeU!>0E(g zZ?V;z#2t~+?|kgVMM!~X9GzlZgHv&9&OO)A9H32ZJp985D{@8U9v1{q8Yzx(A5>^B zLS48;Q*wd8d%;=06;oSs(Mc8PaQ*gSU5)RL<06qKyZiaUCebeU-foO#U2*=038hwm zy?JSfRxi2E5+b}!{{H~pBuX7D*!shqwF2=x`}oP40U0>&k>l--?NdmHpBSQu^||-K zO~vSHTTX*PX!I-0|6a3TKL3y6RK=|VRQlHrH7xNc~%qyu)lP~ zMyb13js9>VEhPfqzpK^^C0|0_VLNN_sf4u-_P;pED9Xn{)W-ClK>f$wELN{+A<>9K z53>?1fx}Jz0KBkh4z&ra^9|q<{1{sz@FaGA=4=(M)A5Gsqp|XFyf#UC=Z|x&eD>6n z?}1KTS9n5lrrg-8bXjetv0hTmJL|`n0T@#hrGWTM6T8&*1}kpXOU}+; ze>u4`TJ{r@y=xj4l8?QZGg?ceHb2%NHyhsx_QX0N^ZWhrS3yWkSG;+?LXX{kILEex z$oENwP!2)ckT5F(9uV!h5@@v7FU$O6QQhdqN(tw)^ODd6+*!}@i>L(9j+u#V2<+Y? 
zU~eMG`8{MOhLExM}))ydWHQpW&HXCNWX`g;_r;}?PU;sVuP`&>E z++l_i63X-aV!9A-XXts!DCpM9+V!tEK&k+aHS5oJD3@g-8sgz5DajeV?EbLYwzM0r z@kt3_gx6Xo93b6TOS+oJkTMW=t~Bon06EgY^Sq(LMpH)cZo%dkX)sH+`G>Ag7$$TY zZ&|BB&pdvAte650v{x9LZpx&<4OQ9@w+eCvId!Zd+?P*v-YYc0jubbbIGD5-C3T_m zFh&@F`%XMyf~nitzt=Pr2bs!ze>g3`ya54vJnG|ZQRBA%0KDG8BXQikc-{cC1n7+* z=HV3uHqK!Ao#!?IKE^FU%mhT(kCF~Hkb*kAGpVdN$ofh5$}ly``1P7uLyM{77Rv{R z-<-W2Ghw`S6b7^nJ^R9qOx+lhAdFu4`{JiM?9}J+lTA}dvK~xH&=QxK<2EoA=ZYY$AQ}wJd`4dep!U)efD{_kqKN4l#y8=&(OHL^Sce{{Yp; zs8@|MzVBFI_1H{CM-Y{!2lwu18r8Pg95Cvx+mn#nj&ruKhcry1m#>9h$Fep$O53) zF1O*z;Ha(AVvT-&{{XkfG`2IL-@lBiC+1@hLDHYTQ~?zd3QjNmyxFcD!2C1DS_NCg zZY$;!^gnzf$~9!ajC62*Lt5R+5)_biFwr)4_)~-9yq1Z2r+FdQJDlOB#O*5h8QIn; zNl+VIzBoMK2;dxm63T0gGDAky&Sq4c^_GO$7hW@ueWIQsVvInWc!R!q!XP_ChqGKh zOoofMA%4@0N(EWA=v=G6m#QjP@2~dj3R2AR>BySsv5othTTv_+35^uT^lEJ zxr0@QR+DdB?^#fyU4{oxn~4JrsUB(N_wO354`&dG$PC4H+4&f2^(M2O*mv3%0+` zU6!0|;h2b2lWrwVAjl`*2tX7Fy?Qcj(0Wn$p0U`%o(&uC8Z~$0rvSf<6r~9C7_`#Q zCcb`hP4BNCwr$A-;helVJ{p48efj%lje^Lc&U!rNvIDvO;ZbxP=)aBNB}(11P6pda z#|}X|38#$SkS^Z!`(dGG>#3`tPY`S4|B0>W+Fs&#@M7QUOx zn4w5}haKq69aq@!b@}};HYwo*zYErI5E)cxJ{oTeAyff{^0*PXgaw@Z)%TN9HflH| zcnj!1-e{H!bsx^|0`2hv-FVJ)*Q`}QQ7I1|F+#4kA-}iI7LlY&V>X5f06-?hn;#!P zoOF#G%_qma1Q1>RzqS*w3(=wXfI=#bcFw-o`iAf$=`jjWfHIJa=)_8{Mr?bt)7}k= zV5-x6&jE!*04qiZpz`FJ`6Edk~FC;^j(H5cfpWn%#}xb_j7_s$5m0X5$* z7%hN3`gfIT6?Qno(k~rPwljbOqndIL?}-`h(atKdd4{*o#!X2BCX(K8t5mmW`@i#> zsiWcwCh`W;eS8~#?PbjaxDOZoVDQULe>=d6?SOLg{9{ejvy?gK59LjYCpx$~i=?WW z=3uZ9QkD;|oLn$YLDxB{N_4Tr6awD4ef(ktQ8+l<*@8~n&|Ue?;19k1+(CdHf(`hY zU99!3_`p&q@Y9ohVaF>j!hNuO-z?lSe?&EL1uMB3_`$Y7Ulqh^ z+4Y0ujYN4y%h6<3+IYUp_J7Z8|k9@y+#&_UajDkm_eFlt7}Tlg4Rm=sTY{ zL1=#l}gsD{QJN+VBE9y?+}Qqv|IOa)&$*vkBFQ<8LEO2 zj9=kQcT^#?mE$>`8j^4A)@-J{jCY3$3#lx>9t_dC5H-=)S!W?#U45`9LEl{DZpUSq zaRFncKeh*ku0;->2{&$fwqe8r${qRvnqMcn6 zpNs}GqWrCLOdYAHuXn4OXeX5O{dmffb+oLTo;c?K9aS|$ueMGiAYMOn`pr;mr?US5 z7~O)ZR?m!+NIbV+z6PUR)-vJ*A9=0IWRVAo 
zSMYJ}xk8W%U7<DxZu@ga=9Rav7t3h1q>)A_=4Y&ak~C>KgoGF3>p=zYZ!OT&snyjz8m*27eqkW$Hd1}7>+w`-QS#%T@RVYK2!{<&%cIXSQ;on z22gkwbK?vsL|H_8cJsGbPNUeWsh|*~F|tAdqFWJ+eJ(deA-s&}c}8h>h~0otX|H%r zwqD177+V$RTk8TAs}(OkA>%AtYqr{_f45l8YaVA-R}EIT+1HjByv5nco$Cd#FoDhU zhm|<6t2@4M1Yv7Qv`zyFp~8uPuJ3oKbAe5RoisW&%yuj54xW|{8pPQ^R&EXD!q7vA zo73N0!ty&&cApMaX7UTS@6q_c-h*Rm{jw8kI!fLp?*V}af#)n~YE>D0TxNj{#EU=c zjJviigr4v~4V0<+A|6$z#X4}!5_3F0=5HVz38#Oi zY|z=LUxO}H3{}qa#N4pY0r5DGtR;etG?!=F+mTo_M4%gLy-P3m1$a` z5b&O{Ar~p6{cm^@KzduhdVBMLD(j?n9>3lqt(DU?`ruggCpYhs6N+#>W|y*Rad9yc zc01qu-d#j1t1R)ntH}7{Ej29KW4BRmhlzr`CyybN;hn>rx}1aEUz~_(CE({MG)ddb zUU-gm`#yDcHD8Sa94*VlO=H`~LtK>cB-07Ypb&wje;FD!ivFJZlq}WVStM`SXOpLO>)GwF`_?6(H_<^g9d) zG&%RpSE%{QFAn$9>KH_%9jZeeC?Z=O`{Q{}lVcroqwjzdrA@_hu)#*_8^%oBCDl*I zoZJl9yn*TS<0-%t=K5z}9&(j8s0nh_yyjlP^P1}4PunjLE9mllB2*7J%ahf+H zy)gLCFr4XrCB_h4*q$+-(kr?+EmAS0EcJ-w3s5vpwVQy~E2Df&dB*Z1uY|*$HQJre z21O(=cegnfzBUKJ>pE&)U)z_gM^tUziQ~VV74kYLLEm3EZ;TWL-u!m^#9g8eeeON{ z<>@PqV5z{LgZ}_Cj(2Gd-%||;iXI0t0%d>vEN`tH<+)z4XI+MZt z;ch0P_MOLb-YsNZK{0s<5f!WAxLuM}KTL*^UT9y9vLoIdlBl@4h?Op~MoWxB!RuOeyqzHlW+ zjT<%T&6buycK+E(7LBhTY~Uab>qP$X6k@C2^MPovc1rWk7qXrCx}ga7IHC5}hb$dV0$wIS%Y%t|09mZwZvrrw0M{ z{{Xj|fD-PMdg~1sBeRq7jfyBs%6N_uL9Vv#x=cng@m9~CF*GN`PqO2xgqxcA8yDi{ zw1gnF@k|x~MX6sJ%4U;A+Fe)MkdY{ZRSg%<>|q#Y;N>tY=bOU};Ebgu3k2fpmvzYoq4}Xob}_``?U#-AfHtUOZwGkU2+M z_mmQIr*q%L@);OEfON=WA4^9-rT^r5z0)kP$Y;Fx`dHHbEHgo>~nP>W5%gS}E z8q*;y06M+@060o83a+92I22?WpzpbO!AOgzx0(B3FpEjizc~X?blI!q_T$siU8JAZ z7*Q}K;o~V0PRMD!Vg{?myCXxMYEFrw6Wgj?g(d&camS^}RLL>1D& z@z_7cPz1dVKJT9yW;;!&YwwH|R&F3C-w+xNa+;n#edAJ1B5HDd@lGCt3FFaKzh{g} zp)V16`OC^^0lr)Y-2p?~-VMPb0UPpf8#qYy{HAD;VM9pZ7nQfstXi$CQTV~u_k?)CHQECL$MI{yG}Qpo#LJP;@olPZsnG~$RV-U-ji?-pqL8a4c2Zd=Ez zi;_BFztdqZ@LheUoFH|}tuLvBh@fvpyWYHD1`9b?pS8;9 zK(I=_4|!RM&2$5Sc(|YpkN}&C(e1c`nqCW|rNW0ni{+w7HK%J3Zfj>92|bgav#i~u z3}AL3aQ=>7k zvhTMR!ki%-ToZPn(*~z|h$-xm`{VFZuu_DiR(CUgm!YSc-Z1246;-KF@(u>J;sgcj zyOR7H>A7Xp*myN^MX(XIGk&xyyq)R zIh=a?WdKr#W`yB-{1Soj4tJOFgEQzE)f(^v(9eeHQ#ujH@0tI#u_e}<10## 
z)y)E(Mepy1aCRHJ{9*=1Fpzt03#|?y`Ds1kEbi2KeLv1RqA4guU2DPX14I!iqVs-t zfQlQSj+{xX15UX9vfj6)@wR^$QZXGm>D2vj)j;+=F|Gx;V;Jw|`pHyt=Hv*0Iym{tnsoFN5NUKO!1Xh5q@6Q_95Bz29fQmBh*7?@^7HeB4eZ8&UDUefQxE_flD&BJ z=M-%)04-k|`M3xw(W!ZVY;-|@c#D(?uPfh^_5EDbqRj`tK5)Q{F9~%x#O6TkH2T62 z2y261KR5xobAX2KruXX&8bs$r`?)QOLhpZP5mR(G%zkldMQ;zsGYy*%EqMO`jDkf2 zkJI{s919jPRvJVO&c=BRUOsS?Ddw51ff%m+VGO8 z{NWo%aT&AC!v@8(Zr^hf=}}I!l6X7Al=K<1v0aY@I$S*jO$c-}X#3X#R8d`u8%~Rc zO5TB896fIkG_45iE8u9+JLWvrxqI_cLr;?wh z@zM{pkJ|(TOBj>R%nbz{hy(F)qKNIPo<+dHV&O4|29Ua*z2X2A>__((En`Uc#sRQw z(Cp_Die#EK?Dg}CtQ{$``X0V9LqHI%{mtz7$OIAOl6IICRhP93(dQveUQ|(?pT^Oq6uI9wwe^u4GSgm5fHk4lJ8)|&cw29b)&WAO5f3`C-f)CnYxy&e0 z^Sm_b=|FIl=s6yK*nj|~?`GNLm{k>9UOFZ!bsd&Xb&3O3n-Z7KaG@R(zgRbsYDAC8 z?>s6l-5!5zt!hV5^Yi-WBDUq_T`=0XMgnH<}LPI;}_u~Qz$~!up zPk6*sZPIa&4F|gGAn>cWGZ8kLp2swr6y|=88$q zfFbQ!uXsQWpf)@^vfKbb*sDVAetlse?XGD;er5`8CyxGaSQxT3{{VdCtO@1Zo!>Z+ z)hCtN{qdHQ!C^OxIc=H(;u5&BZvF4$BfZYSkNjn1j`vSF%dr+kqk+hI4e9pGjkCP(}mwTS4sxp)$#L+g%v_U^X1M+E2@=y zx7Un7ggqvWJhI0&UI6L3ydQj9daCJy{Ng(&EH(cC%x1uJzIN(+)&>X$9DVW}fb?0e zvRWp_hhGVaRS!L!>S2|T5I*@#M#3A|mpI*?k^ca=z!R1{USAG6A|zIt4kX3XXqxdg zyaJZp746#nrd;7>gMxR7P@Pzv=EGoAgFX}1D9pU;j>q=PEU$=r{{Wn5z);>8>HGfq z5W6>eH{Yz7h`K?@sdQG=tJ9oyZ+l(%^nBvzs6B@h*Ni~uO{k&z{@AXK4Mm@a&Th!< zh`j#z=+auE$a#Ko1~f)fRCv}gK{^J0aj4baiVxe~4G?w8N5aS#|bX|qlv zk*_s!Ix=i5>wYlt-l!4#OygGFO&Ha7G4$Lm=Rmxvwqa6@-PrGp0nkoKzCTPV2v8bRXta*_c~=pXmaK<-6B zE3SU~z(j{zzv~B3P;?F_8Ycq0{{X*uuOe{s_QI&NmYjTG14e8uasY&M0TfQ0|2+SzD}?gO!jLGt?1BI0&zLOvh#^U zU3tCNyaCd{u^GQE4@U%=UTFH(&ae*}qJ?K|;BZFN)kPnC%Y{~03cmF8V=fh(ntorm zSws`^!xeM`p&tkFk~>o`7JczS&^bTfycjJ!&w9SSVH>f8H8G;pSP)dR?WVq2XgEjlHL5W1cit%CgVC{k{+VQyn}Y%O>mk`q z+3(NJJl3~BB_At{fe_do@4pxUaE*g&a#C<~3BOk&pc>-r_Bm20phIf;^_vqxqB=g_ z{NuI*>EG%5VwZA|yZg7iQ?LojQ;zwcV98b%js8qgU>|VTpNuK;Af~x?zrIFj0BdX| z`}2!b^V(!kRXUjmj{5%qx$6f6N_Ot}_{3GxCzDrEr>%8bA?fLHT7!Tb6UJRPQZ3ix z&JMPRs`JC%u%iQ#ocagGCe$JW1AlMtE>)^Dn*09%7%=j1Q9a@%0n?%V@PY|0+ZV^I zSm=hN{e5PGeDuTHhE{pi*XIoglaah055SlO!M+oIMLzgzPP_N_#|9=wTK@p=S=Uc-7ni)Gn|pAB 
zY)veeelnv~-y@uxkFZ&qpcw5!59h2*bYnr)%T1z?p?=u_rM!#1;z%Sn9bYN$))5pJ zRdu`*m{}&wP*mr8FZ;P1Dr=}tum+%b4)f5w1cUcHVuMv3y0hmf*;itl`Ezsw%BimV z@74rER45_gT41fj@(e|xhVV(>)rPI6# zmQ^*=gIzb!ZS{avM`nr;e0jw*RJ<9vk(vhS>Sr1&rAD=T!pWlc?qo`YZk->d@~EM7 zmXp>XV?YfxEbC?{aF>9Df8I)r?OsXm#&qOc zI7aeyi4ZR19zD3gQ&te8{a~4eyrkXZHx{P&4O}#GPIph&7Now!Zt%OcJ^MdP=NYwh za{?Vd-fO%RW2y7}WfHA69SQTsXs8t~*Vx{?Vbd|H?E7yNxko{mGJrsr`!Tm0u?{{5 zAQOqbyT(n;aoT(M^_P*nO^!J?jHI3j)x?K^AkMYHkX^#jntl9cuIS_<$auKxN<>0O z#mgZX(vbfE+#2Q8@qA!ty}(uSnM$GH1J|PFsut;*pD(Xi0V1*k&)#E+s-+>bU1cR} z*L-x(=OQppBgl>+g+{+C@%F|zP)kj`@V$IwlWD@e_ij+Md;J1AwiV%RTwUD&(SqQe z==a`0bo*1r^x^7zvBO>&L_-Cc-rfGpJU*?aiHd5CHpOQ!)CqFy<^b3=AIL3!)` za8;=NX(ey*=Pi*3DjWG8 zCQ?W{yqtDqcD02#{2du?!Pb~*xVMro!~Xz$VNp1Q<@VegstK^{_l=4c&o^4vc=CYj zlV6Njz#}j0kDQu-4GQx=YWc$NDAhODj_^by-A_Nh3AW2-C9zbJ{@Fw*8;JAiFe}5Y zr(R|(Qpr7@+**T{kXiP|fTkAkzm6Q#K+xJ7{{VP#q<5izjD=`f+BlBU*Lt~Js_UH@ zN`UkXKVGnaR1(oI#P1-eF9he_1EC8WsXj3bNv51@zx#M$Xx*U7K zBpon4p_aoB0GD20I5gBgbn)XB8$@>{uX7y~hXcTVz2HqMw`IKc7|wz0rP%X2{%`|L zI}ZCi+(T5&RaxN!&sQ3DoR&%aXPgVRH-0hfl#opv#`^CNLqx_gfy>lf0Yo1(2aR)i z$&pSXl|P-}#4UF~TscmfQo{|=oT#SGpIEj;Z&L_Jb5dIR#6x3>ko#eC>4Fso5 z{U!j}jW|@kG9V`XRTy--iibI;;CH^Y}b!@2bC`;8ti@K z04@7;U^PI?{ZClxkll)J;&{q!4lKLH<^(J8kWF<(e~bWt3CP}hedCziEt>U*3K|vC z;R!*9>F51qxIpFs&%CfpUGI!!TL#{};SP={ua8-~kcBSP`+jf}e93!!zc@Z%Z*n=N zjC?%e1(t=cADk@sF1C5dJQix+a=Xx-YxMVwc@wfTe*Izqf+xv$Gp*md*Aog z0^SKPhrx1mg>47FpBP0D8fkj<=L1P68`}Q>S+X{o?t8(7dF!JR@dJq|^M>LnI1Vsj z0a?q|0W5VQ#$!f+{1^WKbDRe99G^b%iA6?f(LDTP1xoP)e;Ex{ts?#L=-fNN0KuLs z@)%j6Zz@4Biqv>GAB?SxbdcsfI65I+6JNe;8%Bj4_E|B>w~M-Gq?ptydKW;Wk5^I zt~)+B&P^26@vlDcPLT(o=iR|oWUBf90C*Zo6=3V%+Z{n9=Jof>G_7zHHLdhuz)Hmi z2jc|dj<{fKoili?3=M2vXP&VW8ev45>o#o{fZX`?iD;Hz5m-mTic>)Co*o?1pe6V{ z{qlGT5PHGB%nye7J~|T34gdl5~MJ%ZT>d&d7a? 
zK#K3#e#fhtSUy5Ng}^C8G^4rdvqwB}K?tOx5dK^J(anuRHIEEcoE5ga-c)VZ*Tf=lFuwKt;KBgBATti} zK`abXa7omOffywuc0lU^aw(;{FA&I-9MOMl(KZheHJg-<-HjW^vj91p0X4qxZ9p{b zpM2-3W!W{^^MDF$qC-#48dZfwczzE!_qAtJ_4So`M7s`;cspPv6Pv^{PLZKb?;f#Q zI(ARcxC!Pmba4(4-5#bjoUeX@bJO`$(dupE*a-jAPn zaDp1yT)9XONu$(}R6`&Qzs!$@joq$;?E2{jfAzE{H>3 za4TU>M~{OFB98e9zw06@ha=|-vIEh_$oGVVk0Rf{#%Mx|$hyG_7n`GACCjQ^!q2u8 z=-um&&L-7eNIDa_gU5HLwDFf)LFfHt*b90sFy33S4WrgZZEdaoJ!K5oA}OuzxmmWo zQ=DHa9{D~eSSiSEo)g2Jf~(3A=dbmLX&5}OKb)e_bVI%Iyhv(owr~AqwgQE>c$6$s z0`2gRe;BZ8Ue2}g-W!2VD#zpJ0-%P5sXPSl^ul#H5kt-xNEWNi^n1s|1c9!2B=0hlLXNKbwu@?7)~PnH<2g8 z*F4LRX`&V=Wk&2*oAKi{7s1D$Fu+e8S55VhXe(h4-_~`sNml!gRb#zZZ`i<%zyu~G?Y=(`Rn??8;_E0&S9rIYtN!&f&`7Le>g6}*O-6PG%(+bqI&m`wu2HJAAAUQ@;}*vi$Eah)$@a` z6`CHO>nH(DSe*EosTp9Rf6w1HHY853C!AP}L!Cz=y@Yg5xG^LkuT~elZG=l3)&y2v zv}pPE;*1{j2Y&b7CYv4Ua{f5eX8F17BHgbCvk55*T9SIg&9Su-pKln{^EK}dfzHvu|0A3%OU|&mbe3H zO0;J01{5}}?8Rn^yw?YMuO=&&*}(q*Stl}0xQ`^>rtw!`8xMZ)+0h^R%&}l^r>ri# zJ(6oEMIqAdnQ0|GS?lkV*5CYGaxsMOeG@oJL>y8xw@&^5D2gz=|B6sP- z!*4NZ@MkcCt}6TAMmh~%lJj=|0IZ=1)~TrZ2P!lHimbgy8(Jf@;ltVq)MikM8H-(; zux7Dk(!R}N9KoLfi5UKR-@Jph@=knADTj@3cxK_JgN^Cu67YlG-5m8|j8kpt>WlyzJ?~9fIjrsXD}~6VR@yz5f6h14P5J{pLn#uWugI z^NKt~o+*BEyosZKz5p!I#{U4EV&{B4@xb8?iV_3X`tyacmEF5P_a4xv3d!+|M61oZ z=a&EzU=pvWOoyUq*B7}a@tf8Get z1~6+5biEgHstQ1zbAB+opmQ8v+uNJQ#V-?t}YArDdRa;4$fw|9e`pdb!_~R_>-aP3|^^n?DaQON2l1K%< zht6}hPcKh-Dxm3O!RqHb2y0VDRQJ#%Zdx1|%e#GaW03`I6MyW&M=Cm6eEfRH1qizL zk3#gFIL$=pGBly34}Krt1eX_${^^ucV0Y)=#xS7H$A{mnJRk-DaQBmU!=-<(=M|!0 z92EKfF)MLVlY`^>$7+4mHHmQn)e3m~W~2acqvO1{T9a!K^*^lFh3bkTKEzo%aDNgx(mCP{Q4BzS)KL_m6hPddOI zj|m023~1I}^@4Q?%JYHYS;!UqpT=n&ftoi?E=s^h*$;R^g$1|$Ii&@>3_m~b1GU>s zImk7-!qZuvi<$EzL0VSb#(R}4AA*H)C2y0a-kNjl-J)Pj?b6Ryn3n=fZzHs zAv8?}-E$upFjgklmQP+Wn?y!+_sTa&GjP7vPms){~WUl<`QJL3`y19i$FK_t9~ldPoz zXE4v3ic%w8>DTOL$1(D@llR3V!t|{7f>xl@m2nq9c-xANX8!XoyE}O?p;(L4aS95L zmRuoDor}!#gee8vzC0PGbJ(263$zVip72Rkw}XEew2Fn$?fc?rGVL9|DTeF@>@UU* z4xcI~+Y4qyeFL(7ywQMyyAi7=I=h^7=FBzvSg(~4pw`!*%S 
z%c}_4Z;K3BCPD~}D{r$fG-^_fbnP)jY&ok#a+jE|Cu==o`q`eNS>q?y6 zmLF_%Iox{e#*vk~5A(5v(qFr>)l&_qJsh9WJK_4Q#iPF8+2j7Me962(wKS$o|!t*!FM zZ=YQAOJQNPds?a=d@gia?6oug^H5swAx6p~zV}?WnNySbjG-*W@{-9{c_xD6dqy4Q zm)0ko?ER%yugi{iJ#mSyXKUnZ^e{p4^Q(}vea(}xRB~9zx{mef>(InL>7p|~-Z&}Z zN7r6oTrrL-@K(CIFmAuy=-)xny9(>@=LbF|D$DmjL}xVCuiCzBr4sd z{yh^OPGv>NS{vH#|LkuZnv!wk>~?g+gZl4xJB-|8nDvXR;Q8#>#)ZaH)-C%Oc*5gk zSo*%7Hw|Q8_3ZvJIWWfJ+EwZJ5pC}doZfKBbAQ}!o3C~;d+zI(-23wsco+gGU;rTa z1KnAJf8Y=3c>4c5SLFYJ+P?pO%lZSl3w)pZvzT_Z^n-uEsU(YTrz`7O^5WOMKM?ru ztMBt4_`-aVrGxSD4?Lc#H%r&;59k>Ack#%mEavxQ`t0jFb(e?!z^~`$``-utTYc@N zuG_=>Z&_~*!w1Gj%;?dT>V|Sn6G9V+bL?sv(OC~?-+X`d?OrC>80lTm31X)KkS>pua<dxCUyA#5`CWwqfuJ6eVy_VVcICJvL%)Uo2ZN@&+4d=hlu&?tOyNDUHejXU?%abl0 z*fGobKvuPFT;H?I_b)T;US^&Mn6S@RH$UIO8^2dMZ4YPMU*~B0zGC$C@~DZ|V<+}a z&oq0s;=B&B?)7}I@8#qr4+l@Z%-s1PoM9zq?n?+?VOJ+$_Z?h%{$S+$smbSF{s*oO zx~~1Ie|zx*bsaNTPK#yiSedxIV8hbn6}mS6Ht380PqWAWHtTB}jDMN={YB>67xVj` zCBHjJtPd?Tv|rQrY?kZ)Gf=}K-CuQ~d^pfjUxzsP-^;?fy+ zICrmO{C@!+{NE2pt>?Qu4R4(JFA&}8ABfJEy@?PV{(B4iZ@d3Cewq30dFHp-E2arT zN~}^uQt{*ebPr8^Ts*Sgy@c~-_0;FRd;YKS_ZKU!M<#@shM4YKFi$!$X=-N3^fFq* z%d80eiXwK?>Gw;n6zWF%{iO~iUQW%?%n!AjN^ePawpcu6YI5$@iMgYa+eQDF?>B2u zW|q6P@%6{Nd^z~}+Vhh;o1DzUA6hT%oho(`WQ}|tX=5{Jx0b=oitY_Np1(gbp!L$5 z#cM1N_l|s=)o}LS?B?Wl=hye^ZcpwFAYQ$#f136E4sY2l&MeO_6{iQg0U>~AvY+8>BF1hk z{T7l`{CUoOvpEizzqd`!>^qqGeZ1g(oT2p7A<}Ozy()eqHBWJgf z)A^4Zqjq}MZ=9^XF*HE9Osq?Hz4nPqfsE(tG;`btaKVjzNxB8d$1bUm8$?D|ufAIS z+B`c()VEm%d)c`!`@g=k-e*o+N9L8@+h}a*KfN*L{N!=j%?_5doBO|HjoT&y2>8N` zS+a0ct15swriOQ+fKjN5U;O5xE$rOnu_Ag_#=||H?@GPjJh~E*)Q65(H6WnXn;!EK zo>{WjZJB@E>b8xqN=Y~4SNIT>`>GyJbK67FWpEt&6|zZcy}87oj*3^Kme#KY$|Uo z6^ox7qInk}%k<9Oyz2O4y3cMe8v);Xt77AW?q7j5s$ZYu0!HUlN66mV5)S4PCbceK zD#9fmFS*PAc9Blj`hnO#{GX=i$yPSGNL|^kdAUyh#gUu!{FeiPQF0gl806iFS(caG zt!Uw;9zJTC*fZ9A$j5OQGIiLt*ox2!{Sl33G)COIIrnUOjfH7%Uq!3m_}b23a|M{G zSLZ@DT6trbbk=p8GI|d|QDTgWK-hBOy{SF-)_q&lFP%B!V#miP5rDpT`6q8!#o;-N zju-qBziIcI24NermV}1*DGCeXs{tWv_unSpy6xxPh9aU^?x0_~-2BMJhie_D@K4To 
zbM1UvB05BKNFY5%$(9uoiX*3eC$6~XG;-RODPQkJeDYl%wc^W(=?n6vRL!?rwwoNg z_Hz#Us>pYN@56e=xmwGvS0-a}UgR5zzTWY@o)lzu+(%Y+wb)X*aNp6B^&Ej3-#gCF z;ALix{?bYhMttax;+@?C|DeBa`D!9&Uf9V%x9gpoy)``c;9ug?KVG|4b@>}(*2|`k z=y?XS-h45&;zZT^Z@fM7+FH)S>BUwzOxKLw`0<~~{kX#FpeVmN)#QO7nkPuOd%CF4 z?!8F4?4G+ILQG>%9rkPJM^I#RcPz7(tPClc@c1W}&+7~}{{twms_0@**v0KHw#<34 zNYSYGH}h(4+|r}tl4vWJ60v#uc0kktMpjl43Gowg`;_N;mSoJR^WuAH!yOAE%%l7@ z%{_Lau4$ae)Kw0j{m}Q9%n%-N2UnJFv|l#r(2}UL;%ged_v^xSO#uszmdu>K(KqK# zK#XkK>*YDzt9Mo!q?YBwvkQ{(%RVlh$e5W?k4*46Gue01lT~StZ&uwy%9quRxqfbn z_oRRye%%JX6FRQ?+~zfz@Z3$rLGBM75sM*Fc-;jfEl!m`G8w_Rq3Aq~LQfk0#eAIKqg7d-e!MZ> zOW#MHSU!P7>w6-GYlhw(+c$KQz0=f>qh*1WQ-(*=&1TKIj& ze*Bf1tBGX;&XEPBN32!9c$qn-QPb|Ytxu-_yXlhijo+-C>r*rwE^Q`WFBBx-vh*xI z!`Lf0KH`$>UD28Amq?z9?;E6Vubs9xigor`Gg|MtJqJ&w78uSJ|! zyM5O++5UL-nz5~Rg8X31_r|d2^=BLp_|nI^Z_E4aTxKxuz^@5MgZ{uSP0Y7wR`rHZ z{9;6izt`1UtaGC0TT<#om*$_lTxhsvZJ878K-Km!n=jtiZgH|dezr4Z+sx7)coyxH z`Zf8Cw&vojoub4uICaszIDB10RD$`2wY0Z`Ux??>lRHu;uc=m#p}Ynz@?Y!xy60V7 zTd|p6-MHBPa%GU&*L(EayWZ!i*L+=dAD`F8V0AuNl;70x;P%q(TUI<6_`HXc6!lBA z?3wz;#LXSD$?NC!ce|XAykXk6`m<9c3(d&fyx9JAbI8%{0hx?}KhSmQ)I}?W{iK%% z*0+A|y_?;|!Cny~9>xA?0DA*KbrLOx{O6$)Y5@=f5ZL2FZ!sZ_Opxg$UKXOO4b(}y z^;_)m(gEaWIGZ-qB8z4m=xB1-OH1nCC5p6|-D5?2kV%dUb?_U7iaDep@G+X-FC-QS zepBp%pi6d-c>+4P5dnuLcbsKNj~E&XHy|L+{^|9t^f0(da`+4@>CSID=8Pgl3u_T3 zIC$Gzd;#o(l~|4&GD(y((ki~DzCBSeZVKP+z9GpFvC%R5i*bW2+&<=deXdI#NpM(V zi*v|2lxC0f*uR+P3vvZoWK$I^1bl@|AiovD{rPmYWJ+U>6MLmm%Dv0rEl@FuKTuE) zgvLHNKm}dv2AI`!GDQCZ*!7mHL{5o@ zK5IyU{{t4a(NN{6LQ*ep9NxHWS1x%!L+U8WVR5j$68qq;_DF|_E~KkDp9muJ((g8= z4_DpX=CT7juD@9xQ0pdjLlmonyGi7bSnK^*lT+vxeLL2W3Q!o5ZVO?hck6}4#CV2+ z*bmrl^RNUrQ%aq)K9-9Hm(7zV>qWg=c)0j}veMXMlHd~kC4vPb=JAGWCfeIa^i^6}db3|~AE+#h zl=IH!9C~^$)?dd>=KxGy*; zL?da@YdPxR@lWTxse!Y%E!=^YuA8bWB9XlsGeg*@<#tyL($xRiMlTHHYqLF+6s`IcAgcZ%jA1Qdn&zP z@;=ML9;AE%qdVyyZYNk^pat(fKjwMY`axYfYRdsIbj`s=JAJ{Hth3kVqd{qipT1t> za?FnrtVLtix%U*4X~Sb%zqK-TG~D4|8vZW@|1T9s1P1oFw@@sNBobsx|0Uw+{}A!b 
zf1uxZ)Nl1PEU=meDtDm;{dcMHsdkzJ&%79}TwVBk8u17z=b`P$!@&gVLJ&kVXj4lA z_nmqxNsUzbitQfp8md(}BjwS(Su?w(%WKc(s*R(6iVX>pFreqIdjAcP50V<0jLHel z;8NNx?@#b84Jc#+(>Ln+2E-jh*aI5r5V6n)BitWS=e1{ROSN&=KA_x-c2f*W>u`gG z20;wCK{60Ayki}!RW1Ad;GRZ*jz4BuB9`Zzl5ZpCy3;KgLF5WGX70X;Ik+XzTUa6F zInlbSfOurCQ0}Bl*aUfOfD{62PQyh(gm(O8iBO!_Y|a7gJctEu_HBbQ^bd3@o9erHM3JB7TN^`if;5NS&n1L>E{FX?umQZ{hRbyh9(m+@Glu7>}bPCmf_>l$i{4 zFTrXZX-e;oO~f%zL_WD&MtJpOe?XQ}M<#!*TI`i))#{bQ5N0qJNJDnuxI2McnyTpH zpriO!!2>1V!!~rZIqesEl{6FiB20ho0<)2B{0#*4lpL}}(d4D%Go-?*7$rkkQ&TmE zyZ7y#bW>RphJ_Fzv{b*69He|}K5$LlW6s6fClcO@Z^ny>Mib2p<0-K}=cmt)DBC#H zL|`h<7{uMty1&Kb5JSbrS0NiWl2T3xDo7D&$D;i-EGSRozT~dfr<~TqXIKgnmBT0F z?s7%b4>{C9zG;fP#C;tBJ`L`?(b#$uoJW@?JwaqK*U8&@SrSCrV!eCW*7V?BbGu4a zH53Si#wPSU$$ixzQ+20y-iE95ugmj#0r;v?vQown#g7@Aw~sDOkrkBwMtC}<&Q6AE zYc6{l$A%c>D#%L87}VY}k0ja@Q~I5fDEHGj45C))s*UfE%n;ZcsEq|t;`8uAyd&nm z_C#w1MrjCE4KE=i*Vj6ov0crHO#Er@uv|wme1x zEaGnJNAdxuf`c? z@KP)dkXTyCEX9);)={>L1H{#*qsN;O3_%_Jn8%kPe-HJ-TQ$98wI!ih>7O>wZeGPa z7>YG;xw|)=@Y0^|MiLZJLq<>8s5^v!sVT40Rx*UL=1B_`-GS1CNt0~I*Y6*}5UZQv z%u>JGLk>OuWD&(%?z&j!oB;5r%2%~HT(7&eJ57?X>Eu&hBk$s9i(~i z8hPz_37^g{-st8TRFz^PFL#9Hq@W_Ms%4Y=8P_MDL_)U|p)P2Sz)hl0XkMxujD=U- zt&wS_T`shYni9H5Dkwgxj#)gdJ4T^gRGKl0A$p)ki!K$6EfUpjP2TLrxb4KPmiy#r zH)X@U+^ZIxu9&y;89t>0AgD?%%iR*X^z<7~uCY6dGt-BdNru-FLFsX^og+1Kf`~wt z0dU?VWxNC7JaO?G$1wa09#zf=HL|}#qD_i6o|cnFGr!#4h!pU6`U7Kd)XAdN!PwkE zkzZ+W;h+(h6z5Ph6lqZo%&jj74anMx$HBi)vgq71S%TLy!*RkfDV@L@~-+EpqygS~i zwqu-VO9<8XAY_xNsm}yDSG231F<;Gc$lF*%r+^S~&i=A5K9)K^msgw@T9mJ-O_|*? 
z4Q2EknUS(wCepIpB2SBZkT8YDE`fSxWyN!o%4Q()`K8(~yzJl`G;_=KX>mK0GnFmo z-m0AKI}C(UC0(83hiTMF1`bEs7`AzNg7~mf8*{680jn{NG|#Xk928z!2yTgK16`!r z!@zSs@@-aLa2nHaf@2@3@EVW!0~ZWhuFoaum)d%2Q~jarMQpnbz~VvTV z*8rtAjrs!udI=p{#n?RgHsVReYftu*v7&&cKhR+CV`sb909jS3nT4E^nv!wZKWV=c zGS2Bo{sEQSY;sMLw0q$C0+jvuQn$$-vf7GKc27C|jW$pa{|9UvvM@Y-$R8l-`)|Cy zYI^Y84v(Pf{Z_~MX3ip0t!an)BK@vBtf`y(u#3KPb0F1ZNSNLUvDzfAv5V=W|4#e^YGzZ=9m4F@p{4W> z@ruGdSMG&w{{vg5eJ3CC=*4L%D55uhcX+qV<#nCT*O<_)n2X6J9lWF#v&ykEj*kx{ zuXQ8qM9Y0nPWGaV*E?MQv3FBQg;xXkAq%d+H6%>!z?e-Ao2*&SKg&s|_o8CsbAJzg z3P0Qmpee7u6*+R(vzOV$J|SVzM7n6bEP}MOt|^kYFgNBYeI}KzC%pa+={Pbzuu;{| z`2+94tp6k#VIkt&zt-)O!j+=?vGou;C|BusqqlT(X03#2qtLs-6}H*aC4tl-;^#Lr znct^@*>*onp@N;mfM`IJl}J@@Ku|ES(}ggUPTGZ9q4#Q|2#8k=2I4GKg~a%*vK-c3 zq;I$Tlj!G)KX7vfx1~9Gf3L*KZfktET>)owVeYkw^t%%YY(wJRLrV?lozu1IKY&~) z)zW)f^p22b%0StWRy(Cl?GLlA^{$S$z}O89CnphxfUwrfXd#y>`AT7B7ek&%NC0M( zR7F7A6xH%i>l5mmh-rs^uR3g9F392hhWSBeZ<``c4HWLCFBskFMZr3_LT9CzCee$y zP}X2_lRk&~4#stwtwjTA!72dX$IGG*Xo>We(X#cE;6$s<%*Y_qS0SCb{U<{|z=yyBWL^M#ccLpfLk^I`k`Kzg zCO>c=0JMd^UVLd;cD5PXV38vy1D|My`wJt=Ze4I;`5ZyFT`eQY+g04HQBqLj6 z_7h|gi>uMa)_Hs8X4!lsVFKa#)3i2}{ANBtqAj#5B-u&Zj?aYiYI=YE1=T27>;jJQ zr|if~-R_~zSzzicEp{;P0!g)6m6XFC$DSm=JQIsV8+1Hg2o`mufP)tk%r}X@0m)S@ zX6PqUwu?yGZW8@#M(BFo_J1!f&^bF+Eh0;$*21!PC(f`0}pm()c$=Br_Ju#rpdW}9} zO+fE2#Qri%%Abe(r|z zjES{TJ*g?4Rv8bIJGtwhA~Ol&LBr531h=D_IO=lwQ;Il(ev_Kt@)U)OfnNb^CQ63| z3O-cC|K@52$X)lVby)<0N5k;mah7lkG8@?IkPAUXxGerv84nL;1Ap5#(A&k%{)dg4 zm4iE_Mz-$|bifbydA$egZndnHH|p0??2H|`ynSekJA*PCQZbt$U)E33cWSX?I4R_e z;-U?3k!`iCZf_#S)qy7 z+-wRP`?#W=Gmh=V4ODvk0bz|?h#g;00Tp#Ry#8qVk}!5JaW^W@Bi%rdO8ApVEube@ zemZ*%FGmZERIfB%L;Y&hw!xS^uT0%dUr3&@k$i4KTNf7MU=$ha*U zwbig5q?_Bqeo$jicy+_5G|e}xh5bBM^D*0j^o$a$3}s^x*fkr9V0 zZZ>seBzNVjWffBF*Z|kej->ko5x~gV*2XQ{!`qYBvEUms%`13-uGCyzKWRCq96f`~ z;6Bm`D`q$D9z1d=AhBO>zmY_s$Ig?DOYn>Y)L3O86TKFaiKDYj?a+^GgyBYzhd z&of{fwqg?a=Wqvekj=SAaFR=ZA0(sOxGQXClw`6Z@xc*8I@ZAcC=9;>iEe{CPd8oC6>=xK1LiCl{q5DqiUv4tfx7+>b* zssn1QySXSe_J 
zDG->=cjdShoV1zTz#Kc#tloG%puLt*wFZtj5`8Vw)6Cu!een-KiY@axks+)2cDTs} zA-;}TQR`U2-9}V;68m*=HQ)zR+r^l!%{rnoGY{lwE|I0on-m;Hfwr%`-Jcv4TPcNQ znlCYs-u{|BB(~6=ZNwme1q_+K$PIX3b|(Rb9kU^M}&Kjw7*N_W9DmAP35e|#sAe}n=-4CPA zy7i}UmX68pfj0gh=on_#Z5Bw<;4G7vt-7gV5HUT>4!Ujd={>onJ@jk1w3?%KyXt26 zxRvjwr&_#PKeDuo*MOl7gJjZTLjj~_0J1eqhP;vs*iLZsSDVt<05`lac@P+-GPp%V z9;j_+dv9_dgOY_WpRynq4ifE#LvQ+{xIxXi&jlc7g_k&LfIic8C}EiP2XaV=P*=R2 zu}zh&S);?Y;jEu5l-LCG51}3vGk~P@?MsIZh@Xfv^t(Y{YCOze@nQza>84k-oBkq6 zV&r+={>IH|qU8Pj;%|DogcC$kfEKTW(M{(S~p+(SxBCR$AB!H2j6rP08J-5~PlJ+JC z-f-CwPw`ZkClKh?9B2Dklyn)ae@l{WfNk4Bu)SQTc&VtQB=|*A`8R1|-+b-0~mfC~C z0s@icHNPtRgdji7JQ!3PcYmFweCqyy4V$Vmozgib5VF1D8Y#XYd#kdT zMA`;NX@tu)zsaOuXm1u%tCJX&ekuGS@gSzt`x<1c`Ad41i&(MkXqoiHPQOBd`}RZq zd7D6c~OQ} zDgnt?(ZDL1WS-=Jp@cmbYO=-2;NqULnBXYQ&3P7=F*R#f*~WarZ5jkrK#58SyH=|O z;Jx-Z2%DSK?v+1#Is5oy#VRkIxGSHfc(NL+L6E8n^cv`E+z44FKEW8#it>wF+~tzN z?5IBGEHgW8bA}y>+8#vSr~+WvsDO|zw0Fp{Va@Z>^s}X?&YO+ASnan`MDJ6nc{=4H z&v_x9&iH10-a_g^joO}JKpMXtM;bc;2zg24P!67fo5l+L6iTcDD|8(~pzht{ ztS#~R56XGTzerfCqX;KSga+^yF|5*{|1rZw{Pw_1Azq>bYvNX=ClKxT) z%(V5y+C6BG`6{@Y7gAcOzii5{Kmj++wvjZGIYWA{9u?q7u(LF_hr$R{&DRm;K$X5! 
zaW8uNFXU%RC;bzwk{mv?rqaiTXl>TfufN-M8JyBWRlu_SW$sJRcN!OjL4vSGXXHpH zz%yyM1_DcKMLM_0$w`2So+y)i*@hIV?|qpe28ID5XceJg@^lg_g$GvU1*rK~;1|k3 zkayixCfFx(7tps8G}!%Wu9m4g`*+c+#ZqzZItJ7dDw7G$zE13W*}|aQNhIq%>PBFn zaYW9auDa&opBmugBdiq2~?H2e8QhR6r)YmxAlXYhx zf;6BKW0u}Jhk;G1mHjtl%7&GYBJeaG?_&W3;H(G%EFy%!-hrSb&|UM&P@qo%`-88T zg4+dgks2I5L?fJR=~!kCY=!e+7*q&`5td>#;;rP-XzG!vDe?*f&pcXi8rm@us$Ma> zAy3{{Ur7M-$?@?843emXiR!#jZRQiuVqKmmx3X1sc+ewhZXh4p4Eo zeKALms)Q@h-*Tzmh7#LX+XgI=dotq=dON25ZbGV=Dl@`lFPMcX1Ibk|jpPJSEt#Dh zDy}k(DdnzcZ66KiOY&D~A(g(#exkT1#cTVVwiNwL@hBfFU`>5PxkJZOVRDX>$m+LF zWSFeHLw=)0D4gcRIs~O!GsBj`#=8Wz9Kex~0%JNLel#ls-VR?kr%w)>*rO`JLPesA zd@#hv<|l>)i~+1lp1a+UxIjB(H4?0M8*{t}^fwQYg}d!BYR3p;!1|O_WzxuW0TsH7 zGY(7zHkEsoSPanCX;?7c&s$mTQu4+2qi(CyYv@GEwVJ*q)IxlSRm~=PJ@ojqWE#pCg%r z(7c`i&<9VI9sc&){2Fdku;Y!ne}G^x#)+MmrfkJuUrqPXN@N(EZcuym12$+#fbzNS zXI2ADnzkCo7`6yM*+ML0rLgWwqTNyf{Uq+(_NkpuMdB{J6oen;yopvLio5@1N{PIX{GB%;L<-UDpQ&Peb($D=7WsOqJ1a|q~a=d2SA3!$EO(YGp7b)INZdRs$P%rRT)2(!{- zGTol#H|kk3D|&2*q}Fs1{U8O#8joDA&k7ae^(At#CEd3LxE*W@A^iZsaiWIgHw6t( zTkgZ%;sGnr4j#6YpWO|Mk@b`oBPLep40ex zdjw#vKhat$X4}o6%JZ+Sbl0uZbo(;0jlR-um(DbScTihIU_N%+1ge`UidbJxa?|84 zZ3k3~yh+YYaBEx5^`&8=8knR%h%Ukkio0)=z}J!eC6BTejah=ush)nnIuT09PE zirUHiWpyFNo`W>gqL3zFjc4%?A$3M(w8u${u^r~%*jZO* zn5x{76l}*m3QV1N+u{rtlIn$r<*j~15Nv4ng?i`!95#mw$&~lRQ8ml(MxxD(*>9Gj!1_SK zHG7i8LPNbti&YHd!Yi=M9ph`g8KH0JMgeNa0}PWn5<0&r`Zkq6rbD+a6f1m^iV7H_cWMM7bPD{Oyh!D$dF%O_R=XKL%hNjUHb0Bx!u+J1%b<%Yd z#-)*D&#%onZHj=BrQ7>M_L3lcFJx9&FwA-d8#ce%xRD0}57!1X+dDf+dpb&I5lO}h&6atydHIC2F}GJc z%G&a*V*p4i;<*6i#d*Ose8I;RcV~XGmyPc~sISY91Vp3uxG~OqN9pbw0vhS0NX14x zsAReuVH0VsGQ(<0iihW(!kI5}0J&1B)|W*P{G_mcoMw^FVjT*|v)n7|)n>rc-KY7! 
z%F5B863lOBs*!4}ngzua<6ZtD)CWaw{hZT_U6co`PSThW$Z}@y$_I14&>*lrTEkOW zk)ET_Co1brGJ(eKZhAA4AKc;*M5QAu7YWIAm=typj8E_e81o3TZX03({fr9h7s1+H z`gvqJQ^Rtt_ym}T%%FXhIY!8 zDSV*qW@Up=hgJaz6omU#!mrYyvjh2&1+3#aHY3SW4gfP7NGO00;RnjJj zNhtqm=?=%7dtr_QSnk=Bxqu{i>i}`}7|K-9ci$4j15^cnb%O|Hd3QcHUs_K?d~x~72Qs55yP<8;$3504dw4b%G86tAW8|61L8QGjayf=cOhJ70YQdt(9PTCI+n^LCMjksnFOGt zi7Dg)?^aTf2gbHCgsG+}6SH(ShRM&A5}`XO=>P??iI_eygkYpQXUo0oyw@Pa(H@$& zy%dn@O>JD)kZVYx`>lsO7f*>`9Y8Z+LRt!PsU}dxNs(#k+n%d%HmM22e%b=>5-QnR zLqAm*x*S_Yp4j))ObTb$c>|ac1UM5k=c&{pieKU6oNlWv4!>ipGby0-Wm^H*vl6L7(3UYURVo3Qmmm0nC)?epR(vBx$g9}J=%Opr;mkLxlkY;UV z4%H;Z^o!f4+OTGvRVB?8rDfAPkSAa&WxDa3w7SquA+Q{bN3(IHiSQYt5SfsOgnYeG zxt2L@FVh;CkaOP)w-CD_?_b@5&_;yz9r_+92;PZG!jy&a%#*wfkessC4VodVB}qps zQM|!BNlN*3DW2WAg$;{4Qe2|kE>1rtMD;XITwEYp1;W?(e);Pt7QM*HOa`fh&`E{Z zOYo6E3n0b2$WC?~@DGh}zh}s~Bv$HYG{k9|&!{nImvlfE@^|a)5*c%}V&5=GqZ}c+&`Eb~~yI z$*3SeAEU{DmqwQ%HP~FxS_4tU%H3a&9m9ZE0hTr_9Aaix0oc1G4X0pS+6a2Cv{Gu6 z2>j~|#1?w&qnH)=EMZN7%d{hUy@q8ima6QOWv=t6;ziB0oZ#Dg?4*K3M{jun0y%KY zd;69S{p=dJL>7UPV1RRk8V4n^ssd20${OU06gI^0CDnurz)-_D3>lsZuD9dtx`Dl1 z9(rM1X0S!ohEE9{rqf6UrptXuT6C3ZnGsGX@}ZPKat<>Hi(NV3I=2~Tt;rzGse>uN z^*p@eRinTvKpqN~brttyd@6&vHD_(OD?9nPPgASQ(ebIeL$`mVMP!e89vY-OGwjpx z%1T!Es0BJR2xT}$O`tx_G8TQ644t4O1jBXW|2N$sDSelK=A)e?#@VO!WRBkr4OIw^ z3?Y!KP(oUpyJe^l8NXiHfmC87+b25Q8L1Bcdm_*~;q*;4$KsNx+0YO%M#foTxKNS< z#>6Glz$D?G^dnRN=ED3*fMq&bilXC`adROvvAY3QP27RQc6S(|v^uIvVQ9gDK?;Un zs4Y)Iu?BjprG<^;XkS!vd7NK*kirwqUn=a3A>2cq^3xRSIhkVJms zgA?Km_Fi~#IDS&6hq{H1r#3bsRSum9E0pSop<|~3d}<#Vj2LDq!7Uh@P}5*h$vY88 z)(YTC_Z;Ca{0PNpJ%a3Ap}@kIr2*#q1|pY&4WOY+K4*wA@~}{f{=JLLcF@kdPm{nb zg8dB+IA49!GAzxf)wWd8q{f*w3ME5pylo8oH>sb-(cS#f16obq3TX5+wvzI;X;DD- z0z$bnK#So!T`RDPfrq$G;$<8XMr%^qXH=rov+E0{E!>DJ;KtAN@-tNs?A-SN=)f{ zh{Yr=X201WLDF)UT?T}hgW?H%eHrgn3^=WTw%aihRALNZSh^1w=D8-80SYt$UT6WG zT?o5{Yep35)&S=`A=!>tjX`@V<*dYpbh7jKg+(rl$I%Z6&vb*tzLKe8>KI#S$tK)S zubkkvf!l;Om0P#_Xk}Q{R%<3Tmj%50X%M-5sU5lO>cv)r-D3*i+!2Y+g9wh!4)^E37T?v 
zB6k|ZL(C=lJOZ#lhw$JEf&00sTPjeAy1oW_V90=0KMf<)f<9}OIO(r6sd6UCZO2o| z)X?Cugy6Pjr4-zMmg9qR`w?tqgb?<@**BItS&tKfZtSuwF|>h-V?I$McPMY6w*`W6=dEZA9E1+10l_!w zDKg;L-djMv^S(@)V{b#<8YqXth}%NHxxG?7lqZa&fm2J|UeNP=C;n8x zod%Ri5wwx4G+HY?yd93#1cURvUL?sBy-~y6TdTrkbdq+WsDxI$m*@tAiwZ)rYb!-N z#*(i_sgQ@a<5lC!ol-`VVEbBJ2AF;~Kl?Aj!c-aQ@E#(1N+CqrNwQ2?gbLh_q=sy^ z(MJV+J{;8r_IB^Rtu$xwRpi&t=kLuWK>~Qe3@;%>T<)O3sH)ND$H&T3tZzYh*v=aA zroE7c`~(jNiew%>J#H8NO8~Sso(Q-qkIE!4w*(u?MoWv*Ngi5K@4ke7-8f-Zu-aiU z#ZICzBT;bppgLv^hZG0&WiXHHGEswUUu5q3;BpXqqa9A0(!}BjOJH=N1wpTssOW8w zG_L~4i3GGFu9+8UcTC&OhtW%fqg~D&(_aIBO9=QLR*|#1mHVBvTS5 z0WlhSG2zREG3hEE2yI9dh+4aDBs>QMIsSncc0E>tl_day>*ReJw^B@kF%?Y$udefk zI?=q_#24^zn(0L_n3gdyR z)}#T6FxxS<2%uO$joF><+>Wpi+mirExqGt-#AFyDqxB+cm~xk);|CH3Fm)>}MC#%H zQFQHbQIz}pd1q#4XJL04mW#{P0Tu*BTag6uHVcRgqOO-Zm|aFi#Udi}mfhY3M7*{r z8Jg8-WYn>9*7O+JF7L8H#vp?>7?`16?EwEF$tKK`Trp~KAkzTfBjJkR%e zdyT1$j-%gFUQ2n~|ETIlTn+ipalrlGRwoXYDtAqxwiCypoNSWOuB)6lE1)3jpo#@O z0vtzXEAca@y&5LF>=pfDyw=HU(vezf{$$bR5zj zJZl-{en#cXh-Qaox=Iadb&xABM#`^XXNXLu!nhxK6*BWJtR}aOE%r+yFxU}5VmUEVQ^k}*JIZz4vvKkAPzD%h?2{kXDv_p0chp^ z2{_Y$s=KM`EyC&`*9grU-x3}xj5$%RG`CXT6M&8Sw1TKBv758t9GuuvV8oU(e6Z-K zhNzLMph=IL$r^Pv5_XYCWN9~||Died-69GVe&!!hf*3R6JooTXuDd~GDpX9PWxF12 zSSQiUAOB^3HJaRZ&s~-ji0ikAUQ!lRFZ&W1tur#(29j}4uS*d==(ZcA0M2wncuckL z^l9Y#Sy3|+<)i1Ac@*nR)(8oF*lbV+LXOw-^(rZ>^^b%D$$Fny|WWCsZ)4c>+HO*7Yl z)jS4jPa4)F%luM;>$C>?W=-HSJZ~qBP-W3b5)E8N&IUba)>p1cie=%?mwQlq$D#rgJ>Q3jp{H}`?Nwj4**V2@?amRJz`}@I&d}A<4ArmvIl1CqENy>O_g65y(pL4L~Tch{`N**mAO-;v-XZ*r%J}Wwr z4F;FZAGfQFIObkd*KZm>i`wt6f>@?+)5`iD@Y9TgLRY~M+41uG^=&7*8!Y3j^CtQK z*FTWDr$0I*`0rTBJT3z;J=;S{4Lfozt1#9aEU(1m#(F_r4}NRY4^EEzJXqzN$vH7f z;}xO(DycK89OHpn8u=2W%XY#@0A@6jyOV@oq!e zq9I=E=(Of1dt`n!NBPRiL%o<@;SxLiZ;a<+x*sjA$f6={^=9V{l_wEclX3!d)+{>q>)=E-3r%ZywJ*< zidF7M(Nz`saWLsWZ_2jz7(c&dc$jjP2WI@4jydq03uBk-fw!?XAd1HetIKJwUMf$5 zwFL_1&HWRE?BeGz<%g;JRojjxkV-U7deN#CkayH3ANT!IWq3e|n*BR-J*WG24iPg_ zgzaPxXsuGZRl<10?QAtt&X#cZK`fjSjBQf0G^{i9@73&eXbUViFw(3Pjc)Zfsc&)R 
zn6048Kayl#E`YyZI1nPMrGOnp+h_cV5y9^4DVNS`Wr&{Ej-yupTeYThH5N~pBTaRx z&~4I8A@iS*9n)wVuJU85OfEF?N<%9Bo*2ruPulf6BaTXqFAyorb8NUK#J}&5FX2^< ztl}-Ba!qMzHUt)&$x`dM6>+==DXJ>OJgds7G+g#?n#d|&v?a>iEK}RNyB&XpSs6X){_*}V$azWVPU;>(YRsV>7`A5#d(9)Te>6m z6;*~3H{Cy46%_NUtE?@nmIiK2NW&^QYjP}R9(G0lCWV&%hg#A{K3z_-LiK&Bi~FMW z$Kl|Zx$D*zRxIZ#_}iT5(P3T)Uz!XOH)(ml(6qj0_f4~}I`*``XFVO|-qton#VhJ+ zQTV%2#tDx)?$h}m=m@#jQ-SO*fflQpT$efE)yIaw)zUcc9AynAjKTwM%rd|{qciyE zXU~z4eh}m5mswul%zvJd+Ppd4$R8PAPcrAUSo1?Z1iI;MUE$F>?TBBG7&`lun>%qR z=qv0h04suzrWqgM>+^65K4PonN^8=I`a%Tj_^+u5TNACpD{mN5^@;zQoF8aHhZK^! zHB${l_C$LP?uZ*B8pX(!Qp9d@4rOm^87x5DAm**)4y&4M#4`urG75p!bu+_qiRAD` zWj)-{HB*#uNW3L!`Vq{>>>pe5aKj*x@`GVz^HCQHD z<4fiS3sz@~DpkBm9elw1Q-sJP?i?(fys%+(6X=wXoOY#qohp5m zZ!zM+QT_;r1LHh~^1{9~YEgNigsxn_)DV!)35RFSs3wR zn7fAzvHC5g()cgbnuYXn+Vp1l>PpfaVCu4JovKH73B6asIP<0FqUV%w>^qPa#_k^5 zr~gj9r3||Ci&p6%pEm{>>ORtNT->)+!2CzCsGB2Ob7=gVX#R_xYJq}%8Z2m9T;f-p zNIzm3R(R@fy5B&JPZi#HLM>8jP0CsbBgkjGo7X`zDaI5Ost;qbnr)Ai);xPi$sB*y0IvA>^+ z?yO+O5tn)-N*9i2jj7K^ldDA|feKh8inZ@pR}p#=ag#Rp(yLiy5I<7f+tqJnyz$Vor-g&jqY7O$sioxCq<*w|aD(Sz}110DO=c&}AIOU;& zfK7094fOllFmeNYH3|MJ%$^!MU^;^t71Ovj?##Tb01v^COG1`7%I)xm){w!(4;@RC zRBHW!d=7y%3mg$+&vehyp;oa>$rNo%aKF;I8t=0AC=p^y@4r$bFW#b**6=YJY&dgIZ9PoEbW zqY2G02`^+0@YMJ}+$G$t47-HX64AatISI$<4rZ%G`MbR_WXGF);8~ASldK*u#%vAw zHJ*VNgJj>hkidLZV#m4ZyQo()CvL-|jDLNyoiwMo5Yv{Mn)S*J@DWG_w*R)mtw@<% zu}&o~^y9F1YsgxiV)|gqtCG$!ZwU9a3*QAbE&bDGl645yA5`u>&kFqwHC?sVNF;TU zKWnGPf8&ywA@zC2-U`WePFmpDMdczil5<|tU4uW}W$TSSh@}pOSA~{HmYWIEb>crX za^8$8cj>Mn(#&6rHW&?pm-4iG)O}vU9`8?sSp(4`5ihSP4Uk&@Jk-Yz+lw0V5uL$6 z7HPj}gW@k2Vi-_wADiSU;ulR9qw+40>j&9-r9TUD;bWTM=aM&T*wyvf+j%#bBpo zNagq4%rn+{+EHzvhSTJ1P!g!iSng({zDH8%zPyd?_rqoSihPa99I90Aqt+TUqK18m zy;oT`1eUU+%&;J#NEGpq9wh9&qG*_GZCtE4`!$KLXpO)#F``U^J6De=V>dZ$SsiR| zBp8M&FA&nIDf!pMY|>?DHLN{K>LLB`Uzbnob}G{1%ry{yWY5EznYa>vgl|vpwMlAK zk$gYXUGJNKT8F2R$6%xd#ymF&*7_R|&B)Yd{P zpO8&U$%65zM-WTvcaR>|4)Mjmz`7QxR-RYQ0R(m@ZojmQ#?JP8e&x@jpYxpbl%;e> 
zAwQTyHureeE1W6oKs9NpJzq8pC3QIqW_PD-*_V_i5YLmZEXfKim5y%u8U9$}zmXL+ z;^BRMWpXn3=L%MPp89ubROypm0~hzlgxtkDn7I5!y>fmrUmf$J$`>K#hHyk-8bqOL zV;86uxM5BYxAU>S(Xwd|!TP6{Q8q<5?Lb5s#?71>Nzyo}pW(!bDw;LqTt@{QvVzO& zeG@77zz^r8@#5R}^sXlb1HChQH=b2B>(6``|HsY~ex~JGOKRUx)<(;Dnmo5TEW?mx zOICbhx_9}n)fxH-zdE`F+iK=kDss#7!QvIP^>n?B>5OT;O~mI~i4o+^3T z2aCb4H^wxbMfOH24O{^exI2*)m<;YN(!sj%4QN{zI)mrT+F5gr#cSs1Lo1zFyN+~0 zx_XWIr53N;GxI&VKett40()mc($B_f-C^fdn`~%oW$jns`bPB4ueitzFo5JPdPkDFhY{xawxv+2Q zH2PNy$QP*QBzeoIr+AU-)NwKTf=*arB3N)WM!lVyrf1%ONr{_jji;LEY2#9?$Atv0HT7BEKblG}c3V@6sr=PD7UeyfDvuOT!( zjo0{FGOx20@hY3mab{liQj*v;Y(fhC@L@$LWtfAVSeEz&zV^=Yk1V|}Z?5-U%gCy9 zT2^v~NL}PmXU@1_lzJlDhU9}S;@%-{Nair=m48iEM?M>?$8;I_cYonl#PKS)yn;Sh z=C?PJhydFY&75fE{&=DNq$XAM7Qi=9a*#7%{feX0f%?Iw>4Ms!A-kJP(d9kVa3Zsj zg`ZegE^Z{pye6BGnh_Xrj*Z|x%Xg<5QD-x*(oAm2c-xLyujR`#f{8=q zJU$~*7()>}f65)3+N`eANIYhIL$+}{Kx$5wPXC<}78iNCAh{#UaHsb9@JiMfV|)Jj z2>nHS|Nr1MXBP#y^2{97T7?wGJj?;oAIEf!<&p+)Z$4@hNf}bJC~ogk9FJjE)vVjm zEg0CD#OLc{j~O3|0j96m40NM7mGd#-w+);~c96H2 zV7eCE0E(tC32w|)mRm-bE#P7&u{j33s5YrJE&!wk*C^q0dYQ*VJBE+O*1lJ0;j0#k2)&X z5}5pS(`3WDvpAuT^iPNz+xa$qsOm&emMq#6tesMlTsPYzvM-o#X^UpkC=wY{&J0AG zD~Q?c%%%%u)5R}zwX7g!2(_K6tO&~>o`}Twgzie#T>4Zcy*JnHd#rWY0vdjqtHxw) zMAz{n5_isb7iCy2h=u;kURey$`AKFJTIMQyY3vM;T`)3JIL_ZR_PK9bmc4TSnw@W^ zd@`nRj{i$~vSmJ;JIgs`mz4e~AI^#DSNNw&QW5{PY8~V@)cQMi{=@IcQIAaZSk9A) z$3jHW)$tM?zHie_1)lm$?ix{_X}yOg8*xsQn3}2peIjFuZIb-$6(C$UrWWTyf3_OTuatLP`=@H}?}Ko(hea=rF4mg|E}T zo-@sbsJTBTrJkz6*NyqDcSf9r9JE>s$pWUg!JZ@2i8n$1vgoR-eX6Q8`K%D?t?S6Q zT~xvB`dE$on9)nV$RM{)#U|NDj;=_s0^FtG?|D^mMt$icQSpg%B=4CWEuH2?Z2eB2^t4uOx@dL}DAKqC7vgLF-J0qLP z?3deCOl2DNh&l~(;-h`}oF*mZIKlm614T+eLuG7|g8CvAebrJEVOVw6aV0d&5q@D3 zgeqT@b>sdicdy7;+WLYu{)Of`iR~If`PV?SJ9kWG2Eb!RaD*b>=ZJNpf+9N?g*sw} zGHeoeNR4iNNtm?Davs&N50dCVhvI6u6B>Mwg_giTk%YT{$7ugp?WgPlCPw)^Uz4Rl zD?aE416iU&=m@2Bvcj8K5knT;#JvY7F4OD^s&GE+9yt{4Vx&RFaK-7M3o4o$ZFG46 zo=p({?Qb_y9v&TvXIm^Fkf>`j8tYB(By2a#hq&7YIPRD#9^d7Ao4RoY#A&5ORk*ts 
z*DD@aOFjLzceX(&>_^<}J&eg<0@{8nM1znR)u~$Du&Yu5+bwkDJyyme2b7jsx+OS8 z8;%qXNw)-abQ7_A098^sIcibKR;A5Ia#U`0V_!tWpSAhGO*O5hdBxi>d1kclFA0P> z4U?ca^zCHBCyW7BuBQa+FAdmrw?jVJN!!o<3ZY%8ZYvhI0+s&*pX1vT1Kx>@0+)mx z@k)fe!vYef$)7$>6)C>UrjB53wLxq5(sUJjbVlaUDTj3~b3LPG^O4AS6{A@U5aKRzc;Im1w*Dq^ck9_V#aDTzZDHJ;(Jmrq^*X< zR?EI%9P4iwvDiQ=1^;x#lqhQ4P7Ag8Guhl2F3I~bA@=C5y?lLzkTCoPb+D!33enU* z>vX5DbDWx7-gKk`rdUjQMiy*iP8tPg$k_+m{MQ%HmS8deA94kda3E^3njxmV>3qs> z?mlli!WeFKSkTQGPnaL+;$8lcwQ=jYb7tS|NMb+EBUAjr;1;~Vl;=z8X@bpduv!zh z3|XtY6>{IRrzW+K;TPl~bL5L2`HdF;*v2+DL*}y;!q*P*h8o31q*Fnw&YaXaxAgYi zK>6QiT?Q}tqKwfhl1vH5YRHLgu+V2-^riozY9u;%5bgaCqy{gQ;u}HoUkB;$*xO7S z3-f74ib^u|>H)<&%Me-Fw=|k{I5a3IL9)~IM3r;@R^D!<&JJEXpkC4r`g;-!7x?Eiqy?Yy=OY#4L7}D8n2W@DIkvDN!PnesHl5G(u4^ z5wf$;qeu8GLKAZfASXgm7{~AumXJl;sJ`FDeGg3Dlu!rf%aRgwid;>?{9aQa^zBu0 zh+efW!9O3qs;1GSkiULAER^=aNtIl?;pF8z5y$>${0X z4RSosY9$fZ4g>#|Wy9v~E>e+Yf@8LTQC&Pq7x^IL3PZV+`AMm`xR{Zp4lGdVv9OaIoSH~h1UdwYd zy5bslbE9Sy#QEk82;=7tk&cS6o!aNunFpP&&|kxCr0)%O=-8o2oLar)dxKd-)VODy ze;h>b9FMPxN9_dI_B_!_iSjYrPtpmG(Ht?d^mlq3rd%Y|j4czEGAhIZ@rr>1uT>*- zs7-h-XiV@ysO&<#1sM8LgZVo<{7XOMtf58Gt8_T+__Ft0<`m}2?+Y5(!kia%tHmab zEHBngkyw81jy97Hdq}lO&3K6XfX8>F`mn!qCFCDpkfSOT-cH}*2BF* zZI5taE@e@~V|tnri_W3rm1)ulAAfN*-b2+QxEe^wl!2xUXqS3Iw8*s6K?brujOTO-w*Z|i~li`vqb;Mlj#_-F2VC#lwHuP0dF~_ll8s*0sXUXB&DtiKvDuL`{X7q65zo?ItUu$mVeW|b^r6JU-eB$nD3+i=+- ztIg}_?MCtrOgd~`lPp3#gGvmSFiMXq%SyjS?#}uDw#e>2Fng zN9c8KqRvy1S^-6Ju~l}9uAaPW`On_g==1yTy>rG;lr#>G`N6R$?FJv+Q^>2S5(IL| z5L}QO%YKI4NQ`L|4i4HP1jU2MkrTIyY$PS!u<_l3YjqBFm^$k1UHJ09BfQRa0TW}z zgZ2XzH-WLcK$o^s6>I12${Mb>@}Au|N$8h(+OJ|i%ZFVNAm-1{QbY{gP0{jlAai96 zk#MMjVcKsdBRWpE@4bfjJJD798=5uzx12mlbVsz+v=(GjaPiaqjMOMcn%lg9Kyw;} zwOSwCBPTCj*bua5DVn^2_>I-$SqKU|^yi(JVFVM~4O zi2t-G7x|1rH&b%D5F5@NmQ%AbL*P%lTY z<4H>K?umWzw6*v3!>T1tQ}Z_L(1!mFh|6aZOQ5yJG=)+b_D^J36aijVFwH)bsfSG+ zI>~FY3U!6B@dmyCet%8k?mv^ibV}HP)EPe#P2tNYkWyq!iSYZv;>2~VqVW5ai*FOQQuFK%6BCMOc8O)6eM%8W zZ^*VF$hBDjr}iJj;Z1u?rvJY#-S6Bp$72`*GC$$)ryL 
z_b1L1SmzF)))#p&j*c@h+<^HCgg?A-37Eu*tYBSZ-2HQ*?YKrjz7v2q@MSrC?o>Izc?YS1#Qk zSyr0b_-`*NH1$wZ{MWhVxibIWj%R-=R~!k2SPNSfeT-Igyusc=KO8F;nsia~aEsw= zW3Iw5eO|shCIZ)@!X$s$LL=ITzet%55zm7ZFw8uPW8?MzhR8-+OhXV@oXz0BXpl>M z&Pr>iDXqPBj}0udN@I9R1sMMHu>(}KY+d@JSWSbq4Jm%N;-&__)~qEOsV*AwxvTQm za*i6l$9btNVqNnb+2^@W+w`tE8|1#eBLTaJ5Cb#gf zdRk>lSsl8`L;I91gG}3d{xd?)w8>b~LS&p#%$^NF^4`4Qll0<#${vU;(L_IDm;rbs zN)=QeXV3ZsxK=}TxMzk_7FyhQV7z)Pqd%LeXRdd_Wk|EiREWbu;%92pFXNMaK+j+-<^$7AJi_Axm}<{t+7oPC8|G`$$dGC0XTaGIyoG8x zY57Bug(c@!X_kggALmf+h?b5Tav5R=;_5}#aSxPbc@U%E$-geK{jWU6YbYtzhIU-w z`3H%Ddxi?b8G*RKa~bY{faDtRpt0}8kcT1URCcj{_wyGQ^F6bq3Di97=~dQuu7kPZyqu2% zPSvLTh|(`GK;s@7vLEC!AKYI0Av$nKo~|Og918ozYFuk4pQ4&_4DMTquY28{?C+GX zRC#;d5AgzL?P%Sa{xx?V5NFS6?-G74LRPJ}#hY*QTdVb!l{zJx&R&Ft*&-Od#pWDv zTcdZ7UnaJhTAFUsD!G1u3-NmI85YrRpUg8P2c?lJEJ6J_U-=vNv?4;=Kk=qqcKCn2&=rI!cK`~aQ z4YpF8{ddLZ(vy)xSzZ3Zx7g&V!b*#Vz*SVG8zyVgb@I0`SAC$#)|E7@U;}9sQdD-T z^FPb=oU3NM%^0qZU_OY_u>nlzpFRmLX;QBESK(z{8ao-{ie-WxyXx zq5L$}+2Ys88JOM?%st5#>Q&UnEtZXastZ!leSZfV=9>O%rpzjNj zM`@V7FOb2nvHh5AkF;%-M-OOT;i)aaZ%w6;c6W4)!kQr?=7(agLQUlLnEn1)1{tS} zdEOxHIzzaVD3_rFA5jk6$NR1KH=<+FvjNSzF@N}+kV^jN8dVxl@Xo6(dkO!R_a~@n zz9b2;!{ylDX~RoW(qR@7!c2ANEhGN^h$0=xd*{OzYH#$BZAZhMMOkmln`S?}2bi;g zVT{_yp`G8wu+jKBJEFF+pQXsX;=C9Y6JWGkPFUVx1iA2%@r-!TY7E1%!tt`AWylE) zkz-?3wmmnQ&kGrXm8!N+1L8T3i2{CH^ILTCdXF=8nmH6j)5aN6Aq_5@#{C5TI;+`` zKNxbp!7>z8%x~BDT_v92jdBs+y{A4FfaL&$1BTnyN(dDeUL{|_CG(Z67d6RO!I+Hk zD&miD4dEbX;ov-|fYX}@uYUA?^$5k!n2MMyNKha0 zF{1EBkq}RL+V%umE*XS+Fzc7bD4{Y!#*UsrdT=-2R>W22t6mh@h$>W-O(>x{GSXpZ z?X-;b6rIe-?m`SG(V%#9;7`e%M& zKKSEv2nGL$UolP0a+78hlaepUWpDOYmoUcKGRH_U6h__PjKngoHu}s3k6^4iP@m;O z;LSw7o>wW;Zs#dwLQ-IAx7cRWSZ<=Y@-hm-vZfrP?b$&%Rs2R+*R0n~I7um2@_pn&>73U-coY!*Mita?{Ynsu!3(im-Fz*;i{>$du%2*o84jbnd4uV{ukF7OT`7c8u zk5V||R7Ov2!{ifkqNBicpaFSvmQib|X%M8EqZOBa({!deYE z!LX>JLy6}w`obs`TD`5wW_5HFNY*pN}WgV+FA5<|U zA~fB?*w=%wo@Q%cuR>1qd#NLNCbJLcY8S`=w=+K^E#p+a2P~p@@si+gw9o7rMAGKT z)ImQY`mC7+NM)_KY8Eg%8{_{4d8xRM;kRJTN=^|z)!g3kAt%2-N>BDK2@ubc^ZZ;5 
zIojhQvyJAB*2yGtq0wL?UYh_}Hb~a<&BkztlvB@6c7<=0?%FGx-946hm4hDSOXfz) zd~ub)>?k&;d7ar|Ur!b<>{$P5S=K{-R$_H3 z_|~Tv&w^Y!E3#7Mf27iKh#A;^0*C|oM^Kl1sDB7%FJAG2c_}E|C|#~Hvl#A`hbbgv zYSy+6vbp*%JTzV#1TH`^haf&KfY^GF+s%NKQ%VsU`Cqk`Ja-|$lRu!9fW>QJI@x{# zX5C;HhFrGj!by`*z={IwqcE0Hy-^KhWB{p7xc8{0JjE#IW@!SOl;m0aQ@Y@d4wtRt z^jsNp=Ud$TM-M91_!H*lz_igVGPo^<%U>*Z^TW?O%D<=7eVi2~$^SoE zjQf)|-2T`fT^zDyB_ltkANMIO-I8gD*c0e@DA96!9oClhY)Xuz(RWTz$1G~`%wLqT67g9LAL*Tz}wN<=hXL&wvu;Gk{m|ykR(}DEc7*M z2T~O7UN!k{o4+8&XK6DMOzGdyWEu0H*K_t=82D8JOvv(5eztdwqnPMy*@|ByJA`VN zAT));numD2wiiQcbdzju;~!NpnW{IL9qG*v#D(uyxj| zTF$9UO>*q>-8FOWAMUYThD8(A+{vnhf=QHTwFDUK_N7M>j?dG%M>Mk!7Edq)^1|nAum@Pm z9HH=MC{y)`5rCV{D(8H4M808^EZkV8lbnOar&i9MEp0Big6b?#bZ zAH!oC-zoD4inu1umqJ(XnaJioY5qZXd@d3-$%c_WZjMx(dyf%QtY*w>-vH6`k<~yz zmQ43=iMd8joX48nR1$mc{s=6udBJCH4BXae`eda)Ylo4qnJBjQn>%~;%w5QG(X`Vj zc1&vWJ<4mfEuEx0Jc;~G9jinBCH>MLA16j9L&eCbpjD?(u%(VF&2PcSbcAF^nkwGO z{Wi8c$YJVWtiv$1dweeJPIxeJ$`6YVYQ&BBLa_(lxSJbviDW2I&=dhwQ+;~ z14(FlqPQ*V#yToJ8@FzkucqL=ayM)Hn9&if_~r(m^0-H~Kv&2{0<=HZog$m2m= zdh0atmpSGuY-grs$|TpL{^svu;t&^j0Gr)bC-TesO>e0Fiw;BYYAgY#zp4B;km4U- zio+K$ah_U;_b6w^8w1u*Q7z;r9?X>1kns<_%og`kOXZ^M%PutdDQB-|yB+_-U>G;+ z`Z+tN9_KU1rr)-t1;dX_ZUc2HCcEK_doI5>BUFxG=x#+eVtjFzxEl1wzd*S+kso2* zxE7tyjgTLKtg&j5(TJ))CmM;96d`etu-9RlcdrX@DifQ7-_ze;BAOr`FaTo$N}Hg z9Ms)gU8}3gx^G*WOkv|nL5Hy>P%=v?TgObTF+<=RJya057(EcXH;vd|6z#Oa^2z6s;9|4=vPcnw82>!nrx7MUV47xIW}H0-vAq4;GLWichfx2 zMTaj5+hl&HFFnA5mhtL<4VJSI+8Ut+{^~1yeS%H~LL+8%jv(e=Tp0G!r%dH>G}z z&W}F1;vie-9|Nb)J0-B-fN*&z&|HcC z8%dn1CHlx##4b7rC7eQDiU`941~XRC@$FV#tehye^_d?g?7({=F`Xj)_AT}1HJ?+z zmd~2n`#HU;F(6jg;$A(f-G#1+>Hei*Zn_RJ+CE<@SceCCn3`EjM#pkJh_i%DrPN`u z8~f?#u2j^>P9r5z>nKtS8a zUXapEZ_vHV#G0WtQXj`ZDDnG3q?KaPN8vhIcM16n84tLs6A-}b~JE41M76j zmK*}A7@15_;=@wzq-8^k^^;BdSeRZ3oN;rVLWXo)+?nd{Ps=-RF&j%w9|jyL0j^`w ztCY=FC39&cRd^>*4{5|rzTe*D_~x0Fj)iKB%CpGMN?cjwwkopVy#ena_;uFkz@WG! 
zz@$ym?M9%HDVNwWzRI6VTd%EECmmn~Y;`O5t%NgyHrzWCviQF$dC13@NyMz+ z3t|&9t{`b%>-1}L#@)!Z1n#OZz*yzpo1|4e^>;lvSRkqkUK$aQC<|CtqEtu=skpc` zlF~H#Vg@SDk0&0T!`ud}ZmE;F8jZiqdU|}tBrD-ArL};4={r3NIrO7xvD55xQbZAl z=Q8Kw&}rEv7pmA1h=|r`WgIeCHcz@?G0=pZSk$wrVL2uoaC~$|IjIX=>CZuwSrz;s zVEAF;moty^Q406@PjVEq^R~G97hNrNMWUqiW!P*=E&KQC&pzxE_Uc zp=zZK%#o;u;_d}&!v_Ukxq^a*GTwr05Fe6;>HORoe-8XA^$RJIMr^;wD)Ms)-A1L9ORIqhJaQLK?(G#OKo-*dlY&5v4 zXAoQHP}Ay^$U%jCS|hQmnE2iQsntwu(qX)=-XJ@wS5_E>>Ses@O5Hfb$M}#rYEonemkdZy;HK zrPPHSN>U^wU$oNVS-tDHQik0avt2hhf&VY%;t$0ryb`o2zftCXxyy}w$^dqwOt!@i zSQ)+KVxfL0s6qC50wtYn8{1<{*wDHcIjZXpzq7u0S|XimhckV|2xan=r2_5$c5fL{#6e zDGF@OVC~>^&Ba|4fDG4DD%Ri%#CogW`fxuPg=B{>?ijS3kE_s$3w$a+7Q?Pbhx!<1 zfR(QEuQ*8?|N5i~96nbZaaP$mDPx)!DdeDk7or=<)~p6{yjZ(n6GeQkH3j++F=24 zj~43-VWwiI#n@?}x~+bWu#OK{4nJhz!VWr|br9)>br9Cg(<~W_%CxCHaI5KhuBij= zm_RCQ`>Xj;Dk%RQ)dyW8h7gy->|_zw*?kb-J&trm;VGJ$aMyK3*mTzl^1)V1knrzl z6RCZ3?e+l7dGB1kll;!UA=#X(aDCHq-V{0;f6PuCDb$e5(-iWpbEM}NeB{Rnja~iA z?k~vr4TE_jP!4_|NPm`a`S`LTPHxiliT{T{4CWih(M+v>UDUlWJ3TzlUpN)PBk)Xs>VxDCk1*#6PIJk+A<|6;i!*7@My=Z`A2Jw?9UGe=t5| zn$Vx@Hm#sl)qFr9s0x#iv;ZA-*ZADg0s7Xb>evPEhVidm8Q!#><(l;&U2$5UUef64 z(6JHN{UJ3`F@Q1O8Q76_0^Hbm?$`Z=@hWk8dQAIt5nNJFa=FDwp-UB&?5`nn`Nq@x zl_W*NGM@%59_CgRw#zEFU_V^0oj0cLpYsEksu{rh4ZSdvU#z)mb&A=;L3R&S136E z$L4B33x$dBnN*q^L_yl7S?Z)!S-=d4hm)xBn$9jAx^q9=U9}-pTuXV!gPG^okzbn= zmgOT|&zPSCH1Vo$m|q$)zv;$jG~wmaMvVW3v(n1j>Jb;3KRhtP$N-<9Ddu`VSYY@5 z`M+Ua#{^<>=~68MIk8#nidA*o1fga^Y|yEIm8p>8IQ|^cs^GnlXmqL!)&DUuS`r}t zXdiQN6NFu-AEdTR1S#=uc>9RP({nXUMnTSE!#<}R-1WJ)jMdhHOB#w3DYkExrmMoo zO()e{jWiTRE2Kg*d~hYE&_q@EiK+z^bEw!G7h4=#yQ$q^o1om<59VXg9*G>D~Tv29+I&GjO}Bp^GOO~HL$xSo*kA_ z#SV<4RGLXKe58Vu9f?wXufak6lI_Bn0mAYRD90@fm^$HPfae|t2C_eQ5S`=MS+q;# z8QVYJ9YtN%chcBRQnG_$x`DbtA;8H8yEXF9iy?5dB_7^8;E-zFH16e=!<1P0AzaH{ zcNgYow^6QG7}3po+vF8))Re;4rh)S;TFR;JUt8d)TAHVEcPX?rG+fQPY|zTaJKWo# z2%HmqTe{l4klsU0%@f!qtcKhAA!7`dw&NL#%B{D*-n5WY9bHFZ3Q$+vWzvv?6e=2S z5U7dlXv*Wz?z8&Dn{lhs4ab#p$t!ZEJ+~tNKh1y634H$Co-w`J*mqknB@#DO0pIv1 
zJtJ8yah#vQXrVbp?|*1V@Ut!B80H_QDBQCK9s(u_(p7FxGB^w%VtRyz6fCQ!43oOa z4K}RVx}c}{+KET{$w5OE(+soA|5RA$%D3e^Y?l`K-`Ph3rlW;_h%L-{lN7qft&~zS z>bEu*U?xtxuA55}XkX4WrJ5By*$~*8yIy3%*HOq!lejr5P77R0OWrM5S|*TgaOQs@ z+VC=ZKrYzHzVE4qI`FFyx8)uqfb0CUfmO4aaH??VP1I6&2jEk&D}%5#>k-4SFvnI0 zjZe_W_r<9)3JHrKWqzRxi0bnRDv|wv*Kpu^lgd5#nO7psws?Cp9yLw-|BAjnE~+~H z|NS}V%$YMVGaLrQK|s#HfT(B#5#s#}AP(Ld$qcpZNT|3GA!}w@+h+g~FV&#tCC$c5 zMXkHP(ah+wbwG2mtjOG!HrpLr+9ki_x~|)rZSC*zKVM#O&YaKZdEU?ac|Y%mN>vX1 z@C`L_5Xy`n2%fB1#@8jvJ~n%vk9``1dTAOBQI8L94VSHfNcS@G*X!}fs9P`m@!8v>)k~ioWXT-sxc#un9;F7Tib=i+LOc0sM>+O(@MzAneT8D$ z72pXIf6FUYz}n$x>11Ws8E>u+1`o0=RpOt?+DnhR=U=ekeSW*q(GgoKkSEv4hyA_M z9d{7V>;&d)z>^-DxD3mcy7b3Ev~@)Uy~Ypg@~IrtvX}GB@}ryam=vz7saOzMvK%$6^zNYFO>b4hyWo9hs6 zPhO;1c6XADB2UNOX{Ogpu+PzoIYH7XhwWl-H-fwD{BL3a%QFTQL1*k#dGg=Ij&|ut zG@qC1OF5^{{dT)=ju~*xWrmRD+H#k8;=56b}6^yesH9K z&m7Q%of1~n9#e78A*ne)u*!jzmA$z4UXus1dlN*h`8u+H{9q6BA6V9I3Q1dsWTEKP zns|1+-NcM(fMc}&V3RGawNs_9#6YzB8`5xHvfBoMb(xB9?>$drXuhxE*~;{6|;2-?`S~7sY?CJj86VI?kj|-aced%gd5pq1*4!0)3)#rgp3(mxvhOW zpW4|=j<&b=;V3gE?bmpH3k{B(ecd6;m@s;b?r2zHvUg+J-XG)pT}Xa!ocK!1yQ&!d zikgS;b2safhFIK;a`Z00z~V}SM5@rw;iBuD9MkZ-$i>VklJ+WA>L1y_rYVq`UJbK<{8v8kKZZ##MmJ;nk_VXY zI__A|64u(qUNI_g4_2^C?V_k@BN$aAnO?;EQ1QQZ4E=F(>Z=yF(%||^2>S~ z7lQgro|&n!5weQ2n@tq)#4G zRmdsrtaMRSEmxz0>c>^~imD9*{a>1e46;Ujl8%ufX}0AlCtC4@dfIB(+(o@sITvW{ zCY!8_EGmEQ3K=ucs7yA!L-x8sV^Jz`9VAU?@SNRCbhtksjhclgh)zZ#f#inP%qhrsmQziQ6&gVPF|B#>)ZGA!PI0%^Z&+QE5|}v>}o3O z%4)|EntFb=BVeRyHdWaI>oj<6y*_3vMQRpGPao$eo*Beu9BFRd^91WS?gOc_iu7-^ z<#~*`Dt|>^ShEk-6g^pCyHH6PUGA>OA0n{EJ#BYO7xQJjy#2Js9ls1?>IGWQ*i&1i z3fQM>VjE4Lw#a5$p1VD&I{VzR#5@izZ;IN}Q0N>$a%~#Vk_0LyC9cHKz}?|g|0+}B zklu^f!#kqa`k<`r=<9tuB_=`M(Z^3O(-036$<3UtC$?5hFp@8)K^U>g@sU4DF(cbB zCJ_6Rbwa(&stAGQxigy-`znajHA`Vg*BV({Vjfj?TgpvAtECVGVeGsJTl$VsGHVMq z^(tlg5A?43%%f+9Dk8sZb6;VxvsF(rKsy18hvyJ?m@=D&`(`S)(q>ZL5O=+i`v}SQ^8~!kg`EcrA|Qa7M8-L@ zz0LkKZ87Wo5g#^s{wvS8Ocl)Xke(F(*3_OP9qKn^#-_RCzQu-(xgg>-YPXYa^*Ijk z+ymV9hJnX?aFrBU6q;M%`H>>|Bi-H%+l4l{Nsd|9>HMBg^;GrSn0k+^-DSF!`(xa? 
zGr>@2!J%nk5|v%4tASqX=V`2!HI%~gPc6*T*t9k<##Bk+g{GcOmvWFI4}n*2K^!W*DjYNNjI`g z+n3zF8r9?CWdd0qa>}dQ&cDOU9;|tS>B79n-_#x3b`v`YxnO? z#cKUIU4{{YFy{-7-)mw%Q3sMKm4~sKGsX#8m4Ib`jk%^a_EBt>)`alJA?j}G_T84G z(uI&N@F5{|xG{CzoRks-f{y{!cyd|qn6-^FBEYP>vxR2nh{L@C_HnjN%8 zB=?07JwP6_X)K}wpJfR+P*d>^-%+p!wmmJHwfQyn8-aQ!9CsIU`(Fz4*mjXhO%%D3 zEBBptm^sCgb$yhtnc3Ci6Tu*IYo5d=m$YEVIrT~X51eE{lWkloh4kTOJhc^Zv?5Jv z8O;-b+#O+2rrgWx!$O_!nZw6g9@9~)N)u}E)n?<)x<0GndUEPNYQTPC3@=OotJ@V6 zo`$GWoJnnse{$x7llkp0od8iA@P^0h)f;{jn0oriOY1P~^Z>@d_#`mwoaD zD*8+3hb*;_&X;U}SUOP>!cJX`I?a*FcWUboP)*te4A&m`x)ZBeswn;}7+Yak1?%Io zP4MEeoHrrodgZ+^1<0!=pJ1XB_2xP#{P z=;b!!Zzg%M|j1gou9;`vQ0eGt$H4Y7ao4^z9#sW3q>~ZkG?;>WJR=(5Q)#-moD5cJH zV~SXlaHx8xcj0Y8XQzJC{s^i`X7I;`t9gHpuzT{{(cAuQlb*iU`~GjU{U&t&ksGt} zGg!_V4N~9C(%oH_JE)Gx2nyn%0uP&NvEIOmzZTsF-zGa&!NHYAM2ZEzwU+Sps%gCTiIFr*tPw3_=xpT! zkZP$QujgE9g!nx*e&R7{3oOLBmrmlI$=>*G>2Z^_wuDn_;n`kp63}wH^StnDNK_Dg zR=j}-4HZm-<1MgNII5-9dpXU{qeiC{6W#o7e4Csgg6CW+rCV*u&(uC%wX#3`3*f2G z?OftjrwrFq^wL~Y*)Ql2C%^Z1@)?=N`^UWl;VPUcMF7HQ7liZ6yri=hthE~Xz&OnN zHVWMutpDHzJt|suB5^CD^=+|XN_dHy&m)C|1u!OQ&NJ6j$C9qIMA!ATNcw=D3?X1Leip=$;I zN%DIxlvg%)4@xWkKB$WSuLn}ftC!BD2%Y*ZZYhPx0wZ}3I(JEQjx{#nErcyQ+XtY{ zs%T)$6USY(l|}sR`+p{JO25u-p4e9vhv6@-#)t-2n~2_kFM>&tm@;O*!N&S9QSo5z z9WyoWX&!R3wGlTpHFZm4L_AKxurw+>Ko4NQwH;kj49zSPm$%6e5+=Q)WZZ!}_0 zAO(wb;Gad9Q7Vz1eQrDZLrCTO)WsuXt(WI{nZ7MhB-=LXbf-1>-zMb&p(0dU$H4D+ zV`Z0R6S4Chtjpd6Yx`{xdqvFol!|4S^?2Y$Fl$B|+o^Bh_b@@u*e#7v*;#9!cfkQJ z0ts&RK8ar1AkPR%G^7rG&_D;9l3}J3dDPsat4GqM5^c}M;bexwFsvZk5Y`jeBGmeo zS%O#F7kJm*5Z^7e4yIXXmB2ZyC=0Kho(vZk-(1FJ8nIZTO*Q}TP@d)?{n9Qx0xScW z!cSu3@7s)!{~epK{qM^|OhKEED~odUfPG@Zy6gC(4nI#fLwGR-+Nk@ zx+dceKa1p$55h1rE}G7lXtoN1LUHTVrVX07E{Ctog1X9vzti|Y32r0BB_+POIvyXU zIs5dMHSyzh|A{SZ;PHdT5~m?d+ll{_W7u(1;Sp?|DR@ytz;z?@wq7*CfdvJED~m`+t=q z61I#d1x5h!v@~qdv$-v+Ut=O3nhgGMcXK)Nj;^r15Ku+&|(Np{g39R!I9om;fu=4K&8)& zTQHmi9Hh{>CumukNh0BU8m)~4o>*f%bp-MhFQ_PSG!!Z+Th&=Ia}{I{nN%R+34%H`n;pFt|E;Nq6{RIa7yS|lSJkKJ 
z1X#@xIZ@0GV}&_{oMV0Y$-!%d4AB^Vn*+UEE)0s>{eWwLzOQP<_ZGr%h9Kn26VAju(7Z8ISMxgpB~!3 z1*x03Fgb#>O|__F$qIF+j`~CHjF^&>Q^Jj5Yo_-DfAW~=zt?`k?K>jutH|_CVK~l# zeavslJ#(k=N8P(wLgUmiAlhIzh3a#07jWd_6C-<0cV44neeHrJGr0J?f~0A%F2Jod zaUGh`g zs4GIvJ= zsultbm5af``VS~-hlPsvU>g|$Uo*mDqf;NXd>3<`@`+s|Qq#WC@2TQqDaNb9ZF)aG znonubPcDzbO=Wmj6Q95nFE!3xCmQUke^=Jp}SevwTVhMNjR#Pe&Lft+y=Skw`NbmZv}xQ`Ns$K|sfh{%b=>cO(d^E}n%?Dc~e^W+0D zN7Z&LZbjC-8_<3q6spo_Rl`iA7Klgv5sHAb?N6U>HFRNEs^=lhQlUH)QCe#|dQp4u zM-??gmTEETL%OTpE5_q;g2xN@>KK$fOPBOg#~_?^cNb`IV@7{=dwvyho!k|e-X~P6 zciq-!d8Y|BImHxOfv+WI4*%wC0@B)(A{ZTcS<}mohKvy(hLMvBtK$luY^0(<29AcQ zxi0Pwb-8}mULfzZGg=>vx^K9B}1CXbmbwUFchEotWv57 zX(8S5DucR%v$J!2I1GXeg+x^{st2<>CK7&;yQ9c;IMitIl;%t=)t#`&+N)@P!c_%J z>W-(Z6`${z(w!Z6e}5ZlRy+$?FXuUKYYo`mkmiipPHQC(Mfgk?>Azxtc$w!X{>d8_ zR^E0|{N5XNv%E$WsqntZ_`p1%V4y`%GS;SxjVE>e@ z`Gmn<31|B?j7L%QUkVHt=9J@O_nCJpmm6+=fANI6Hz*lHCfjV$P~Yu z*CsN$chbMNy{JuE?=b6_U@sFU!p8K25{yB@xq6vto_xbF? zs$;@qc#7HlaJ;LKN>dkRWe5S*Wi91m+=Ffic{SkLn0&Oki)8Eh7p@BDG+hz*aof6d zj+EUL2mY9K8Y3~2EJ1D*XmLU(oR_Gt7nfIXL&Jirq9F|@<^Q~-^vI*aUpHQ(53AyY zY=(#R8K@F4TuV&2nUsFSjHBtS)-+MqZVBA%Dc&|E&eou<}E+Jk%X^7nmyEl zSF7l_XTVtpPknd+tMWXG@n==b%UMjI7@jdKWOwOc??oh<%7ORxORo%m)ZApu@ed>3vEsL}AB5wl2J2_}N8kDK+1h}3P@UHH`#b@w;xMX}K!D4_8;=lp zpo(F+;$v@i10UMf^yXpp8^YVN5A90|la#aE_Q{ENn)ioRymH&l1D|~QDfDFSLl5<9 zHcnqXLdVVXl%+iR&GZAG-@Zp#Z8!;h&JVX&OS4M!xbc0Bf{;>d2`x`8VOb=g;hSVC zPKQ!lt%tL^ow{1MhF##B-*bZu{fK<|3v z=4!BYgy&CiR$6fVsnh$kES6h#OY7iOHEiFd*MvQZKNFqZx{s%=0vmrqgQ{%nCTY`TlBET`c zOLwZ(|HkINJk}2sKjfx}#}2+-i0|+amF{!eOjYgbxZ*Pf~jW99yVf^%omOm*%0_kl6mL7KP<2xtug}Ul%9SQxex2Ue} z11oIJ7l=T$UDpMW$LACFjplvknKoI`^VHMC!j6v4JJ!|4;gwGK82p2`Q~@2 z@m$}@Z&#F7#6*t3S;<$jVqd5~W$up=JXS`1sp}lHhI3TvQO!shKu1gx$2<4&s^PZ) zYt(wbj)N()o`7#Go#gpr)a(nQ>wqq9B8nl0T<3#^FoBv#+eSyFsxud!M&v~WQb9%K zz>xTAc^gmOo};sWztVz+L*d`)jws1D)Ma>#nr@ArF80X_8mf}(%Kh9P-$(K>uDP+S=I={Dg+%pae&ybTf0Pwh6hX5ZDk4Ne<}cg{fzdTeWHZSvo1~iftUkTq<1U zSVd`|qxP<-x9>XE{YfvZ*?bG9)wd)0*Z4DQ2!73}dVxTN@i_`{6hckpe%!;SA+Zgp 
zs;0cL^512>ftd7U$ziH8(U<%C`3IXzA$Bm}#j2X^mu2(FUhbg$ZhS8*syH;n886^# z{a<{zEM#Bm*QVfw&KT0}P0#4o z{V8pD12+PZ~5Shco&nBp=cYQ`##ad16ph zsHY`LL9W+nC~k`A-ujYEtyquz%3P6b@Z7mAXx)a42F_>l;w~NdzTSs`U#$DExx-{Qry&-q%qt4S^GYRvF zjpgjG67)cxJ|S^6E+tWfo)n2h-+L7fudL(LlHuWW_JI)cs2ArWUDyLqNv0MnOJQNO){wIa=F>ZNFx%QGJRK-eKa7{i+&&u$0Gj zB;VPOwmNeF%G3W$*h$3V)AmxO*e!1|)9-*F@L`&swEwWN*0f$7PEbT$)eAfFNB6so z;~M@2d}RuJRuuHw%oV0-PFN;?Vv==`wCtE=kakg*L0X3N%M?|KuWM31%Q<62MsVlaR}@7-N}!)|%<}(;W6CKzjSS7*Ge+@PJ7Lu!-*0%o zRjK$+=MT)dcxyNZj~H-uT=FSR9zsd%d4o}q^ms{7ik`So_;CV0OoRNXuvpOImYUf* zBtM~hxUWu;UeSdIJes$38n?w4#G7i3Ipzq|Ue>oeI6K1r2Y*q8tEC)FiQFG_`Zd?* z$kb?B24CmJ-{0@Gf8V38Xb1}=d$WXX@O+?g@6MH)ZchyQX-@I#%Rz=iJ1Z1qIoXqS z&wtEMP1~ZOcJq6uxixO?3w=_WW)a=tcJji~1Qy2+VAuH+TRc(keg(}h zjcUZMy?E_g;b{r1;)Jb8i0Y-CkS}!iW1}mbJ2$i})$IgaayXRrdcuvgv3pnEL{14X zlcruHK6mXLQi>t#yJRwh8p3>+#*V60rtGQ}VlP}pny}x{TyeoWt1l+>sa{BZLao>t z%-7@fu@+0+hx4p`{>o==pq@NqH;ZBAZZXDML*yCfO+m*(h6av%f_7^yevmtbXSGLs z5k{Ku;azO_0!-STpjr09N=oxR%aPo>snI6Ok%UG(E9-S7?1F-MS0PVU;EV`YbVf>` zYr3_~X4!ZZwpnLP`6vOCShCwW{{*tU3UUOpiF@4mipyI+6SburhvF=WoZ06akYfMC zE1|3Enec5ovR0gK0`1x0n)JCUY7aCDunyv-$Z_1U)rU@BKHDH{i{w0#3|+!yKzLjd ze@@l}EdXn`#N@vhO{9+S_LtPn8fe*r_Jox1X$H_xlWJE+&SDo5uBW_&O>t=6s%8Xr zJk$(KN~LA(N1%H+aafh327P6@$f6CZb-HtWwkA-I5B`rseO2mkMzxprj(7<+{i<@@m*0-)sAVm@Q3EzG(2V z@$Va<(d`&xa49*Y=Pt-Y+C&I{&GQ6@m7o< zCL~`&N`(5d^geQdAHz%O8`?mlPUB-N6t>NwDSOJ;5PC6&H%R71q!Ub%u>H>{Z510E z;=dh<#MeO_NV0u-(1v`DhR9Ct*dp-Zo6CR23-fI))pQBbTrLL;Fwa=J>M7gzKnOZN zgIH*RG@+Rq8Pkr9=`PX~Ti{KM8WPxRkiCzw(Al?s#@I$jXp9WDWeHdya?`Ruf+su> z9)n%k){@457vm0F`uFLiGYu|F|7*m z&ycl{k;=z=sVF@cw{XW#IATtVsOQ;elpJ%hO<+e?EaF08F{s-nlu_=ri77v~jCF9~ z)$S`0>wt*BRSNDBc8}ursaqlRtKPON)~GseA$+^8{gdwsYql*ZnA04O>t430n3HJO z``y)IE(erdW}&~f9`z=b)?seBq$|Rc5&AX{Jt(BN2lKp5xAL&mz*+JD9B4Kfl`Nf` zb&K3>u=$p@uh-#(aWn0ywGhlvuveS;3Dt_ka|1i1^-x*OX&e}$mMrA)^GcNDO%2F1 zO-&T&1w%Nv4COkkAMljBsFM7@Xao~>2Eii#X~sG>ehHjq{i(IQqVY(iDV4aOnA}0E5zwKA#Unm)HQLgva7OF|mnIxRj^PnBfhPN;IXP)XIK~i%$L;d+J zxKu!0!h>sg>EA&r 
zj>JMCp?;FEy9;R>QJp$sn=e(gM8TYfnbodh`(IupwN?d9wgeD=8tybp8XpxNM>kp0 zk-KaxnZLM}7~_e~J(E&?{KbAH-=U&_3-j~YlP1X{R=f~%_oe6pJ3(Cc2njedfSaT= zOzY%ul^(VG%CTxZgm>kbb3zchUW^eR)SKHy-mJX55$bd0z^Z`lXXb0)HkC>CE36~H}>A!Vd{0*)mACdaj`Q>j>cgiZ6)wn|g1dS#;3{F=z0 zRpD@B`xeL_QGgdK?L7WCk*y}!6komAgtF-HHv=>P=N1A2a)=ZsF&dU}HExF3$>{Ef zBeyP6uscF_I3PIsHtmp!u0W>2`I@4P0v0(Xs;U*UO1o zcNx03z3%pYISnlmge(Ialrm59#tuXNv~|%;J@u8()#Xllo}VBZM_9o#hQkYC)sb3! zoi^>SHxnm)tYQ#`>6Ce3C%aI~uB9>rTuPXE36Vq_+j>b7C&8ha~TGK*{F;NL^|u-om~0ycP3 zy)Afe9txft%{T6$ba<7Hzd^5^ixKX*34G6B^ZNb$N;vX_)X1mzNmRtM=}{hdma}|N zM{LAag2h~D*9>{p{r(F(j7T(JC~&imeQJ$$Mh1&W7>ZHLHFRM=K}A7*?h}F`<2Lr$ zBNT@#_e)ArGhz5P!pnX1AlS#c;$x|~R`jKi6emwdwellClBFe6I;D&iZ&FC;vkxJU zq%aY;bx7_l!80vhd;lGR64o@MEx^JbCwdc)nlcX^gv61vP1Sd2tNnWFdaEg%?aJ}q zI$5+o0W6zfYf*8SYz>k4&5=b&OB1y;6P7iyepE-Bg2}poOyXs9&H}bnc1gh=@s=c# zsU!8Yc8uNm1Z&A|1j#qhUey~+(ab;+#AR5bx`Xl2?$LdehZ1l585SB1^NohI{Vk3n zY7`bk*_TibUeEtCbwoclF-9NZFFB@(Mt>T6toV((Hkk*YrK*sNK?M1>(C1z1ls zb5lgTVLy?B4);Qc*?S1N?p|$f=^P&^Xe*+Zq|8#p#Ot)7hrvYyeB$k}{trSey*LDw zYZ%s^d0mgF7@Zo`Q`g947xVcU3~#uHP#{r+MB^y|M!!7AxTBv62MHO>LSFVM1G*8) z>xc-4AX^v2>vhoQMI>7r}O&hntZ8xdt zh!!GO%Y*ZC`V@3Ny|c-YYW}qMLoB%$(yO3dn|B%Rtl#5gs~w)VFJbz(`5cS;dmQ&^PH-*{; zd&^@fc(^bc_VnUHDKLl3bO?d2(ZzKfFB(5MPCutXeWeXet6(|?on;gRDrxMHtAEy6 zr5k5m`6bfRGhrRzxTqi!$m!N?##&7=hd-1-d_ zPx6W&O#imv287w zV2ZHtixAg}S5jjVFRatrm#6#KJ$4MgWA<3qV2KFQ&@BN)D?!Hn+NeJJ(;5&svNs+b zK);3L5RJ&;0#`!nQH<3JQSr3Se57nQ5OoP`+4hIS!es$>)Mn6_H$A+Fauah7=EkPI zZS(XaWv0PWUx>nUQ^GG0akUU-PDN+svNr~2FV)30T*I0dIXlOzVd&3IvWuHEs$+BY z;y}%y^nvZ0+(JpGfnSo?i|q-^+p1YO*A3LAg*F)oevrdk7yA<)kK%Ayy!vwI*#W~$hTz1uoCvBW>|;~_q+ zg?zhWY@5VK`hO9SS6zhbOa6Xa$i&z$kubx;{8un?j(6iu8ENHuW<^n}8_m)S?U zAlf{X#g*}9!I)EZ4wr_qZWw|U_*>x;8S(H+UIV4QNYAk z$s5~}o!~KRpKV6q2ar|38@*UtEP{f)>pRospf=V)pm4!EJTcx`_H)|}`7~A&oKdK6 z>eOlZSHR_@^-N^vUw`2O#CncVZ)TP0y;QNpEoq@d@rcc`y^mUrAMJ${sfZatq|d_4 zKBUV9WfYq56e_i>k>=!GO^;}qd*Hc_v@L?88}Yno;TlpMks0g*={;Q89uYK1P}EUu z`Elv9vZ$+$m(D8tWmcj1a$n6|w6A5VlAlgiHYY-w_pKH~2>L^1jn=1>VWE#U&^Tl= 
zc0Sn2-m2_R=uMTF+x?mlsMcMBh~&J9DWO=gSgK=DH4?sn){UY$2fIWA>{v)AoFiNX z`@9F+6K4)!2&UywfjPYw#!U3qoLm+bCN7J^>2|9o z{UOVApKs-AfZ~`2D3&y8u?qqD4;%q}r~P1iQ4=HT^ueNFp3@KEBmz`t_1g-vf)F47<(-d2NP-oKDyVfrR6>`B!O8jYl6wehmg6yE{TQ ziwRd*9?hzkt*$#n2|8ojbc0ZKRXP_;YcJ~W(U@0Zk}d=|{!H%l*w}0%j=?7D>VLT_ z&ZHB15pj1JxKU;(AN95hT~}~rh}%EJVPjBOoIfG4lw?rX3Nf18+)(>3&VJIM zJoX}_{P(Y9YN!8%ka07Iat$S_G;g zFU-gLfi#S4OmQn1nb}9RGUvj)Ll`qRmcUo)2t{EN1DS*Jw=`cr zA2_`=nyLB5RPY4d)|HYFwu1RFKqaKa!`oC=>fE^VAtGZ4hqrbmL`LcK@kSehWKGhT zzr3YZI0oU^UL Date: Thu, 25 Jan 2024 18:11:17 +0100 Subject: [PATCH 16/47] fix: Cohere namespace reorg (#271) * Place both generators under cohere namespace * Fix issues found in pre-release checks * Pylint fix * Update test path * Keep licence in __init__.py * Pylint newline --- .../components/generators/cohere/__init__.py | 3 ++- .../components/generators/cohere/chat/__init__.py | 3 --- .../generators/cohere/chat/chat_generator.py | 7 ++----- .../components/generators/cohere/generator.py | 14 ++++++++++++-- .../cohere/tests/test_cohere_chat_generator.py | 2 +- 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/integrations/cohere/src/haystack_integrations/components/generators/cohere/__init__.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/__init__.py index c36f982df..93c0947e4 100644 --- a/integrations/cohere/src/haystack_integrations/components/generators/cohere/__init__.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/__init__.py @@ -1,6 +1,7 @@ # SPDX-FileCopyrightText: 2023-present deepset GmbH # # SPDX-License-Identifier: Apache-2.0 +from .chat.chat_generator import CohereChatGenerator from .generator import CohereGenerator -__all__ = ["CohereGenerator"] +__all__ = ["CohereGenerator", "CohereChatGenerator"] diff --git a/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/__init__.py 
b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/__init__.py index dc14c9c1c..e873bc332 100644 --- a/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/__init__.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/__init__.py @@ -1,6 +1,3 @@ # SPDX-FileCopyrightText: 2023-present deepset GmbH # # SPDX-License-Identifier: Apache-2.0 -from .chat_generator import CohereChatGenerator - -__all__ = ["CohereChatGenerator"] diff --git a/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py index 0ff29ce14..c632bed83 100644 --- a/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py @@ -12,6 +12,7 @@ logger = logging.getLogger(__name__) +@component class CohereChatGenerator: """Enables text generation using Cohere's chat endpoint. This component is designed to inference Cohere's chat models. @@ -123,10 +124,7 @@ def from_dict(cls, data: Dict[str, Any]) -> "CohereChatGenerator": return default_from_dict(cls, data) def _message_to_dict(self, message: ChatMessage) -> Dict[str, str]: - if message.role == ChatRole.USER: - role = "User" - elif message.role == ChatRole.ASSISTANT: - role = "Chatbot" + role = "User" if message.role == ChatRole.USER else "Chatbot" chat_message = {"user_name": role, "text": message.content} return chat_message @@ -179,7 +177,6 @@ def _build_chunk(self, chunk) -> StreamingChunk: :param choice: The choice returned by the OpenAI API. :return: The StreamingChunk. 
""" - # if chunk.event_type == "text-generation": chat_message = StreamingChunk(content=chunk.text, meta={"index": chunk.index, "event_type": chunk.event_type}) return chat_message diff --git a/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py index 7bca3ed9f..fee410eab 100644 --- a/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py @@ -7,6 +7,7 @@ from typing import Any, Callable, Dict, List, Optional, cast from haystack import DeserializationError, component, default_from_dict, default_to_dict +from haystack.dataclasses import StreamingChunk from cohere import COHERE_API_URL, Client from cohere.responses import Generations @@ -148,8 +149,8 @@ def run(self, prompt: str): if self.streaming_callback: metadata_dict: Dict[str, Any] = {} for chunk in response: - self.streaming_callback(chunk) - metadata_dict["index"] = chunk.index + stream_chunk = self._build_chunk(chunk) + self.streaming_callback(stream_chunk) replies = response.texts metadata_dict["finish_reason"] = response.finish_reason metadata = [metadata_dict] @@ -161,6 +162,15 @@ def run(self, prompt: str): self._check_truncated_answers(metadata) return {"replies": replies, "meta": metadata} + def _build_chunk(self, chunk) -> StreamingChunk: + """ + Converts the response from the Cohere API to a StreamingChunk. + :param chunk: The chunk returned by the OpenAI API. + :return: The StreamingChunk. + """ + streaming_chunk = StreamingChunk(content=chunk.text, meta={"index": chunk.index}) + return streaming_chunk + def _check_truncated_answers(self, metadata: List[Dict[str, Any]]): """ Check the `finish_reason` returned with the Cohere response. 
diff --git a/integrations/cohere/tests/test_cohere_chat_generator.py b/integrations/cohere/tests/test_cohere_chat_generator.py index cc360f5c9..c91ada419 100644 --- a/integrations/cohere/tests/test_cohere_chat_generator.py +++ b/integrations/cohere/tests/test_cohere_chat_generator.py @@ -5,7 +5,7 @@ import pytest from haystack.components.generators.utils import default_streaming_callback from haystack.dataclasses import ChatMessage, ChatRole, StreamingChunk -from haystack_integrations.components.generators.cohere.chat import CohereChatGenerator +from haystack_integrations.components.generators.cohere import CohereChatGenerator pytestmark = pytest.mark.chat_generators From 4ebedd4db97d4b50a15b74b95a4e8213fde92546 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Thu, 25 Jan 2024 19:03:48 +0100 Subject: [PATCH 17/47] feat: Generate API docs (#262) * ignore docs generation by-products * add pydoc config file * hatch run docs * generate docs as smoke test * split the steps for easier debugging * split pydoc config * update pydoc pattern * single-page version * newline * upd parent category --------- Co-authored-by: Daria Fokina --- .github/workflows/chroma.yml | 6 +++++- .gitignore | 3 +++ integrations/chroma/pydoc/config.yml | 31 ++++++++++++++++++++++++++++ integrations/chroma/pyproject.toml | 4 ++++ 4 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 integrations/chroma/pydoc/config.yml diff --git a/.github/workflows/chroma.yml b/.github/workflows/chroma.yml index 89b6a5b24..b7f158cfe 100644 --- a/.github/workflows/chroma.yml +++ b/.github/workflows/chroma.yml @@ -52,5 +52,9 @@ jobs: if: matrix.python-version == '3.9' && runner.os == 'Linux' run: hatch run lint:all + - name: Generate docs + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run docs + - name: Run tests - run: hatch run cov \ No newline at end of file + run: hatch run cov diff --git a/.gitignore b/.gitignore index 1815e02f8..8634bc259 100644 --- a/.gitignore 
+++ b/.gitignore @@ -131,3 +131,6 @@ dmypy.json # IDEs .vscode + +# Docs generation artifacts +_readme_*.md diff --git a/integrations/chroma/pydoc/config.yml b/integrations/chroma/pydoc/config.yml new file mode 100644 index 000000000..fd362d7e0 --- /dev/null +++ b/integrations/chroma/pydoc/config.yml @@ -0,0 +1,31 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: [ + "haystack_integrations.components.retrievers.chroma.retriever", + "haystack_integrations.document_stores.chroma.document_store", + "haystack_integrations.document_stores.chroma.errors", + "haystack_integrations.document_stores.chroma.utils", + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer + excerpt: Chroma integration for Haystack + category_slug: haystack-integrations + title: Chroma + slug: integrations-chroma + order: 1 + markdown: + descriptive_class_title: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_chroma.md diff --git a/integrations/chroma/pyproject.toml b/integrations/chroma/pyproject.toml index 2e531005b..ce4641611 100644 --- a/integrations/chroma/pyproject.toml +++ b/integrations/chroma/pyproject.toml @@ -47,6 +47,7 @@ git_describe_command = 'git describe --tags --match="integrations/chroma-v[0-9]* dependencies = [ "coverage[toml]>=6.5", "pytest", + "haystack-pydoc-tools", ] [tool.hatch.envs.default.scripts] test = "pytest {args:tests}" @@ -59,6 +60,9 @@ cov = [ "test-cov", "cov-report", ] +docs = [ + "pydoc-markdown pydoc/config.yml" +] [[tool.hatch.envs.all.matrix]] python = ["3.9", "3.10"] From d7a66db982e9082d1f43ce5d5ebeca4c7c1a3e8a Mon Sep 17 00:00:00 2001 From: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> Date: 
Fri, 26 Jan 2024 10:07:32 +0100 Subject: [PATCH 18/47] Add filter, write and delete documents in Weaviate (#270) * Add filter, write and delete documents in Weaviate * Fix linting * Fix typo --- .../weaviate/document_store.py | 150 ++++++++++++++++-- .../weaviate/tests/test_document_store.py | 31 +++- 2 files changed, 170 insertions(+), 11 deletions(-) diff --git a/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py b/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py index 7fe24ab20..3d658c316 100644 --- a/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py +++ b/integrations/weaviate/src/haystack_integrations/document_stores/weaviate/document_store.py @@ -7,12 +7,14 @@ from haystack.core.serialization import default_from_dict, default_to_dict from haystack.dataclasses.document import Document +from haystack.document_stores.errors import DocumentStoreError, DuplicateDocumentError from haystack.document_stores.types.policy import DuplicatePolicy import weaviate from weaviate.auth import AuthCredentials from weaviate.config import Config, ConnectionConfig from weaviate.embedded import EmbeddedOptions +from weaviate.util import generate_uuid5 Number = Union[int, float] TimeoutType = Union[Tuple[Number, Number], Number] @@ -239,15 +241,145 @@ def _to_document(self, data: Dict[str, Any]) -> Document: return Document.from_dict(data) + def _query(self, properties: List[str], batch_size: int, cursor=None): + collection_name = self._collection_settings["class"] + query = ( + self._client.query.get( + collection_name, + properties, + ) + .with_additional(["id vector"]) + .with_limit(batch_size) + ) + + if cursor: + # Fetch the next set of results + result = query.with_after(cursor).do() + else: + # Fetch the first set of results + result = query.do() + + if "errors" in result: + errors = [e["message"] for e in result.get("errors", {})] + msg = 
"\n".join(errors) + msg = f"Failed to query documents in Weaviate. Errors:\n{msg}" + raise DocumentStoreError(msg) + + return result["data"]["Get"][collection_name] + def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Document]: # noqa: ARG002 - return [] + properties = self._client.schema.get(self._collection_settings["class"]).get("properties", []) + properties = [prop["name"] for prop in properties] - def write_documents( - self, - documents: List[Document], # noqa: ARG002 - policy: DuplicatePolicy = DuplicatePolicy.NONE, # noqa: ARG002 - ) -> int: - return 0 + result = [] + + cursor = None + while batch := self._query(properties, 100, cursor): + # Take the cursor before we convert the batch to Documents as we manipulate + # the batch dictionary and might lose that information. + cursor = batch[-1]["_additional"]["id"] + + for doc in batch: + result.append(self._to_document(doc)) + # Move the cursor to the last returned uuid + return result + + def _batch_write(self, documents: List[Document]) -> int: + """ + Writes document to Weaviate in batches. + Documents with the same id will be overwritten. + Raises in case of errors. + """ + statuses = [] + for doc in documents: + if not isinstance(doc, Document): + msg = f"Expected a Document, got '{type(doc)}' instead." 
+ raise ValueError(msg) + if self._client.batch.num_objects() == self._client.batch.recommended_num_objects: + # Batch is full, let's create the objects + statuses.extend(self._client.batch.create_objects()) + self._client.batch.add_data_object( + uuid=generate_uuid5(doc.id), + data_object=self._to_data_object(doc), + class_name=self._collection_settings["class"], + vector=doc.embedding, + ) + # Write remaining documents + statuses.extend(self._client.batch.create_objects()) + + errors = [] + # Gather errors and number of written documents + for status in statuses: + result_status = status.get("result", {}).get("status") + if result_status == "FAILED": + errors.extend([e["message"] for e in status["result"]["errors"]["error"]]) + + if errors: + msg = "\n".join(errors) + msg = f"Failed to write documents in Weaviate. Errors:\n{msg}" + raise DocumentStoreError(msg) + + # If the document already exists we get no status message back from Weaviate. + # So we assume that all Documents were written. + return len(documents) + + def _write(self, documents: List[Document], policy: DuplicatePolicy) -> int: + """ + Writes documents to Weaviate using the specified policy. + This doesn't uses the batch API, so it's slower than _batch_write. + If policy is set to SKIP it will skip any document that already exists. + If policy is set to FAIL it will raise an exception if any of the documents already exists. + """ + written = 0 + duplicate_errors_ids = [] + for doc in documents: + if not isinstance(doc, Document): + msg = f"Expected a Document, got '{type(doc)}' instead." 
+ raise ValueError(msg) - def delete_documents(self, document_ids: List[str]) -> None: # noqa: ARG002 - return + if policy == DuplicatePolicy.SKIP and self._client.data_object.exists( + uuid=generate_uuid5(doc.id), + class_name=self._collection_settings["class"], + ): + # This Document already exists, we skip it + continue + + try: + self._client.data_object.create( + uuid=generate_uuid5(doc.id), + data_object=self._to_data_object(doc), + class_name=self._collection_settings["class"], + vector=doc.embedding, + ) + written += 1 + except weaviate.exceptions.ObjectAlreadyExistsException: + if policy == DuplicatePolicy.FAIL: + duplicate_errors_ids.append(doc.id) + if duplicate_errors_ids: + msg = f"IDs '{', '.join(duplicate_errors_ids)}' already exist in the document store." + raise DuplicateDocumentError(msg) + return written + + def write_documents(self, documents: List[Document], policy: DuplicatePolicy = DuplicatePolicy.NONE) -> int: + """ + Writes documents to Weaviate using the specified policy. + We recommend using a OVERWRITE policy as it's faster than other policies for Weaviate since it uses + the batch API. + We can't use the batch API for other policies as it doesn't return any information whether the document + already exists or not. That prevents us from returning errors when using the FAIL policy or skipping a + Document when using the SKIP policy. 
+ """ + if policy in [DuplicatePolicy.NONE, DuplicatePolicy.OVERWRITE]: + return self._batch_write(documents) + + return self._write(documents, policy) + + def delete_documents(self, document_ids: List[str]) -> None: + self._client.batch.delete_objects( + class_name=self._collection_settings["class"], + where={ + "path": ["id"], + "operator": "ContainsAny", + "valueTextArray": [generate_uuid5(doc_id) for doc_id in document_ids], + }, + ) diff --git a/integrations/weaviate/tests/test_document_store.py b/integrations/weaviate/tests/test_document_store.py index e988eb297..0682282f3 100644 --- a/integrations/weaviate/tests/test_document_store.py +++ b/integrations/weaviate/tests/test_document_store.py @@ -4,7 +4,7 @@ import pytest from haystack.dataclasses.byte_stream import ByteStream from haystack.dataclasses.document import Document -from haystack.testing.document_store import CountDocumentsTest +from haystack.testing.document_store import CountDocumentsTest, DeleteDocumentsTest, WriteDocumentsTest from haystack_integrations.document_stores.weaviate.document_store import ( DOCUMENT_COLLECTION_PROPERTIES, WeaviateDocumentStore, @@ -20,7 +20,7 @@ ) -class TestWeaviateDocumentStore(CountDocumentsTest): +class TestWeaviateDocumentStore(CountDocumentsTest, WriteDocumentsTest, DeleteDocumentsTest): @pytest.fixture def document_store(self, request) -> WeaviateDocumentStore: # Use a different index for each test so we can run them in parallel @@ -256,3 +256,30 @@ def test_to_document(self, document_store, test_files_path): assert doc.embedding == [1, 2, 3] assert doc.score is None assert doc.meta == {"key": "value"} + + def test_write_documents(self, document_store): + """ + Test write_documents() with default policy overwrites existing documents. 
+ """ + doc = Document(content="test doc") + assert document_store.write_documents([doc]) == 1 + assert document_store.count_documents() == 1 + + doc.content = "test doc 2" + assert document_store.write_documents([doc]) == 1 + assert document_store.count_documents() == 1 + + def test_write_documents_with_blob_data(self, document_store, test_files_path): + image = ByteStream.from_file_path(test_files_path / "robot1.jpg", mime_type="image/jpeg") + doc = Document(content="test doc", blob=image) + assert document_store.write_documents([doc]) == 1 + + def test_filter_documents_with_blob_data(self, document_store, test_files_path): + image = ByteStream.from_file_path(test_files_path / "robot1.jpg", mime_type="image/jpeg") + doc = Document(content="test doc", blob=image) + assert document_store.write_documents([doc]) == 1 + + docs = document_store.filter_documents() + + assert len(docs) == 1 + assert docs[0].blob == image From 4ddcd5e7453755679b0b7530650e89a6872ee25e Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Fri, 26 Jan 2024 15:15:32 +0100 Subject: [PATCH 19/47] feat: Implement `UpTrainEvaluator` (#272) * feat: Implement `UpTrainEvaluator` and co. 
* Address review comments Update project structure to use the `haystack_integrations` namespace * Update README * Fix typo --- .github/labeler.yml | 5 + .github/workflows/uptrain.yml | 56 +++ README.md | 39 +- integrations/uptrain/LICENSE.txt | 73 ++++ integrations/uptrain/README.md | 36 ++ integrations/uptrain/example/example.py | 32 ++ integrations/uptrain/pyproject.toml | 157 ++++++++ .../components/evaluators/__init__.py | 7 + .../components/evaluators/evaluator.py | 199 +++++++++ .../components/evaluators/metrics.py | 366 +++++++++++++++++ integrations/uptrain/tests/__init__.py | 0 integrations/uptrain/tests/test_evaluator.py | 380 ++++++++++++++++++ integrations/uptrain/tests/test_metrics.py | 11 + 13 files changed, 1342 insertions(+), 19 deletions(-) create mode 100644 .github/workflows/uptrain.yml create mode 100644 integrations/uptrain/LICENSE.txt create mode 100644 integrations/uptrain/README.md create mode 100644 integrations/uptrain/example/example.py create mode 100644 integrations/uptrain/pyproject.toml create mode 100644 integrations/uptrain/src/haystack_integrations/components/evaluators/__init__.py create mode 100644 integrations/uptrain/src/haystack_integrations/components/evaluators/evaluator.py create mode 100644 integrations/uptrain/src/haystack_integrations/components/evaluators/metrics.py create mode 100644 integrations/uptrain/tests/__init__.py create mode 100644 integrations/uptrain/tests/test_evaluator.py create mode 100644 integrations/uptrain/tests/test_metrics.py diff --git a/.github/labeler.yml b/.github/labeler.yml index 93eba1d82..ba74c43a2 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -79,6 +79,11 @@ integration:unstructured-fileconverter: - any-glob-to-any-file: "integrations/unstructured/fileconverter/**/*" - any-glob-to-any-file: ".github/workflows/unstructured_fileconverter.yml" +integration:uptrain: + - changed-files: + - any-glob-to-any-file: "integrations/uptrain/**/*" + - any-glob-to-any-file: 
".github/workflows/uptrain.yml" + integration:weaviate: - changed-files: - any-glob-to-any-file: "integrations/weaviate/**/*" diff --git a/.github/workflows/uptrain.yml b/.github/workflows/uptrain.yml new file mode 100644 index 000000000..bacfa27fb --- /dev/null +++ b/.github/workflows/uptrain.yml @@ -0,0 +1,56 @@ +# This workflow comes from https://github.com/ofek/hatch-mypyc +# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml +name: Test / uptrain + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + paths: + - "integrations/uptrain/**" + - ".github/workflows/uptrain.yml" + +defaults: + run: + working-directory: integrations/uptrain + +concurrency: + group: uptrain-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHONUNBUFFERED: "1" + FORCE_COLOR: "1" + +jobs: + run: + name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.9", "3.10"] + + steps: + - name: Support longpaths + if: matrix.os == 'windows-latest' + working-directory: . 
+ run: git config --system core.longpaths true + + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Hatch + run: pip install --upgrade hatch + + - name: Lint + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run lint:all + + - name: Run tests + run: hatch run cov diff --git a/README.md b/README.md index ae884862a..20b17b377 100644 --- a/README.md +++ b/README.md @@ -60,22 +60,23 @@ deepset-haystack ## Inventory -| Package | Type | PyPi Package | Status | -| ------------------------------------------------------------------------------- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [astra-haystack](integrations/astra/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/astra-haystack.svg)](https://pypi.org/project/astra-haystack) | [![Test / astra](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml) | -| [amazon-bedrock-haystack](integrations/amazon-bedrock/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-bedrock-haystack.svg)](https://pypi.org/project/amazon-bedrock-haystack) | [![Test / amazon_bedrock](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml) | -| 
[chroma-haystack](integrations/chroma/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/chroma-haystack.svg)](https://pypi.org/project/chroma-haystack) | [![Test / chroma](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml) | -| [cohere-haystack](integrations/cohere/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/cohere-haystack.svg)](https://pypi.org/project/cohere-haystack) | [![Test / cohere](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml) | -| [elasticsearch-haystack](integrations/elasticsearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/elasticsearch-haystack.svg)](https://pypi.org/project/elasticsearch-haystack) | [![Test / elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml) | -| [google-ai-haystack](integrations/google_ai/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) | [![Test / google-ai](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml) | -| [google-vertex-haystack](integrations/google_vertex/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-vertex-haystack.svg)](https://pypi.org/project/google-vertex-haystack) | [![Test / 
google-vertex](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml) | -| [gradient-haystack](integrations/gradient/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/gradient-haystack.svg)](https://pypi.org/project/gradient-haystack) | [![Test / gradient](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml) | -| [instructor-embedders-haystack](integrations/instructor_embedders/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/instructor-embedders-haystack.svg)](https://pypi.org/project/instructor-embedders-haystack) | [![Test / instructor-embedders](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml) | -| [jina-haystack](integrations/jina/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/jina-haystack.svg)](https://pypi.org/project/jina-haystack) | [![Test / jina](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml) | -| [llama-cpp-haystack](integrations/llama_cpp/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/llama-cpp-haystack) | [![Test / llama-cpp](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml) | -| [ollama-haystack](integrations/ollama/) | Generator | [![PyPI - 
Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/ollama-haystack) | [![Test / ollama](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml) | -| [opensearch-haystack](integrations/opensearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/opensearch-haystack.svg)](https://pypi.org/project/opensearch-haystack) | [![Test / opensearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml) | -| [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) -| [pgvector-haystack](integrations/pgvector/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pgvector-haystack.svg?color=orange)](https://pypi.org/project/pgvector-haystack) | [![Test / pgvector](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml) | -| [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | -| 
[unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | +| Package | Type | PyPi Package | Status | +| ------------------------------------------------------------------- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [astra-haystack](integrations/astra/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/astra-haystack.svg)](https://pypi.org/project/astra-haystack) | [![Test / astra](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/astra.yml) | +| [amazon-bedrock-haystack](integrations/amazon-bedrock/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-bedrock-haystack.svg)](https://pypi.org/project/amazon-bedrock-haystack) | [![Test / amazon_bedrock](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_bedrock.yml) | +| [chroma-haystack](integrations/chroma/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/chroma-haystack.svg)](https://pypi.org/project/chroma-haystack) | [![Test / 
chroma](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml) | +| [cohere-haystack](integrations/cohere/) | Embedder, Generator | [![PyPI - Version](https://img.shields.io/pypi/v/cohere-haystack.svg)](https://pypi.org/project/cohere-haystack) | [![Test / cohere](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml) | +| [elasticsearch-haystack](integrations/elasticsearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/elasticsearch-haystack.svg)](https://pypi.org/project/elasticsearch-haystack) | [![Test / elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml) | +| [google-ai-haystack](integrations/google_ai/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) | [![Test / google-ai](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_ai.yml) | +| [google-vertex-haystack](integrations/google_vertex/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/google-vertex-haystack.svg)](https://pypi.org/project/google-vertex-haystack) | [![Test / google-vertex](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/google_vertex.yml) | +| [gradient-haystack](integrations/gradient/) | Embedder, Generator | [![PyPI - 
Version](https://img.shields.io/pypi/v/gradient-haystack.svg)](https://pypi.org/project/gradient-haystack) | [![Test / gradient](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/gradient.yml) | +| [instructor-embedders-haystack](integrations/instructor_embedders/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/instructor-embedders-haystack.svg)](https://pypi.org/project/instructor-embedders-haystack) | [![Test / instructor-embedders](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/instructor_embedders.yml) | +| [jina-haystack](integrations/jina/) | Embedder | [![PyPI - Version](https://img.shields.io/pypi/v/jina-haystack.svg)](https://pypi.org/project/jina-haystack) | [![Test / jina](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/jina.yml) | +| [llama-cpp-haystack](integrations/llama_cpp/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/llama-cpp-haystack) | [![Test / llama-cpp](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/llama_cpp.yml) | +| [ollama-haystack](integrations/ollama/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/ollama-haystack.svg?color=orange)](https://pypi.org/project/ollama-haystack) | [![Test / ollama](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/ollama.yml) | +| 
[opensearch-haystack](integrations/opensearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/opensearch-haystack.svg)](https://pypi.org/project/opensearch-haystack) | [![Test / opensearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/opensearch.yml) | +| [pinecone-haystack](integrations/pinecone/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pinecone-haystack.svg?color=orange)](https://pypi.org/project/pinecone-haystack) | [![Test / pinecone](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pinecone.yml) | +| [pgvector-haystack](integrations/pgvector/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/pgvector-haystack.svg?color=orange)](https://pypi.org/project/pgvector-haystack) | [![Test / pgvector](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/pgvector.yml) | +| [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | +| [unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / 
unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | +| [uptrain-haystack](integrations/uptrain/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) | [![Test / uptrain](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml) | diff --git a/integrations/uptrain/LICENSE.txt b/integrations/uptrain/LICENSE.txt new file mode 100644 index 000000000..137069b82 --- /dev/null +++ b/integrations/uptrain/LICENSE.txt @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 
+ +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/integrations/uptrain/README.md b/integrations/uptrain/README.md new file mode 100644 index 000000000..6d7605306 --- /dev/null +++ b/integrations/uptrain/README.md @@ -0,0 +1,36 @@ +# uptrain-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) + +--- + +**Table of Contents** + +- [uptrain-haystack](#uptrain-haystack) + - [Installation](#installation) + - [Testing](#testing) + - [Examples](#examples) + - [License](#license) + +## Installation + +```console +pip install uptrain-haystack +``` + +For more information about the UpTrain evaluation framework, please refer to their [documentation](https://docs.uptrain.ai/getting-started/introduction). + +## Testing + +```console +hatch run test +``` + +## Examples + +You can find a code example showing how to use the Evaluator under the `example/` folder of this repo. + +## License + +`uptrain-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. 
diff --git a/integrations/uptrain/example/example.py b/integrations/uptrain/example/example.py new file mode 100644 index 000000000..b029b9a65 --- /dev/null +++ b/integrations/uptrain/example/example.py @@ -0,0 +1,32 @@ +# A valid OpenAI API key is required to run this example. + +from haystack import Pipeline +from haystack_integrations.components.evaluators import UpTrainEvaluator, UpTrainMetric + +QUESTIONS = [ + "Which is the most popular global sport?", + "Who created the Python language?", +] +CONTEXTS = [ + "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people.", + "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.", +] +RESPONSES = [ + "Football is the most popular sport with around 4 billion followers worldwide", + "Python language was created by Guido van Rossum.", +] + +pipeline = Pipeline() +evaluator = UpTrainEvaluator( + metric=UpTrainMetric.FACTUAL_ACCURACY, + api="openai", + api_key_env_var="OPENAI_API_KEY", +) +pipeline.add_component("evaluator", evaluator) + +# Each metric expects a specific set of parameters as input. Refer to the +# UpTrainMetric class' documentation for more details. 
+output = pipeline.run({"evaluator": {"questions": QUESTIONS, "contexts": CONTEXTS, "responses": RESPONSES}}) + +for output in output["evaluator"]["results"]: + print(output) diff --git a/integrations/uptrain/pyproject.toml b/integrations/uptrain/pyproject.toml new file mode 100644 index 000000000..631b7dab8 --- /dev/null +++ b/integrations/uptrain/pyproject.toml @@ -0,0 +1,157 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "uptrain-haystack" +dynamic = ["version"] +description = 'An integration of UpTrain LLM evaluation framework with Haystack' +readme = "README.md" +requires-python = ">=3.7" +license = "Apache-2.0" +keywords = [] +authors = [{ name = "deepset GmbH", email = "info@deepset.ai" }] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = ["haystack-ai", "uptrain>=0.5"] + +[project.urls] +Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/uptrain" +Documentation = "https://github.com/deepset-ai/haystack-core-integrations/blob/main/integrations/uptrain/README.md" +Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" + +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + +[tool.hatch.version] +source = "vcs" +tag-pattern = 'integrations\/uptrain(?P<version>.*)' + +[tool.hatch.version.raw-options] +root = "../.." 
+git_describe_command = 'git describe --tags --match="integrations/uptrain[0-9]*"' + +[tool.hatch.envs.default] +dependencies = ["coverage[toml]>=6.5", "pytest"] +[tool.hatch.envs.default.scripts] +test = "pytest {args:tests}" +test-cov = "coverage run -m pytest {args:tests}" +cov-report = ["- coverage combine", "coverage report"] +cov = ["test-cov", "cov-report"] + +[[tool.hatch.envs.all.matrix]] +python = ["3.7", "3.8", "3.9", "3.10", "3.11"] + +[tool.hatch.envs.lint] +detached = true +dependencies = ["black>=23.1.0", "mypy>=1.0.0", "ruff>=0.0.243"] +[tool.hatch.envs.lint.scripts] +typing = "mypy --install-types --non-interactive {args:src/}" +style = ["ruff {args:.}", "black --check --diff {args:.}"] +fmt = ["black {args:.}", "ruff --fix {args:.}", "style"] +all = ["style", "typing"] + +[tool.black] +target-version = ["py37"] +line-length = 120 +skip-string-normalization = true + +[tool.ruff] +target-version = "py37" +line-length = 120 +select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "FBT", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + "RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +ignore = [ + # Allow non-abstract empty methods in abstract base classes + "B027", + # Allow boolean positional values in function calls, like `dict.get(... 
True)` + "FBT003", + # Ignore checks for possible passwords + "S105", + "S106", + "S107", + # Ignore complexity + "C901", + "PLR0911", + "PLR0912", + "PLR0913", + "PLR0915", + # Misc + "S101", + "TID252", +] +unfixable = [ + # Don't touch unused imports + "F401", +] +extend-exclude = ["tests", "example"] + +[tool.ruff.isort] +known-first-party = ["src"] + +[tool.ruff.flake8-tidy-imports] +ban-relative-imports = "all" + +[tool.ruff.per-file-ignores] +# Tests can use magic values, assertions, and relative imports +"tests/**/*" = ["PLR2004", "S101", "TID252"] + +[tool.coverage.run] +source_pkgs = ["src", "tests"] +branch = true +parallel = true + +[tool.coverage.paths] +uptrain_haystack = [ + "src/haystack_integrations", + "*/uptrain-haystack/src/uptrain_haystack", +] +tests = ["tests"] + +[tool.coverage.report] +exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"] + +[[tool.mypy.overrides]] +module = [ + "haystack.*", + "pytest.*", + "uptrain.*", + "numpy", + "grpc", + "haystack_integrations.*", +] +ignore_missing_imports = true diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/__init__.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/__init__.py new file mode 100644 index 000000000..e8366dfc0 --- /dev/null +++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/__init__.py @@ -0,0 +1,7 @@ +from .evaluator import UpTrainEvaluator +from .metrics import UpTrainMetric + +__all__ = ( + "UpTrainEvaluator", + "UpTrainMetric", +) diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/evaluator.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/evaluator.py new file mode 100644 index 000000000..f99ec8105 --- /dev/null +++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/evaluator.py @@ -0,0 +1,199 @@ +import json +import os +from typing import Any, Dict, List, Optional, Union + +from haystack import 
DeserializationError, component, default_from_dict, default_to_dict +from haystack_integrations.components.evaluators.metrics import ( + METRIC_DESCRIPTORS, + InputConverters, + OutputConverters, + UpTrainMetric, +) +from uptrain import APIClient, EvalLLM, Evals +from uptrain.framework.evals import ParametricEval + + +@component +class UpTrainEvaluator: + """ + A component that uses the UpTrain framework to evaluate inputs against a specific metric. + + The supported metrics are defined by :class:`UpTrainMetric`. The inputs of the component + are metric-dependent. The output is a list of :class:`UpTrainEvaluatorOutput` objects, each + containing a single input and the result of the evaluation performed on it. + """ + + _backend_metric: Union[Evals, ParametricEval] + _backend_client: Union[APIClient, EvalLLM] + + def __init__( + self, + metric: Union[str, UpTrainMetric], + metric_params: Optional[Dict[str, Any]] = None, + *, + api: str = "openai", + api_key_env_var: Optional[str] = "OPENAI_API_KEY", + api_params: Optional[Dict[str, Any]] = None, + ): + """ + Construct a new UpTrain evaluator. + + :param metric: + The metric to use for evaluation. + :param metric_params: + Parameters to pass to the metric's constructor. + :param api: + The API to use for evaluation. + + Supported APIs: "openai", "uptrain". + :param api_key_env_var: + The name of the environment variable containing the API key. + :param api_params: + Additional parameters to pass to the API client. 
+ """ + self.metric = metric if isinstance(metric, UpTrainMetric) else UpTrainMetric.from_str(metric) + self.metric_params = metric_params + self.descriptor = METRIC_DESCRIPTORS[self.metric] + self.api = api + self.api_key_env_var = api_key_env_var + self.api_params = api_params + + self._init_backend() + expected_inputs = self.descriptor.input_parameters + component.set_input_types(self, **expected_inputs) + + @component.output_types(results=List[List[Dict[str, Any]]]) + def run(self, **inputs) -> Dict[str, Any]: + """ + Run the UpTrain evaluator. + + Example: + ```python + pipeline = Pipeline() + evaluator = UpTrainEvaluator( + metric=UpTrainMetric.FACTUAL_ACCURACY, + api="openai", + api_key_env_var="OPENAI_API_KEY", + ) + pipeline.add_component("evaluator", evaluator) + + # Each metric expects a specific set of parameters as input. Refer to the + # UpTrainMetric class' documentation for more details. + output = pipeline.run({"evaluator": { + "questions": ["question"], + "contexts": ["context"], + "responses": ["response"] + }}) + ``` + + :param inputs: + The inputs to evaluate. These are determined by the + metric being calculated. See :class:`UpTrainMetric` for more + information. + :returns: + A nested list of metric results. Each input can have one or more + results, depending on the metric. Each result is a dictionary + containing the following keys and values: + * `name` - The name of the metric. + * `score` - The score of the metric. + * `explanation` - An optional explanation of the score. + """ + # The backend requires random access to the data, so we can't stream it. 
+ InputConverters.validate_input_parameters(self.metric, self.descriptor.input_parameters, inputs) + converted_inputs: List[Dict[str, str]] = list(self.descriptor.input_converter(**inputs)) # type: ignore + + eval_args = {"data": converted_inputs, "checks": [self._backend_metric]} + if self.api_params is not None: + eval_args.update({k: v for k, v in self.api_params.items() if k not in eval_args}) + + results: List[Dict[str, Any]] + if isinstance(self._backend_client, EvalLLM): + results = self._backend_client.evaluate(**eval_args) + else: + results = self._backend_client.log_and_evaluate(**eval_args) + + OutputConverters.validate_outputs(results) + converted_results = [ + [result.to_dict() for result in self.descriptor.output_converter(x, self.metric_params)] for x in results + ] + + return {"results": converted_results} + + def to_dict(self) -> Dict[str, Any]: + """ + Serialize this component to a dictionary. + """ + + def check_serializable(obj: Any): + try: + json.dumps(obj) + return True + except (TypeError, OverflowError): + return False + + if not check_serializable(self.api_params) or not check_serializable(self.metric_params): + msg = "UpTrain evaluator cannot serialize the API/metric parameters" + raise DeserializationError(msg) + + return default_to_dict( + self, + metric=self.metric, + metric_params=self.metric_params, + api=self.api, + api_key_env_var=self.api_key_env_var, + api_params=self.api_params, + ) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "UpTrainEvaluator": + """ + Deserialize a component from a dictionary. + + :param data: + The dictionary to deserialize from. + """ + return default_from_dict(cls, data) + + def _init_backend(self): + """ + Initialize the UpTrain backend. 
+ """ + if isinstance(self.descriptor.backend, Evals): + if self.metric_params is not None: + msg = ( + f"Uptrain metric '{self.metric}' received the following unexpected init parameters:" + f"{self.metric_params}" + ) + raise ValueError(msg) + backend_metric = self.descriptor.backend + else: + assert issubclass(self.descriptor.backend, ParametricEval) + if self.metric_params is None: + msg = f"Uptrain metric '{self.metric}' expected init parameters but got none" + raise ValueError(msg) + elif not all(k in self.descriptor.init_parameters for k in self.metric_params.keys()): + msg = ( + f"Invalid init parameters for UpTrain metric '{self.metric}'. " + f"Expected: {list(self.descriptor.init_parameters.keys())}" + ) + + raise ValueError(msg) + backend_metric = self.descriptor.backend(**self.metric_params) + + supported_apis = ("openai", "uptrain") + if self.api not in supported_apis: + msg = f"Unsupported API '{self.api}' for UpTrain evaluator. Supported APIs: {supported_apis}" + raise ValueError(msg) + + api_key = os.environ.get(self.api_key_env_var) + if api_key is None: + msg = f"Missing API key environment variable '{self.api_key_env_var}' for UpTrain evaluator" + raise ValueError(msg) + + if self.api == "openai": + backend_client = EvalLLM(openai_api_key=api_key) + elif self.api == "uptrain": + backend_client = APIClient(uptrain_api_key=api_key) + + self._backend_metric = backend_metric + self._backend_client = backend_client diff --git a/integrations/uptrain/src/haystack_integrations/components/evaluators/metrics.py b/integrations/uptrain/src/haystack_integrations/components/evaluators/metrics.py new file mode 100644 index 000000000..e42b63e21 --- /dev/null +++ b/integrations/uptrain/src/haystack_integrations/components/evaluators/metrics.py @@ -0,0 +1,366 @@ +import dataclasses +import inspect +from dataclasses import dataclass +from enum import Enum +from functools import partial +from typing import Any, Callable, Dict, Iterable, List, Optional, Type, Union + 
+from uptrain import CritiqueTone, Evals, GuidelineAdherence, ResponseMatching +from uptrain.framework.evals import ParametricEval + + +class UpTrainMetric(Enum): + """ + Metrics supported by UpTrain. + """ + + #: Context relevance. + #: Inputs - `questions: List[str], contexts: List[str]` + CONTEXT_RELEVANCE = "context_relevance" + + #: Factual accuracy. + #: Inputs - `questions: List[str], contexts: List[str], responses: List[str]` + FACTUAL_ACCURACY = "factual_accuracy" + + #: Response relevance. + #: Inputs - `questions: List[str], responses: List[str]` + RESPONSE_RELEVANCE = "response_relevance" + + #: Response completeness. + #: Inputs - `questions: List[str], responses: List[str]` + RESPONSE_COMPLETENESS = "response_completeness" + + #: Response completeness with respect to context. + #: Inputs - `questions: List[str], contexts: List[str], responses: List[str]` + RESPONSE_COMPLETENESS_WRT_CONTEXT = "response_completeness_wrt_context" + + #: Response consistency. + #: Inputs - `questions: List[str], contexts: List[str], responses: List[str]` + RESPONSE_CONSISTENCY = "response_consistency" + + #: Response conciseness. + #: Inputs - `questions: List[str], responses: List[str]` + RESPONSE_CONCISENESS = "response_conciseness" + + #: Language critique. + #: Inputs - `responses: List[str]` + CRITIQUE_LANGUAGE = "critique_language" + + #: Tone critique. + #: Inputs - `responses: List[str]` + CRITIQUE_TONE = "critique_tone" + + #: Guideline adherence. + #: Inputs - `questions: List[str], responses: List[str]` + GUIDELINE_ADHERENCE = "guideline_adherence" + + #: Response matching. + #: Inputs - `responses: List[str], ground_truths: List[str]` + RESPONSE_MATCHING = "response_matching" + + def __str__(self): + return self.value + + @classmethod + def from_str(cls, string: str) -> "UpTrainMetric": + """ + Create a metric type from a string. + + :param string: + The string to convert. + :returns: + The metric. 
+ """ + enum_map = {e.value: e for e in UpTrainMetric} + metric = enum_map.get(string) + if metric is None: + msg = f"Unknown UpTrain metric '{string}'. Supported metrics: {list(enum_map.keys())}" + raise ValueError(msg) + return metric + + +@dataclass(frozen=True) +class MetricResult: + """ + Result of a metric evaluation. + + :param name: + The name of the metric. + :param score: + The score of the metric. + :param explanation: + An optional explanation of the metric. + """ + + name: str + score: float + explanation: Optional[str] = None + + def to_dict(self): + return dataclasses.asdict(self) + + +@dataclass(frozen=True) +class MetricDescriptor: + """ + Descriptor for a metric. + + :param metric: + The metric. + :param backend: + The associated UpTrain metric class. + :param input_parameters: + Parameters accepted by the metric. This is used + to set the input types of the evaluator component. + :param input_converter: + Callable that converts input parameters to the UpTrain input format. + :param output_converter: + Callable that converts the UpTrain output format to our output format. + :param init_parameters: + Additional parameters that need to be passed to the metric class during initialization. 
+ """ + + metric: UpTrainMetric + backend: Union[Evals, Type[ParametricEval]] + input_parameters: Dict[str, Type] + input_converter: Callable[[Any], Iterable[Dict[str, str]]] + output_converter: Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]] + init_parameters: Optional[Dict[str, Type[Any]]] = None + + @classmethod + def new( + cls, + metric: UpTrainMetric, + backend: Union[Evals, Type[ParametricEval]], + input_converter: Callable[[Any], Iterable[Dict[str, str]]], + output_converter: Optional[Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]]] = None, + *, + init_parameters: Optional[Dict[str, Type]] = None, + ) -> "MetricDescriptor": + input_converter_signature = inspect.signature(input_converter) + input_parameters = {} + for name, param in input_converter_signature.parameters.items(): + if name in ("cls", "self"): + continue + elif param.kind not in (inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD): + continue + input_parameters[name] = param.annotation + + return cls( + metric=metric, + backend=backend, + input_parameters=input_parameters, + input_converter=input_converter, + output_converter=output_converter if output_converter is not None else OutputConverters.default(metric), + init_parameters=init_parameters, + ) + + +class InputConverters: + """ + Converters for input parameters. + + The signature of the converter functions serves as the ground-truth of the + expected input parameters of a given metric. They are also responsible for validating + the input parameters and converting them to the format expected by UpTrain. 
+ """ + + @staticmethod + def _validate_input_elements(**kwargs): + for k, collection in kwargs.items(): + if not isinstance(collection, list): + msg = ( + f"UpTrain evaluator expected input '{k}' to be a collection of type 'list', " + f"got '{type(collection).__name__}' instead" + ) + raise ValueError(msg) + elif not all(isinstance(x, str) for x in collection): + msg = f"UpTrain evaluator expects inputs to be of type 'str' in '{k}'" + raise ValueError(msg) + + same_length = len({len(x) for x in kwargs.values()}) == 1 + if not same_length: + msg = f"Mismatching counts in the following inputs: {({k: len(v) for k, v in kwargs.items()})}" + raise ValueError(msg) + + @staticmethod + def validate_input_parameters(metric: UpTrainMetric, expected: Dict[str, Any], received: Dict[str, Any]): + for param, _ in expected.items(): + if param not in received: + msg = f"UpTrain evaluator expected input parameter '{param}' for metric '{metric}'" + raise ValueError(msg) + + @staticmethod + def question_context_response( + questions: List[str], contexts: List[str], responses: List[str] + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(questions=questions, contexts=contexts, responses=responses) + for q, c, r in zip(questions, contexts, responses): # type: ignore + yield {"question": q, "context": c, "response": r} + + @staticmethod + def question_context( + questions: List[str], + contexts: List[str], + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(questions=questions, contexts=contexts) + for q, c in zip(questions, contexts): # type: ignore + yield {"question": q, "context": c} + + @staticmethod + def question_response( + questions: List[str], + responses: List[str], + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(questions=questions, responses=responses) + for q, r in zip(questions, responses): # type: ignore + yield {"question": q, "response": r} + + @staticmethod + def response( + responses: 
List[str], + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(responses=responses) + for r in responses: + yield {"response": r} + + @staticmethod + def response_ground_truth( + responses: List[str], + ground_truths: List[str], + ) -> Iterable[Dict[str, str]]: + InputConverters._validate_input_elements(ground_truths=ground_truths, responses=responses) + for r, gt in zip(responses, ground_truths): # type: ignore + yield {"response": r, "ground_truth": gt} + + +class OutputConverters: + """ + Converters for results returned by UpTrain. + + They are responsible for converting the results to our output format. + """ + + @staticmethod + def validate_outputs(outputs: List[Dict[str, Any]]): + msg = None + if not isinstance(outputs, list): + msg = f"Expected response from UpTrain evaluator to be a 'list', got '{type(outputs).__name__}'" + elif not all(isinstance(x, dict) for x in outputs): + msg = "UpTrain evaluator expects outputs to be a list of `dict`s" + elif not all(isinstance(y, str) for x in outputs for y in x.keys()): + msg = "UpTrain evaluator expects keys in the output dicts to be `str`" + elif not all(isinstance(y, (float, str)) for x in outputs for y in x.values()): + msg = "UpTrain evaluator expects values in the output dicts to be either `str` or `float`" + + if msg is not None: + raise ValueError(msg) + + @staticmethod + def _extract_default_results(output: Dict[str, Any], metric_name: str) -> MetricResult: + try: + score_key = f"score_{metric_name}" + explanation_key = f"explanation_{metric_name}" + return MetricResult(name=metric_name, score=output[score_key], explanation=output.get(explanation_key)) + except KeyError as e: + msg = f"UpTrain evaluator did not return an expected output for metric '{metric_name}'" + raise ValueError(msg) from e + + @staticmethod + def default( + metric: UpTrainMetric, + ) -> Callable[[Dict[str, Any], Optional[Dict[str, Any]]], List[MetricResult]]: + def inner( + output: Dict[str, Any], metric_params: 
Optional[Dict[str, Any]], metric: UpTrainMetric # noqa: ARG001 + ) -> List[MetricResult]: + return [OutputConverters._extract_default_results(output, str(metric))] + + return partial(inner, metric=metric) + + @staticmethod + def critique_language( + output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 + ) -> List[MetricResult]: + out = [] + for expected_key in ("fluency", "coherence", "grammar", "politeness"): + out.append(OutputConverters._extract_default_results(output, expected_key)) + return out + + @staticmethod + def critique_tone( + output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 + ) -> List[MetricResult]: + return [OutputConverters._extract_default_results(output, "tone")] + + @staticmethod + def guideline_adherence(output: Dict[str, Any], metric_params: Optional[Dict[str, Any]]) -> List[MetricResult]: + assert metric_params is not None + return [OutputConverters._extract_default_results(output, f'{metric_params["guideline_name"]}_adherence')] + + @staticmethod + def response_matching( + output: Dict[str, Any], metric_params: Optional[Dict[str, Any]] # noqa: ARG004 + ) -> List[MetricResult]: + metric_str = "response_match" + out = [OutputConverters._extract_default_results(output, metric_str)] + + # Enumerate other relevant keys. 
+ score_key = f"score_{metric_str}" + for k, v in output.items(): + if k != score_key and metric_str in k and isinstance(v, float): + out.append(MetricResult(name=k, score=v)) + return out + + +METRIC_DESCRIPTORS = { + UpTrainMetric.CONTEXT_RELEVANCE: MetricDescriptor.new( + UpTrainMetric.CONTEXT_RELEVANCE, Evals.CONTEXT_RELEVANCE, InputConverters.question_context # type: ignore + ), + UpTrainMetric.FACTUAL_ACCURACY: MetricDescriptor.new( + UpTrainMetric.FACTUAL_ACCURACY, Evals.FACTUAL_ACCURACY, InputConverters.question_context_response # type: ignore + ), + UpTrainMetric.RESPONSE_RELEVANCE: MetricDescriptor.new( + UpTrainMetric.RESPONSE_RELEVANCE, Evals.RESPONSE_RELEVANCE, InputConverters.question_response # type: ignore + ), + UpTrainMetric.RESPONSE_COMPLETENESS: MetricDescriptor.new( + UpTrainMetric.RESPONSE_COMPLETENESS, Evals.RESPONSE_COMPLETENESS, InputConverters.question_response # type: ignore + ), + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT: MetricDescriptor.new( + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, + Evals.RESPONSE_COMPLETENESS_WRT_CONTEXT, + InputConverters.question_context_response, # type: ignore + ), + UpTrainMetric.RESPONSE_CONSISTENCY: MetricDescriptor.new( + UpTrainMetric.RESPONSE_CONSISTENCY, Evals.RESPONSE_CONSISTENCY, InputConverters.question_context_response # type: ignore + ), + UpTrainMetric.RESPONSE_CONCISENESS: MetricDescriptor.new( + UpTrainMetric.RESPONSE_CONCISENESS, Evals.RESPONSE_CONCISENESS, InputConverters.question_response # type: ignore + ), + UpTrainMetric.CRITIQUE_LANGUAGE: MetricDescriptor.new( + UpTrainMetric.CRITIQUE_LANGUAGE, + Evals.CRITIQUE_LANGUAGE, + InputConverters.response, + OutputConverters.critique_language, + ), + UpTrainMetric.CRITIQUE_TONE: MetricDescriptor.new( + UpTrainMetric.CRITIQUE_TONE, + CritiqueTone, + InputConverters.response, + OutputConverters.critique_tone, + init_parameters={"llm_persona": str}, + ), + UpTrainMetric.GUIDELINE_ADHERENCE: MetricDescriptor.new( + 
UpTrainMetric.GUIDELINE_ADHERENCE, + GuidelineAdherence, + InputConverters.question_response, # type: ignore + OutputConverters.guideline_adherence, + init_parameters={"guideline": str, "guideline_name": str, "response_schema": Optional[str]}, # type: ignore + ), + UpTrainMetric.RESPONSE_MATCHING: MetricDescriptor.new( + UpTrainMetric.RESPONSE_MATCHING, + ResponseMatching, + InputConverters.response_ground_truth, # type: ignore + OutputConverters.response_matching, + init_parameters={"method": Optional[str]}, # type: ignore + ), +} diff --git a/integrations/uptrain/tests/__init__.py b/integrations/uptrain/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/integrations/uptrain/tests/test_evaluator.py b/integrations/uptrain/tests/test_evaluator.py new file mode 100644 index 000000000..2128e0634 --- /dev/null +++ b/integrations/uptrain/tests/test_evaluator.py @@ -0,0 +1,380 @@ +import copy +import os +from dataclasses import dataclass +from typing import List +from unittest.mock import patch + +import pytest +from haystack import DeserializationError + +from haystack_integrations.components.evaluators import UpTrainEvaluator, UpTrainMetric + +DEFAULT_QUESTIONS = [ + "Which is the most popular global sport?", + "Who created the Python language?", +] +DEFAULT_CONTEXTS = [ + "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people.", + "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. 
Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects.", +] +DEFAULT_RESPONSES = [ + "Football is the most popular sport with around 4 billion followers worldwide", + "Python language was created by Guido van Rossum.", +] + + +@dataclass(frozen=True) +class Unserializable: + something: str + + +# Only returns results for the passed metrics. +class MockBackend: + def __init__(self, metric_outputs: List[UpTrainMetric]) -> None: + self.metrics = metric_outputs + if not self.metrics: + self.metrics = [e for e in UpTrainMetric] + + def log_and_evaluate(self, data, checks, **kwargs): + output_map = { + UpTrainMetric.CONTEXT_RELEVANCE: { + "score_context_relevance": 0.5, + "explanation_context_relevance": "1", + }, + UpTrainMetric.FACTUAL_ACCURACY: { + "score_factual_accuracy": 1.0, + "explanation_factual_accuracy": "2", + }, + UpTrainMetric.RESPONSE_RELEVANCE: { + "score_response_relevance": 1.0, + "explanation_response_relevance": "3", + }, + UpTrainMetric.RESPONSE_COMPLETENESS: { + "score_response_completeness": 0.5, + "explanation_response_completeness": "4", + }, + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT: { + "score_response_completeness_wrt_context": 1.0, + "explanation_response_completeness_wrt_context": "5", + }, + UpTrainMetric.RESPONSE_CONSISTENCY: { + "score_response_consistency": 0.9, + "explanation_response_consistency": "6", + }, + UpTrainMetric.RESPONSE_CONCISENESS: { + "score_response_conciseness": 1.0, + "explanation_response_conciseness": "7", + }, + UpTrainMetric.CRITIQUE_LANGUAGE: { + "score_fluency": 1.0, + "score_coherence": 1.0, + "score_grammar": 1.0, + "score_politeness": 1.0, + "explanation_fluency": "8", + "explanation_coherence": "9", + "explanation_grammar": "10", + "explanation_politeness": "11", + }, + UpTrainMetric.CRITIQUE_TONE: { + "score_tone": 0.4, + "explanation_tone": "12", + }, + 
UpTrainMetric.GUIDELINE_ADHERENCE: { + "score_guideline_adherence": 1.0, + "explanation_guideline_adherence": "13", + }, + UpTrainMetric.RESPONSE_MATCHING: { + "response_match_precision": 1.0, + "response_match_recall": 0.6666666666666666, + "score_response_match": 0.7272727272727273, + }, + } + + data = copy.deepcopy(data) + for x in data: + for m in self.metrics: + x.update(output_map[m]) + return data + + +@patch("os.environ.get") +def test_evaluator_api(os_environ_get): + api_key_var = "test-api-key" + os_environ_get.return_value = api_key_var + + eval = UpTrainEvaluator(UpTrainMetric.RESPONSE_COMPLETENESS) + assert eval.api == "openai" + assert eval.api_key_env_var == "OPENAI_API_KEY" + + eval = UpTrainEvaluator(UpTrainMetric.RESPONSE_COMPLETENESS, api="uptrain", api_key_env_var="UPTRAIN_API_KEY") + assert eval.api == "uptrain" + assert eval.api_key_env_var == "UPTRAIN_API_KEY" + + with pytest.raises(ValueError, match="Unsupported API"): + UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="cohere") + + os_environ_get.return_value = None + with pytest.raises(ValueError, match="Missing API key"): + UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="uptrain") + + +@patch("os.environ.get") +def test_evaluator_metric_init_params(os_environ_get): + api_key = "test-api-key" + os_environ_get.return_value = api_key + + eval = UpTrainEvaluator(UpTrainMetric.CRITIQUE_TONE, metric_params={"llm_persona": "village idiot"}) + assert eval._backend_metric.llm_persona == "village idiot" + + with pytest.raises(ValueError, match="Invalid init parameters"): + UpTrainEvaluator(UpTrainMetric.CRITIQUE_TONE, metric_params={"role": "village idiot"}) + + with pytest.raises(ValueError, match="unexpected init parameters"): + UpTrainEvaluator(UpTrainMetric.FACTUAL_ACCURACY, metric_params={"check_numbers": True}) + + with pytest.raises(ValueError, match="expected init parameters"): + UpTrainEvaluator(UpTrainMetric.RESPONSE_MATCHING) + + +@patch("os.environ.get") +def 
test_evaluator_serde(os_environ_get): + os_environ_get.return_value = "abacab" + + init_params = { + "metric": UpTrainMetric.RESPONSE_MATCHING, + "metric_params": {"method": "rouge"}, + "api": "uptrain", + "api_key_env_var": "abacab", + "api_params": {"eval_name": "test"}, + } + eval = UpTrainEvaluator(**init_params) + serde_data = eval.to_dict() + new_eval = UpTrainEvaluator.from_dict(serde_data) + + assert eval.metric == new_eval.metric + assert eval.api == new_eval.api + assert eval.api_key_env_var == new_eval.api_key_env_var + assert eval.metric_params == new_eval.metric_params + assert eval.api_params == new_eval.api_params + assert type(new_eval._backend_client) == type(eval._backend_client) + assert type(new_eval._backend_metric) == type(eval._backend_metric) + + with pytest.raises(DeserializationError, match=r"cannot serialize the API/metric parameters"): + init_params3 = copy.deepcopy(init_params) + init_params3["api_params"] = {"arg": Unserializable("")} + eval = UpTrainEvaluator(**init_params3) + eval.to_dict() + + +@pytest.mark.parametrize( + "metric, inputs, params", + [ + (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": [], "contexts": []}, None), + (UpTrainMetric.FACTUAL_ACCURACY, {"questions": [], "contexts": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, {"questions": [], "contexts": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_CONSISTENCY, {"questions": [], "contexts": [], "responses": []}, None), + (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": [], "responses": []}, None), + (UpTrainMetric.CRITIQUE_LANGUAGE, {"responses": []}, None), + (UpTrainMetric.CRITIQUE_TONE, {"responses": []}, {"llm_persona": "idiot"}), + ( + UpTrainMetric.GUIDELINE_ADHERENCE, + {"questions": [], "responses": []}, + {"guideline": "Do nothing", "guideline_name": 
"somename", "response_schema": None}, + ), + (UpTrainMetric.RESPONSE_MATCHING, {"ground_truths": [], "responses": []}, {"method": "llm"}), + ], +) +@patch("os.environ.get") +def test_evaluator_valid_inputs(os_environ_get, metric, inputs, params): + os_environ_get.return_value = "abacab" + init_params = { + "metric": metric, + "metric_params": params, + "api": "uptrain", + "api_key_env_var": "abacab", + "api_params": None, + } + eval = UpTrainEvaluator(**init_params) + eval._backend_client = MockBackend([metric]) + output = eval.run(**inputs) + + +@pytest.mark.parametrize( + "metric, inputs, error_string, params", + [ + (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": {}, "contexts": []}, "to be a collection of type 'list'", None), + ( + UpTrainMetric.FACTUAL_ACCURACY, + {"questions": [1], "contexts": [2], "responses": [3]}, + "expects inputs to be of type 'str'", + None, + ), + (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": [""], "responses": []}, "Mismatching counts ", None), + (UpTrainMetric.RESPONSE_RELEVANCE, {"responses": []}, "expected input parameter ", None), + ], +) +@patch("os.environ.get") +def test_evaluator_invalid_inputs(os_environ_get, metric, inputs, error_string, params): + os_environ_get.return_value = "abacab" + with pytest.raises(ValueError, match=error_string): + init_params = { + "metric": metric, + "metric_params": params, + "api": "uptrain", + "api_key_env_var": "abacab", + "api_params": None, + } + eval = UpTrainEvaluator(**init_params) + eval._backend_client = MockBackend([metric]) + output = eval.run(**inputs) + + +# This test validates the expected outputs of the evaluator. +# Each output is parameterized as a list of tuples, where each tuple is +# (name, score, explanation). The name and explanation are optional. If +# the name is None, then the metric name is used. 
+@pytest.mark.parametrize( + "metric, inputs, expected_outputs, metric_params", + [ + (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": ["q1"], "contexts": ["c1"]}, [[(None, 0.5, "1")]], None), + ( + UpTrainMetric.FACTUAL_ACCURACY, + {"questions": ["q2"], "contexts": ["c2"], "responses": ["r2"]}, + [[(None, 1.0, "2")]], + None, + ), + (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": ["q3"], "responses": ["r3"]}, [[(None, 1.0, "3")]], None), + (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": ["q4"], "responses": ["r4"]}, [[(None, 0.5, "4")]], None), + ( + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, + {"questions": ["q5"], "contexts": ["c5"], "responses": ["r5"]}, + [[(None, 1.0, "5")]], + None, + ), + ( + UpTrainMetric.RESPONSE_CONSISTENCY, + {"questions": ["q6"], "contexts": ["c6"], "responses": ["r6"]}, + [[(None, 0.9, "6")]], + None, + ), + (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": ["q7"], "responses": ["r7"]}, [[(None, 1.0, "7")]], None), + ( + UpTrainMetric.CRITIQUE_LANGUAGE, + {"responses": ["r8"]}, + [ + [ + ("fluency", 1.0, "8"), + ("coherence", 1.0, "9"), + ("grammar", 1.0, "10"), + ("politeness", 1.0, "11"), + ] + ], + None, + ), + (UpTrainMetric.CRITIQUE_TONE, {"responses": ["r9"]}, [[("tone", 0.4, "12")]], {"llm_persona": "idiot"}), + ( + UpTrainMetric.GUIDELINE_ADHERENCE, + {"questions": ["q10"], "responses": ["r10"]}, + [[(None, 1.0, "13")]], + {"guideline": "Do nothing", "guideline_name": "guideline", "response_schema": None}, + ), + ( + UpTrainMetric.RESPONSE_MATCHING, + {"ground_truths": ["g11"], "responses": ["r11"]}, + [ + [ + ("response_match_precision", 1.0, None), + ("response_match_recall", 0.6666666666666666, None), + ("response_match", 0.7272727272727273, None), + ] + ], + {"method": "llm"}, + ), + ], +) +@patch("os.environ.get") +def test_evaluator_outputs(os_environ_get, metric, inputs, expected_outputs, metric_params): + os_environ_get.return_value = "abacab" + init_params = { + "metric": metric, + "metric_params": 
metric_params, + "api": "uptrain", + "api_key_env_var": "abacab", + "api_params": None, + } + eval = UpTrainEvaluator(**init_params) + eval._backend_client = MockBackend([metric]) + results = eval.run(**inputs)["results"] + + assert type(results) == type(expected_outputs) + assert len(results) == len(expected_outputs) + + for r, o in zip(results, expected_outputs): + assert len(r) == len(o) + + expected = {(name if name is not None else str(metric), score, exp) for name, score, exp in o} + got = {(x["name"], x["score"], x["explanation"]) for x in r} + assert got == expected + + +# This integration test validates the evaluator by running it against the +# OpenAI API. It is parameterized by the metric, the inputs to the evalutor +# and the metric parameters. +@pytest.mark.skipif("OPENAI_API_KEY" not in os.environ, reason="OPENAI_API_KEY not set") +@pytest.mark.parametrize( + "metric, inputs, metric_params", + [ + (UpTrainMetric.CONTEXT_RELEVANCE, {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS}, None), + ( + UpTrainMetric.FACTUAL_ACCURACY, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + None, + ), + (UpTrainMetric.RESPONSE_RELEVANCE, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None), + (UpTrainMetric.RESPONSE_COMPLETENESS, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None), + ( + UpTrainMetric.RESPONSE_COMPLETENESS_WRT_CONTEXT, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + None, + ), + ( + UpTrainMetric.RESPONSE_CONSISTENCY, + {"questions": DEFAULT_QUESTIONS, "contexts": DEFAULT_CONTEXTS, "responses": DEFAULT_RESPONSES}, + None, + ), + (UpTrainMetric.RESPONSE_CONCISENESS, {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, None), + (UpTrainMetric.CRITIQUE_LANGUAGE, {"responses": DEFAULT_RESPONSES}, None), + (UpTrainMetric.CRITIQUE_TONE, {"responses": DEFAULT_RESPONSES}, {"llm_persona": 
"idiot"}), + ( + UpTrainMetric.GUIDELINE_ADHERENCE, + {"questions": DEFAULT_QUESTIONS, "responses": DEFAULT_RESPONSES}, + {"guideline": "Do nothing", "guideline_name": "somename", "response_schema": None}, + ), + ( + UpTrainMetric.RESPONSE_MATCHING, + { + "ground_truths": [ + "Consumerism is the most popular sport in the world", + "Python language was created by some dude.", + ], + "responses": DEFAULT_RESPONSES, + }, + {"method": "llm"}, + ), + ], +) +def test_integration_run(metric, inputs, metric_params): + init_params = { + "metric": metric, + "metric_params": metric_params, + "api": "openai", + } + eval = UpTrainEvaluator(**init_params) + output = eval.run(**inputs) + + assert type(output) == dict + assert len(output) == 1 + assert "results" in output + assert len(output["results"]) == len(next(iter(inputs.values()))) diff --git a/integrations/uptrain/tests/test_metrics.py b/integrations/uptrain/tests/test_metrics.py new file mode 100644 index 000000000..b73b2aa92 --- /dev/null +++ b/integrations/uptrain/tests/test_metrics.py @@ -0,0 +1,11 @@ +import pytest + +from haystack_integrations.components.evaluators import UpTrainMetric + + +def test_uptrain_metric(): + for e in UpTrainMetric: + assert e == UpTrainMetric.from_str(e.value) + + with pytest.raises(ValueError, match="Unknown UpTrain metric"): + UpTrainMetric.from_str("smugness") From 4118b629b2053a0d09b872343fd17a1ca90ab677 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Fri, 26 Jan 2024 16:02:02 +0100 Subject: [PATCH 20/47] change import paths (#273) --- integrations/google_vertex/pyproject.toml | 19 ++++++++-------- .../src/google_vertex_haystack/__init__.py | 3 --- .../generators/__init__.py | 3 --- .../generators/google_vertex/__init__.py | 20 +++++++++++++++++ .../generators/google_vertex}/captioner.py | 0 .../generators/google_vertex/chat/__init__.py | 0 .../generators/google_vertex}/chat/gemini.py | 0 .../google_vertex}/code_generator.py | 0 .../generators/google_vertex}/gemini.py | 0 
.../google_vertex}/image_generator.py | 0 .../google_vertex}/question_answering.py | 0 .../google_vertex}/text_generator.py | 0 .../google_vertex/tests/test_captioner.py | 22 +++++++++---------- .../tests/test_code_generator.py | 22 +++++++++---------- .../tests/test_image_generator.py | 22 +++++++++---------- .../tests/test_question_answering.py | 22 +++++++++---------- .../tests/test_text_generator.py | 22 +++++++++---------- 17 files changed, 85 insertions(+), 70 deletions(-) delete mode 100644 integrations/google_vertex/src/google_vertex_haystack/__init__.py delete mode 100644 integrations/google_vertex/src/google_vertex_haystack/generators/__init__.py create mode 100644 integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/__init__.py rename integrations/google_vertex/src/{google_vertex_haystack/generators => haystack_integrations/components/generators/google_vertex}/captioner.py (100%) create mode 100644 integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/chat/__init__.py rename integrations/google_vertex/src/{google_vertex_haystack/generators => haystack_integrations/components/generators/google_vertex}/chat/gemini.py (100%) rename integrations/google_vertex/src/{google_vertex_haystack/generators => haystack_integrations/components/generators/google_vertex}/code_generator.py (100%) rename integrations/google_vertex/src/{google_vertex_haystack/generators => haystack_integrations/components/generators/google_vertex}/gemini.py (100%) rename integrations/google_vertex/src/{google_vertex_haystack/generators => haystack_integrations/components/generators/google_vertex}/image_generator.py (100%) rename integrations/google_vertex/src/{google_vertex_haystack/generators => haystack_integrations/components/generators/google_vertex}/question_answering.py (100%) rename integrations/google_vertex/src/{google_vertex_haystack/generators => 
haystack_integrations/components/generators/google_vertex}/text_generator.py (100%) diff --git a/integrations/google_vertex/pyproject.toml b/integrations/google_vertex/pyproject.toml index 1d15a4270..ecd509f15 100644 --- a/integrations/google_vertex/pyproject.toml +++ b/integrations/google_vertex/pyproject.toml @@ -33,6 +33,9 @@ Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/m Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/google_vertex" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/google_vertex-v(?P.*)' @@ -69,7 +72,7 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/google_vertex_haystack tests}" +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -132,26 +135,23 @@ unfixable = [ ] [tool.ruff.isort] -known-first-party = ["google_vertex_haystack"] +known-first-party = ["haystack_integrations"] [tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports "tests/**/*" = ["PLR2004", "S101", "TID252"] [tool.coverage.run] -source_pkgs = ["google_vertex_haystack", "tests"] +source_pkgs = ["haystack_integrations", "tests"] branch = true parallel = true -omit = [ - "src/google_vertex_haystack/__about__.py", -] [tool.coverage.paths] -google_vertex_haystack = ["src/google_vertex_haystack", "*/google_vertex/src/google_vertex_haystack"] -tests = ["tests", "*/google_vertex_haystack/tests"] +google_vertex_haystack = ["src/"] +tests = ["tests"] [tool.coverage.report] exclude_lines = [ @@ -164,6 +164,7 @@ exclude_lines = [ module 
= [ "vertexai.*", "haystack.*", + "haystack_integrations.*", "pytest.*", "numpy.*", ] diff --git a/integrations/google_vertex/src/google_vertex_haystack/__init__.py b/integrations/google_vertex/src/google_vertex_haystack/__init__.py deleted file mode 100644 index e873bc332..000000000 --- a/integrations/google_vertex/src/google_vertex_haystack/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: 2023-present deepset GmbH -# -# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/google_vertex/src/google_vertex_haystack/generators/__init__.py b/integrations/google_vertex/src/google_vertex_haystack/generators/__init__.py deleted file mode 100644 index e873bc332..000000000 --- a/integrations/google_vertex/src/google_vertex_haystack/generators/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: 2023-present deepset GmbH -# -# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/__init__.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/__init__.py new file mode 100644 index 000000000..07c2a5260 --- /dev/null +++ b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/__init__.py @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from .captioner import VertexAIImageCaptioner +from .chat.gemini import VertexAIGeminiChatGenerator +from .code_generator import VertexAICodeGenerator +from .gemini import VertexAIGeminiGenerator +from .image_generator import VertexAIImageGenerator +from .question_answering import VertexAIImageQA +from .text_generator import VertexAITextGenerator + +__all__ = [ + "VertexAICodeGenerator", + "VertexAIGeminiGenerator", + "VertexAIGeminiChatGenerator", + "VertexAIImageCaptioner", + "VertexAIImageGenerator", + "VertexAIImageQA", + "VertexAITextGenerator", +] diff --git 
a/integrations/google_vertex/src/google_vertex_haystack/generators/captioner.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/captioner.py similarity index 100% rename from integrations/google_vertex/src/google_vertex_haystack/generators/captioner.py rename to integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/captioner.py diff --git a/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/chat/__init__.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/chat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/integrations/google_vertex/src/google_vertex_haystack/generators/chat/gemini.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/chat/gemini.py similarity index 100% rename from integrations/google_vertex/src/google_vertex_haystack/generators/chat/gemini.py rename to integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/chat/gemini.py diff --git a/integrations/google_vertex/src/google_vertex_haystack/generators/code_generator.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/code_generator.py similarity index 100% rename from integrations/google_vertex/src/google_vertex_haystack/generators/code_generator.py rename to integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/code_generator.py diff --git a/integrations/google_vertex/src/google_vertex_haystack/generators/gemini.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/gemini.py similarity index 100% rename from integrations/google_vertex/src/google_vertex_haystack/generators/gemini.py rename to integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/gemini.py diff --git 
a/integrations/google_vertex/src/google_vertex_haystack/generators/image_generator.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/image_generator.py similarity index 100% rename from integrations/google_vertex/src/google_vertex_haystack/generators/image_generator.py rename to integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/image_generator.py diff --git a/integrations/google_vertex/src/google_vertex_haystack/generators/question_answering.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/question_answering.py similarity index 100% rename from integrations/google_vertex/src/google_vertex_haystack/generators/question_answering.py rename to integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/question_answering.py diff --git a/integrations/google_vertex/src/google_vertex_haystack/generators/text_generator.py b/integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/text_generator.py similarity index 100% rename from integrations/google_vertex/src/google_vertex_haystack/generators/text_generator.py rename to integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/text_generator.py diff --git a/integrations/google_vertex/tests/test_captioner.py b/integrations/google_vertex/tests/test_captioner.py index bc7e4f829..26249dbee 100644 --- a/integrations/google_vertex/tests/test_captioner.py +++ b/integrations/google_vertex/tests/test_captioner.py @@ -2,11 +2,11 @@ from haystack.dataclasses.byte_stream import ByteStream -from google_vertex_haystack.generators.captioner import VertexAIImageCaptioner +from haystack_integrations.components.generators.google_vertex import VertexAIImageCaptioner -@patch("google_vertex_haystack.generators.captioner.vertexai") -@patch("google_vertex_haystack.generators.captioner.ImageTextModel") 
+@patch("haystack_integrations.components.generators.google_vertex.captioner.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.captioner.ImageTextModel") def test_init(mock_model_class, mock_vertexai): captioner = VertexAIImageCaptioner( model="imagetext", project_id="myproject-123456", number_of_results=1, language="it" @@ -19,14 +19,14 @@ def test_init(mock_model_class, mock_vertexai): assert captioner._kwargs == {"number_of_results": 1, "language": "it"} -@patch("google_vertex_haystack.generators.captioner.vertexai") -@patch("google_vertex_haystack.generators.captioner.ImageTextModel") +@patch("haystack_integrations.components.generators.google_vertex.captioner.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.captioner.ImageTextModel") def test_to_dict(_mock_model_class, _mock_vertexai): captioner = VertexAIImageCaptioner( model="imagetext", project_id="myproject-123456", number_of_results=1, language="it" ) assert captioner.to_dict() == { - "type": "google_vertex_haystack.generators.captioner.VertexAIImageCaptioner", + "type": "haystack_integrations.components.generators.google_vertex.captioner.VertexAIImageCaptioner", "init_parameters": { "model": "imagetext", "project_id": "myproject-123456", @@ -37,12 +37,12 @@ def test_to_dict(_mock_model_class, _mock_vertexai): } -@patch("google_vertex_haystack.generators.captioner.vertexai") -@patch("google_vertex_haystack.generators.captioner.ImageTextModel") +@patch("haystack_integrations.components.generators.google_vertex.captioner.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.captioner.ImageTextModel") def test_from_dict(_mock_model_class, _mock_vertexai): captioner = VertexAIImageCaptioner.from_dict( { - "type": "google_vertex_haystack.generators.captioner.VertexAIImageCaptioner", + "type": "haystack_integrations.components.generators.google_vertex.captioner.VertexAIImageCaptioner", "init_parameters": { "model": "imagetext", 
"project_id": "myproject-123456", @@ -58,8 +58,8 @@ def test_from_dict(_mock_model_class, _mock_vertexai): assert captioner._model is not None -@patch("google_vertex_haystack.generators.captioner.vertexai") -@patch("google_vertex_haystack.generators.captioner.ImageTextModel") +@patch("haystack_integrations.components.generators.google_vertex.captioner.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.captioner.ImageTextModel") def test_run_calls_get_captions(mock_model_class, _mock_vertexai): mock_model = Mock() mock_model_class.from_pretrained.return_value = mock_model diff --git a/integrations/google_vertex/tests/test_code_generator.py b/integrations/google_vertex/tests/test_code_generator.py index c2a2e5aa9..129954062 100644 --- a/integrations/google_vertex/tests/test_code_generator.py +++ b/integrations/google_vertex/tests/test_code_generator.py @@ -2,11 +2,11 @@ from vertexai.language_models import TextGenerationResponse -from google_vertex_haystack.generators.code_generator import VertexAICodeGenerator +from haystack_integrations.components.generators.google_vertex import VertexAICodeGenerator -@patch("google_vertex_haystack.generators.code_generator.vertexai") -@patch("google_vertex_haystack.generators.code_generator.CodeGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.code_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.code_generator.CodeGenerationModel") def test_init(mock_model_class, mock_vertexai): generator = VertexAICodeGenerator( model="code-bison", project_id="myproject-123456", candidate_count=3, temperature=0.5 @@ -19,14 +19,14 @@ def test_init(mock_model_class, mock_vertexai): assert generator._kwargs == {"candidate_count": 3, "temperature": 0.5} -@patch("google_vertex_haystack.generators.code_generator.vertexai") -@patch("google_vertex_haystack.generators.code_generator.CodeGenerationModel") 
+@patch("haystack_integrations.components.generators.google_vertex.code_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.code_generator.CodeGenerationModel") def test_to_dict(_mock_model_class, _mock_vertexai): generator = VertexAICodeGenerator( model="code-bison", project_id="myproject-123456", candidate_count=3, temperature=0.5 ) assert generator.to_dict() == { - "type": "google_vertex_haystack.generators.code_generator.VertexAICodeGenerator", + "type": "haystack_integrations.components.generators.google_vertex.code_generator.VertexAICodeGenerator", "init_parameters": { "model": "code-bison", "project_id": "myproject-123456", @@ -37,12 +37,12 @@ def test_to_dict(_mock_model_class, _mock_vertexai): } -@patch("google_vertex_haystack.generators.code_generator.vertexai") -@patch("google_vertex_haystack.generators.code_generator.CodeGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.code_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.code_generator.CodeGenerationModel") def test_from_dict(_mock_model_class, _mock_vertexai): generator = VertexAICodeGenerator.from_dict( { - "type": "google_vertex_haystack.generators.code_generator.VertexAICodeGenerator", + "type": "haystack_integrations.components.generators.google_vertex.code_generator.VertexAICodeGenerator", "init_parameters": { "model": "code-bison", "project_id": "myproject-123456", @@ -58,8 +58,8 @@ def test_from_dict(_mock_model_class, _mock_vertexai): assert generator._model is not None -@patch("google_vertex_haystack.generators.code_generator.vertexai") -@patch("google_vertex_haystack.generators.code_generator.CodeGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.code_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.code_generator.CodeGenerationModel") def test_run_calls_predict(mock_model_class, _mock_vertexai): mock_model = 
Mock() mock_model.predict.return_value = TextGenerationResponse("answer", None) diff --git a/integrations/google_vertex/tests/test_image_generator.py b/integrations/google_vertex/tests/test_image_generator.py index 1c5381a48..42cc0a0a3 100644 --- a/integrations/google_vertex/tests/test_image_generator.py +++ b/integrations/google_vertex/tests/test_image_generator.py @@ -2,11 +2,11 @@ from vertexai.preview.vision_models import ImageGenerationResponse -from google_vertex_haystack.generators.image_generator import VertexAIImageGenerator +from haystack_integrations.components.generators.google_vertex import VertexAIImageGenerator -@patch("google_vertex_haystack.generators.image_generator.vertexai") -@patch("google_vertex_haystack.generators.image_generator.ImageGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.image_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.image_generator.ImageGenerationModel") def test_init(mock_model_class, mock_vertexai): generator = VertexAIImageGenerator( model="imagetext", @@ -25,8 +25,8 @@ def test_init(mock_model_class, mock_vertexai): } -@patch("google_vertex_haystack.generators.image_generator.vertexai") -@patch("google_vertex_haystack.generators.image_generator.ImageGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.image_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.image_generator.ImageGenerationModel") def test_to_dict(_mock_model_class, _mock_vertexai): generator = VertexAIImageGenerator( model="imagetext", @@ -35,7 +35,7 @@ def test_to_dict(_mock_model_class, _mock_vertexai): number_of_images=3, ) assert generator.to_dict() == { - "type": "google_vertex_haystack.generators.image_generator.VertexAIImageGenerator", + "type": "haystack_integrations.components.generators.google_vertex.image_generator.VertexAIImageGenerator", "init_parameters": { "model": "imagetext", "project_id": 
"myproject-123456", @@ -46,12 +46,12 @@ def test_to_dict(_mock_model_class, _mock_vertexai): } -@patch("google_vertex_haystack.generators.image_generator.vertexai") -@patch("google_vertex_haystack.generators.image_generator.ImageGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.image_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.image_generator.ImageGenerationModel") def test_from_dict(_mock_model_class, _mock_vertexai): generator = VertexAIImageGenerator.from_dict( { - "type": "google_vertex_haystack.generators.image_generator.VertexAIImageGenerator", + "type": "haystack_integrations.components.generators.google_vertex.image_generator.VertexAIImageGenerator", "init_parameters": { "model": "imagetext", "project_id": "myproject-123456", @@ -70,8 +70,8 @@ def test_from_dict(_mock_model_class, _mock_vertexai): } -@patch("google_vertex_haystack.generators.image_generator.vertexai") -@patch("google_vertex_haystack.generators.image_generator.ImageGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.image_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.image_generator.ImageGenerationModel") def test_run_calls_generate_images(mock_model_class, _mock_vertexai): mock_model = Mock() mock_model.generate_images.return_value = ImageGenerationResponse(images=[]) diff --git a/integrations/google_vertex/tests/test_question_answering.py b/integrations/google_vertex/tests/test_question_answering.py index 3495afcb2..3f414f0e0 100644 --- a/integrations/google_vertex/tests/test_question_answering.py +++ b/integrations/google_vertex/tests/test_question_answering.py @@ -2,11 +2,11 @@ from haystack.dataclasses.byte_stream import ByteStream -from google_vertex_haystack.generators.question_answering import VertexAIImageQA +from haystack_integrations.components.generators.google_vertex import VertexAIImageQA 
-@patch("google_vertex_haystack.generators.question_answering.vertexai") -@patch("google_vertex_haystack.generators.question_answering.ImageTextModel") +@patch("haystack_integrations.components.generators.google_vertex.question_answering.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.question_answering.ImageTextModel") def test_init(mock_model_class, mock_vertexai): generator = VertexAIImageQA( model="imagetext", @@ -21,8 +21,8 @@ def test_init(mock_model_class, mock_vertexai): assert generator._kwargs == {"number_of_results": 3} -@patch("google_vertex_haystack.generators.question_answering.vertexai") -@patch("google_vertex_haystack.generators.question_answering.ImageTextModel") +@patch("haystack_integrations.components.generators.google_vertex.question_answering.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.question_answering.ImageTextModel") def test_to_dict(_mock_model_class, _mock_vertexai): generator = VertexAIImageQA( model="imagetext", @@ -30,7 +30,7 @@ def test_to_dict(_mock_model_class, _mock_vertexai): number_of_results=3, ) assert generator.to_dict() == { - "type": "google_vertex_haystack.generators.question_answering.VertexAIImageQA", + "type": "haystack_integrations.components.generators.google_vertex.question_answering.VertexAIImageQA", "init_parameters": { "model": "imagetext", "project_id": "myproject-123456", @@ -40,12 +40,12 @@ def test_to_dict(_mock_model_class, _mock_vertexai): } -@patch("google_vertex_haystack.generators.question_answering.vertexai") -@patch("google_vertex_haystack.generators.question_answering.ImageTextModel") +@patch("haystack_integrations.components.generators.google_vertex.question_answering.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.question_answering.ImageTextModel") def test_from_dict(_mock_model_class, _mock_vertexai): generator = VertexAIImageQA.from_dict( { - "type": 
"google_vertex_haystack.generators.question_answering.VertexAIImageQA", + "type": "haystack_integrations.components.generators.google_vertex.question_answering.VertexAIImageQA", "init_parameters": { "model": "imagetext", "project_id": "myproject-123456", @@ -60,8 +60,8 @@ def test_from_dict(_mock_model_class, _mock_vertexai): assert generator._kwargs == {"number_of_results": 3} -@patch("google_vertex_haystack.generators.question_answering.vertexai") -@patch("google_vertex_haystack.generators.question_answering.ImageTextModel") +@patch("haystack_integrations.components.generators.google_vertex.question_answering.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.question_answering.ImageTextModel") def test_run_calls_ask_question(mock_model_class, _mock_vertexai): mock_model = Mock() mock_model.ask_question.return_value = [] diff --git a/integrations/google_vertex/tests/test_text_generator.py b/integrations/google_vertex/tests/test_text_generator.py index f2edbfc3b..3e5248dc7 100644 --- a/integrations/google_vertex/tests/test_text_generator.py +++ b/integrations/google_vertex/tests/test_text_generator.py @@ -2,11 +2,11 @@ from vertexai.language_models import GroundingSource -from google_vertex_haystack.generators.text_generator import VertexAITextGenerator +from haystack_integrations.components.generators.google_vertex import VertexAITextGenerator -@patch("google_vertex_haystack.generators.text_generator.vertexai") -@patch("google_vertex_haystack.generators.text_generator.TextGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.text_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.text_generator.TextGenerationModel") def test_init(mock_model_class, mock_vertexai): grounding_source = GroundingSource.VertexAISearch("1234", "us-central-1") generator = VertexAITextGenerator( @@ -20,15 +20,15 @@ def test_init(mock_model_class, mock_vertexai): assert generator._kwargs == 
{"temperature": 0.2, "grounding_source": grounding_source} -@patch("google_vertex_haystack.generators.text_generator.vertexai") -@patch("google_vertex_haystack.generators.text_generator.TextGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.text_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.text_generator.TextGenerationModel") def test_to_dict(_mock_model_class, _mock_vertexai): grounding_source = GroundingSource.VertexAISearch("1234", "us-central-1") generator = VertexAITextGenerator( model="text-bison", project_id="myproject-123456", temperature=0.2, grounding_source=grounding_source ) assert generator.to_dict() == { - "type": "google_vertex_haystack.generators.text_generator.VertexAITextGenerator", + "type": "haystack_integrations.components.generators.google_vertex.text_generator.VertexAITextGenerator", "init_parameters": { "model": "text-bison", "project_id": "myproject-123456", @@ -47,12 +47,12 @@ def test_to_dict(_mock_model_class, _mock_vertexai): } -@patch("google_vertex_haystack.generators.text_generator.vertexai") -@patch("google_vertex_haystack.generators.text_generator.TextGenerationModel") +@patch("haystack_integrations.components.generators.google_vertex.text_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.text_generator.TextGenerationModel") def test_from_dict(_mock_model_class, _mock_vertexai): generator = VertexAITextGenerator.from_dict( { - "type": "google_vertex_haystack.generators.text_generator.VertexAITextGenerator", + "type": "haystack_integrations.components.generators.google_vertex.text_generator.VertexAITextGenerator", "init_parameters": { "model": "text-bison", "project_id": "myproject-123456", @@ -79,8 +79,8 @@ def test_from_dict(_mock_model_class, _mock_vertexai): } -@patch("google_vertex_haystack.generators.text_generator.vertexai") -@patch("google_vertex_haystack.generators.text_generator.TextGenerationModel") 
+@patch("haystack_integrations.components.generators.google_vertex.text_generator.vertexai") +@patch("haystack_integrations.components.generators.google_vertex.text_generator.TextGenerationModel") def test_run_calls_get_captions(mock_model_class, _mock_vertexai): mock_model = Mock() mock_model.predict.return_value = MagicMock() From 2678beb17e4f0c81f19a47c2badb170f1c02792b Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Fri, 26 Jan 2024 18:14:55 +0100 Subject: [PATCH 21/47] Create CODE_OF_CONDUCT.md --- CODE_OF_CONDUCT.md | 128 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 CODE_OF_CONDUCT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..2b2d0bf2f --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +haystack@deepset.ai. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. From 37507ded040cf0327f4ed185080b30ea80eb8a29 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Fri, 26 Jan 2024 18:15:21 +0100 Subject: [PATCH 22/47] Delete code_of_conduct.txt --- code_of_conduct.txt | 98 --------------------------------------------- 1 file changed, 98 deletions(-) delete mode 100644 code_of_conduct.txt diff --git a/code_of_conduct.txt b/code_of_conduct.txt deleted file mode 100644 index c4814cb22..000000000 --- a/code_of_conduct.txt +++ /dev/null @@ -1,98 +0,0 @@ -CONTRIBUTOR COVENANT CODE OF CONDUCT -==================================== - -Our Pledge ----------- - -We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for -everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, -gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, -race, caste, color, religion, or sexual identity and orientation. 
- -We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. - -Our Standards -------------- - -Examples of behavior that contributes to a positive environment for our community include: - - Demonstrating empathy and kindness toward other people - - Being respectful of differing opinions, viewpoints, and experiences - - Giving and gracefully accepting constructive feedback - - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience - - Focusing on what is best not just for us as individuals, but for the overall community - -Examples of unacceptable behavior include: - - The use of sexualized language or imagery, and sexual attention or advances of any kind - - Trolling, insulting or derogatory comments, and personal or political attacks - - Public or private harassment - - Publishing others’ private information, such as a physical or email address, without their explicit permission - - Other conduct which could reasonably be considered inappropriate in a professional setting - -Enforcement Responsibilities ----------------------------- - -Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take -appropriate and fair corrective action in response to any behavior that they deem inappropriate, -threatening, offensive, or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, -issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for -moderation decisions when appropriate. - -Scope ------ - -This Code of Conduct applies within all community spaces, and also applies when an individual is officially -representing the community in public spaces. 
Examples of representing our community include using an official -e-mail address, posting via an official social media account, or acting as an appointed representative -at an online or offline event. - -Enforcement ------------ - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible -for enforcement at engage@deepset.ai. All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the reporter of any incident. - -Enforcement Guidelines ----------------------- - -Community leaders will follow these Community Impact Guidelines in determining the consequences for any action -they deem in violation of this Code of Conduct: - -1. Correction - Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. - - Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation - and an explanation of why the behavior was inappropriate. A public apology may be requested. - -2. Warning - Community Impact: A violation through a single incident or series of actions. - - Consequence: A warning with consequences for continued behavior. No interaction with the people involved, - including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. - This includes avoiding interactions in community spaces as well as external channels like social media. - Violating these terms may lead to a temporary or permanent ban. - -3. Temporary Ban - Community Impact: A serious violation of community standards, including sustained inappropriate behavior. - - Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified - period of time. 
No public or private interaction with the people involved, including unsolicited interaction with - those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. - -4. Permanent Ban - Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. - - Consequence: A permanent ban from any sort of public interaction within the community. - -Attribution ------------ - -This Code of Conduct is adapted from the Contributor Covenant, version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by Mozilla’s code of conduct enforcement ladder. - -For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. -Translations are available at https://www.contributor-covenant.org/translations. 
From df86747c1260fdbabbaf74100575403abd82715a Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Mon, 29 Jan 2024 10:43:29 +0100 Subject: [PATCH 23/47] refact!: change import paths (#277) * change import paths * linting * fix protocol interface * fix coverage * moar linting --- integrations/astra/examples/example.py | 6 ++-- .../astra/examples/pipeline_example.py | 6 ++-- integrations/astra/pyproject.toml | 15 ++++---- .../components/retrievers/astra/__init__.py | 6 ++++ .../components/retrievers/astra}/retriever.py | 2 +- .../document_stores/astra}/__init__.py | 2 +- .../document_stores/astra}/astra_client.py | 0 .../document_stores/astra}/document_store.py | 36 +++++-------------- .../document_stores/astra}/errors.py | 0 .../document_stores/astra}/filters.py | 0 integrations/astra/tests/conftest.py | 2 +- .../astra/tests/test_document_store.py | 2 +- integrations/astra/tests/test_retriever.py | 10 +++--- 13 files changed, 39 insertions(+), 48 deletions(-) create mode 100644 integrations/astra/src/haystack_integrations/components/retrievers/astra/__init__.py rename integrations/astra/src/{astra_haystack => haystack_integrations/components/retrievers/astra}/retriever.py (96%) rename integrations/astra/src/{astra_haystack => haystack_integrations/document_stores/astra}/__init__.py (71%) rename integrations/astra/src/{astra_haystack => haystack_integrations/document_stores/astra}/astra_client.py (100%) rename integrations/astra/src/{astra_haystack => haystack_integrations/document_stores/astra}/document_store.py (91%) rename integrations/astra/src/{astra_haystack => haystack_integrations/document_stores/astra}/errors.py (100%) rename integrations/astra/src/{astra_haystack => haystack_integrations/document_stores/astra}/filters.py (100%) diff --git a/integrations/astra/examples/example.py b/integrations/astra/examples/example.py index ac93f43ed..35963868c 100644 --- a/integrations/astra/examples/example.py +++ b/integrations/astra/examples/example.py @@ -8,10 
+8,10 @@ from haystack.components.preprocessors import DocumentCleaner, DocumentSplitter from haystack.components.routers import FileTypeRouter from haystack.components.writers import DocumentWriter -from haystack.document_stores import DuplicatePolicy +from haystack.document_stores.types import DuplicatePolicy -from astra_haystack.document_store import AstraDocumentStore -from astra_haystack.retriever import AstraRetriever +from haystack_integrations.components.retrievers.astra import AstraRetriever +from haystack_integrations.document_stores.astra import AstraDocumentStore logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) diff --git a/integrations/astra/examples/pipeline_example.py b/integrations/astra/examples/pipeline_example.py index fb13c3d93..cacb1eb9f 100644 --- a/integrations/astra/examples/pipeline_example.py +++ b/integrations/astra/examples/pipeline_example.py @@ -7,10 +7,10 @@ from haystack.components.embedders import SentenceTransformersDocumentEmbedder, SentenceTransformersTextEmbedder from haystack.components.generators import OpenAIGenerator from haystack.components.writers import DocumentWriter -from haystack.document_stores import DuplicatePolicy +from haystack.document_stores.types import DuplicatePolicy -from astra_haystack.document_store import AstraDocumentStore -from astra_haystack.retriever import AstraRetriever +from haystack_integrations.components.retrievers.astra import AstraRetriever +from haystack_integrations.document_stores.astra import AstraDocumentStore logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) diff --git a/integrations/astra/pyproject.toml b/integrations/astra/pyproject.toml index b99449e03..6b4e2565d 100644 --- a/integrations/astra/pyproject.toml +++ b/integrations/astra/pyproject.toml @@ -35,6 +35,9 @@ Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/m Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" Source = 
"https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/astra" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/astra-v(?P.*)' @@ -71,7 +74,7 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/astra_haystack tests}" +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -141,17 +144,17 @@ unfixable = [ exclude = ["example"] [tool.ruff.isort] -known-first-party = ["astra_haystack"] +known-first-party = ["haystack_integrations"] [tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports "tests/**/*" = ["PLR2004", "S101", "TID252"] [tool.coverage.run] -source_pkgs = ["astra_haystack", "tests"] +source_pkgs = ["haystack_integrations", "tests"] branch = true parallel = true omit = [ @@ -159,7 +162,7 @@ omit = [ ] [tool.coverage.paths] -astra_haystack = ["src/astra_haystack", "*/astra-store/src/astra_haystack"] +astra_haystack = ["src"] tests = ["tests"] [tool.coverage.report] @@ -178,10 +181,10 @@ markers = [ [[tool.mypy.overrides]] module = [ - "astra_haystack.*", "astra_client.*", "pydantic.*", "haystack.*", + "haystack_integrations.*", "pytest.*" ] ignore_missing_imports = true diff --git a/integrations/astra/src/haystack_integrations/components/retrievers/astra/__init__.py b/integrations/astra/src/haystack_integrations/components/retrievers/astra/__init__.py new file mode 100644 index 000000000..33ef6d15e --- /dev/null +++ b/integrations/astra/src/haystack_integrations/components/retrievers/astra/__init__.py @@ -0,0 +1,6 @@ +# SPDX-FileCopyrightText: 2023-present Anant Corporation +# +# SPDX-License-Identifier: Apache-2.0 +from .retriever import 
AstraRetriever + +__all__ = ["AstraRetriever"] diff --git a/integrations/astra/src/astra_haystack/retriever.py b/integrations/astra/src/haystack_integrations/components/retrievers/astra/retriever.py similarity index 96% rename from integrations/astra/src/astra_haystack/retriever.py rename to integrations/astra/src/haystack_integrations/components/retrievers/astra/retriever.py index 47304df2c..fdf9b0722 100644 --- a/integrations/astra/src/astra_haystack/retriever.py +++ b/integrations/astra/src/haystack_integrations/components/retrievers/astra/retriever.py @@ -6,7 +6,7 @@ from haystack import Document, component, default_from_dict, default_to_dict -from astra_haystack.document_store import AstraDocumentStore +from haystack_integrations.document_stores.astra import AstraDocumentStore @component diff --git a/integrations/astra/src/astra_haystack/__init__.py b/integrations/astra/src/haystack_integrations/document_stores/astra/__init__.py similarity index 71% rename from integrations/astra/src/astra_haystack/__init__.py rename to integrations/astra/src/haystack_integrations/document_stores/astra/__init__.py index 5c99dedf6..4618beb08 100644 --- a/integrations/astra/src/astra_haystack/__init__.py +++ b/integrations/astra/src/haystack_integrations/document_stores/astra/__init__.py @@ -1,6 +1,6 @@ # SPDX-FileCopyrightText: 2023-present Anant Corporation # # SPDX-License-Identifier: Apache-2.0 -from astra_haystack.document_store import AstraDocumentStore +from .document_store import AstraDocumentStore __all__ = ["AstraDocumentStore"] diff --git a/integrations/astra/src/astra_haystack/astra_client.py b/integrations/astra/src/haystack_integrations/document_stores/astra/astra_client.py similarity index 100% rename from integrations/astra/src/astra_haystack/astra_client.py rename to integrations/astra/src/haystack_integrations/document_stores/astra/astra_client.py diff --git a/integrations/astra/src/astra_haystack/document_store.py 
b/integrations/astra/src/haystack_integrations/document_stores/astra/document_store.py similarity index 91% rename from integrations/astra/src/astra_haystack/document_store.py rename to integrations/astra/src/haystack_integrations/document_stores/astra/document_store.py index 6e630bef5..8e03de4a6 100644 --- a/integrations/astra/src/astra_haystack/document_store.py +++ b/integrations/astra/src/haystack_integrations/document_stores/astra/document_store.py @@ -12,9 +12,9 @@ from haystack.document_stores.errors import DuplicateDocumentError, MissingDocumentError from haystack.document_stores.types import DuplicatePolicy -from astra_haystack.astra_client import AstraClient -from astra_haystack.errors import AstraDocumentStoreFilterError -from astra_haystack.filters import _convert_filters +from .astra_client import AstraClient +from .errors import AstraDocumentStoreFilterError +from .filters import _convert_filters logger = logging.getLogger(__name__) @@ -40,7 +40,7 @@ def __init__( astra_application_token: str, astra_keyspace: str, astra_collection: str, - embedding_dim: Optional[int] = 768, + embedding_dim: int = 768, duplicates_policy: DuplicatePolicy = DuplicatePolicy.NONE, similarity: str = "cosine", ): @@ -104,17 +104,12 @@ def to_dict(self) -> Dict[str, Any]: def write_documents( self, documents: List[Document], - index: Optional[str] = None, - batch_size: int = 20, policy: DuplicatePolicy = DuplicatePolicy.NONE, ): """ Indexes documents for later queries. :param documents: a list of Haystack Document objects. - :param index: Optional name of index where the documents shall be written to. - If None, the DocumentStore's default index (self.index) will be used. - :param batch_size: Number of documents that are passed to bulk function at a time. :param policy: Handle duplicate documents based on DuplicatePolicy parameter options. 
Parameter options : (SKIP, OVERWRITE, FAIL, NONE) - `DuplicatePolicy.NONE`: Default policy, If a Document with the same id already exists, @@ -125,26 +120,13 @@ def write_documents( - `DuplicatePolicy.FAIL`: If a Document with the same id already exists, an error is raised. :return: int """ - - if index is None and self.index is None: - msg = "No Astra client provided" - raise ValueError(msg) - - if index is None: - index = self.index - if policy is None or policy == DuplicatePolicy.NONE: if self.duplicates_policy is not None and self.duplicates_policy != DuplicatePolicy.NONE: policy = self.duplicates_policy else: policy = DuplicatePolicy.SKIP - if batch_size > MAX_BATCH_SIZE: - logger.warning( - f"batch_size set to {batch_size}, " - f"but maximum batch_size for Astra when using the JSON API is 20. batch_size set to 20." - ) - batch_size = MAX_BATCH_SIZE + batch_size = MAX_BATCH_SIZE def _convert_input_document(document: Union[dict, Document]): if isinstance(document, Document): @@ -196,7 +178,7 @@ def _convert_input_document(document: Union[dict, Document]): if policy == DuplicatePolicy.SKIP: if len(new_documents) > 0: for batch in _batches(new_documents, batch_size): - inserted_ids = index.insert(batch) # type: ignore + inserted_ids = self.index.insert(batch) # type: ignore insertion_counter += len(inserted_ids) logger.info(f"write_documents inserted documents with id {inserted_ids}") else: @@ -205,7 +187,7 @@ def _convert_input_document(document: Union[dict, Document]): elif policy == DuplicatePolicy.OVERWRITE: if len(new_documents) > 0: for batch in _batches(new_documents, batch_size): - inserted_ids = index.insert(batch) # type: ignore + inserted_ids = self.index.insert(batch) # type: ignore insertion_counter += len(inserted_ids) logger.info(f"write_documents inserted documents with id {inserted_ids}") else: @@ -214,7 +196,7 @@ def _convert_input_document(document: Union[dict, Document]): if len(duplicate_documents) > 0: updated_ids = [] for duplicate_doc in 
duplicate_documents: - updated = index.update_document(duplicate_doc, "_id") # type: ignore + updated = self.index.update_document(duplicate_doc, "_id") # type: ignore if updated: updated_ids.append(duplicate_doc["_id"]) insertion_counter = insertion_counter + len(updated_ids) @@ -225,7 +207,7 @@ def _convert_input_document(document: Union[dict, Document]): elif policy == DuplicatePolicy.FAIL: if len(new_documents) > 0: for batch in _batches(new_documents, batch_size): - inserted_ids = index.insert(batch) # type: ignore + inserted_ids = self.index.insert(batch) # type: ignore insertion_counter = insertion_counter + len(inserted_ids) logger.info(f"write_documents inserted documents with id {inserted_ids}") else: diff --git a/integrations/astra/src/astra_haystack/errors.py b/integrations/astra/src/haystack_integrations/document_stores/astra/errors.py similarity index 100% rename from integrations/astra/src/astra_haystack/errors.py rename to integrations/astra/src/haystack_integrations/document_stores/astra/errors.py diff --git a/integrations/astra/src/astra_haystack/filters.py b/integrations/astra/src/haystack_integrations/document_stores/astra/filters.py similarity index 100% rename from integrations/astra/src/astra_haystack/filters.py rename to integrations/astra/src/haystack_integrations/document_stores/astra/filters.py diff --git a/integrations/astra/tests/conftest.py b/integrations/astra/tests/conftest.py index 02f5d7cad..274b38352 100644 --- a/integrations/astra/tests/conftest.py +++ b/integrations/astra/tests/conftest.py @@ -3,7 +3,7 @@ import pytest from haystack.document_stores.types import DuplicatePolicy -from astra_haystack.document_store import AstraDocumentStore +from haystack_integrations.document_stores.astra import AstraDocumentStore @pytest.fixture diff --git a/integrations/astra/tests/test_document_store.py b/integrations/astra/tests/test_document_store.py index f203ab721..019a66398 100644 --- a/integrations/astra/tests/test_document_store.py +++ 
b/integrations/astra/tests/test_document_store.py @@ -10,7 +10,7 @@ from haystack.document_stores.types import DuplicatePolicy from haystack.testing.document_store import DocumentStoreBaseTests -from astra_haystack.document_store import AstraDocumentStore +from haystack_integrations.document_stores.astra import AstraDocumentStore @pytest.mark.skipif( diff --git a/integrations/astra/tests/test_retriever.py b/integrations/astra/tests/test_retriever.py index 2212d44fd..eb9260590 100644 --- a/integrations/astra/tests/test_retriever.py +++ b/integrations/astra/tests/test_retriever.py @@ -5,7 +5,7 @@ import pytest -from astra_haystack.retriever import AstraRetriever +from haystack_integrations.components.retrievers.astra import AstraRetriever @pytest.mark.skipif( @@ -16,7 +16,7 @@ def test_retriever_to_json(document_store): retriever = AstraRetriever(document_store, filters={"foo": "bar"}, top_k=99) assert retriever.to_dict() == { - "type": "astra_haystack.retriever.AstraRetriever", + "type": "haystack_integrations.components.retrievers.astra.retriever.AstraRetriever", "init_parameters": { "filters": {"foo": "bar"}, "top_k": 99, @@ -30,7 +30,7 @@ def test_retriever_to_json(document_store): "embedding_dim": 768, "similarity": "cosine", }, - "type": "astra_haystack.document_store.AstraDocumentStore", + "type": "haystack_integrations.document_stores.astra.document_store.AstraDocumentStore", }, }, } @@ -43,7 +43,7 @@ def test_retriever_to_json(document_store): @pytest.mark.integration def test_retriever_from_json(): data = { - "type": "astra_haystack.retriever.AstraRetriever", + "type": "haystack_integrations.components.retrievers.astra.retriever.AstraRetriever", "init_parameters": { "filters": {"bar": "baz"}, "top_k": 42, @@ -58,7 +58,7 @@ def test_retriever_from_json(): "embedding_dim": 768, "similarity": "cosine", }, - "type": "astra_haystack.document_store.AstraDocumentStore", + "type": "haystack_integrations.document_stores.astra.document_store.AstraDocumentStore", }, 
}, } From c23a41c2e320584a298db9f29f08789768c32479 Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Mon, 29 Jan 2024 11:07:26 +0100 Subject: [PATCH 24/47] fix linter errors (#282) --- .../document_stores/qdrant/filters.py | 24 +++++---- integrations/qdrant/tests/test_filters.py | 21 +++----- .../qdrant/tests/test_legacy_filters.py | 51 +++++++------------ 3 files changed, 39 insertions(+), 57 deletions(-) diff --git a/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py index 21e29e570..77d800853 100644 --- a/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py +++ b/integrations/qdrant/src/haystack_integrations/document_stores/qdrant/filters.py @@ -113,9 +113,11 @@ def _build_in_condition(self, key: str, value: List[models.ValueVariants]) -> mo raise FilterError(msg) return models.Filter( should=[ - models.FieldCondition(key=key, match=models.MatchText(text=item)) - if isinstance(item, str) and " " not in item - else models.FieldCondition(key=key, match=models.MatchValue(value=item)) + ( + models.FieldCondition(key=key, match=models.MatchText(text=item)) + if isinstance(item, str) and " " not in item + else models.FieldCondition(key=key, match=models.MatchValue(value=item)) + ) for item in value ] ) @@ -123,9 +125,11 @@ def _build_in_condition(self, key: str, value: List[models.ValueVariants]) -> mo def _build_ne_condition(self, key: str, value: models.ValueVariants) -> models.Condition: return models.Filter( must_not=[ - models.FieldCondition(key=key, match=models.MatchText(text=value)) - if isinstance(value, str) and " " not in value - else models.FieldCondition(key=key, match=models.MatchValue(value=value)) + ( + models.FieldCondition(key=key, match=models.MatchText(text=value)) + if isinstance(value, str) and " " not in value + else models.FieldCondition(key=key, match=models.MatchValue(value=value)) + ) ] ) @@ -135,9 
+139,11 @@ def _build_nin_condition(self, key: str, value: List[models.ValueVariants]) -> m raise FilterError(msg) return models.Filter( must_not=[ - models.FieldCondition(key=key, match=models.MatchText(text=item)) - if isinstance(item, str) and " " not in item - else models.FieldCondition(key=key, match=models.MatchValue(value=item)) + ( + models.FieldCondition(key=key, match=models.MatchText(text=item)) + if isinstance(item, str) and " " not in item + else models.FieldCondition(key=key, match=models.MatchValue(value=item)) + ) for item in value ] ) diff --git a/integrations/qdrant/tests/test_filters.py b/integrations/qdrant/tests/test_filters.py index 848d799e4..74bac76ad 100644 --- a/integrations/qdrant/tests/test_filters.py +++ b/integrations/qdrant/tests/test_filters.py @@ -86,29 +86,22 @@ def test_comparison_less_than_equal_with_none(self, document_store, filterable_d # ======== ========================== ======== @pytest.mark.skip(reason="Qdrant doesn't support comparision with dataframe") - def test_comparison_equal_with_dataframe(self, document_store, filterable_docs): - ... + def test_comparison_equal_with_dataframe(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Qdrant doesn't support comparision with dataframe") - def test_comparison_not_equal_with_dataframe(self, document_store, filterable_docs): - ... + def test_comparison_not_equal_with_dataframe(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Qdrant doesn't support comparision with Dates") - def test_comparison_greater_than_with_iso_date(self, document_store, filterable_docs): - ... + def test_comparison_greater_than_with_iso_date(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Qdrant doesn't support comparision with Dates") - def test_comparison_greater_than_equal_with_iso_date(self, document_store, filterable_docs): - ... + def test_comparison_greater_than_equal_with_iso_date(self, document_store, filterable_docs): ... 
@pytest.mark.skip(reason="Qdrant doesn't support comparision with Dates") - def test_comparison_less_than_with_iso_date(self, document_store, filterable_docs): - ... + def test_comparison_less_than_with_iso_date(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Qdrant doesn't support comparision with Dates") - def test_comparison_less_than_equal_with_iso_date(self, document_store, filterable_docs): - ... + def test_comparison_less_than_equal_with_iso_date(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Cannot distinguish errors yet") - def test_missing_top_level_operator_key(self, document_store, filterable_docs): - ... + def test_missing_top_level_operator_key(self, document_store, filterable_docs): ... diff --git a/integrations/qdrant/tests/test_legacy_filters.py b/integrations/qdrant/tests/test_legacy_filters.py index ff01c3971..60f1fad2b 100644 --- a/integrations/qdrant/tests/test_legacy_filters.py +++ b/integrations/qdrant/tests/test_legacy_filters.py @@ -44,8 +44,7 @@ def test_filter_simple_metadata_value(self, document_store: DocumentStore, filte self.assert_documents_are_equal(result, [doc for doc in filterable_docs if doc.meta.get("page") == "100"]) @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_filter_document_dataframe(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_filter_document_dataframe(self, document_store: DocumentStore, filterable_docs: List[Document]): ... 
def test_eq_filter_explicit(self, document_store: DocumentStore, filterable_docs: List[Document]): document_store.write_documents(filterable_docs) @@ -58,12 +57,10 @@ def test_eq_filter_implicit(self, document_store: DocumentStore, filterable_docs self.assert_documents_are_equal(result, [doc for doc in filterable_docs if doc.meta.get("page") == "100"]) @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_eq_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_eq_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): ... @pytest.mark.skip(reason="Embedding filtering is not supported in Qdrant") - def test_eq_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_eq_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): ... # LegacyFilterDocumentsNotEqualTest @@ -73,12 +70,10 @@ def test_ne_filter(self, document_store: DocumentStore, filterable_docs: List[Do self.assert_documents_are_equal(result, [doc for doc in filterable_docs if doc.meta.get("page") != "100"]) @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_ne_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_ne_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): ... @pytest.mark.skip(reason="Embedding filtering is not supported in Qdrant") - def test_ne_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_ne_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): ... 
# LegacyFilterDocumentsInTest @@ -122,22 +117,18 @@ def test_in_filter_implicit(self, document_store: DocumentStore, filterable_docs ) @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_in_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_in_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): ... @pytest.mark.skip(reason="Embedding filtering is not supported in Qdrant") - def test_in_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_in_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): ... # LegacyFilterDocumentsNotInTest @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_nin_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_nin_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): ... @pytest.mark.skip(reason="Embedding filtering is not supported in Qdrant") - def test_nin_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_nin_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): ... def test_nin_filter(self, document_store: DocumentStore, filterable_docs: List[Document]): document_store.write_documents(filterable_docs) @@ -163,12 +154,10 @@ def test_gt_filter_non_numeric(self, document_store: DocumentStore, filterable_d document_store.filter_documents(filters={"meta.page": {"$gt": "100"}}) @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_gt_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_gt_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): ... 
@pytest.mark.skip(reason="Embedding filtering is not supported in Qdrant") - def test_gt_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_gt_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): ... # LegacyFilterDocumentsGreaterThanEqualTest @@ -186,12 +175,10 @@ def test_gte_filter_non_numeric(self, document_store: DocumentStore, filterable_ document_store.filter_documents(filters={"meta.page": {"$gte": "100"}}) @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_gte_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_gte_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): ... @pytest.mark.skip(reason="Embedding filtering is not supported in Qdrant") - def test_gte_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_gte_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): ... # LegacyFilterDocumentsLessThanTest @@ -209,12 +196,10 @@ def test_lt_filter_non_numeric(self, document_store: DocumentStore, filterable_d document_store.filter_documents(filters={"meta.page": {"$lt": "100"}}) @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_lt_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_lt_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): ... @pytest.mark.skip(reason="Embedding filtering is not supported in Qdrant") - def test_lt_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_lt_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): ... 
# LegacyFilterDocumentsLessThanEqualTest @@ -232,12 +217,10 @@ def test_lte_filter_non_numeric(self, document_store: DocumentStore, filterable_ document_store.filter_documents(filters={"meta.page": {"$lte": "100"}}) @pytest.mark.skip(reason="Dataframe filtering is not supported in Qdrant") - def test_lte_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_lte_filter_table(self, document_store: DocumentStore, filterable_docs: List[Document]): ... @pytest.mark.skip(reason="Embedding filtering is not supported in Qdrant") - def test_lte_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): - ... + def test_lte_filter_embedding(self, document_store: DocumentStore, filterable_docs: List[Document]): ... # LegacyFilterDocumentsSimpleLogicalTest From 2b608af8f4896d38b4e520c2b46bdfee9402460e Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Mon, 29 Jan 2024 11:43:24 +0100 Subject: [PATCH 25/47] fix linter (#281) --- .../pinecone/tests/test_document_store.py | 6 ++-- integrations/pinecone/tests/test_filters.py | 33 +++++++------------ 2 files changed, 13 insertions(+), 26 deletions(-) diff --git a/integrations/pinecone/tests/test_document_store.py b/integrations/pinecone/tests/test_document_store.py index a856cde86..cd1bb0db3 100644 --- a/integrations/pinecone/tests/test_document_store.py +++ b/integrations/pinecone/tests/test_document_store.py @@ -80,12 +80,10 @@ def test_write_documents(self, document_store: PineconeDocumentStore): assert document_store.write_documents(docs) == 1 @pytest.mark.skip(reason="Pinecone only supports UPSERT operations") - def test_write_documents_duplicate_fail(self, document_store: PineconeDocumentStore): - ... + def test_write_documents_duplicate_fail(self, document_store: PineconeDocumentStore): ... 
@pytest.mark.skip(reason="Pinecone only supports UPSERT operations") - def test_write_documents_duplicate_skip(self, document_store: PineconeDocumentStore): - ... + def test_write_documents_duplicate_skip(self, document_store: PineconeDocumentStore): ... def test_init_fails_wo_api_key(self, monkeypatch): api_key = None diff --git a/integrations/pinecone/tests/test_filters.py b/integrations/pinecone/tests/test_filters.py index a38482a26..05796cf20 100644 --- a/integrations/pinecone/tests/test_filters.py +++ b/integrations/pinecone/tests/test_filters.py @@ -38,45 +38,34 @@ def assert_documents_are_equal(self, received: List[Document], expected: List[Do assert received_doc.embedding == pytest.approx(expected_doc.embedding) @pytest.mark.skip(reason="Pinecone does not support comparison with null values") - def test_comparison_equal_with_none(self, document_store, filterable_docs): - ... + def test_comparison_equal_with_none(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with null values") - def test_comparison_not_equal_with_none(self, document_store, filterable_docs): - ... + def test_comparison_not_equal_with_none(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with dates") - def test_comparison_greater_than_with_iso_date(self, document_store, filterable_docs): - ... + def test_comparison_greater_than_with_iso_date(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with null values") - def test_comparison_greater_than_with_none(self, document_store, filterable_docs): - ... + def test_comparison_greater_than_with_none(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with dates") - def test_comparison_greater_than_equal_with_iso_date(self, document_store, filterable_docs): - ... 
+ def test_comparison_greater_than_equal_with_iso_date(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with null values") - def test_comparison_greater_than_equal_with_none(self, document_store, filterable_docs): - ... + def test_comparison_greater_than_equal_with_none(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with dates") - def test_comparison_less_than_with_iso_date(self, document_store, filterable_docs): - ... + def test_comparison_less_than_with_iso_date(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with null values") - def test_comparison_less_than_with_none(self, document_store, filterable_docs): - ... + def test_comparison_less_than_with_none(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with dates") - def test_comparison_less_than_equal_with_iso_date(self, document_store, filterable_docs): - ... + def test_comparison_less_than_equal_with_iso_date(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support comparison with null values") - def test_comparison_less_than_equal_with_none(self, document_store, filterable_docs): - ... + def test_comparison_less_than_equal_with_none(self, document_store, filterable_docs): ... @pytest.mark.skip(reason="Pinecone does not support the 'not' operator") - def test_not_operator(self, document_store, filterable_docs): - ... + def test_not_operator(self, document_store, filterable_docs): ... 
From 01f08b951e5a119542646ac4535d272e2ebbae22 Mon Sep 17 00:00:00 2001 From: ZanSara Date: Tue, 30 Jan 2024 09:56:01 +0100 Subject: [PATCH 26/47] feat: Sagemaker integration: `SagemakerGenerator` (#276) * basic generator and tests * readme * fix import paths * improve tests * to/from dict test * review feedback * readme * quotes * typo * readme * labeler --- .github/labeler.yml | 5 + .github/workflows/amazon_sagemaker.yml | 56 ++++ integrations/amazon_sagemaker/LICENSE.txt | 73 ++++++ integrations/amazon_sagemaker/README.md | 52 ++++ integrations/amazon_sagemaker/pyproject.toml | 177 +++++++++++++ .../generators/amazon_sagemaker/__init__.py | 6 + .../generators/amazon_sagemaker/errors.py | 46 ++++ .../generators/amazon_sagemaker/sagemaker.py | 224 ++++++++++++++++ .../amazon_sagemaker/tests/__init__.py | 3 + .../amazon_sagemaker/tests/test_sagemaker.py | 243 ++++++++++++++++++ 10 files changed, 885 insertions(+) create mode 100644 .github/workflows/amazon_sagemaker.yml create mode 100644 integrations/amazon_sagemaker/LICENSE.txt create mode 100644 integrations/amazon_sagemaker/README.md create mode 100644 integrations/amazon_sagemaker/pyproject.toml create mode 100644 integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/__init__.py create mode 100644 integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/errors.py create mode 100644 integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/sagemaker.py create mode 100644 integrations/amazon_sagemaker/tests/__init__.py create mode 100644 integrations/amazon_sagemaker/tests/test_sagemaker.py diff --git a/.github/labeler.yml b/.github/labeler.yml index ba74c43a2..f5eaa3374 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -4,6 +4,11 @@ integration:amazon-bedrock: - any-glob-to-any-file: "integrations/amazon_bedrock/**/*" - any-glob-to-any-file: ".github/workflows/amazon_bedrock.yml" 
+integration:amazon-sagemaker: + - changed-files: + - any-glob-to-any-file: "integrations/amazon_sagemaker/**/*" + - any-glob-to-any-file: ".github/workflows/amazon_sagemaker.yml" + integration:astra: - changed-files: - any-glob-to-any-file: "integrations/astra/**/*" diff --git a/.github/workflows/amazon_sagemaker.yml b/.github/workflows/amazon_sagemaker.yml new file mode 100644 index 000000000..88f397c85 --- /dev/null +++ b/.github/workflows/amazon_sagemaker.yml @@ -0,0 +1,56 @@ +# This workflow comes from https://github.com/ofek/hatch-mypyc +# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml +name: Test / amazon-sagemaker + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + paths: + - "integrations/amazon_sagemaker/**" + - ".github/workflows/amazon_sagemaker.yml" + +defaults: + run: + working-directory: integrations/amazon_sagemaker + +concurrency: + group: amazon-sagemaker-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHONUNBUFFERED: "1" + FORCE_COLOR: "1" + +jobs: + run: + name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.9", "3.10"] + + steps: + - name: Support longpaths + if: matrix.os == 'windows-latest' + working-directory: . 
+ run: git config --system core.longpaths true + + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Hatch + run: pip install --upgrade hatch + + - name: Lint + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run lint:all + + - name: Run tests + run: hatch run cov diff --git a/integrations/amazon_sagemaker/LICENSE.txt b/integrations/amazon_sagemaker/LICENSE.txt new file mode 100644 index 000000000..137069b82 --- /dev/null +++ b/integrations/amazon_sagemaker/LICENSE.txt @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 
+ +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/integrations/amazon_sagemaker/README.md b/integrations/amazon_sagemaker/README.md new file mode 100644 index 000000000..1ea01871d --- /dev/null +++ b/integrations/amazon_sagemaker/README.md @@ -0,0 +1,52 @@ +# amazon-sagemaker-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/amazon-sagemaker-haystack.svg)](https://pypi.org/project/amazon-sagemaker-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/amazon-sagemaker-haystack.svg)](https://pypi.org/project/amazon-sagemaker-haystack) + +----- + +**Table of Contents** + +- [Installation](#installation) +- [Contributing](#contributing) +- [License](#license) + +## Installation + +```console +pip install amazon-sagemaker-haystack +``` + +## Contributing + +`hatch` is the best way to interact with this project, to install it: +```sh +pip install hatch +``` + +With `hatch` installed, to run all the tests: +``` +hatch run test +``` + +> Note: You need to export your AWS credentials for Sagemaker integration tests to run (`AWS_ACCESS_KEY_ID` and +`AWS_SECRET_ACCESS_KEY`). If those are missing, the integration tests will be skipped. 
+ +To only run unit tests: +``` +hatch run test -m "not integration" +``` + +To only run integration tests: +``` +hatch run test -m "integration" +``` + +To run the linters `ruff` and `mypy`: +``` +hatch run lint:all +``` + +## License + +`amazon-sagemaker-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. diff --git a/integrations/amazon_sagemaker/pyproject.toml b/integrations/amazon_sagemaker/pyproject.toml new file mode 100644 index 000000000..916307156 --- /dev/null +++ b/integrations/amazon_sagemaker/pyproject.toml @@ -0,0 +1,177 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "amazon-sagemaker-haystack" +dynamic = ["version"] +description = 'An integration of Amazon Sagemaker as an SagemakerGenerator component.' +readme = "README.md" +requires-python = ">=3.8" +license = "Apache-2.0" +keywords = [] +authors = [ + { name = "deepset GmbH", email = "info@deepset.ai" }, +] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = [ + "haystack-ai", + "boto3>=1.28.57", +] + +[project.urls] +Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_sagemaker_haystack#readme" +Issues = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_sagemaker_haystack/issues" +Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_sagemaker_haystack" 
+ +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + +[tool.hatch.version] +source = "vcs" +tag-pattern = 'integrations\/amazon_sagemaker-v(?P.*)' + +[tool.hatch.version.raw-options] +root = "../.." +git_describe_command = 'git describe --tags --match="integrations/amazon_sagemaker-v[0-9]*"' + +[tool.hatch.envs.default] +dependencies = [ + "coverage[toml]>=6.5", + "pytest", +] +[tool.hatch.envs.default.scripts] +test = "pytest {args:tests}" +test-cov = "coverage run -m pytest {args:tests}" +cov-report = [ + "- coverage combine", + "coverage report", +] +cov = [ + "test-cov", + "cov-report", +] + +[[tool.hatch.envs.all.matrix]] +python = ["3.7", "3.8", "3.9", "3.10", "3.11"] + +[tool.hatch.envs.lint] +detached = true +dependencies = [ + "black>=23.1.0", + "mypy>=1.0.0", + "ruff>=0.0.243", +] +[tool.hatch.envs.lint.scripts] +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" +style = [ + "ruff {args:.}", + "black --check --diff {args:.}", +] +fmt = [ + "black {args:.}", + "ruff --fix {args:.}", + "style", +] +all = [ + "style", + "typing", +] + +[tool.black] +target-version = ["py37"] +line-length = 120 +skip-string-normalization = true + +[tool.ruff] +target-version = "py37" +line-length = 120 +select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "FBT", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + "RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +ignore = [ + # Import sorting doesn't seem to work + "I001", + # Allow non-abstract empty methods in abstract base classes + "B027", + # Allow boolean positional values in function calls, like `dict.get(... 
True)` + "FBT003", + # Ignore checks for possible passwords + "S105", "S106", "S107", + # Ignore complexity + "C901", "PLR0911", "PLR0912", "PLR0913", "PLR0915", +] +unfixable = [ + # Don't touch unused imports + "F401", +] + +[tool.ruff.isort] +known-first-party = ["haystack_integrations"] + +[tool.ruff.flake8-tidy-imports] +ban-relative-imports = "parents" + +[tool.ruff.per-file-ignores] +# Tests can use magic values, assertions, and relative imports +"tests/**/*" = ["PLR2004", "S101", "TID252"] + +[tool.coverage.run] +branch = true +parallel = true + +[tool.coverage.paths] +amazon_sagemaker_haystack = ["src"] +tests = ["tests"] + +[tool.coverage.report] +exclude_lines = [ + "no cov", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] +[[tool.mypy.overrides]] +module = [ + "haystack.*", + "haystack_integrations.*", + "pytest.*", + "numpy.*", +] +ignore_missing_imports = true \ No newline at end of file diff --git a/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/__init__.py b/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/__init__.py new file mode 100644 index 000000000..0fe45a8a1 --- /dev/null +++ b/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/__init__.py @@ -0,0 +1,6 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from haystack_integrations.components.generators.amazon_sagemaker.sagemaker import SagemakerGenerator + +__all__ = ["SagemakerGenerator"] diff --git a/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/errors.py b/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/errors.py new file mode 100644 index 000000000..6c13d0fcb --- /dev/null +++ b/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/errors.py @@ -0,0 +1,46 @@ +from 
typing import Optional + + +class SagemakerError(Exception): + """ + Error generated by the Amazon Sagemaker integration. + """ + + def __init__( + self, + message: Optional[str] = None, + ): + super().__init__() + if message: + self.message = message + + def __getattr__(self, attr): + # If self.__cause__ is None, it will raise the expected AttributeError + getattr(self.__cause__, attr) + + def __str__(self): + return self.message + + def __repr__(self): + return str(self) + + +class AWSConfigurationError(SagemakerError): + """Exception raised when AWS is not configured correctly""" + + def __init__(self, message: Optional[str] = None): + super().__init__(message=message) + + +class SagemakerNotReadyError(SagemakerError): + """Exception for issues that occur during Sagemaker inference""" + + def __init__(self, message: Optional[str] = None): + super().__init__(message=message) + + +class SagemakerInferenceError(SagemakerError): + """Exception for issues that occur during Sagemaker inference""" + + def __init__(self, message: Optional[str] = None): + super().__init__(message=message) diff --git a/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/sagemaker.py b/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/sagemaker.py new file mode 100644 index 000000000..35e54a055 --- /dev/null +++ b/integrations/amazon_sagemaker/src/haystack_integrations/components/generators/amazon_sagemaker/sagemaker.py @@ -0,0 +1,224 @@ +import json +import logging +import os +from typing import Any, ClassVar, Dict, List, Optional + +import requests +from haystack import component, default_from_dict, default_to_dict +from haystack.lazy_imports import LazyImport +from haystack_integrations.components.generators.amazon_sagemaker.errors import ( + AWSConfigurationError, + SagemakerInferenceError, + SagemakerNotReadyError, +) + +with LazyImport(message="Run 'pip install boto3'") as boto3_import: + import 
boto3 # type: ignore + from botocore.client import BaseClient # type: ignore + + +logger = logging.getLogger(__name__) + + +MODEL_NOT_READY_STATUS_CODE = 429 + + +@component +class SagemakerGenerator: + """ + Enables text generation using Sagemaker. It supports Large Language Models (LLMs) hosted and deployed on a SageMaker + Inference Endpoint. For guidance on how to deploy a model to SageMaker, refer to the + [SageMaker JumpStart foundation models documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-use.html). + + **Example:** + + First export your AWS credentials as environment variables: + ```bash + export AWS_ACCESS_KEY_ID= + export AWS_SECRET_ACCESS_KEY= + ``` + (Note: you may also need to set the session token and region name, depending on your AWS configuration) + + Then you can use the generator as follows: + ```python + from haystack.components.generators.sagemaker import SagemakerGenerator + generator = SagemakerGenerator(model="jumpstart-dft-hf-llm-falcon-7b-instruct-bf16") + generator.warm_up() + response = generator.run("What's Natural Language Processing? Be brief.") + print(response) + ``` + ``` + >> {'replies': ['Natural Language Processing (NLP) is a branch of artificial intelligence that focuses on + >> the interaction between computers and human language. 
It involves enabling computers to understand, interpret, + >> and respond to natural human language in a way that is both meaningful and useful.'], 'meta': [{}]} + ``` + """ + + model_generation_keys: ClassVar = ["generated_text", "generation"] + + def __init__( + self, + model: str, + aws_access_key_id_var: str = "AWS_ACCESS_KEY_ID", + aws_secret_access_key_var: str = "AWS_SECRET_ACCESS_KEY", + aws_session_token_var: str = "AWS_SESSION_TOKEN", + aws_region_name_var: str = "AWS_REGION", + aws_profile_name_var: str = "AWS_PROFILE", + aws_custom_attributes: Optional[Dict[str, Any]] = None, + generation_kwargs: Optional[Dict[str, Any]] = None, + ): + """ + Instantiates the session with SageMaker. + + :param model: The name for SageMaker Model Endpoint. + :param aws_access_key_id_var: The name of the env var where the AWS access key ID is stored. + :param aws_secret_access_key_var: The name of the env var where the AWS secret access key is stored. + :param aws_session_token_var: The name of the env var where the AWS session token is stored. + :param aws_region_name_var: The name of the env var where the AWS region name is stored. + :param aws_profile_name_var: The name of the env var where the AWS profile name is stored. + :param aws_custom_attributes: Custom attributes to be passed to SageMaker, for example `{"accept_eula": True}` + in case of Llama-2 models. + :param generation_kwargs: Additional keyword arguments for text generation. For a list of supported parameters + see your model's documentation page, for example here for HuggingFace models: + https://huggingface.co/blog/sagemaker-huggingface-llm#4-run-inference-and-chat-with-our-model + + Specifically, Llama-2 models support the following inference payload parameters: + + - `max_new_tokens`: Model generates text until the output length (excluding the input context length) + reaches `max_new_tokens`. If specified, it must be a positive integer. + - `temperature`: Controls the randomness in the output. 
Higher temperature results in output sequence with + low-probability words and lower temperature results in output sequence with high-probability words. + If `temperature=0`, it results in greedy decoding. If specified, it must be a positive float. + - `top_p`: In each step of text generation, sample from the smallest possible set of words with cumulative + probability `top_p`. If specified, it must be a float between 0 and 1. + - `return_full_text`: If `True`, input text will be part of the output generated text. If specified, it must + be boolean. The default value for it is `False`. + """ + self.model = model + self.aws_access_key_id_var = aws_access_key_id_var + self.aws_secret_access_key_var = aws_secret_access_key_var + self.aws_session_token_var = aws_session_token_var + self.aws_region_name_var = aws_region_name_var + self.aws_profile_name_var = aws_profile_name_var + self.aws_custom_attributes = aws_custom_attributes or {} + self.generation_kwargs = generation_kwargs or {"max_new_tokens": 1024} + self.client: Optional[BaseClient] = None + + if not os.getenv(self.aws_access_key_id_var) or not os.getenv(self.aws_secret_access_key_var): + msg = ( + f"Please provide AWS credentials via environment variables '{self.aws_access_key_id_var}' and " + f"'{self.aws_secret_access_key_var}'." + ) + raise AWSConfigurationError(msg) + + def _get_telemetry_data(self) -> Dict[str, Any]: + """ + Data that is sent to Posthog for usage analytics. + """ + return {"model": self.model} + + def to_dict(self) -> Dict[str, Any]: + """ + Serialize the object to a dictionary. 
+ """ + return default_to_dict( + self, + model=self.model, + aws_access_key_id_var=self.aws_access_key_id_var, + aws_secret_access_key_var=self.aws_secret_access_key_var, + aws_session_token_var=self.aws_session_token_var, + aws_region_name_var=self.aws_region_name_var, + aws_profile_name_var=self.aws_profile_name_var, + aws_custom_attributes=self.aws_custom_attributes, + generation_kwargs=self.generation_kwargs, + ) + + @classmethod + def from_dict(cls, data) -> "SagemakerGenerator": + """ + Deserialize the dictionary into an instance of SagemakerGenerator. + """ + return default_from_dict(cls, data) + + def warm_up(self): + """ + Initializes the SageMaker Inference client. + """ + boto3_import.check() + try: + session = boto3.Session( + aws_access_key_id=os.getenv(self.aws_access_key_id_var), + aws_secret_access_key=os.getenv(self.aws_secret_access_key_var), + aws_session_token=os.getenv(self.aws_session_token_var), + region_name=os.getenv(self.aws_region_name_var), + profile_name=os.getenv(self.aws_profile_name_var), + ) + self.client = session.client("sagemaker-runtime") + except Exception as e: + msg = ( + f"Could not connect to SageMaker Inference Endpoint '{self.model}'." + f"Make sure the Endpoint exists and AWS environment is configured." + ) + raise AWSConfigurationError(msg) from e + + @component.output_types(replies=List[str], meta=List[Dict[str, Any]]) + def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None): + """ + Invoke the text generation inference based on the provided messages and generation parameters. + + :param prompt: The string prompt to use for text generation. + :param generation_kwargs: Additional keyword arguments for text generation. These parameters will + potentially override the parameters passed in the `__init__` method. + + :return: A list of strings containing the generated responses and a list of dictionaries containing the metadata + for each response. 
+ """ + if self.client is None: + msg = "SageMaker Inference client is not initialized. Please call warm_up() first." + raise ValueError(msg) + + generation_kwargs = generation_kwargs or self.generation_kwargs + custom_attributes = ";".join( + f"{k}={str(v).lower() if isinstance(v, bool) else str(v)}" for k, v in self.aws_custom_attributes.items() + ) + try: + body = json.dumps({"inputs": prompt, "parameters": generation_kwargs}) + response = self.client.invoke_endpoint( + EndpointName=self.model, + Body=body, + ContentType="application/json", + Accept="application/json", + CustomAttributes=custom_attributes, + ) + response_json = response.get("Body").read().decode("utf-8") + output: Dict[str, Dict[str, Any]] = json.loads(response_json) + + # The output might be either a list of dictionaries or a single dictionary + list_output: List[Dict[str, Any]] + if output and isinstance(output, dict): + list_output = [output] + elif isinstance(output, list) and all(isinstance(o, dict) for o in output): + list_output = output + else: + msg = f"Unexpected model response type: {type(output)}" + raise ValueError(msg) + + # The key where the replies are stored changes from model to model, so we need to look for it. + # All other keys in the response are added to the metadata. + # Unfortunately every model returns different metadata, most of them return none at all, + # so we can't replicate the metadata structure of other generators. + for key in self.model_generation_keys: + if key in list_output[0]: + break + replies = [o.pop(key, None) for o in list_output] + + return {"replies": replies, "meta": list_output * len(replies)} + + except requests.HTTPError as err: + res = err.response + if res.status_code == MODEL_NOT_READY_STATUS_CODE: + msg = f"Sagemaker model not ready: {res.text}" + raise SagemakerNotReadyError(msg) from err + + msg = f"SageMaker Inference returned an error. 
Status code: {res.status_code} Response body: {res.text}" + raise SagemakerInferenceError(msg, status_code=res.status_code) from err diff --git a/integrations/amazon_sagemaker/tests/__init__.py b/integrations/amazon_sagemaker/tests/__init__.py new file mode 100644 index 000000000..e873bc332 --- /dev/null +++ b/integrations/amazon_sagemaker/tests/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/amazon_sagemaker/tests/test_sagemaker.py b/integrations/amazon_sagemaker/tests/test_sagemaker.py new file mode 100644 index 000000000..a22634be1 --- /dev/null +++ b/integrations/amazon_sagemaker/tests/test_sagemaker.py @@ -0,0 +1,243 @@ +import os +from unittest.mock import Mock + +import pytest +from haystack_integrations.components.generators.amazon_sagemaker import SagemakerGenerator +from haystack_integrations.components.generators.amazon_sagemaker.errors import AWSConfigurationError + + +class TestSagemakerGenerator: + def test_init_default(self, monkeypatch): + monkeypatch.setenv("AWS_ACCESS_KEY_ID", "test-access-key") + monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "test-secret-key") + + component = SagemakerGenerator(model="test-model") + assert component.model == "test-model" + assert component.aws_access_key_id_var == "AWS_ACCESS_KEY_ID" + assert component.aws_secret_access_key_var == "AWS_SECRET_ACCESS_KEY" + assert component.aws_session_token_var == "AWS_SESSION_TOKEN" + assert component.aws_region_name_var == "AWS_REGION" + assert component.aws_profile_name_var == "AWS_PROFILE" + assert component.aws_custom_attributes == {} + assert component.generation_kwargs == {"max_new_tokens": 1024} + assert component.client is None + + def test_init_fail_wo_access_key_or_secret_key(self, monkeypatch): + monkeypatch.delenv("AWS_ACCESS_KEY_ID", raising=False) + monkeypatch.delenv("AWS_SECRET_ACCESS_KEY", raising=False) + with pytest.raises(AWSConfigurationError): + 
SagemakerGenerator(model="test-model") + + monkeypatch.setenv("AWS_ACCESS_KEY_ID", "test-access-key") + monkeypatch.delenv("AWS_SECRET_ACCESS_KEY", raising=False) + with pytest.raises(AWSConfigurationError): + SagemakerGenerator(model="test-model") + + monkeypatch.delenv("AWS_ACCESS_KEY_ID", raising=False) + monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "test-secret-key") + with pytest.raises(AWSConfigurationError): + SagemakerGenerator(model="test-model") + + def test_init_with_parameters(self, monkeypatch): + monkeypatch.setenv("MY_ACCESS_KEY_ID", "test-access-key") + monkeypatch.setenv("MY_SECRET_ACCESS_KEY", "test-secret-key") + + component = SagemakerGenerator( + model="test-model", + aws_access_key_id_var="MY_ACCESS_KEY_ID", + aws_secret_access_key_var="MY_SECRET_ACCESS_KEY", + aws_session_token_var="MY_SESSION_TOKEN", + aws_region_name_var="MY_REGION", + aws_profile_name_var="MY_PROFILE", + aws_custom_attributes={"custom": "attr"}, + generation_kwargs={"generation": "kwargs"}, + ) + assert component.model == "test-model" + assert component.aws_access_key_id_var == "MY_ACCESS_KEY_ID" + assert component.aws_secret_access_key_var == "MY_SECRET_ACCESS_KEY" + assert component.aws_session_token_var == "MY_SESSION_TOKEN" + assert component.aws_region_name_var == "MY_REGION" + assert component.aws_profile_name_var == "MY_PROFILE" + assert component.aws_custom_attributes == {"custom": "attr"} + assert component.generation_kwargs == {"generation": "kwargs"} + assert component.client is None + + def test_to_from_dict(self, monkeypatch): + monkeypatch.setenv("MY_ACCESS_KEY_ID", "test-access-key") + monkeypatch.setenv("MY_SECRET_ACCESS_KEY", "test-secret-key") + + component = SagemakerGenerator( + model="test-model", + aws_access_key_id_var="MY_ACCESS_KEY_ID", + aws_secret_access_key_var="MY_SECRET_ACCESS_KEY", + aws_session_token_var="MY_SESSION_TOKEN", + aws_region_name_var="MY_REGION", + aws_profile_name_var="MY_PROFILE", + aws_custom_attributes={"custom": "attr"}, + 
generation_kwargs={"generation": "kwargs"}, + ) + serialized = component.to_dict() + assert serialized == { + "type": "haystack_integrations.components.generators.amazon_sagemaker.sagemaker.SagemakerGenerator", + "init_parameters": { + "model": "test-model", + "aws_access_key_id_var": "MY_ACCESS_KEY_ID", + "aws_secret_access_key_var": "MY_SECRET_ACCESS_KEY", + "aws_session_token_var": "MY_SESSION_TOKEN", + "aws_region_name_var": "MY_REGION", + "aws_profile_name_var": "MY_PROFILE", + "aws_custom_attributes": {"custom": "attr"}, + "generation_kwargs": {"generation": "kwargs"}, + }, + } + deserialized = SagemakerGenerator.from_dict(serialized) + assert deserialized.model == "test-model" + assert deserialized.aws_access_key_id_var == "MY_ACCESS_KEY_ID" + assert deserialized.aws_secret_access_key_var == "MY_SECRET_ACCESS_KEY" + assert deserialized.aws_session_token_var == "MY_SESSION_TOKEN" + assert deserialized.aws_region_name_var == "MY_REGION" + assert deserialized.aws_profile_name_var == "MY_PROFILE" + assert deserialized.aws_custom_attributes == {"custom": "attr"} + assert deserialized.generation_kwargs == {"generation": "kwargs"} + assert deserialized.client is None + + def test_run_with_list_of_dictionaries(self, monkeypatch): + monkeypatch.setenv("AWS_ACCESS_KEY_ID", "test-access-key") + monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "test-secret-key") + client_mock = Mock() + client_mock.invoke_endpoint.return_value = { + "Body": Mock(read=lambda: b'[{"generated_text": "test-reply", "other": "metadata"}]') + } + + component = SagemakerGenerator(model="test-model") + component.client = client_mock # Simulate warm_up() + response = component.run("What's Natural Language Processing?") + + # check that the component returns the correct ChatMessage response + assert isinstance(response, dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, str) for reply in 
response["replies"]] + assert "test-reply" in response["replies"][0] + + assert "meta" in response + assert isinstance(response["meta"], list) + assert len(response["meta"]) == 1 + assert [isinstance(reply, dict) for reply in response["meta"]] + assert response["meta"][0]["other"] == "metadata" + + def test_run_with_single_dictionary(self, monkeypatch): + monkeypatch.setenv("AWS_ACCESS_KEY_ID", "test-access-key") + monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "test-secret-key") + client_mock = Mock() + client_mock.invoke_endpoint.return_value = { + "Body": Mock(read=lambda: b'{"generation": "test-reply", "other": "metadata"}') + } + + component = SagemakerGenerator(model="test-model") + component.client = client_mock # Simulate warm_up() + response = component.run("What's Natural Language Processing?") + + # check that the component returns the correct ChatMessage response + assert isinstance(response, dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, str) for reply in response["replies"]] + assert "test-reply" in response["replies"][0] + + assert "meta" in response + assert isinstance(response["meta"], list) + assert len(response["meta"]) == 1 + assert [isinstance(reply, dict) for reply in response["meta"]] + assert response["meta"][0]["other"] == "metadata" + + @pytest.mark.skipif( + (not os.environ.get("AWS_ACCESS_KEY_ID", None) or not os.environ.get("AWS_SECRET_ACCESS_KEY", None)), + reason="Export two env vars called AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to run this test.", + ) + @pytest.mark.integration + def test_run_falcon(self): + component = SagemakerGenerator( + model="jumpstart-dft-hf-llm-falcon-7b-instruct-bf16", generation_kwargs={"max_new_tokens": 10} + ) + component.warm_up() + response = component.run("What's Natural Language Processing?") + + # check that the component returns the correct ChatMessage response + assert isinstance(response, 
dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, str) for reply in response["replies"]] + + # Coarse check: assuming no more than 4 chars per token. In any case it + # will fail if the `max_new_tokens` parameter is not respected, as the + # default is either 256 or 1024 + assert all(len(reply) <= 40 for reply in response["replies"]) + + assert "meta" in response + assert isinstance(response["meta"], list) + assert len(response["meta"]) == 1 + assert [isinstance(reply, dict) for reply in response["meta"]] + + @pytest.mark.skipif( + (not os.environ.get("AWS_ACCESS_KEY_ID", None) or not os.environ.get("AWS_SECRET_ACCESS_KEY", None)), + reason="Export two env vars called AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to run this test.", + ) + @pytest.mark.integration + def test_run_llama2(self): + component = SagemakerGenerator( + model="jumpstart-dft-meta-textgenerationneuron-llama-2-7b", + generation_kwargs={"max_new_tokens": 10}, + aws_custom_attributes={"accept_eula": True}, + ) + component.warm_up() + response = component.run("What's Natural Language Processing?") + + # check that the component returns the correct ChatMessage response + assert isinstance(response, dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, str) for reply in response["replies"]] + + # Coarse check: assuming no more than 4 chars per token. 
In any case it + # will fail if the `max_new_tokens` parameter is not respected, as the + # default is either 256 or 1024 + assert all(len(reply) <= 40 for reply in response["replies"]) + + assert "meta" in response + assert isinstance(response["meta"], list) + assert len(response["meta"]) == 1 + assert [isinstance(reply, dict) for reply in response["meta"]] + + @pytest.mark.skipif( + (not os.environ.get("AWS_ACCESS_KEY_ID", None) or not os.environ.get("AWS_SECRET_ACCESS_KEY", None)), + reason="Export two env vars called AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to run this test.", + ) + @pytest.mark.integration + def test_run_bloomz(self): + component = SagemakerGenerator( + model="jumpstart-dft-hf-textgeneration-bloomz-1b1", generation_kwargs={"max_new_tokens": 10} + ) + component.warm_up() + response = component.run("What's Natural Language Processing?") + + # check that the component returns the correct ChatMessage response + assert isinstance(response, dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, str) for reply in response["replies"]] + + # Coarse check: assuming no more than 4 chars per token. 
In any case it + # will fail if the `max_new_tokens` parameter is not respected, as the + # default is either 256 or 1024 + assert all(len(reply) <= 40 for reply in response["replies"]) + + assert "meta" in response + assert isinstance(response["meta"], list) + assert len(response["meta"]) == 1 + assert [isinstance(reply, dict) for reply in response["meta"]] From 762045d7137d271b0eba737fa93df488a0b1e056 Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Tue, 30 Jan 2024 09:58:59 +0100 Subject: [PATCH 27/47] pin sentence transformers (#289) --- integrations/instructor_embedders/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrations/instructor_embedders/pyproject.toml b/integrations/instructor_embedders/pyproject.toml index 67cbcb7af..c8a591b69 100644 --- a/integrations/instructor_embedders/pyproject.toml +++ b/integrations/instructor_embedders/pyproject.toml @@ -38,7 +38,7 @@ dependencies = [ "requests>=2.26.0", "scikit_learn>=1.0.2", "scipy", - "sentence_transformers>=2.2.0", + "sentence_transformers>=2.2.0,<2.3.0", "torch", "tqdm", "rich", From 29c869e819cf1b0fe7b7c0702c90943c0aa2964e Mon Sep 17 00:00:00 2001 From: ZanSara Date: Tue, 30 Jan 2024 11:32:37 +0100 Subject: [PATCH 28/47] Add Sagemaker to README (#291) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 20b17b377..39d669322 100644 --- a/README.md +++ b/README.md @@ -80,3 +80,4 @@ deepset-haystack | [qdrant-haystack](integrations/qdrant/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/qdrant-haystack.svg?color=orange)](https://pypi.org/project/qdrant-haystack) | [![Test / qdrant](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/qdrant.yml) | | [unstructured-fileconverter-haystack](integrations/unstructured/) | File converter | [![PyPI - 
Version](https://img.shields.io/pypi/v/unstructured-fileconverter-haystack.svg)](https://pypi.org/project/unstructured-fileconverter-haystack) | [![Test / unstructured](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/unstructured.yml) | | [uptrain-haystack](integrations/uptrain/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/uptrain-haystack.svg)](https://pypi.org/project/uptrain-haystack) | [![Test / uptrain](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/uptrain.yml) | +| [amazon-sagemaker-haystack](integrations/amazon_sagemaker/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/amazon-sagemaker-haystack.svg)](https://pypi.org/project/amazon-sagemaker-haystack) | [![Test / amazon_sagemaker](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_sagemaker.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/amazon_sagemaker.yml) | From cd737575df28f291f9786e2917a984f70a4ca567 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Tue, 30 Jan 2024 12:40:45 +0100 Subject: [PATCH 29/47] fix: Broken version pattern in `pyproject.toml` (#294) --- integrations/uptrain/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integrations/uptrain/pyproject.toml b/integrations/uptrain/pyproject.toml index 631b7dab8..498772313 100644 --- a/integrations/uptrain/pyproject.toml +++ b/integrations/uptrain/pyproject.toml @@ -34,11 +34,11 @@ packages = ["src/haystack_integrations"] [tool.hatch.version] source = "vcs" -tag-pattern = 'integrations\/uptrain(?P.*)' +tag-pattern = 'integrations\/uptrain-v(?P.*)' [tool.hatch.version.raw-options] root = "../.." 
-git_describe_command = 'git describe --tags --match="integrations/uptrain[0-9]*"' +git_describe_command = 'git describe --tags --match="integrations/uptrain-v[0-9]*"' [tool.hatch.envs.default] dependencies = ["coverage[toml]>=6.5", "pytest"] From 799c50349e3e546a30c67f171944548f026f95bb Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Tue, 30 Jan 2024 16:51:06 +0100 Subject: [PATCH 30/47] increase pinecone sleep time (#288) --- integrations/pinecone/tests/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrations/pinecone/tests/conftest.py b/integrations/pinecone/tests/conftest.py index c7a1342d5..79d2608f2 100644 --- a/integrations/pinecone/tests/conftest.py +++ b/integrations/pinecone/tests/conftest.py @@ -6,7 +6,7 @@ from haystack_integrations.document_stores.pinecone import PineconeDocumentStore # This is the approximate time it takes for the documents to be available -SLEEP_TIME = 20 +SLEEP_TIME = 25 @pytest.fixture() From 9014494d40a17f0678ccc45f8d51ffac1be514cf Mon Sep 17 00:00:00 2001 From: Vladimir Blagojevic Date: Tue, 30 Jan 2024 17:33:41 +0100 Subject: [PATCH 31/47] chore: Amazon Bedrock subproject refactoring (#293) * Initial migration * Update README.md * Update test instructions * Update integrations/amazon_bedrock/pyproject.toml Co-authored-by: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> * Update integrations/amazon_bedrock/pyproject.toml Co-authored-by: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> * Linting --------- Co-authored-by: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> --- integrations/amazon_bedrock/README.md | 19 ++++++++++++++++ integrations/amazon_bedrock/pyproject.toml | 21 ++++++++++-------- .../generators/__init__.py | 3 --- .../generators/amazon_bedrock}/__init__.py | 2 +- .../amazon_bedrock_adapters.py | 2 +- .../amazon_bedrock_handlers.py | 0 .../generators/amazon_bedrock}/errors.py | 0 .../generators/amazon_bedrock/generator.py} | 14 
++++++------ .../tests/test_amazon_bedrock.py | 22 +++++++++---------- 9 files changed, 51 insertions(+), 32 deletions(-) delete mode 100644 integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/__init__.py rename integrations/amazon_bedrock/src/{amazon_bedrock_haystack => haystack_integrations/components/generators/amazon_bedrock}/__init__.py (63%) rename integrations/amazon_bedrock/src/{amazon_bedrock_haystack/generators => haystack_integrations/components/generators/amazon_bedrock}/amazon_bedrock_adapters.py (98%) rename integrations/amazon_bedrock/src/{amazon_bedrock_haystack/generators => haystack_integrations/components/generators/amazon_bedrock}/amazon_bedrock_handlers.py (100%) rename integrations/amazon_bedrock/src/{amazon_bedrock_haystack => haystack_integrations/components/generators/amazon_bedrock}/errors.py (100%) rename integrations/amazon_bedrock/src/{amazon_bedrock_haystack/generators/amazon_bedrock.py => haystack_integrations/components/generators/amazon_bedrock/generator.py} (98%) diff --git a/integrations/amazon_bedrock/README.md b/integrations/amazon_bedrock/README.md index f84c8f3c4..3a689ef3b 100644 --- a/integrations/amazon_bedrock/README.md +++ b/integrations/amazon_bedrock/README.md @@ -8,6 +8,7 @@ **Table of Contents** - [Installation](#installation) +- [Contributing](#contributing) - [License](#license) ## Installation @@ -16,6 +17,24 @@ pip install amazon-bedrock-haystack ``` +## Contributing + +`hatch` is the best way to interact with this project, to install it: +```sh +pip install hatch +``` + +With `hatch` installed, to run all the tests: +``` +hatch run test +``` +> Note: there are no integration tests for this project. + +To run the linters `ruff` and `mypy`: +``` +hatch run lint:all +``` + ## License `amazon-bedrock-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. 
diff --git a/integrations/amazon_bedrock/pyproject.toml b/integrations/amazon_bedrock/pyproject.toml index 7e82924a8..6a2ce3eab 100644 --- a/integrations/amazon_bedrock/pyproject.toml +++ b/integrations/amazon_bedrock/pyproject.toml @@ -35,6 +35,9 @@ Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/m Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues" Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/amazon_bedrock" +[tool.hatch.build.targets.wheel] +packages = ["src/haystack_integrations"] + [tool.hatch.version] source = "vcs" tag-pattern = 'integrations\/amazon_bedrock-v(?P.*)' @@ -71,7 +74,8 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/amazon_bedrock_haystack tests}" +typing = "mypy --install-types --non-interactive --explicit-package-bases {args:src/ tests}" + style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -136,26 +140,24 @@ unfixable = [ ] [tool.ruff.isort] -known-first-party = ["amazon_bedrock_haystack"] +known-first-party = ["haystack_integrations"] [tool.ruff.flake8-tidy-imports] -ban-relative-imports = "all" +ban-relative-imports = "parents" [tool.ruff.per-file-ignores] # Tests can use magic values, assertions, and relative imports "tests/**/*" = ["PLR2004", "S101", "TID252"] [tool.coverage.run] -source_pkgs = ["amazon_bedrock_haystack", "tests"] +source_pkgs = ["src", "tests"] branch = true parallel = true -omit = [ - "src/amazon_bedrock_haystack/__about__.py", -] + [tool.coverage.paths] -amazon_bedrock_haystack = ["src/amazon_bedrock_haystack", "*/amazon_bedrock/src/amazon_bedrock_haystack"] -tests = ["tests", "*/amazon_bedrock_haystack/tests"] +amazon_bedrock_haystack = ["src/*"] +tests = ["tests"] [tool.coverage.report] exclude_lines = [ @@ -170,6 +172,7 @@ module = [ "transformers.*", "boto3.*", "haystack.*", + "haystack_integrations.*", "pytest.*", "numpy.*", 
] diff --git a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/__init__.py b/integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/__init__.py deleted file mode 100644 index e873bc332..000000000 --- a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: 2023-present deepset GmbH -# -# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/__init__.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/__init__.py similarity index 63% rename from integrations/amazon_bedrock/src/amazon_bedrock_haystack/__init__.py rename to integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/__init__.py index 3e05179c0..236347b61 100644 --- a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/__init__.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/__init__.py @@ -1,6 +1,6 @@ # SPDX-FileCopyrightText: 2023-present deepset GmbH # # SPDX-License-Identifier: Apache-2.0 -from amazon_bedrock_haystack.generators.amazon_bedrock import AmazonBedrockGenerator +from .generator import AmazonBedrockGenerator __all__ = ["AmazonBedrockGenerator"] diff --git a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/amazon_bedrock_adapters.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_adapters.py similarity index 98% rename from integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/amazon_bedrock_adapters.py rename to integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_adapters.py index bec172867..b7e775cb8 100644 --- a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/amazon_bedrock_adapters.py +++ 
b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_adapters.py @@ -2,7 +2,7 @@ from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional -from amazon_bedrock_haystack.generators.amazon_bedrock_handlers import TokenStreamingHandler +from .amazon_bedrock_handlers import TokenStreamingHandler class BedrockModelAdapter(ABC): diff --git a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/amazon_bedrock_handlers.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_handlers.py similarity index 100% rename from integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/amazon_bedrock_handlers.py rename to integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_handlers.py diff --git a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/errors.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/errors.py similarity index 100% rename from integrations/amazon_bedrock/src/amazon_bedrock_haystack/errors.py rename to integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/errors.py diff --git a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/amazon_bedrock.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py similarity index 98% rename from integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/amazon_bedrock.py rename to integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py index dda84fe14..c79ef9de4 100644 --- a/integrations/amazon_bedrock/src/amazon_bedrock_haystack/generators/amazon_bedrock.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py @@ -7,12 +7,7 @@ from botocore.exceptions import 
BotoCoreError, ClientError from haystack import component, default_from_dict, default_to_dict -from amazon_bedrock_haystack.errors import ( - AmazonBedrockConfigurationError, - AmazonBedrockInferenceError, - AWSConfigurationError, -) -from amazon_bedrock_haystack.generators.amazon_bedrock_adapters import ( +from .amazon_bedrock_adapters import ( AI21LabsJurassic2Adapter, AmazonTitanAdapter, AnthropicClaudeAdapter, @@ -20,11 +15,16 @@ CohereCommandAdapter, MetaLlama2ChatAdapter, ) -from amazon_bedrock_haystack.generators.amazon_bedrock_handlers import ( +from .amazon_bedrock_handlers import ( DefaultPromptHandler, DefaultTokenStreamingHandler, TokenStreamingHandler, ) +from .errors import ( + AmazonBedrockConfigurationError, + AmazonBedrockInferenceError, + AWSConfigurationError, +) logger = logging.getLogger(__name__) diff --git a/integrations/amazon_bedrock/tests/test_amazon_bedrock.py b/integrations/amazon_bedrock/tests/test_amazon_bedrock.py index a05c95ba3..c6bb0add4 100644 --- a/integrations/amazon_bedrock/tests/test_amazon_bedrock.py +++ b/integrations/amazon_bedrock/tests/test_amazon_bedrock.py @@ -4,9 +4,8 @@ import pytest from botocore.exceptions import BotoCoreError -from amazon_bedrock_haystack.errors import AmazonBedrockConfigurationError -from amazon_bedrock_haystack.generators.amazon_bedrock import AmazonBedrockGenerator -from amazon_bedrock_haystack.generators.amazon_bedrock_adapters import ( +from haystack_integrations.components.generators.amazon_bedrock import AmazonBedrockGenerator +from haystack_integrations.components.generators.amazon_bedrock.amazon_bedrock_adapters import ( AI21LabsJurassic2Adapter, AmazonTitanAdapter, AnthropicClaudeAdapter, @@ -14,6 +13,7 @@ CohereCommandAdapter, MetaLlama2ChatAdapter, ) +from haystack_integrations.components.generators.amazon_bedrock.errors import AmazonBedrockConfigurationError @pytest.fixture @@ -34,7 +34,7 @@ def mock_boto3_session(): @pytest.fixture def mock_prompt_handler(): with patch( - 
"amazon_bedrock_haystack.generators.amazon_bedrock_handlers.DefaultPromptHandler" + "haystack_integrations.components.generators.amazon_bedrock.amazon_bedrock_handlers.DefaultPromptHandler" ) as mock_prompt_handler: yield mock_prompt_handler @@ -55,7 +55,7 @@ def test_to_dict(mock_auto_tokenizer, mock_boto3_session): ) expected_dict = { - "type": "amazon_bedrock_haystack.generators.amazon_bedrock.AmazonBedrockGenerator", + "type": "haystack_integrations.components.generators.amazon_bedrock.generator.AmazonBedrockGenerator", "init_parameters": { "model": "anthropic.claude-v2", "max_length": 99, @@ -72,7 +72,7 @@ def test_from_dict(mock_auto_tokenizer, mock_boto3_session): """ generator = AmazonBedrockGenerator.from_dict( { - "type": "amazon_bedrock_haystack.generators.amazon_bedrock.AmazonBedrockGenerator", + "type": "haystack_integrations.components.generators.amazon_bedrock.generator.AmazonBedrockGenerator", "init_parameters": { "model": "anthropic.claude-v2", "max_length": 99, @@ -235,7 +235,7 @@ def test_supports_for_valid_aws_configuration(): # Patch the class method to return the mock session with patch( - "amazon_bedrock_haystack.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", + "haystack_integrations.components.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", return_value=mock_session, ): supported = AmazonBedrockGenerator.supports( @@ -266,7 +266,7 @@ def test_supports_for_invalid_bedrock_config(): # Patch the class method to return the mock session with patch( - "amazon_bedrock_haystack.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", + "haystack_integrations.components.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", return_value=mock_session, ), pytest.raises(AmazonBedrockConfigurationError, match="Could not connect to Amazon Bedrock."): AmazonBedrockGenerator.supports( @@ -282,7 +282,7 @@ def test_supports_for_invalid_bedrock_config_error_on_list_models(): # Patch the class method 
to return the mock session with patch( - "amazon_bedrock_haystack.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", + "haystack_integrations.components.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", return_value=mock_session, ), pytest.raises(AmazonBedrockConfigurationError, match="Could not connect to Amazon Bedrock."): AmazonBedrockGenerator.supports( @@ -314,7 +314,7 @@ def test_supports_with_stream_true_for_model_that_supports_streaming(): # Patch the class method to return the mock session with patch( - "amazon_bedrock_haystack.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", + "haystack_integrations.components.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", return_value=mock_session, ): supported = AmazonBedrockGenerator.supports( @@ -335,7 +335,7 @@ def test_supports_with_stream_true_for_model_that_does_not_support_streaming(): # Patch the class method to return the mock session with patch( - "amazon_bedrock_haystack.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", + "haystack_integrations.components.generators.amazon_bedrock.AmazonBedrockGenerator.get_aws_session", return_value=mock_session, ), pytest.raises( AmazonBedrockConfigurationError, From b6115c21282257984ebc0046e4013174a29c5f6e Mon Sep 17 00:00:00 2001 From: Vladimir Blagojevic Date: Tue, 30 Jan 2024 18:04:18 +0100 Subject: [PATCH 32/47] chore: Adjust amazon bedrock helper classes names (#297) * Adjust amazon bedrock helper classes names * Linting * Update tests * More linting * Small update --- .../{amazon_bedrock_adapters.py => adapters.py} | 2 +- .../generators/amazon_bedrock/generator.py | 12 ++++++------ .../{amazon_bedrock_handlers.py => handlers.py} | 0 .../amazon_bedrock/tests/test_amazon_bedrock.py | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) rename integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/{amazon_bedrock_adapters.py => adapters.py} 
(99%) rename integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/{amazon_bedrock_handlers.py => handlers.py} (100%) diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_adapters.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py similarity index 99% rename from integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_adapters.py rename to integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py index b7e775cb8..40ba0bc67 100644 --- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_adapters.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/adapters.py @@ -2,7 +2,7 @@ from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional -from .amazon_bedrock_handlers import TokenStreamingHandler +from .handlers import TokenStreamingHandler class BedrockModelAdapter(ABC): diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py index c79ef9de4..4c43c9a09 100644 --- a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py +++ b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/generator.py @@ -7,7 +7,7 @@ from botocore.exceptions import BotoCoreError, ClientError from haystack import component, default_from_dict, default_to_dict -from .amazon_bedrock_adapters import ( +from .adapters import ( AI21LabsJurassic2Adapter, AmazonTitanAdapter, AnthropicClaudeAdapter, @@ -15,16 +15,16 @@ CohereCommandAdapter, MetaLlama2ChatAdapter, ) -from 
.amazon_bedrock_handlers import ( - DefaultPromptHandler, - DefaultTokenStreamingHandler, - TokenStreamingHandler, -) from .errors import ( AmazonBedrockConfigurationError, AmazonBedrockInferenceError, AWSConfigurationError, ) +from .handlers import ( + DefaultPromptHandler, + DefaultTokenStreamingHandler, + TokenStreamingHandler, +) logger = logging.getLogger(__name__) diff --git a/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_handlers.py b/integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/handlers.py similarity index 100% rename from integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/amazon_bedrock_handlers.py rename to integrations/amazon_bedrock/src/haystack_integrations/components/generators/amazon_bedrock/handlers.py diff --git a/integrations/amazon_bedrock/tests/test_amazon_bedrock.py b/integrations/amazon_bedrock/tests/test_amazon_bedrock.py index c6bb0add4..b08e9dfd5 100644 --- a/integrations/amazon_bedrock/tests/test_amazon_bedrock.py +++ b/integrations/amazon_bedrock/tests/test_amazon_bedrock.py @@ -5,7 +5,7 @@ from botocore.exceptions import BotoCoreError from haystack_integrations.components.generators.amazon_bedrock import AmazonBedrockGenerator -from haystack_integrations.components.generators.amazon_bedrock.amazon_bedrock_adapters import ( +from haystack_integrations.components.generators.amazon_bedrock.adapters import ( AI21LabsJurassic2Adapter, AmazonTitanAdapter, AnthropicClaudeAdapter, @@ -34,7 +34,7 @@ def mock_boto3_session(): @pytest.fixture def mock_prompt_handler(): with patch( - "haystack_integrations.components.generators.amazon_bedrock.amazon_bedrock_handlers.DefaultPromptHandler" + "haystack_integrations.components.generators.amazon_bedrock.handlers.DefaultPromptHandler" ) as mock_prompt_handler: yield mock_prompt_handler From 60038c064b33d8cb522ef71886b1bd85eafc7244 Mon Sep 17 00:00:00 2001 From: 
Stefano Fiorucci Date: Wed, 31 Jan 2024 11:34:20 +0100 Subject: [PATCH 33/47] try changing dummy vector (#301) --- .../document_stores/pinecone/document_store.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/document_store.py b/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/document_store.py index a755b7e47..92ea987b4 100644 --- a/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/document_store.py +++ b/integrations/pinecone/src/haystack_integrations/document_stores/pinecone/document_store.py @@ -85,7 +85,7 @@ def __init__( ) self.dimension = actual_dimension or dimension - self._dummy_vector = [0.0] * self.dimension + self._dummy_vector = [-10.0] * self.dimension self.environment = environment self.index = index self.namespace = namespace From 69803e923a8a9446f241c7fdadfa9eadbeb33a2e Mon Sep 17 00:00:00 2001 From: ZanSara Date: Wed, 31 Jan 2024 11:35:57 +0100 Subject: [PATCH 34/47] Add typing_extensions pin (#295) --- integrations/chroma/pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/integrations/chroma/pyproject.toml b/integrations/chroma/pyproject.toml index ce4641611..2653c491f 100644 --- a/integrations/chroma/pyproject.toml +++ b/integrations/chroma/pyproject.toml @@ -25,6 +25,7 @@ classifiers = [ dependencies = [ "haystack-ai", "chromadb<0.4.20", # FIXME: investigate why filtering tests broke on 0.4.20 + "typing_extensions>=4.8.0", ] [project.urls] From dabf0712fd67db98521f97509cdbc0cd8444e910 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bilge=20Y=C3=BCcel?= Date: Wed, 31 Jan 2024 14:30:27 +0300 Subject: [PATCH 35/47] Update Breaking Change Proposal issue template (#299) * Update breaking-change-proposal.md * Update .github/ISSUE_TEMPLATE/breaking-change-proposal.md Co-authored-by: ZanSara --------- Co-authored-by: ZanSara --- .github/ISSUE_TEMPLATE/breaking-change-proposal.md | 11 +++++++---- 1 file 
changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/breaking-change-proposal.md b/.github/ISSUE_TEMPLATE/breaking-change-proposal.md index 71aa2a5e9..6c6fb9017 100644 --- a/.github/ISSUE_TEMPLATE/breaking-change-proposal.md +++ b/.github/ISSUE_TEMPLATE/breaking-change-proposal.md @@ -15,9 +15,12 @@ Briefly explain how the change is breaking and why is needed. ```[tasklist] ### Tasks -- [ ] The change is documented with docstrings and was merged in the `main` branch -- [ ] Integration tile on https://github.com/deepset-ai/haystack-integrations was updated +- [ ] The changes are merged in the `main` branch (Code + Docstrings) +- [ ] New package version declares the breaking change +- [ ] The package has been released on PyPI - [ ] Docs at https://docs.haystack.deepset.ai/ were updated +- [ ] Integration tile on https://github.com/deepset-ai/haystack-integrations was updated - [ ] Notebooks on https://github.com/deepset-ai/haystack-cookbook were updated (if needed) -- [ ] New package version declares the breaking change and package has been released on PyPI -``` \ No newline at end of file +- [ ] Tutorials on https://github.com/deepset-ai/haystack-tutorials were updated (if needed) +- [ ] Articles on https://github.com/deepset-ai/haystack-home/tree/main/content were updated (if needed) +``` From ae80056f6d7e3eeeded4850504659e76ec288fcf Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Wed, 31 Jan 2024 14:47:37 +0100 Subject: [PATCH 36/47] Pgvector - filters (#257) * very first draft * setup integration folder and workflow * update readme * making progress! * mypy overrides * making progress on index * drop sqlalchemy in favor of psycopggit add tests/test_document_store.py ! * good improvements! 
* docstrings * improve definition * small improvements * more test cases * standardize * start working on filters * inner_product * explicit create statement * address feedback * tests separation * filters - draft * change embedding_similarity_function to vector_function * explicit insert and update statements * remove useless condition * unit tests for conversion functions * tests change * simplify! * progress! * better error messages and more * cover also complex cases * fmt * make things work again * progress on simplification * further simplification * filters simplification * fmt * rm print * uncomment line * fix name * mv check filters is a dict in filter_documents * f-strings * NO_VALUE constant * handle nested logical conditions in _parse_logical_condition * add examples to _treat_meta_field * fix fmt * ellipsis fmt * more tests for unhappy paths * more tests for internal methods * black * log debug query and params --- .../pgvector/document_store.py | 51 +++- .../document_stores/pgvector/filters.py | 242 ++++++++++++++++++ integrations/pgvector/tests/conftest.py | 24 ++ .../pgvector/tests/test_document_store.py | 21 -- integrations/pgvector/tests/test_filters.py | 179 +++++++++++++ 5 files changed, 489 insertions(+), 28 deletions(-) create mode 100644 integrations/pgvector/src/haystack_integrations/document_stores/pgvector/filters.py create mode 100644 integrations/pgvector/tests/conftest.py create mode 100644 integrations/pgvector/tests/test_filters.py diff --git a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py index bb1915a6f..b49bd87c3 100644 --- a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py +++ b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py @@ -8,6 +8,7 @@ from haystack.dataclasses.document import ByteStream, Document from 
haystack.document_stores.errors import DocumentStoreError, DuplicateDocumentError from haystack.document_stores.types import DuplicatePolicy +from haystack.utils.filters import convert from psycopg import Error, IntegrityError, connect from psycopg.abc import Query from psycopg.cursor import Cursor @@ -18,6 +19,8 @@ from pgvector.psycopg import register_vector +from .filters import _convert_filters_to_where_clause_and_params + logger = logging.getLogger(__name__) CREATE_TABLE_STATEMENT = """ @@ -158,11 +161,16 @@ def _execute_sql( params = params or () cursor = cursor or self._cursor + sql_query_str = sql_query.as_string(cursor) if not isinstance(sql_query, str) else sql_query + logger.debug("SQL query: %s\nParameters: %s", sql_query_str, params) + try: result = cursor.execute(sql_query, params) except Error as e: self._connection.rollback() - raise DocumentStoreError(error_msg) from e + detailed_error_msg = f"{error_msg}.\nYou can find the SQL query and the parameters in the debug logs." + raise DocumentStoreError(detailed_error_msg) from e + return result def _create_table_if_not_exists(self): @@ -257,15 +265,37 @@ def count_documents(self) -> int: ] return count - def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Document]: # noqa: ARG002 - # TODO: implement filters - sql_get_docs = SQL("SELECT * FROM {table_name}").format(table_name=Identifier(self.table_name)) + def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Document]: + """ + Returns the documents that match the filters provided. + + For a detailed specification of the filters, + refer to the [documentation](https://docs.haystack.deepset.ai/v2.0/docs/metadata-filtering) + + :param filters: The filters to apply to the document list. + :return: A list of Documents that match the given filters. 
+ """ + if filters: + if not isinstance(filters, dict): + msg = "Filters must be a dictionary" + raise TypeError(msg) + if "operator" not in filters and "conditions" not in filters: + filters = convert(filters) + + sql_filter = SQL("SELECT * FROM {table_name}").format(table_name=Identifier(self.table_name)) + + params = () + if filters: + sql_where_clause, params = _convert_filters_to_where_clause_and_params(filters) + sql_filter += sql_where_clause result = self._execute_sql( - sql_get_docs, error_msg="Could not filter documents from PgvectorDocumentStore", cursor=self._dict_cursor + sql_filter, + params, + error_msg="Could not filter documents from PgvectorDocumentStore.", + cursor=self._dict_cursor, ) - # Fetch all the records records = result.fetchall() docs = self._from_pg_to_haystack_documents(records) return docs @@ -300,6 +330,9 @@ def write_documents(self, documents: List[Document], policy: DuplicatePolicy = D sql_insert += SQL(" RETURNING id") + sql_query_str = sql_insert.as_string(self._cursor) if not isinstance(sql_insert, str) else sql_insert + logger.debug("SQL query: %s\nParameters: %s", sql_query_str, db_documents) + try: self._cursor.executemany(sql_insert, db_documents, returning=True) except IntegrityError as ie: @@ -307,7 +340,11 @@ def write_documents(self, documents: List[Document], policy: DuplicatePolicy = D raise DuplicateDocumentError from ie except Error as e: self._connection.rollback() - raise DocumentStoreError from e + error_msg = ( + "Could not write documents to PgvectorDocumentStore. \n" + "You can find the SQL query and the parameters in the debug logs." 
+ ) + raise DocumentStoreError(error_msg) from e # get the number of the inserted documents, inspired by psycopg3 docs # https://www.psycopg.org/psycopg3/docs/api/cursors.html#psycopg.Cursor.executemany diff --git a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/filters.py b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/filters.py new file mode 100644 index 000000000..daa90f502 --- /dev/null +++ b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/filters.py @@ -0,0 +1,242 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from datetime import datetime +from itertools import chain +from typing import Any, Dict, List + +from haystack.errors import FilterError +from pandas import DataFrame +from psycopg.sql import SQL +from psycopg.types.json import Jsonb + +# we need this mapping to cast meta values to the correct type, +# since they are stored in the JSONB field as strings. +# this dict can be extended if needed +PYTHON_TYPES_TO_PG_TYPES = { + int: "integer", + float: "real", + bool: "boolean", +} + +NO_VALUE = "no_value" + + +def _convert_filters_to_where_clause_and_params(filters: Dict[str, Any]) -> tuple[SQL, tuple]: + """ + Convert Haystack filters to a WHERE clause and a tuple of params to query PostgreSQL. 
+ """ + if "field" in filters: + query, values = _parse_comparison_condition(filters) + else: + query, values = _parse_logical_condition(filters) + + where_clause = SQL(" WHERE ") + SQL(query) + params = tuple(value for value in values if value != NO_VALUE) + + return where_clause, params + + +def _parse_logical_condition(condition: Dict[str, Any]) -> tuple[str, List[Any]]: + if "operator" not in condition: + msg = f"'operator' key missing in {condition}" + raise FilterError(msg) + if "conditions" not in condition: + msg = f"'conditions' key missing in {condition}" + raise FilterError(msg) + + operator = condition["operator"] + if operator not in ["AND", "OR"]: + msg = f"Unknown logical operator '{operator}'. Valid operators are: 'AND', 'OR'" + raise FilterError(msg) + + # logical conditions can be nested, so we need to parse them recursively + conditions = [] + for c in condition["conditions"]: + if "field" in c: + query, vals = _parse_comparison_condition(c) + else: + query, vals = _parse_logical_condition(c) + conditions.append((query, vals)) + + query_parts, values = [], [] + for c in conditions: + query_parts.append(c[0]) + values.append(c[1]) + if isinstance(values[0], list): + values = list(chain.from_iterable(values)) + + if operator == "AND": + sql_query = f"({' AND '.join(query_parts)})" + elif operator == "OR": + sql_query = f"({' OR '.join(query_parts)})" + else: + msg = f"Unknown logical operator '{operator}'" + raise FilterError(msg) + + return sql_query, values + + +def _parse_comparison_condition(condition: Dict[str, Any]) -> tuple[str, List[Any]]: + field: str = condition["field"] + if "operator" not in condition: + msg = f"'operator' key missing in {condition}" + raise FilterError(msg) + if "value" not in condition: + msg = f"'value' key missing in {condition}" + raise FilterError(msg) + operator: str = condition["operator"] + if operator not in COMPARISON_OPERATORS: + msg = f"Unknown comparison operator '{operator}'. 
Valid operators are: {list(COMPARISON_OPERATORS.keys())}" + raise FilterError(msg) + + value: Any = condition["value"] + if isinstance(value, DataFrame): + # DataFrames are stored as JSONB and we query them as such + value = Jsonb(value.to_json()) + field = f"({field})::jsonb" + + if field.startswith("meta."): + field = _treat_meta_field(field, value) + + field, value = COMPARISON_OPERATORS[operator](field, value) + return field, [value] + + +def _treat_meta_field(field: str, value: Any) -> str: + """ + Internal method that modifies the field str + to make the meta JSONB field queryable. + + Examples: + >>> _treat_meta_field(field="meta.number", value=9) + "(meta->>'number')::integer" + + >>> _treat_meta_field(field="meta.name", value="my_name") + "meta->>'name'" + """ + + # use the ->> operator to access keys in the meta JSONB field + field_name = field.split(".", 1)[-1] + field = f"meta->>'{field_name}'" + + # meta fields are stored as strings in the JSONB field, + # so we need to cast them to the correct type + type_value = PYTHON_TYPES_TO_PG_TYPES.get(type(value)) + if isinstance(value, list) and len(value) > 0: + type_value = PYTHON_TYPES_TO_PG_TYPES.get(type(value[0])) + + if type_value: + field = f"({field})::{type_value}" + + return field + + +def _equal(field: str, value: Any) -> tuple[str, Any]: + if value is None: + # NO_VALUE is a placeholder that will be removed in _convert_filters_to_where_clause_and_params + return f"{field} IS NULL", NO_VALUE + return f"{field} = %s", value + + +def _not_equal(field: str, value: Any) -> tuple[str, Any]: + # we use IS DISTINCT FROM to correctly handle NULL values + # (not handled by !=) + return f"{field} IS DISTINCT FROM %s", value + + +def _greater_than(field: str, value: Any) -> tuple[str, Any]: + if isinstance(value, str): + try: + datetime.fromisoformat(value) + except (ValueError, TypeError) as exc: + msg = ( + "Can't compare strings using operators '>', '>=', '<', '<='. 
" + "Strings are only comparable if they are ISO formatted dates." + ) + raise FilterError(msg) from exc + if type(value) in [list, Jsonb]: + msg = f"Filter value can't be of type {type(value)} using operators '>', '>=', '<', '<='" + raise FilterError(msg) + + return f"{field} > %s", value + + +def _greater_than_equal(field: str, value: Any) -> tuple[str, Any]: + if isinstance(value, str): + try: + datetime.fromisoformat(value) + except (ValueError, TypeError) as exc: + msg = ( + "Can't compare strings using operators '>', '>=', '<', '<='. " + "Strings are only comparable if they are ISO formatted dates." + ) + raise FilterError(msg) from exc + if type(value) in [list, Jsonb]: + msg = f"Filter value can't be of type {type(value)} using operators '>', '>=', '<', '<='" + raise FilterError(msg) + + return f"{field} >= %s", value + + +def _less_than(field: str, value: Any) -> tuple[str, Any]: + if isinstance(value, str): + try: + datetime.fromisoformat(value) + except (ValueError, TypeError) as exc: + msg = ( + "Can't compare strings using operators '>', '>=', '<', '<='. " + "Strings are only comparable if they are ISO formatted dates." + ) + raise FilterError(msg) from exc + if type(value) in [list, Jsonb]: + msg = f"Filter value can't be of type {type(value)} using operators '>', '>=', '<', '<='" + raise FilterError(msg) + + return f"{field} < %s", value + + +def _less_than_equal(field: str, value: Any) -> tuple[str, Any]: + if isinstance(value, str): + try: + datetime.fromisoformat(value) + except (ValueError, TypeError) as exc: + msg = ( + "Can't compare strings using operators '>', '>=', '<', '<='. " + "Strings are only comparable if they are ISO formatted dates." 
+ ) + raise FilterError(msg) from exc + if type(value) in [list, Jsonb]: + msg = f"Filter value can't be of type {type(value)} using operators '>', '>=', '<', '<='" + raise FilterError(msg) + + return f"{field} <= %s", value + + +def _not_in(field: str, value: Any) -> tuple[str, List]: + if not isinstance(value, list): + msg = f"{field}'s value must be a list when using 'not in' comparator in Pinecone" + raise FilterError(msg) + + return f"{field} IS NULL OR {field} != ALL(%s)", [value] + + +def _in(field: str, value: Any) -> tuple[str, List]: + if not isinstance(value, list): + msg = f"{field}'s value must be a list when using 'in' comparator in Pinecone" + raise FilterError(msg) + + # see https://www.psycopg.org/psycopg3/docs/basic/adapt.html#lists-adaptation + return f"{field} = ANY(%s)", [value] + + +COMPARISON_OPERATORS = { + "==": _equal, + "!=": _not_equal, + ">": _greater_than, + ">=": _greater_than_equal, + "<": _less_than, + "<=": _less_than_equal, + "in": _in, + "not in": _not_in, +} diff --git a/integrations/pgvector/tests/conftest.py b/integrations/pgvector/tests/conftest.py new file mode 100644 index 000000000..34260f409 --- /dev/null +++ b/integrations/pgvector/tests/conftest.py @@ -0,0 +1,24 @@ +import pytest +from haystack_integrations.document_stores.pgvector import PgvectorDocumentStore + + +@pytest.fixture +def document_store(request): + connection_string = "postgresql://postgres:postgres@localhost:5432/postgres" + table_name = f"haystack_{request.node.name}" + embedding_dimension = 768 + vector_function = "cosine_distance" + recreate_table = True + search_strategy = "exact_nearest_neighbor" + + store = PgvectorDocumentStore( + connection_string=connection_string, + table_name=table_name, + embedding_dimension=embedding_dimension, + vector_function=vector_function, + recreate_table=recreate_table, + search_strategy=search_strategy, + ) + yield store + + store.delete_table() diff --git a/integrations/pgvector/tests/test_document_store.py 
b/integrations/pgvector/tests/test_document_store.py index 9f3521838..e8d9107d7 100644 --- a/integrations/pgvector/tests/test_document_store.py +++ b/integrations/pgvector/tests/test_document_store.py @@ -14,27 +14,6 @@ class TestDocumentStore(CountDocumentsTest, WriteDocumentsTest, DeleteDocumentsTest): - @pytest.fixture - def document_store(self, request): - connection_string = "postgresql://postgres:postgres@localhost:5432/postgres" - table_name = f"haystack_{request.node.name}" - embedding_dimension = 768 - vector_function = "cosine_distance" - recreate_table = True - search_strategy = "exact_nearest_neighbor" - - store = PgvectorDocumentStore( - connection_string=connection_string, - table_name=table_name, - embedding_dimension=embedding_dimension, - vector_function=vector_function, - recreate_table=recreate_table, - search_strategy=search_strategy, - ) - yield store - - store.delete_table() - def test_write_documents(self, document_store: PgvectorDocumentStore): docs = [Document(id="1")] assert document_store.write_documents(docs) == 1 diff --git a/integrations/pgvector/tests/test_filters.py b/integrations/pgvector/tests/test_filters.py new file mode 100644 index 000000000..8b2dc8ec9 --- /dev/null +++ b/integrations/pgvector/tests/test_filters.py @@ -0,0 +1,179 @@ +from typing import List + +import pytest +from haystack.dataclasses.document import Document +from haystack.testing.document_store import FilterDocumentsTest +from haystack_integrations.document_stores.pgvector.filters import ( + FilterError, + _convert_filters_to_where_clause_and_params, + _parse_comparison_condition, + _parse_logical_condition, + _treat_meta_field, +) +from pandas import DataFrame +from psycopg.sql import SQL +from psycopg.types.json import Jsonb + + +class TestFilters(FilterDocumentsTest): + def assert_documents_are_equal(self, received: List[Document], expected: List[Document]): + """ + This overrides the default assert_documents_are_equal from FilterDocumentsTest. 
+ It is needed because the embeddings are not exactly the same when they are retrieved from Postgres. + """ + + assert len(received) == len(expected) + received.sort(key=lambda x: x.id) + expected.sort(key=lambda x: x.id) + for received_doc, expected_doc in zip(received, expected): + # we first compare the embeddings approximately + if received_doc.embedding is None: + assert expected_doc.embedding is None + else: + assert received_doc.embedding == pytest.approx(expected_doc.embedding) + + received_doc.embedding, expected_doc.embedding = None, None + assert received_doc == expected_doc + + def test_complex_filter(self, document_store, filterable_docs): + document_store.write_documents(filterable_docs) + filters = { + "operator": "OR", + "conditions": [ + { + "operator": "AND", + "conditions": [ + {"field": "meta.number", "operator": "==", "value": 100}, + {"field": "meta.chapter", "operator": "==", "value": "intro"}, + ], + }, + { + "operator": "AND", + "conditions": [ + {"field": "meta.page", "operator": "==", "value": "90"}, + {"field": "meta.chapter", "operator": "==", "value": "conclusion"}, + ], + }, + ], + } + + result = document_store.filter_documents(filters=filters) + + self.assert_documents_are_equal( + result, + [ + d + for d in filterable_docs + if (d.meta.get("number") == 100 and d.meta.get("chapter") == "intro") + or (d.meta.get("page") == "90" and d.meta.get("chapter") == "conclusion") + ], + ) + + @pytest.mark.skip(reason="NOT operator is not supported in PgvectorDocumentStore") + def test_not_operator(self, document_store, filterable_docs): ... 
+ + def test_treat_meta_field(self): + assert _treat_meta_field(field="meta.number", value=9) == "(meta->>'number')::integer" + assert _treat_meta_field(field="meta.number", value=[1, 2, 3]) == "(meta->>'number')::integer" + assert _treat_meta_field(field="meta.name", value="my_name") == "meta->>'name'" + assert _treat_meta_field(field="meta.name", value=["my_name"]) == "meta->>'name'" + assert _treat_meta_field(field="meta.number", value=1.1) == "(meta->>'number')::real" + assert _treat_meta_field(field="meta.number", value=[1.1, 2.2, 3.3]) == "(meta->>'number')::real" + assert _treat_meta_field(field="meta.bool", value=True) == "(meta->>'bool')::boolean" + assert _treat_meta_field(field="meta.bool", value=[True, False, True]) == "(meta->>'bool')::boolean" + + # do not cast the field if its value is not one of the known types, an empty list or None + assert _treat_meta_field(field="meta.other", value={"a": 3, "b": "example"}) == "meta->>'other'" + assert _treat_meta_field(field="meta.empty_list", value=[]) == "meta->>'empty_list'" + assert _treat_meta_field(field="meta.name", value=None) == "meta->>'name'" + + def test_comparison_condition_dataframe_jsonb_conversion(self): + dataframe = DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + condition = {"field": "meta.df", "operator": "==", "value": dataframe} + field, values = _parse_comparison_condition(condition) + assert field == "(meta.df)::jsonb = %s" + + # we check each slot of the Jsonb object because it does not implement __eq__ + assert values[0].obj == Jsonb(dataframe.to_json()).obj + assert values[0].dumps == Jsonb(dataframe.to_json()).dumps + + def test_comparison_condition_missing_operator(self): + condition = {"field": "meta.type", "value": "article"} + with pytest.raises(FilterError): + _parse_comparison_condition(condition) + + def test_comparison_condition_missing_value(self): + condition = {"field": "meta.type", "operator": "=="} + with pytest.raises(FilterError): + 
_parse_comparison_condition(condition) + + def test_comparison_condition_unknown_operator(self): + condition = {"field": "meta.type", "operator": "unknown", "value": "article"} + with pytest.raises(FilterError): + _parse_comparison_condition(condition) + + def test_logical_condition_missing_operator(self): + condition = {"conditions": []} + with pytest.raises(FilterError): + _parse_logical_condition(condition) + + def test_logical_condition_missing_conditions(self): + condition = {"operator": "AND"} + with pytest.raises(FilterError): + _parse_logical_condition(condition) + + def test_logical_condition_unknown_operator(self): + condition = {"operator": "unknown", "conditions": []} + with pytest.raises(FilterError): + _parse_logical_condition(condition) + + def test_logical_condition_nested(self): + condition = { + "operator": "AND", + "conditions": [ + { + "operator": "OR", + "conditions": [ + {"field": "meta.domain", "operator": "!=", "value": "science"}, + {"field": "meta.chapter", "operator": "in", "value": ["intro", "conclusion"]}, + ], + }, + { + "operator": "OR", + "conditions": [ + {"field": "meta.number", "operator": ">=", "value": 90}, + {"field": "meta.author", "operator": "not in", "value": ["John", "Jane"]}, + ], + }, + ], + } + query, values = _parse_logical_condition(condition) + assert query == ( + "((meta->>'domain' IS DISTINCT FROM %s OR meta->>'chapter' = ANY(%s)) " + "AND ((meta->>'number')::integer >= %s OR meta->>'author' IS NULL OR meta->>'author' != ALL(%s)))" + ) + assert values == ["science", [["intro", "conclusion"]], 90, [["John", "Jane"]]] + + def test_convert_filters_to_where_clause_and_params(self): + filters = { + "operator": "AND", + "conditions": [ + {"field": "meta.number", "operator": "==", "value": 100}, + {"field": "meta.chapter", "operator": "==", "value": "intro"}, + ], + } + where_clause, params = _convert_filters_to_where_clause_and_params(filters) + assert where_clause == SQL(" WHERE ") + SQL("((meta->>'number')::integer = 
%s AND meta->>'chapter' = %s)") + assert params == (100, "intro") + + def test_convert_filters_to_where_clause_and_params_handle_null(self): + filters = { + "operator": "AND", + "conditions": [ + {"field": "meta.number", "operator": "==", "value": None}, + {"field": "meta.chapter", "operator": "==", "value": "intro"}, + ], + } + where_clause, params = _convert_filters_to_where_clause_and_params(filters) + assert where_clause == SQL(" WHERE ") + SQL("(meta->>'number' IS NULL AND meta->>'chapter' = %s)") + assert params == ("intro",) From 0d15e3675785a4db745b98a7c53f235ced57c7a2 Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Wed, 31 Jan 2024 17:43:14 +0100 Subject: [PATCH 37/47] Pgvector - embedding retrieval (#298) * squash * Update integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py Co-authored-by: Massimiliano Pippi * Update integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py Co-authored-by: Massimiliano Pippi * Update integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py Co-authored-by: Massimiliano Pippi * Update integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py Co-authored-by: Massimiliano Pippi * fix fmt --------- Co-authored-by: Massimiliano Pippi --- .../pgvector/document_store.py | 102 +++++++++++++- integrations/pgvector/tests/conftest.py | 2 +- .../tests/test_embedding_retrieval.py | 130 ++++++++++++++++++ 3 files changed, 229 insertions(+), 5 deletions(-) create mode 100644 integrations/pgvector/tests/test_embedding_retrieval.py diff --git a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py index b49bd87c3..0abaaecce 100644 --- a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py +++ 
b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py @@ -52,8 +52,10 @@ meta = EXCLUDED.meta """ +VALID_VECTOR_FUNCTIONS = ["cosine_similarity", "inner_product", "l2_distance"] + VECTOR_FUNCTION_TO_POSTGRESQL_OPS = { - "cosine_distance": "vector_cosine_ops", + "cosine_similarity": "vector_cosine_ops", "inner_product": "vector_ip_ops", "l2_distance": "vector_l2_ops", } @@ -70,7 +72,7 @@ def __init__( connection_string: str, table_name: str = "haystack_documents", embedding_dimension: int = 768, - vector_function: Literal["cosine_distance", "inner_product", "l2_distance"] = "cosine_distance", + vector_function: Literal["cosine_similarity", "inner_product", "l2_distance"] = "cosine_similarity", recreate_table: bool = False, search_strategy: Literal["exact_nearest_neighbor", "hnsw"] = "exact_nearest_neighbor", hnsw_recreate_index_if_exists: bool = False, @@ -87,12 +89,23 @@ def __init__( :param table_name: The name of the table to use to store Haystack documents. Defaults to "haystack_documents". :param embedding_dimension: The dimension of the embedding. Defaults to 768. :param vector_function: The similarity function to use when searching for similar embeddings. - Defaults to "cosine_distance". Set it to one of the following values: - :type vector_function: Literal["cosine_distance", "inner_product", "l2_distance"] + Defaults to "cosine_similarity". "cosine_similarity" and "inner_product" are similarity functions and + higher scores indicate greater similarity between the documents. + "l2_distance" returns the straight-line distance between vectors, + and the most similar documents are the ones with the smallest score. + + Important: when using the "hnsw" search strategy, an index will be created that depends on the + `vector_function` passed here. Make sure subsequent queries will keep using the same + vector similarity function in order to take advantage of the index. 
+ :type vector_function: Literal["cosine_similarity", "inner_product", "l2_distance"] :param recreate_table: Whether to recreate the table if it already exists. Defaults to False. :param search_strategy: The search strategy to use when searching for similar embeddings. Defaults to "exact_nearest_neighbor". "hnsw" is an approximate nearest neighbor search strategy, which trades off some accuracy for speed; it is recommended for large numbers of documents. + + Important: when using the "hnsw" search strategy, an index will be created that depends on the + `vector_function` passed here. Make sure subsequent queries will keep using the same + vector similarity function in order to take advantage of the index. :type search_strategy: Literal["exact_nearest_neighbor", "hnsw"] :param hnsw_recreate_index_if_exists: Whether to recreate the HNSW index if it already exists. Defaults to False. Only used if search_strategy is set to "hnsw". @@ -107,6 +120,9 @@ def __init__( self.connection_string = connection_string self.table_name = table_name self.embedding_dimension = embedding_dimension + if vector_function not in VALID_VECTOR_FUNCTIONS: + msg = f"vector_function must be one of {VALID_VECTOR_FUNCTIONS}, but got {vector_function}" + raise ValueError(msg) self.vector_function = vector_function self.recreate_table = recreate_table self.search_strategy = search_strategy @@ -423,3 +439,81 @@ def delete_documents(self, document_ids: List[str]) -> None: ) self._execute_sql(delete_sql, error_msg="Could not delete documents from PgvectorDocumentStore") + + def _embedding_retrieval( + self, + query_embedding: List[float], + *, + filters: Optional[Dict[str, Any]] = None, + top_k: int = 10, + vector_function: Optional[Literal["cosine_similarity", "inner_product", "l2_distance"]] = None, + ) -> List[Document]: + """ + Retrieves documents that are most similar to the query embedding using a vector similarity metric. 
+ + This method is not meant to be part of the public interface of + `PgvectorDocumentStore` and it should not be called directly. + `PgvectorEmbeddingRetriever` uses this method directly and is the public interface for it. + :raises ValueError + :return: List of Documents that are most similar to `query_embedding` + """ + + if not query_embedding: + msg = "query_embedding must be a non-empty list of floats" + raise ValueError(msg) + if len(query_embedding) != self.embedding_dimension: + msg = ( + f"query_embedding dimension ({len(query_embedding)}) does not match PgvectorDocumentStore " + f"embedding dimension ({self.embedding_dimension})." + ) + raise ValueError(msg) + + vector_function = vector_function or self.vector_function + if vector_function not in VALID_VECTOR_FUNCTIONS: + msg = f"vector_function must be one of {VALID_VECTOR_FUNCTIONS}, but got {vector_function}" + raise ValueError(msg) + + # the vector must be a string with this format: "'[3,1,2]'" + query_embedding_for_postgres = f"'[{','.join(str(el) for el in query_embedding)}]'" + + # to compute the scores, we use the approach described in pgvector README: + # https://github.com/pgvector/pgvector?tab=readme-ov-file#distances + # cosine_similarity and inner_product are modified from the result of the operator + if vector_function == "cosine_similarity": + score_definition = f"1 - (embedding <=> {query_embedding_for_postgres}) AS score" + elif vector_function == "inner_product": + score_definition = f"(embedding <#> {query_embedding_for_postgres}) * -1 AS score" + elif vector_function == "l2_distance": + score_definition = f"embedding <-> {query_embedding_for_postgres} AS score" + + sql_select = SQL("SELECT *, {score} FROM {table_name}").format( + table_name=Identifier(self.table_name), + score=SQL(score_definition), + ) + + sql_where_clause = SQL("") + params = () + if filters: + sql_where_clause, params = _convert_filters_to_where_clause_and_params(filters) + + # we always want to return the most 
similar documents first + # so when using l2_distance, the sort order must be ASC + sort_order = "ASC" if vector_function == "l2_distance" else "DESC" + + sql_sort = SQL(" ORDER BY score {sort_order} LIMIT {top_k}").format( + top_k=SQLLiteral(top_k), + sort_order=SQL(sort_order), + ) + + sql_query = sql_select + sql_where_clause + sql_sort + + result = self._execute_sql( + sql_query, + params, + error_msg="Could not retrieve documents from PgvectorDocumentStore.", + cursor=self._dict_cursor, + ) + + records = result.fetchall() + docs = self._from_pg_to_haystack_documents(records) + return docs diff --git a/integrations/pgvector/tests/conftest.py b/integrations/pgvector/tests/conftest.py index 34260f409..743e8de14 100644 --- a/integrations/pgvector/tests/conftest.py +++ b/integrations/pgvector/tests/conftest.py @@ -7,7 +7,7 @@ def document_store(request): connection_string = "postgresql://postgres:postgres@localhost:5432/postgres" table_name = f"haystack_{request.node.name}" embedding_dimension = 768 - vector_function = "cosine_distance" + vector_function = "cosine_similarity" recreate_table = True search_strategy = "exact_nearest_neighbor" diff --git a/integrations/pgvector/tests/test_embedding_retrieval.py b/integrations/pgvector/tests/test_embedding_retrieval.py new file mode 100644 index 000000000..1d5e8e297 --- /dev/null +++ b/integrations/pgvector/tests/test_embedding_retrieval.py @@ -0,0 +1,130 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import List + +import pytest +from haystack.dataclasses.document import Document +from haystack_integrations.document_stores.pgvector import PgvectorDocumentStore +from numpy.random import rand + + +class TestEmbeddingRetrieval: + @pytest.fixture + def document_store_w_hnsw_index(self, request): + connection_string = "postgresql://postgres:postgres@localhost:5432/postgres" + table_name = f"haystack_hnsw_{request.node.name}" + embedding_dimension = 768 + 
vector_function = "cosine_similarity" + recreate_table = True + search_strategy = "hnsw" + + store = PgvectorDocumentStore( + connection_string=connection_string, + table_name=table_name, + embedding_dimension=embedding_dimension, + vector_function=vector_function, + recreate_table=recreate_table, + search_strategy=search_strategy, + ) + yield store + + store.delete_table() + + @pytest.mark.parametrize("document_store", ["document_store", "document_store_w_hnsw_index"], indirect=True) + def test_embedding_retrieval_cosine_similarity(self, document_store: PgvectorDocumentStore): + query_embedding = [0.1] * 768 + most_similar_embedding = [0.8] * 768 + second_best_embedding = [0.8] * 700 + [0.1] * 3 + [0.2] * 65 + another_embedding = rand(768).tolist() + + docs = [ + Document(content="Most similar document (cosine sim)", embedding=most_similar_embedding), + Document(content="2nd best document (cosine sim)", embedding=second_best_embedding), + Document(content="Not very similar document (cosine sim)", embedding=another_embedding), + ] + + document_store.write_documents(docs) + + results = document_store._embedding_retrieval( + query_embedding=query_embedding, top_k=2, filters={}, vector_function="cosine_similarity" + ) + assert len(results) == 2 + assert results[0].content == "Most similar document (cosine sim)" + assert results[1].content == "2nd best document (cosine sim)" + assert results[0].score > results[1].score + + @pytest.mark.parametrize("document_store", ["document_store", "document_store_w_hnsw_index"], indirect=True) + def test_embedding_retrieval_inner_product(self, document_store: PgvectorDocumentStore): + query_embedding = [0.1] * 768 + most_similar_embedding = [0.8] * 768 + second_best_embedding = [0.8] * 700 + [0.1] * 3 + [0.2] * 65 + another_embedding = rand(768).tolist() + + docs = [ + Document(content="Most similar document (inner product)", embedding=most_similar_embedding), + Document(content="2nd best document (inner product)", 
embedding=second_best_embedding), + Document(content="Not very similar document (inner product)", embedding=another_embedding), + ] + + document_store.write_documents(docs) + + results = document_store._embedding_retrieval( + query_embedding=query_embedding, top_k=2, filters={}, vector_function="inner_product" + ) + assert len(results) == 2 + assert results[0].content == "Most similar document (inner product)" + assert results[1].content == "2nd best document (inner product)" + assert results[0].score > results[1].score + + @pytest.mark.parametrize("document_store", ["document_store", "document_store_w_hnsw_index"], indirect=True) + def test_embedding_retrieval_l2_distance(self, document_store: PgvectorDocumentStore): + query_embedding = [0.1] * 768 + most_similar_embedding = [0.1] * 765 + [0.15] * 3 + second_best_embedding = [0.1] * 700 + [0.1] * 3 + [0.2] * 65 + another_embedding = rand(768).tolist() + + docs = [ + Document(content="Most similar document (l2 dist)", embedding=most_similar_embedding), + Document(content="2nd best document (l2 dist)", embedding=second_best_embedding), + Document(content="Not very similar document (l2 dist)", embedding=another_embedding), + ] + + document_store.write_documents(docs) + + results = document_store._embedding_retrieval( + query_embedding=query_embedding, top_k=2, filters={}, vector_function="l2_distance" + ) + assert len(results) == 2 + assert results[0].content == "Most similar document (l2 dist)" + assert results[1].content == "2nd best document (l2 dist)" + assert results[0].score < results[1].score + + @pytest.mark.parametrize("document_store", ["document_store", "document_store_w_hnsw_index"], indirect=True) + def test_embedding_retrieval_with_filters(self, document_store: PgvectorDocumentStore): + docs = [Document(content=f"Document {i}", embedding=rand(768).tolist()) for i in range(10)] + + for i in range(10): + docs[i].meta["meta_field"] = "custom_value" if i % 2 == 0 else "other_value" + + 
document_store.write_documents(docs) + + query_embedding = [0.1] * 768 + filters = {"field": "meta.meta_field", "operator": "==", "value": "custom_value"} + + results = document_store._embedding_retrieval(query_embedding=query_embedding, top_k=3, filters=filters) + assert len(results) == 3 + for result in results: + assert result.meta["meta_field"] == "custom_value" + assert results[0].score > results[1].score > results[2].score + + def test_empty_query_embedding(self, document_store: PgvectorDocumentStore): + query_embedding: List[float] = [] + with pytest.raises(ValueError): + document_store._embedding_retrieval(query_embedding=query_embedding) + + def test_query_embedding_wrong_dimension(self, document_store: PgvectorDocumentStore): + query_embedding = [0.1] * 4 + with pytest.raises(ValueError): + document_store._embedding_retrieval(query_embedding=query_embedding) From f56905a23a8e195025d8f1abd78caa4b339fe108 Mon Sep 17 00:00:00 2001 From: ZanSara Date: Thu, 1 Feb 2024 09:58:14 +0100 Subject: [PATCH 38/47] elasticsearch: generate api docs (#322) * add api docs * working dir * typo --- .github/workflows/elasticsearch.yml | 11 ++++++-- integrations/elasticsearch/pydoc/config.yml | 31 +++++++++++++++++++++ integrations/elasticsearch/pyproject.toml | 4 +++ 3 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 integrations/elasticsearch/pydoc/config.yml diff --git a/.github/workflows/elasticsearch.yml b/.github/workflows/elasticsearch.yml index eb2c1748d..688e5c48f 100644 --- a/.github/workflows/elasticsearch.yml +++ b/.github/workflows/elasticsearch.yml @@ -10,6 +10,10 @@ on: - "integrations/elasticsearch/**" - ".github/workflows/elasticsearch.yml" +defaults: + run: + working-directory: integrations/elasticsearch + concurrency: group: elasticsearch-${{ github.head_ref }} cancel-in-progress: true @@ -40,14 +44,15 @@ jobs: run: pip install --upgrade hatch - name: Lint - working-directory: integrations/elasticsearch if: matrix.python-version == '3.9' 
run: hatch run lint:all - name: Run ElasticSearch container - working-directory: integrations/elasticsearch run: docker-compose up -d + - name: Generate docs + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run docs + - name: Run tests - working-directory: integrations/elasticsearch run: hatch run cov diff --git a/integrations/elasticsearch/pydoc/config.yml b/integrations/elasticsearch/pydoc/config.yml new file mode 100644 index 000000000..dc5a090bc --- /dev/null +++ b/integrations/elasticsearch/pydoc/config.yml @@ -0,0 +1,31 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: [ + "haystack_integrations.components.retrievers.elasticsearch.bm25_retriever", + "haystack_integrations.components.retrievers.elasticsearch.embedding_retriever", + "haystack_integrations.document_stores.elasticsearch.document_store", + "haystack_integrations.document_stores.elasticsearch.filters", + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer + excerpt: Elasticsearch integration for Haystack + category_slug: haystack-integrations + title: Elasticsearch + slug: integrations-elasticsearch + order: 50 + markdown: + descriptive_class_title: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_elasticsearch.md \ No newline at end of file diff --git a/integrations/elasticsearch/pyproject.toml b/integrations/elasticsearch/pyproject.toml index af3d89c0c..b67df7e03 100644 --- a/integrations/elasticsearch/pyproject.toml +++ b/integrations/elasticsearch/pyproject.toml @@ -49,6 +49,7 @@ dependencies = [ "coverage[toml]>=6.5", "pytest", "pytest-xdist", + "haystack-pydoc-tools", ] [tool.hatch.envs.default.scripts] test = "pytest 
{args:tests}" @@ -61,6 +62,9 @@ cov = [ "test-cov", "cov-report", ] +docs = [ + "pydoc-markdown pydoc/config.yml" +] [[tool.hatch.envs.all.matrix]] python = ["3.8", "3.9", "3.10", "3.11"] From bdee9332c964332e99b0f1d7a87870856ad5dbdd Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Thu, 1 Feb 2024 12:22:50 +0100 Subject: [PATCH 39/47] Pinecone - decrease concurrency in tests (#323) * pinecone - decrease concurrency * decrease more sleep time --- integrations/pinecone/pyproject.toml | 4 ++-- integrations/pinecone/tests/conftest.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/integrations/pinecone/pyproject.toml b/integrations/pinecone/pyproject.toml index 2d73cdf58..c95ee0aac 100644 --- a/integrations/pinecone/pyproject.toml +++ b/integrations/pinecone/pyproject.toml @@ -54,8 +54,8 @@ dependencies = [ [tool.hatch.envs.default.scripts] # Pinecone tests are slow (require HTTP requests), so we run them in parallel # with pytest-xdist (https://pytest-xdist.readthedocs.io/en/stable/distribution.html) -test = "pytest -n auto --maxprocesses=3 {args:tests}" -test-cov = "coverage run -m pytest -n auto --maxprocesses=3 {args:tests}" +test = "pytest -n auto --maxprocesses=2 {args:tests}" +test-cov = "coverage run -m pytest -n auto --maxprocesses=2 {args:tests}" cov-report = [ "- coverage combine", "coverage report", diff --git a/integrations/pinecone/tests/conftest.py b/integrations/pinecone/tests/conftest.py index 79d2608f2..c7a1342d5 100644 --- a/integrations/pinecone/tests/conftest.py +++ b/integrations/pinecone/tests/conftest.py @@ -6,7 +6,7 @@ from haystack_integrations.document_stores.pinecone import PineconeDocumentStore # This is the approximate time it takes for the documents to be available -SLEEP_TIME = 25 +SLEEP_TIME = 20 @pytest.fixture() From 61daacb3f9e63af8f9df67cada8288ad333ad074 Mon Sep 17 00:00:00 2001 From: ZanSara Date: Thu, 1 Feb 2024 15:14:29 +0100 Subject: [PATCH 40/47] add pydocconf (#321) --- .github/workflows/cohere.yml | 
4 ++++ integrations/cohere/pydoc/config.yml | 32 ++++++++++++++++++++++++++++ integrations/cohere/pyproject.toml | 4 ++++ 3 files changed, 40 insertions(+) create mode 100644 integrations/cohere/pydoc/config.yml diff --git a/.github/workflows/cohere.yml b/.github/workflows/cohere.yml index 0f0030ec1..562556e47 100644 --- a/.github/workflows/cohere.yml +++ b/.github/workflows/cohere.yml @@ -52,5 +52,9 @@ jobs: if: matrix.python-version == '3.9' && runner.os == 'Linux' run: hatch run lint:all + - name: Generate docs + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run docs + - name: Run tests run: hatch run cov \ No newline at end of file diff --git a/integrations/cohere/pydoc/config.yml b/integrations/cohere/pydoc/config.yml new file mode 100644 index 000000000..9418739b5 --- /dev/null +++ b/integrations/cohere/pydoc/config.yml @@ -0,0 +1,32 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: [ + "haystack_integrations.components.embedders.cohere.document_embedder", + "haystack_integrations.components.embedders.cohere.text_embedder", + "haystack_integrations.components.embedders.cohere.utils", + "haystack_integrations.components.generators.cohere.generator", + "haystack_integrations.components.generators.cohere.chat.chat_generator", + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer + excerpt: Cohere integration for Haystack + category_slug: haystack-integrations + title: Cohere + slug: integrations-cohere + order: 40 + markdown: + descriptive_class_title: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_cohere.md diff --git a/integrations/cohere/pyproject.toml 
b/integrations/cohere/pyproject.toml index 42349d9fb..332471674 100644 --- a/integrations/cohere/pyproject.toml +++ b/integrations/cohere/pyproject.toml @@ -49,6 +49,7 @@ git_describe_command = 'git describe --tags --match="integrations/cohere-v[0-9]* dependencies = [ "coverage[toml]>=6.5", "pytest", + "haystack-pydoc-tools", ] [tool.hatch.envs.default.scripts] test = "pytest {args:tests}" @@ -61,6 +62,9 @@ cov = [ "test-cov", "cov-report", ] +docs = [ + "pydoc-markdown pydoc/config.yml" +] [[tool.hatch.envs.all.matrix]] python = ["3.7", "3.8", "3.9", "3.10", "3.11"] From 3454815095b539558cdda083c6d51f76ed2b12ea Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Thu, 1 Feb 2024 17:01:26 +0100 Subject: [PATCH 41/47] Pgvector - Embedding Retriever (#320) * squash * squash * Update integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py Co-authored-by: Massimiliano Pippi * Update integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py Co-authored-by: Massimiliano Pippi * Update integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py Co-authored-by: Massimiliano Pippi * Update integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py Co-authored-by: Massimiliano Pippi * fix fmt * adjust docstrings * Update integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/embedding_retriever.py Co-authored-by: Massimiliano Pippi * Update integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/embedding_retriever.py Co-authored-by: Massimiliano Pippi * improve docstrings * fmt --------- Co-authored-by: Massimiliano Pippi --- .../retrievers/pgvector/__init__.py | 6 + .../pgvector/embedding_retriever.py | 104 ++++++++++++++++ integrations/pgvector/tests/test_retriever.py | 112 ++++++++++++++++++ 3 files changed, 222 insertions(+) create mode 100644 
integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/__init__.py create mode 100644 integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/embedding_retriever.py create mode 100644 integrations/pgvector/tests/test_retriever.py diff --git a/integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/__init__.py b/integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/__init__.py new file mode 100644 index 000000000..ec0cf0dc4 --- /dev/null +++ b/integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/__init__.py @@ -0,0 +1,6 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from .embedding_retriever import PgvectorEmbeddingRetriever + +__all__ = ["PgvectorEmbeddingRetriever"] diff --git a/integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/embedding_retriever.py b/integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/embedding_retriever.py new file mode 100644 index 000000000..26807e9bd --- /dev/null +++ b/integrations/pgvector/src/haystack_integrations/components/retrievers/pgvector/embedding_retriever.py @@ -0,0 +1,104 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from typing import Any, Dict, List, Literal, Optional + +from haystack import component, default_from_dict, default_to_dict +from haystack.dataclasses import Document +from haystack_integrations.document_stores.pgvector import PgvectorDocumentStore +from haystack_integrations.document_stores.pgvector.document_store import VALID_VECTOR_FUNCTIONS + + +@component +class PgvectorEmbeddingRetriever: + """ + Retrieves documents from the PgvectorDocumentStore, based on their dense embeddings. + + Needs to be connected to the PgvectorDocumentStore. 
+ """ + + def __init__( + self, + *, + document_store: PgvectorDocumentStore, + filters: Optional[Dict[str, Any]] = None, + top_k: int = 10, + vector_function: Optional[Literal["cosine_similarity", "inner_product", "l2_distance"]] = None, + ): + """ + Create the PgvectorEmbeddingRetriever component. + + :param document_store: An instance of PgvectorDocumentStore. + :param filters: Filters applied to the retrieved Documents. Defaults to None. + :param top_k: Maximum number of Documents to return, defaults to 10. + :param vector_function: The similarity function to use when searching for similar embeddings. + Defaults to the one set in the `document_store` instance. + "cosine_similarity" and "inner_product" are similarity functions and + higher scores indicate greater similarity between the documents. + "l2_distance" returns the straight-line distance between vectors, + and the most similar documents are the ones with the smallest score. + + Important: if the document store is using the "hnsw" search strategy, the vector function + should match the one utilized during index creation to take advantage of the index. + :type vector_function: Literal["cosine_similarity", "inner_product", "l2_distance"] + + :raises ValueError: If `document_store` is not an instance of PgvectorDocumentStore. 
+ """ + if not isinstance(document_store, PgvectorDocumentStore): + msg = "document_store must be an instance of PgvectorDocumentStore" + raise ValueError(msg) + + if vector_function and vector_function not in VALID_VECTOR_FUNCTIONS: + msg = f"vector_function must be one of {VALID_VECTOR_FUNCTIONS}" + raise ValueError(msg) + + self.document_store = document_store + self.filters = filters or {} + self.top_k = top_k + self.vector_function = vector_function or document_store.vector_function + + def to_dict(self) -> Dict[str, Any]: + return default_to_dict( + self, + filters=self.filters, + top_k=self.top_k, + vector_function=self.vector_function, + document_store=self.document_store.to_dict(), + ) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "PgvectorEmbeddingRetriever": + data["init_parameters"]["document_store"] = default_from_dict( + PgvectorDocumentStore, data["init_parameters"]["document_store"] + ) + return default_from_dict(cls, data) + + @component.output_types(documents=List[Document]) + def run( + self, + query_embedding: List[float], + filters: Optional[Dict[str, Any]] = None, + top_k: Optional[int] = None, + vector_function: Optional[Literal["cosine_similarity", "inner_product", "l2_distance"]] = None, + ): + """ + Retrieve documents from the PgvectorDocumentStore, based on their embeddings. + + :param query_embedding: Embedding of the query. + :param filters: Filters applied to the retrieved Documents. + :param top_k: Maximum number of Documents to return. + :param vector_function: The similarity function to use when searching for similar embeddings. + :type vector_function: Literal["cosine_similarity", "inner_product", "l2_distance"] + :return: List of Documents similar to `query_embedding`. 
+ """ + filters = filters or self.filters + top_k = top_k or self.top_k + vector_function = vector_function or self.vector_function + + docs = self.document_store._embedding_retrieval( + query_embedding=query_embedding, + filters=filters, + top_k=top_k, + vector_function=vector_function, + ) + return {"documents": docs} diff --git a/integrations/pgvector/tests/test_retriever.py b/integrations/pgvector/tests/test_retriever.py new file mode 100644 index 000000000..cca6bbc9f --- /dev/null +++ b/integrations/pgvector/tests/test_retriever.py @@ -0,0 +1,112 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 +from unittest.mock import Mock + +from haystack.dataclasses import Document +from haystack_integrations.components.retrievers.pgvector import PgvectorEmbeddingRetriever +from haystack_integrations.document_stores.pgvector import PgvectorDocumentStore + + +class TestRetriever: + def test_init_default(self, document_store: PgvectorDocumentStore): + retriever = PgvectorEmbeddingRetriever(document_store=document_store) + assert retriever.document_store == document_store + assert retriever.filters == {} + assert retriever.top_k == 10 + assert retriever.vector_function == document_store.vector_function + + def test_init(self, document_store: PgvectorDocumentStore): + retriever = PgvectorEmbeddingRetriever( + document_store=document_store, filters={"field": "value"}, top_k=5, vector_function="l2_distance" + ) + assert retriever.document_store == document_store + assert retriever.filters == {"field": "value"} + assert retriever.top_k == 5 + assert retriever.vector_function == "l2_distance" + + def test_to_dict(self, document_store: PgvectorDocumentStore): + retriever = PgvectorEmbeddingRetriever( + document_store=document_store, filters={"field": "value"}, top_k=5, vector_function="l2_distance" + ) + res = retriever.to_dict() + t = 
"haystack_integrations.components.retrievers.pgvector.embedding_retriever.PgvectorEmbeddingRetriever" + assert res == { + "type": t, + "init_parameters": { + "document_store": { + "type": "haystack_integrations.document_stores.pgvector.document_store.PgvectorDocumentStore", + "init_parameters": { + "connection_string": "postgresql://postgres:postgres@localhost:5432/postgres", + "table_name": "haystack_test_to_dict", + "embedding_dimension": 768, + "vector_function": "cosine_similarity", + "recreate_table": True, + "search_strategy": "exact_nearest_neighbor", + "hnsw_recreate_index_if_exists": False, + "hnsw_index_creation_kwargs": {}, + "hnsw_ef_search": None, + }, + }, + "filters": {"field": "value"}, + "top_k": 5, + "vector_function": "l2_distance", + }, + } + + def test_from_dict(self): + t = "haystack_integrations.components.retrievers.pgvector.embedding_retriever.PgvectorEmbeddingRetriever" + data = { + "type": t, + "init_parameters": { + "document_store": { + "type": "haystack_integrations.document_stores.pgvector.document_store.PgvectorDocumentStore", + "init_parameters": { + "connection_string": "postgresql://postgres:postgres@localhost:5432/postgres", + "table_name": "haystack_test_to_dict", + "embedding_dimension": 768, + "vector_function": "cosine_similarity", + "recreate_table": True, + "search_strategy": "exact_nearest_neighbor", + "hnsw_recreate_index_if_exists": False, + "hnsw_index_creation_kwargs": {}, + "hnsw_ef_search": None, + }, + }, + "filters": {"field": "value"}, + "top_k": 5, + "vector_function": "l2_distance", + }, + } + + retriever = PgvectorEmbeddingRetriever.from_dict(data) + document_store = retriever.document_store + + assert isinstance(document_store, PgvectorDocumentStore) + assert document_store.connection_string == "postgresql://postgres:postgres@localhost:5432/postgres" + assert document_store.table_name == "haystack_test_to_dict" + assert document_store.embedding_dimension == 768 + assert document_store.vector_function == 
"cosine_similarity" + assert document_store.recreate_table + assert document_store.search_strategy == "exact_nearest_neighbor" + assert not document_store.hnsw_recreate_index_if_exists + assert document_store.hnsw_index_creation_kwargs == {} + assert document_store.hnsw_ef_search is None + + assert retriever.filters == {"field": "value"} + assert retriever.top_k == 5 + assert retriever.vector_function == "l2_distance" + + def test_run(self): + mock_store = Mock(spec=PgvectorDocumentStore) + doc = Document(content="Test doc", embedding=[0.1, 0.2]) + mock_store._embedding_retrieval.return_value = [doc] + + retriever = PgvectorEmbeddingRetriever(document_store=mock_store, vector_function="l2_distance") + res = retriever.run(query_embedding=[0.3, 0.5]) + + mock_store._embedding_retrieval.assert_called_once_with( + query_embedding=[0.3, 0.5], filters={}, top_k=10, vector_function="l2_distance" + ) + + assert res == {"documents": [doc]} From d477a21e56b216c937bcd1363b43d8981ac7a7ac Mon Sep 17 00:00:00 2001 From: Daria Fokina Date: Fri, 2 Feb 2024 08:38:01 +0100 Subject: [PATCH 42/47] astra: generate api docs (#327) --- .github/workflows/astra.yml | 4 ++++ integrations/astra/pydoc/config.yml | 30 +++++++++++++++++++++++++++++ integrations/astra/pyproject.toml | 5 ++++- 3 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 integrations/astra/pydoc/config.yml diff --git a/.github/workflows/astra.yml b/.github/workflows/astra.yml index b751550de..a1aab7154 100644 --- a/.github/workflows/astra.yml +++ b/.github/workflows/astra.yml @@ -53,6 +53,10 @@ jobs: if: matrix.python-version == '3.9' && runner.os == 'Linux' run: hatch run lint:all + - name: Generate docs + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run docs + - name: Run tests env: ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }} diff --git a/integrations/astra/pydoc/config.yml b/integrations/astra/pydoc/config.yml new file mode 100644 index 
000000000..68cc1c809 --- /dev/null +++ b/integrations/astra/pydoc/config.yml @@ -0,0 +1,30 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: [ + "haystack_integrations.components.retrievers.astra.retriever", + "haystack_integrations.document_stores.astra.document_store", + "haystack_integrations.document_stores.astra.errors", + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer + excerpt: Astra integration for Haystack + category_slug: haystack-integrations + title: Astra + slug: integrations-astra + order: 20 + markdown: + descriptive_class_title: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_astra.md \ No newline at end of file diff --git a/integrations/astra/pyproject.toml b/integrations/astra/pyproject.toml index 6b4e2565d..7599797a8 100644 --- a/integrations/astra/pyproject.toml +++ b/integrations/astra/pyproject.toml @@ -50,6 +50,7 @@ git_describe_command = 'git describe --tags --match="integrations/astra-v[0-9]*" dependencies = [ "coverage[toml]>=6.5", "pytest", + "haystack-pydoc-tools", ] [tool.hatch.envs.default.scripts] test = "pytest {args:tests}" @@ -62,7 +63,9 @@ cov = [ "test-cov", "cov-report", ] - +docs = [ + "pydoc-markdown pydoc/config.yml" +] [[tool.hatch.envs.all.matrix]] python = ["3.7", "3.8", "3.9", "3.10", "3.11"] From 8c96def2b6abec323c71221935187f45c381ee9a Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Fri, 2 Feb 2024 08:44:04 +0100 Subject: [PATCH 43/47] opensearch: generate API docs (#324) --- .github/workflows/opensearch.yml | 10 ++++++-- integrations/opensearch/pydoc/config.yml | 31 ++++++++++++++++++++++++ integrations/opensearch/pyproject.toml | 5 ++++ 3 files changed, 44 
insertions(+), 2 deletions(-) create mode 100644 integrations/opensearch/pydoc/config.yml diff --git a/.github/workflows/opensearch.yml b/.github/workflows/opensearch.yml index aacb4ce71..72a01d090 100644 --- a/.github/workflows/opensearch.yml +++ b/.github/workflows/opensearch.yml @@ -18,6 +18,10 @@ env: PYTHONUNBUFFERED: "1" FORCE_COLOR: "1" +defaults: + run: + working-directory: integrations/opensearch + jobs: run: name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} @@ -40,14 +44,16 @@ jobs: run: pip install --upgrade hatch - name: Lint - working-directory: integrations/opensearch if: matrix.python-version == '3.9' run: hatch run lint:all - name: Run opensearch container - working-directory: integrations/opensearch run: docker-compose up -d + - name: Generate docs + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run docs + - name: Run tests working-directory: integrations/opensearch run: hatch run cov diff --git a/integrations/opensearch/pydoc/config.yml b/integrations/opensearch/pydoc/config.yml new file mode 100644 index 000000000..3e07f6625 --- /dev/null +++ b/integrations/opensearch/pydoc/config.yml @@ -0,0 +1,31 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: [ + "haystack_integrations.components.retrievers.opensearch.bm25_retriever", + "haystack_integrations.components.retrievers.opensearch.embedding_retriever", + "haystack_integrations.document_stores.opensearch.document_store", + "haystack_integrations.document_stores.opensearch.filters", + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer + excerpt: OpenSearch integration for Haystack + 
category_slug: haystack-integrations + title: OpenSearch + slug: integrations-opensearch + order: 130 + markdown: + descriptive_class_title: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_opensearch.md diff --git a/integrations/opensearch/pyproject.toml b/integrations/opensearch/pyproject.toml index 3edd544a2..794fa73fa 100644 --- a/integrations/opensearch/pyproject.toml +++ b/integrations/opensearch/pyproject.toml @@ -49,6 +49,7 @@ dependencies = [ "coverage[toml]>=6.5", "pytest", "pytest-xdist", + "haystack-pydoc-tools", ] [tool.hatch.envs.default.scripts] test = "pytest {args:tests}" @@ -62,6 +63,10 @@ cov = [ "cov-report", ] +docs = [ + "pydoc-markdown pydoc/config.yml" +] + [[tool.hatch.envs.all.matrix]] python = ["3.8", "3.9", "3.10", "3.11"] From 68358e7e992b7d724e6b61dc8a0abe9ab5287d8d Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Fri, 2 Feb 2024 08:44:24 +0100 Subject: [PATCH 44/47] fix linting (#328) --- .../document_stores/pgvector/document_store.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py index 0abaaecce..097e86c7e 100644 --- a/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py +++ b/integrations/pgvector/src/haystack_integrations/document_stores/pgvector/document_store.py @@ -409,7 +409,7 @@ def _from_pg_to_haystack_documents(self, documents: List[Dict[str, Any]]) -> Lis # postgresql returns the embedding as a string # so we need to convert it to a list of floats - if "embedding" in document and document["embedding"]: + if document.get("embedding"): haystack_dict["embedding"] = [float(el) for el in document["embedding"].strip("[]").split(",")] haystack_document = Document.from_dict(haystack_dict) From 
55ddf41c1b3e49a20ccda84c3b585b39df9fc074 Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Fri, 2 Feb 2024 08:44:37 +0100 Subject: [PATCH 45/47] new entry for pgvector (#329) --- .github/labeler.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/labeler.yml b/.github/labeler.yml index f5eaa3374..4d060772c 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -69,6 +69,11 @@ integration:opensearch: - any-glob-to-any-file: "integrations/opensearch/**/*" - any-glob-to-any-file: ".github/workflows/opensearch.yml" +integration:pgvector: + - changed-files: + - any-glob-to-any-file: "integrations/pgvector/**/*" + - any-glob-to-any-file: ".github/workflows/pgvector.yml" + integration:pinecone: - changed-files: - any-glob-to-any-file: "integrations/pinecone/**/*" @@ -81,8 +86,8 @@ integration:qdrant: integration:unstructured-fileconverter: - changed-files: - - any-glob-to-any-file: "integrations/unstructured/fileconverter/**/*" - - any-glob-to-any-file: ".github/workflows/unstructured_fileconverter.yml" + - any-glob-to-any-file: "integrations/unstructured/**/*" + - any-glob-to-any-file: ".github/workflows/unstructured.yml" integration:uptrain: - changed-files: From 26680ca12640454254dc8d93a5a84649d06c96bb Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Fri, 2 Feb 2024 14:22:36 +0100 Subject: [PATCH 46/47] api docs (#325) --- .github/workflows/pgvector.yml | 12 ++++++++--- integrations/pgvector/pydoc/config.yml | 30 ++++++++++++++++++++++++++ integrations/pgvector/pyproject.toml | 4 ++++ 3 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 integrations/pgvector/pydoc/config.yml diff --git a/.github/workflows/pgvector.yml b/.github/workflows/pgvector.yml index c985b765a..badb2565b 100644 --- a/.github/workflows/pgvector.yml +++ b/.github/workflows/pgvector.yml @@ -18,6 +18,10 @@ env: PYTHONUNBUFFERED: "1" FORCE_COLOR: "1" +defaults: + run: + working-directory: integrations/pgvector + jobs: run: name: Python 
${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} @@ -49,10 +53,12 @@ jobs: run: pip install --upgrade hatch - name: Lint - working-directory: integrations/pgvector if: matrix.python-version == '3.9' - run: hatch run lint:all + run: hatch run lint:all + + - name: Generate docs + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run docs - name: Run tests - working-directory: integrations/pgvector run: hatch run cov diff --git a/integrations/pgvector/pydoc/config.yml b/integrations/pgvector/pydoc/config.yml new file mode 100644 index 000000000..79974b4a1 --- /dev/null +++ b/integrations/pgvector/pydoc/config.yml @@ -0,0 +1,30 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: [ + "haystack_integrations.components.retrievers.pgvector.embedding_retriever", + "haystack_integrations.document_stores.pgvector.document_store", + "haystack_integrations.document_stores.pgvector.filters", + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer + excerpt: Pgvector integration for Haystack + category_slug: haystack-integrations + title: Pgvector + slug: integrations-pgvector + order: 140 + markdown: + descriptive_class_title: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_pgvector.md diff --git a/integrations/pgvector/pyproject.toml b/integrations/pgvector/pyproject.toml index b361af8b1..10ef5d314 100644 --- a/integrations/pgvector/pyproject.toml +++ b/integrations/pgvector/pyproject.toml @@ -51,6 +51,7 @@ dependencies = [ "coverage[toml]>=6.5", "pytest", "ipython", + "haystack-pydoc-tools", ] 
[tool.hatch.envs.default.scripts] test = "pytest {args:tests}" @@ -63,6 +64,9 @@ cov = [ "test-cov", "cov-report", ] +docs = [ + "pydoc-markdown pydoc/config.yml" +] [[tool.hatch.envs.all.matrix]] python = ["3.8", "3.9", "3.10", "3.11", "3.12"] From f7678e104399bcaab15fd3c71bc029efbcbb84a7 Mon Sep 17 00:00:00 2001 From: Daria Fokina Date: Fri, 2 Feb 2024 15:57:00 +0100 Subject: [PATCH 47/47] ollama: generate api docs (#332) * ollama: generate api docs * Update .github/workflows/ollama.yml Co-authored-by: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> --------- Co-authored-by: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com> --- .github/workflows/ollama.yml | 5 +++++ integrations/ollama/pydoc/config.yml | 29 ++++++++++++++++++++++++++++ integrations/ollama/pyproject.toml | 4 ++++ 3 files changed, 38 insertions(+) create mode 100644 integrations/ollama/pydoc/config.yml diff --git a/.github/workflows/ollama.yml b/.github/workflows/ollama.yml index 7f61af14e..a375fc7db 100644 --- a/.github/workflows/ollama.yml +++ b/.github/workflows/ollama.yml @@ -54,6 +54,11 @@ jobs: - name: Pull the LLM in the Ollama service run: docker exec ollama ollama pull ${{ env.LLM_FOR_TESTS }} + - name: Generate docs + working-directory: integrations/ollama + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run docs + - name: Run tests working-directory: integrations/ollama run: hatch run cov diff --git a/integrations/ollama/pydoc/config.yml b/integrations/ollama/pydoc/config.yml new file mode 100644 index 000000000..768694991 --- /dev/null +++ b/integrations/ollama/pydoc/config.yml @@ -0,0 +1,29 @@ +loaders: + - type: haystack_pydoc_tools.loaders.CustomPythonLoader + search_path: [../src] + modules: [ + "haystack_integrations.components.generators.ollama.generator", + "haystack_integrations.components.generators.ollama.chat.chat_generator" + ] + ignore_when_discovered: ["__init__"] +processors: + - type: filter + expression: + 
documented_only: true + do_not_filter_modules: false + skip_empty_modules: true + - type: smart + - type: crossref +renderer: + type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer + excerpt: Ollama integration for Haystack + category_slug: haystack-integrations + title: Ollama + slug: integrations-ollama + order: 120 + markdown: + descriptive_class_title: false + descriptive_module_title: true + add_method_class_prefix: true + add_member_class_prefix: false + filename: _readme_ollama.md \ No newline at end of file diff --git a/integrations/ollama/pyproject.toml b/integrations/ollama/pyproject.toml index 69cc2ed16..e3bb738b6 100644 --- a/integrations/ollama/pyproject.toml +++ b/integrations/ollama/pyproject.toml @@ -48,6 +48,7 @@ git_describe_command = 'git describe --tags --match="integrations/ollama-v[0-9]* dependencies = [ "coverage[toml]>=6.5", "pytest", + "haystack-pydoc-tools", ] [tool.hatch.envs.default.scripts] test = "pytest {args:tests}" @@ -60,6 +61,9 @@ cov = [ "test-cov", "cov-report", ] +docs = [ + "pydoc-markdown pydoc/config.yml" +] [[tool.hatch.envs.all.matrix]] python = ["3.8", "3.9", "3.10", "3.11", "3.12"]