Skip to content

Commit

Permalink
remove backend code
Browse files Browse the repository at this point in the history
  • Loading branch information
raspawar committed Aug 6, 2024
1 parent a953600 commit 2d2e9dd
Show file tree
Hide file tree
Showing 10 changed files with 69 additions and 147 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from haystack import Document, component, default_from_dict, default_to_dict
from haystack.utils import Secret, deserialize_secrets_inplace
from haystack_integrations.util.nvidia import EmbedderBackend, NimBackend
from haystack_integrations.util.nvidia import NimBackend
from haystack_integrations.util.nvidia.util import is_hosted
from tqdm import tqdm

Expand Down Expand Up @@ -86,7 +86,7 @@ def __init__(
truncate = EmbeddingTruncateMode.from_str(truncate)
self.truncate = truncate

self.backend: Optional[EmbedderBackend] = None
self.backend: Optional[Any] = None
self._initialized = False

if is_hosted(api_url) and not self.model: # manually set default model
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from haystack import component, default_from_dict, default_to_dict
from haystack.utils import Secret, deserialize_secrets_inplace
from haystack_integrations.util.nvidia import EmbedderBackend, NimBackend
from haystack_integrations.util.nvidia import NimBackend
from haystack_integrations.util.nvidia.util import is_hosted

from .truncate import EmbeddingTruncateMode
Expand Down Expand Up @@ -70,7 +70,7 @@ def __init__(
truncate = EmbeddingTruncateMode.from_str(truncate)
self.truncate = truncate

self.backend: Optional[EmbedderBackend] = None
self.backend: Optional[Any] = None
self._initialized = False

if is_hosted(api_url) and not self.model: # manually set default model
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

from haystack import component, default_from_dict, default_to_dict
from haystack.utils.auth import Secret, deserialize_secrets_inplace
from haystack_integrations.util.nvidia import GeneratorBackend, NimBackend
from haystack_integrations.util.nvidia import NimBackend
from haystack_integrations.util.nvidia.util import is_hosted

_DEFAULT_API_URL = "https://integrate.api.nvidia.com/v1"
Expand Down Expand Up @@ -69,7 +69,7 @@ def __init__(
self._api_key = api_key
self._model_arguments = model_arguments or {}

self._backend: Optional[GeneratorBackend] = None
self._backend: Optional[Any] = None

self.is_hosted = is_hosted(api_url)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from .backend import EmbedderBackend, GeneratorBackend, Model
from .nim_backend import NimBackend
from .nim_backend import Model, NimBackend

__all__ = ["NimBackend", "EmbedderBackend", "GeneratorBackend", "Model"]
__all__ = ["NimBackend", "Model"]

This file was deleted.

Original file line number Diff line number Diff line change
@@ -1,14 +1,29 @@
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple

import requests
from haystack.utils import Secret

from .backend import EmbedderBackend, GeneratorBackend, Model

REQUEST_TIMEOUT = 60


class NimBackend(GeneratorBackend, EmbedderBackend):
@dataclass
class Model:
    """
    A single model entry as reported by the NVIDIA NIM API.

    Attributes:
        id: Unique identifier for the model; this is the value passed as
            the ``model`` parameter in requests.
        aliases: Alternative names for the model. All aliases are
            deprecated and trigger a warning when used.
        base_model: The root model this model is derived from, if any.
    """

    id: str
    aliases: Optional[List[str]] = field(default_factory=list)
    base_model: Optional[str] = None


class NimBackend:
def __init__(
self,
model: str,
Expand Down
3 changes: 3 additions & 0 deletions integrations/nvidia/tests/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
# SPDX-FileCopyrightText: 2023-present deepset GmbH <[email protected]>
#
# SPDX-License-Identifier: Apache-2.0
from .conftest import MockBackend

__all__ = ["MockBackend"]
22 changes: 22 additions & 0 deletions integrations/nvidia/tests/conftest.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,29 @@
from typing import Any, Dict, List, Optional, Tuple

import pytest
from haystack_integrations.util.nvidia import Model, NimBackend
from requests_mock import Mocker


class MockBackend(NimBackend):
    """Test double for ``NimBackend`` that returns canned responses.

    Avoids all network traffic in tests: embeddings, the model listing,
    and generated text are hard-coded.
    """

    def __init__(self, model: str, model_kwargs: Optional[Dict[str, Any]] = None):
        # api_url="" keeps the parent backend from pointing at the hosted API.
        super().__init__(model, api_url="", model_kwargs=model_kwargs or {})

    def embed(self, texts: List[str]) -> Tuple[List[List[float]], Dict[str, Any]]:
        """Return one fixed 3-dimensional embedding per input text, plus fake usage metadata."""
        data = [[0.1, 0.2, 0.3] for _ in texts]
        return data, {"usage": {"total_tokens": 4, "prompt_tokens": 4}}

    def models(self) -> List[Model]:
        """Return a single stub model entry."""
        return [Model(id="aa")]

    def generate(self) -> Tuple[List[str], List[Dict[str, Any]]]:
        """Return one canned assistant reply with fake token-usage metadata."""
        return (
            ["This is a mocked response."],
            [{"role": "assistant", "usage": {"prompt_tokens": 5, "total_tokens": 10, "completion_tokens": 5}}],
        )


@pytest.fixture
def mock_local_models(requests_mock: Mocker) -> None:
requests_mock.get(
Expand Down
36 changes: 13 additions & 23 deletions integrations/nvidia/tests/test_document_embedder.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,8 @@
from haystack import Document
from haystack.utils import Secret
from haystack_integrations.components.embedders.nvidia import EmbeddingTruncateMode, NvidiaDocumentEmbedder
from haystack_integrations.util.nvidia import EmbedderBackend, Model


class MockBackend(EmbedderBackend):
def __init__(self, model, model_kwargs):
super().__init__(model, model_kwargs)

def embed(self, texts):
inputs = texts
data = [[0.1, 0.2, 0.3] for i in range(len(inputs))]
return data, {"usage": {"total_tokens": 4, "prompt_tokens": 4}}

def models(self):
return [Model(id="aa")]
from . import MockBackend


class TestNvidiaDocumentEmbedder:
Expand Down Expand Up @@ -187,14 +175,14 @@ def test_prepare_texts_to_embed_w_suffix(self):

def test_embed_batch(self):
texts = ["text 1", "text 2", "text 3", "text 4", "text 5"]

model = "playground_nvolveqa_40k"
embedder = NvidiaDocumentEmbedder(
"playground_nvolveqa_40k",
model,
api_key=Secret.from_token("fake-api-key"),
)

embedder.warm_up()
embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend(model)

embeddings, metadata = embedder._embed_batch(texts_to_embed=texts, batch_size=2)

Expand Down Expand Up @@ -230,7 +218,7 @@ def test_run_default_model(self):
assert "Default model is set as:" in str(record[0].message)
assert embedder.model == "model1"

embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend(embedder.model)

result = embedder.run(documents=docs)

Expand Down Expand Up @@ -263,7 +251,7 @@ def test_run(self):
)

embedder.warm_up()
embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend(model)

result = embedder.run(documents=docs)

Expand Down Expand Up @@ -296,7 +284,7 @@ def test_run_custom_batch_size(self):
)

embedder.warm_up()
embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend(model)

result = embedder.run(documents=docs)

Expand All @@ -314,10 +302,11 @@ def test_run_custom_batch_size(self):
assert metadata == {"usage": {"prompt_tokens": 2 * 4, "total_tokens": 2 * 4}}

def test_run_wrong_input_format(self):
embedder = NvidiaDocumentEmbedder("playground_nvolveqa_40k", api_key=Secret.from_token("fake-api-key"))
model = "playground_nvolveqa_40k"
embedder = NvidiaDocumentEmbedder(model, api_key=Secret.from_token("fake-api-key"))

embedder.warm_up()
embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend(model)

string_input = "text"
list_integers_input = [1, 2, 3]
Expand All @@ -329,10 +318,11 @@ def test_run_wrong_input_format(self):
embedder.run(documents=list_integers_input)

def test_run_on_empty_list(self):
embedder = NvidiaDocumentEmbedder("playground_nvolveqa_40k", api_key=Secret.from_token("fake-api-key"))
model = "playground_nvolveqa_40k"
embedder = NvidiaDocumentEmbedder(model, api_key=Secret.from_token("fake-api-key"))

embedder.warm_up()
embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend(model, None)

empty_list_input = []
result = embedder.run(documents=empty_list_input)
Expand Down
24 changes: 5 additions & 19 deletions integrations/nvidia/tests/test_text_embedder.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,8 @@
import pytest
from haystack.utils import Secret
from haystack_integrations.components.embedders.nvidia import EmbeddingTruncateMode, NvidiaTextEmbedder
from haystack_integrations.util.nvidia import EmbedderBackend, Model


class MockBackend(EmbedderBackend):
def __init__(self, model, model_kwargs):
super().__init__(model, model_kwargs)

def embed(self, texts):
inputs = texts
data = [[0.1, 0.2, 0.3] for i in range(len(inputs))]
return data, {"usage": {"total_tokens": 4, "prompt_tokens": 4}}

def models(self):
return [Model(id="a1"), Model(id="a2")]
from . import MockBackend


class TestNvidiaTextEmbedder:
Expand Down Expand Up @@ -120,7 +108,7 @@ def test_run_default_model(self):
assert "Default model is set as:" in str(record[0].message)
assert embedder.model == "model1"

embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend(embedder.model)

result = embedder.run(text="The food was delicious")

Expand All @@ -131,12 +119,10 @@ def test_run_default_model(self):
}

def test_run(self):
embedder = NvidiaTextEmbedder(
"playground_nvolveqa_40k", api_key=Secret.from_token("fake-api-key"), prefix="prefix ", suffix=" suffix"
)
embedder = NvidiaTextEmbedder("playground_nvolveqa_40k", prefix="prefix ", suffix=" suffix")

embedder.warm_up()
embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend("playground_nvolveqa_40k")

result = embedder.run(text="The food was delicious")

Expand All @@ -149,7 +135,7 @@ def test_run(self):
def test_run_wrong_input_format(self):
embedder = NvidiaTextEmbedder("playground_nvolveqa_40k", api_key=Secret.from_token("fake-api-key"))
embedder.warm_up()
embedder.backend = MockBackend("aa", None)
embedder.backend = MockBackend("playground_nvolveqa_40k")

list_integers_input = [1, 2, 3]

Expand Down

0 comments on commit 2d2e9dd

Please sign in to comment.