Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update import paths for beta5 #232

Merged
merged 1 commit into from
Jan 18, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 0 additions & 13 deletions integrations/gradient/tests/test_gradient_document_embedder.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@


class TestGradientDocumentEmbedder:
@pytest.mark.unit
def test_init_from_env(self, monkeypatch):
monkeypatch.setenv("GRADIENT_ACCESS_TOKEN", access_token)
monkeypatch.setenv("GRADIENT_WORKSPACE_ID", workspace_id)
Expand All @@ -23,28 +22,24 @@ def test_init_from_env(self, monkeypatch):
assert embedder._gradient.workspace_id == workspace_id
assert embedder._gradient._api_client.configuration.access_token == access_token

@pytest.mark.unit
def test_init_without_access_token(self, monkeypatch):
    """Initialization must fail when no access token is available.

    The GRADIENT_ACCESS_TOKEN environment variable is cleared first, so the
    constructor has nothing to fall back on and should raise ValueError.
    """
    monkeypatch.delenv("GRADIENT_ACCESS_TOKEN", raising=False)

    with pytest.raises(ValueError):
        GradientDocumentEmbedder(workspace_id=workspace_id)

@pytest.mark.unit
def test_init_without_workspace(self, monkeypatch):
    """Initialization must fail when no workspace id is available.

    The GRADIENT_WORKSPACE_ID environment variable is cleared first, so the
    constructor has nothing to fall back on and should raise ValueError.
    """
    monkeypatch.delenv("GRADIENT_WORKSPACE_ID", raising=False)

    with pytest.raises(ValueError):
        GradientDocumentEmbedder(access_token=access_token)

@pytest.mark.unit
def test_init_from_params(self):
    """Explicit constructor arguments are wired through to the Gradient client."""
    component = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id)

    assert component is not None
    # Both credentials must end up on the underlying gradient client.
    assert component._gradient.workspace_id == workspace_id
    assert component._gradient._api_client.configuration.access_token == access_token

@pytest.mark.unit
def test_init_from_params_precedence(self, monkeypatch):
monkeypatch.setenv("GRADIENT_ACCESS_TOKEN", "env_access_token")
monkeypatch.setenv("GRADIENT_WORKSPACE_ID", "env_workspace_id")
Expand All @@ -54,7 +49,6 @@ def test_init_from_params_precedence(self, monkeypatch):
assert embedder._gradient.workspace_id == workspace_id
assert embedder._gradient._api_client.configuration.access_token == access_token

@pytest.mark.unit
def test_to_dict(self):
component = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id)
data = component.to_dict()
Expand All @@ -63,29 +57,25 @@ def test_to_dict(self):
"init_parameters": {"workspace_id": workspace_id, "model_name": "bge-large"},
}

@pytest.mark.unit
def test_warmup(self):
    """warm_up() fetches the embeddings model exactly once with the default slug."""
    component = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id)
    component._gradient.get_embeddings_model = MagicMock()

    component.warm_up()

    component._gradient.get_embeddings_model.assert_called_once_with(slug="bge-large")

@pytest.mark.unit
def test_warmup_doesnt_reload(self):
    """Calling warm_up() twice must fetch the embeddings model only once.

    Fix: the mock was configured with ``default_return_value`` — not a
    MagicMock keyword, so it silently became a stray attribute. The intended
    keyword is ``return_value``.
    """
    embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id)
    embedder._gradient.get_embeddings_model = MagicMock(return_value="fake model")
    embedder.warm_up()
    embedder.warm_up()
    # The second warm_up() must be a no-op: the model is cached after the first load.
    embedder._gradient.get_embeddings_model.assert_called_once_with(slug="bge-large")

@pytest.mark.unit
def test_run_fail_if_not_warmed_up(self):
    """run() before warm_up() raises a RuntimeError pointing the user at warm_up().

    Fix: ``match="warm_up()"`` treated the parentheses as an empty regex
    group, so it only checked for the substring "warm_up"; the escaped
    pattern pins the literal "warm_up()" text of the error message.
    """
    embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id)

    with pytest.raises(RuntimeError, match=r"warm_up\(\)"):
        embedder.run(documents=[Document(content=f"document number {i}") for i in range(5)])

@pytest.mark.unit
def test_run(self):
embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id)
embedder._embedding_model = NonCallableMagicMock()
Expand All @@ -105,7 +95,6 @@ def test_run(self):
assert isinstance(doc.embedding, list)
assert isinstance(doc.embedding[0], float)

@pytest.mark.unit
def test_run_batch(self):
embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id)
embedder._embedding_model = NonCallableMagicMock()
Expand All @@ -126,7 +115,6 @@ def test_run_batch(self):
assert isinstance(doc.embedding, list)
assert isinstance(doc.embedding[0], float)

@pytest.mark.unit
def test_run_custom_batch(self):
embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id, batch_size=20)
embedder._embedding_model = NonCallableMagicMock()
Expand All @@ -148,7 +136,6 @@ def test_run_custom_batch(self):
assert isinstance(doc.embedding, list)
assert isinstance(doc.embedding[0], float)

@pytest.mark.unit
def test_run_empty(self):
embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id)
embedder._embedding_model = NonCallableMagicMock()
Expand Down
4 changes: 2 additions & 2 deletions integrations/gradient/tests/test_gradient_rag_pipelines.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@
from haystack import Document, Pipeline
from haystack.components.builders.answer_builder import AnswerBuilder
from haystack.components.builders.prompt_builder import PromptBuilder
from haystack.components.retrievers import InMemoryEmbeddingRetriever
from haystack.components.retrievers.in_memory import InMemoryEmbeddingRetriever
from haystack.components.writers import DocumentWriter
from haystack.document_stores import InMemoryDocumentStore
from haystack.document_stores.in_memory import InMemoryDocumentStore

from gradient_haystack.embedders.gradient_document_embedder import GradientDocumentEmbedder
from gradient_haystack.embedders.gradient_text_embedder import GradientTextEmbedder
Expand Down
12 changes: 0 additions & 12 deletions integrations/gradient/tests/test_gradient_text_embedder.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@


class TestGradientTextEmbedder:
@pytest.mark.unit
def test_init_from_env(self, monkeypatch):
monkeypatch.setenv("GRADIENT_ACCESS_TOKEN", access_token)
monkeypatch.setenv("GRADIENT_WORKSPACE_ID", workspace_id)
Expand All @@ -22,28 +21,24 @@ def test_init_from_env(self, monkeypatch):
assert embedder._gradient.workspace_id == workspace_id
assert embedder._gradient._api_client.configuration.access_token == access_token

@pytest.mark.unit
def test_init_without_access_token(self, monkeypatch):
    """Initialization must fail when no access token is available.

    The GRADIENT_ACCESS_TOKEN environment variable is cleared first, so the
    constructor has nothing to fall back on and should raise ValueError.
    """
    monkeypatch.delenv("GRADIENT_ACCESS_TOKEN", raising=False)

    with pytest.raises(ValueError):
        GradientTextEmbedder(workspace_id=workspace_id)

@pytest.mark.unit
def test_init_without_workspace(self, monkeypatch):
    """Initialization must fail when no workspace id is available.

    The GRADIENT_WORKSPACE_ID environment variable is cleared first, so the
    constructor has nothing to fall back on and should raise ValueError.
    """
    monkeypatch.delenv("GRADIENT_WORKSPACE_ID", raising=False)

    with pytest.raises(ValueError):
        GradientTextEmbedder(access_token=access_token)

@pytest.mark.unit
def test_init_from_params(self):
    """Explicit constructor arguments are wired through to the Gradient client."""
    component = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id)

    assert component is not None
    # Both credentials must end up on the underlying gradient client.
    assert component._gradient.workspace_id == workspace_id
    assert component._gradient._api_client.configuration.access_token == access_token

@pytest.mark.unit
def test_init_from_params_precedence(self, monkeypatch):
monkeypatch.setenv("GRADIENT_ACCESS_TOKEN", "env_access_token")
monkeypatch.setenv("GRADIENT_WORKSPACE_ID", "env_workspace_id")
Expand All @@ -53,7 +48,6 @@ def test_init_from_params_precedence(self, monkeypatch):
assert embedder._gradient.workspace_id == workspace_id
assert embedder._gradient._api_client.configuration.access_token == access_token

@pytest.mark.unit
def test_to_dict(self):
component = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id)
data = component.to_dict()
Expand All @@ -62,29 +56,25 @@ def test_to_dict(self):
"init_parameters": {"workspace_id": workspace_id, "model_name": "bge-large"},
}

@pytest.mark.unit
def test_warmup(self):
    """warm_up() fetches the embeddings model exactly once with the default slug."""
    component = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id)
    component._gradient.get_embeddings_model = MagicMock()

    component.warm_up()

    component._gradient.get_embeddings_model.assert_called_once_with(slug="bge-large")

@pytest.mark.unit
def test_warmup_doesnt_reload(self):
    """Calling warm_up() twice must fetch the embeddings model only once.

    Fix: the mock was configured with ``default_return_value`` — not a
    MagicMock keyword, so it silently became a stray attribute. The intended
    keyword is ``return_value``.
    """
    embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id)
    embedder._gradient.get_embeddings_model = MagicMock(return_value="fake model")
    embedder.warm_up()
    embedder.warm_up()
    # The second warm_up() must be a no-op: the model is cached after the first load.
    embedder._gradient.get_embeddings_model.assert_called_once_with(slug="bge-large")

@pytest.mark.unit
def test_run_fail_if_not_warmed_up(self):
    """run() before warm_up() raises a RuntimeError pointing the user at warm_up().

    Fix: ``match="warm_up()"`` treated the parentheses as an empty regex
    group, so it only checked for the substring "warm_up"; the escaped
    pattern pins the literal "warm_up()" text of the error message.
    """
    embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id)

    with pytest.raises(RuntimeError, match=r"warm_up\(\)"):
        embedder.run(text="The food was delicious")

@pytest.mark.unit
def test_run_fail_when_no_embeddings_returned(self):
embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id)
embedder._embedding_model = NonCallableMagicMock()
Expand All @@ -94,7 +84,6 @@ def test_run_fail_when_no_embeddings_returned(self):
_result = embedder.run(text="The food was delicious")
embedder._embedding_model.embed.assert_called_once_with(inputs=[{"input": "The food was delicious"}])

@pytest.mark.unit
def test_run_empty_string(self):
embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id)
embedder._embedding_model = NonCallableMagicMock()
Expand All @@ -108,7 +97,6 @@ def test_run_empty_string(self):
assert len(result["embedding"]) == 1024 # 1024 is the bge-large embedding size
assert all(isinstance(x, float) for x in result["embedding"])

@pytest.mark.unit
def test_run(self):
embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id)
embedder._embedding_model = NonCallableMagicMock()
Expand Down