updating tests
davidsbatista committed Sep 26, 2024
1 parent 99e9a9e commit 74ff3b6
Showing 1 changed file with 11 additions and 6 deletions.
17 changes: 11 additions & 6 deletions test/components/extractors/test_llm_metadata_extractor.py
@@ -12,7 +12,12 @@
 class TestLLMMetadataExtractor:
 
     def test_init_default(self):
-        extractor = LLMMetadataExtractor(prompt="prompt", expected_keys=["key1", "key2"], generator_api=LLMProvider.OPENAI, input_text="test")
+        extractor = LLMMetadataExtractor(
+            prompt="prompt {{test}}",
+            expected_keys=["key1", "key2"],
+            generator_api=LLMProvider.OPENAI,
+            input_text="test"
+        )
         assert isinstance(extractor.builder, PromptBuilder)
         assert extractor.generator_api == LLMProvider.OPENAI
         assert extractor.expected_keys == ["key1", "key2"]
@@ -21,7 +26,7 @@ def test_init_default(self):
 
     def test_init_with_parameters(self):
         extractor = LLMMetadataExtractor(
-            prompt="prompt",
+            prompt="prompt {{test}}",
             expected_keys=["key1", "key2"],
             raise_on_failure=True,
             generator_api=LLMProvider.OPENAI,
@@ -42,7 +47,7 @@ def test_init_with_parameters(self):
     def test_to_dict(self, monkeypatch):
         monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
         extractor = LLMMetadataExtractor(
-            prompt="some prompt that was used with the LLM",
+            prompt="some prompt that was used with the LLM {{test}}",
             expected_keys=["key1", "key2"],
             generator_api=LLMProvider.OPENAI,
             input_text="test",
@@ -52,7 +57,7 @@ def test_to_dict(self, monkeypatch):
         assert extractor_dict == {
             'type': 'haystack_experimental.components.extractors.llm_metadata_extractor.LLMMetadataExtractor',
             'init_parameters': {
-                'prompt': 'some prompt that was used with the LLM',
+                'prompt': 'some prompt that was used with the LLM {{test}}',
                 'expected_keys': ['key1', 'key2'],
                 'raise_on_failure': True,
                 'input_text': 'test',
@@ -74,7 +79,7 @@ def test_from_dict(self, monkeypatch):
         extractor_dict = {
             'type': 'haystack_experimental.components.extractors.llm_metadata_extractor.LLMMetadataExtractor',
             'init_parameters': {
-                'prompt': 'some prompt that was used with the LLM',
+                'prompt': 'some prompt that was used with the LLM {{test}}',
                 'expected_keys': ['key1', 'key2'],
                 'raise_on_failure': True,
                 'input_text': 'test',
@@ -93,7 +98,7 @@ def test_from_dict(self, monkeypatch):
         extractor = LLMMetadataExtractor.from_dict(extractor_dict)
         assert extractor.raise_on_failure is True
         assert extractor.expected_keys == ["key1", "key2"]
-        assert extractor.prompt == "some prompt that was used with the LLM"
+        assert extractor.prompt == "some prompt that was used with the LLM {{test}}"
         assert extractor.generator_api == LLMProvider.OPENAI
 
     @pytest.mark.skipif(
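Context on the change (not part of the commit itself): every test prompt now contains a `{{test}}` template variable matching `input_text="test"`. LLMMetadataExtractor builds a Jinja2-based PromptBuilder from the prompt, so the template presumably needs a variable with the same name as `input_text` for the document text to be rendered into it. A minimal sketch of that rendering, assuming Haystack's standard PromptBuilder behavior; the variable name `test` and the sample text are illustrative, not taken from the commit:

```python
# Minimal sketch (assumption): how a prompt template variable is rendered.
# The extractor is expected to construct a PromptBuilder from `prompt` and
# fill the variable named by `input_text` with each document's content.
from haystack.components.builders import PromptBuilder

builder = PromptBuilder(template="prompt {{test}}")   # same template as in test_init_default
result = builder.run(test="some document content")    # `test` mirrors input_text="test"
print(result["prompt"])                               # -> "prompt some document content"
```

Under that assumption, a prompt without the `{{test}}` placeholder (as in the old test fixtures) would leave the document text out of the rendered prompt, which is likely why the fixtures were updated.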
