fix linter issues
julian-risch committed Jun 25, 2024
1 parent 510192a commit a50af3e
Showing 2 changed files with 5 additions and 5 deletions.
@@ -334,10 +334,11 @@ def __init__(self, generation_kwargs: Dict[str, Any]):
         # a) we should get good estimates for the prompt length
         # b) we can use apply_chat_template with the template above to delineate ChatMessages
         # Mistral models are gated on HF Hub. If no HF_TOKEN is found we use a non-gated alternative tokenizer model.
+        tokenizer: PreTrainedTokenizer
         if os.environ.get("HF_TOKEN"):
-            tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
+            tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
         else:
-            tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
+            tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
             logger.warning(
                 "Gated mistralai/Mistral-7B-Instruct-v0.1 model cannot be used as a tokenizer for "
                 "estimating the prompt length because no HF_TOKEN was found. Using "
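The hunk above is the standard fix for a common linter complaint: annotating the same variable in both branches of an if/else counts as a redefinition to type checkers such as mypy, so the annotation is declared once and each branch only assigns. A minimal sketch of the pattern, lifted out of the original class context for illustration (the model names match the diff above):

```python
import os

from transformers import AutoTokenizer, PreTrainedTokenizer

# Annotate once, unassigned; both branches then plain-assign the same name.
tokenizer: PreTrainedTokenizer
if os.environ.get("HF_TOKEN"):
    # Gated checkpoint: resolving it requires Hugging Face authentication.
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
else:
    # Non-gated stand-in used only to estimate prompt length.
    tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
```

Annotating a name without assigning it is valid Python; it gives the type checker a single declaration that both branch assignments bind against.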
integrations/amazon_bedrock/tests/test_chat_generator.py (2 additions, 3 deletions)
@@ -4,7 +4,6 @@
 from unittest.mock import patch
 
 import pytest
-from _pytest.monkeypatch import MonkeyPatch
 from haystack.components.generators.utils import print_streaming_chunk
 from haystack.dataclasses import ChatMessage, ChatRole, StreamingChunk
 
@@ -241,7 +240,7 @@ def test_mistral_chat_template_incorrect_order(self):
         except Exception as e:
             assert "Conversation roles must alternate user/assistant/" in str(e)
 
-    def test_use_mistral_adapter_without_hf_token(self, monkeypatch: MonkeyPatch, caplog) -> None:
+    def test_use_mistral_adapter_without_hf_token(self, monkeypatch, caplog) -> None:
         monkeypatch.delenv("HF_TOKEN", raising=False)
         with (
             patch("transformers.AutoTokenizer.from_pretrained") as mock_pretrained,
@@ -252,7 +251,7 @@ def test_use_mistral_adapter_without_hf_token(self, monkeypatch: MonkeyPatch, ca
             mock_pretrained.assert_called_with("NousResearch/Llama-2-7b-chat-hf")
             assert "no HF_TOKEN was found" in caplog.text
 
-    def test_use_mistral_adapter_with_hf_token(self, monkeypatch: MonkeyPatch) -> None:
+    def test_use_mistral_adapter_with_hf_token(self, monkeypatch) -> None:
         monkeypatch.setenv("HF_TOKEN", "test")
         with (
             patch("transformers.AutoTokenizer.from_pretrained") as mock_pretrained,
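The test-side change drops the `monkeypatch: MonkeyPatch` annotations, whose import reaches into pytest's private `_pytest` package, which is what linters flag; the fixture itself is injected by parameter name and needs no import at all. A hedged, self-contained sketch of the same fixture usage (the test names and env-var value are illustrative, not taken from the commit):

```python
import os


def test_hf_token_present(monkeypatch) -> None:
    # pytest injects the monkeypatch fixture by name; no annotation required.
    monkeypatch.setenv("HF_TOKEN", "test")
    assert os.environ["HF_TOKEN"] == "test"


def test_hf_token_absent(monkeypatch) -> None:
    # raising=False makes delenv a no-op when the variable is already unset.
    monkeypatch.delenv("HF_TOKEN", raising=False)
    assert "HF_TOKEN" not in os.environ
```

If a type hint is still wanted, pytest has exposed `pytest.MonkeyPatch` as a public type since pytest 6.2, which keeps the annotation without importing from the private module.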
