Pin llama-cpp-python version
Amna Mubashar authored and committed on Aug 2, 2024
1 parent dcd2e38 commit 7e091da
Showing 4 changed files with 8 additions and 11 deletions.
@@ -1,7 +1,7 @@
# To run this example, you will need to set a `ANTHROPIC_API_KEY` environment variable.

from haystack import Pipeline
-from haystack.components.builders import DynamicChatPromptBuilder
+from haystack.components.builders import ChatPromptBuilder
from haystack.components.converters import HTMLToDocument
from haystack.components.fetchers import LinkContentFetcher
from haystack.components.generators.utils import print_streaming_chunk
@@ -18,7 +18,7 @@
rag_pipeline = Pipeline()
rag_pipeline.add_component("fetcher", LinkContentFetcher())
rag_pipeline.add_component("converter", HTMLToDocument())
rag_pipeline.add_component("prompt_builder", DynamicChatPromptBuilder(runtime_variables=["documents"]))
rag_pipeline.add_component("prompt_builder", ChatPromptBuilder())
rag_pipeline.add_component(
"llm",
AnthropicChatGenerator(
@@ -30,12 +30,12 @@

rag_pipeline.connect("fetcher", "converter")
rag_pipeline.connect("converter", "prompt_builder")
rag_pipeline.connect("prompt_builder", "llm")
rag_pipeline.connect("prompt_builder.prompt", "llm.messages")

question = "What are the best practices in prompt engineering?"
rag_pipeline.run(
    data={
        "fetcher": {"urls": ["https://docs.anthropic.com/claude/docs/prompt-engineering"]},
-        "prompt_builder": {"template_variables": {"query": question}, "prompt_source": messages},
+        "prompt_builder": {"template_variables": {"query": question}, "template": messages},
    }
)
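For context, the old DynamicChatPromptBuilder took a prompt_source at run time, while ChatPromptBuilder takes a template of ChatMessage objects plus template_variables. A minimal standalone sketch of that call, assuming haystack-ai 2.x and using a placeholder message template (not taken from this commit):

    from haystack.components.builders import ChatPromptBuilder
    from haystack.dataclasses import ChatMessage

    builder = ChatPromptBuilder()
    # Illustrative template; the real example builds its messages elsewhere in the file.
    template = [ChatMessage.from_user("Answer from these documents: {{documents}}\n\nQuestion: {{query}}")]
    result = builder.run(
        template=template,
        template_variables={"query": "What are the best practices in prompt engineering?", "documents": []},
    )
    # result["prompt"] is the rendered list of ChatMessage objects, i.e. what the
    # pipeline above feeds into "llm.messages".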
integrations/llama_cpp/pyproject.toml (2 changes: 1 addition & 1 deletion)
@@ -26,7 +26,7 @@ classifiers = [
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = ["haystack-ai", "llama-cpp-python"]
dependencies = ["haystack-ai", "llama-cpp-python==0.2.79"]

[project.urls]
Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp#readme"
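The pyproject.toml change above pins llama-cpp-python to 0.2.79. As a quick sanity check (a sketch, not part of this commit), one can confirm that an environment actually resolved the pinned version:

    from importlib.metadata import version

    # Assumes llama-cpp-python is installed; the expected string mirrors the pin above.
    assert version("llama-cpp-python") == "0.2.79"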
integrations/llama_cpp/tests/test_chat_generator.py (5 changes: 1 addition & 4 deletions)
@@ -213,9 +213,7 @@ def test_run_rag_pipeline(self, generator):
            instance=InMemoryBM25Retriever(document_store=document_store, top_k=1),
            name="retriever",
        )
-        pipeline.add_component(
-            instance=ChatPromptBuilder(variables=["query", "documents"]), name="prompt_builder"
-        )
+        pipeline.add_component(instance=ChatPromptBuilder(variables=["query", "documents"]), name="prompt_builder")
        pipeline.add_component(instance=generator, name="llm")
        pipeline.connect("retriever.documents", "prompt_builder.documents")
        pipeline.connect("prompt_builder.prompt", "llm.messages")
@@ -412,7 +410,6 @@ def test_function_call_and_execute(self, generator):
        messages.append(function_message)

        second_response = generator.run(messages=messages)
-        print(second_response)
        assert "replies" in second_response
        assert len(second_response["replies"]) > 0
        assert any("San Francisco" in reply.content for reply in second_response["replies"])
integrations/mistral/examples/streaming_chat_with_rag.py (4 changes: 2 additions & 2 deletions)
@@ -39,7 +39,7 @@

text_embedder = MistralTextEmbedder()
retriever = InMemoryEmbeddingRetriever(document_store=document_store)
-prompt_builder = ChatPromptBuilder(runtime_variables=["documents"])
+prompt_builder = ChatPromptBuilder(variables=["documents"])
llm = MistralChatGenerator(streaming_callback=print_streaming_chunk)

messages = [ChatMessage.from_user("Here are some the documents: {{documents}} \\n Answer: {{query}}")]
@@ -60,7 +60,7 @@
result = rag_pipeline.run(
    {
        "text_embedder": {"text": question},
-        "prompt_builder": {"template_variables": {"query": question}, "prompt_source": messages},
+        "prompt_builder": {"template_variables": {"query": question}, "template": messages},
        "llm": {"generation_kwargs": {"max_tokens": 165}},
    }
)
