[Bedrock] Added Amazon Bedrock examples (#635)
* add ChatGenerator example

* add Generator, Embedders example

* move system prompt from inference params to messages
AntonP84 authored Apr 4, 2024
1 parent dc590f2 commit 0fd7f91
Showing 2 changed files with 110 additions and 0 deletions.
36 changes: 36 additions & 0 deletions integrations/amazon_bedrock/examples/chatgenerator_example.py
@@ -0,0 +1,36 @@
# To run this example, you will need to
# 1) set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_DEFAULT_REGION` environment variables
# 2) enable access to the selected model in Amazon Bedrock
# Note: if you change the model, update the model-specific inference parameters.


from haystack.dataclasses import ChatMessage

from haystack_integrations.components.generators.amazon_bedrock import AmazonBedrockChatGenerator

generator = AmazonBedrockChatGenerator(
    model="anthropic.claude-3-haiku-20240307-v1:0",
    # model-specific inference parameters
    generation_kwargs={
        "max_tokens": 500,
        "temperature": 0.0,
    },
)

system_prompt = """
You are a helpful assistant that helps users learn more about AWS services.
Your audience is engineers with a decent technical background.
Be very concise and specific in your answers, keeping them short.
You may use technical terms, jargon, and abbreviations that are common among practitioners.
"""

# Even though Anthropic Claude models accept only messages with the `user` and `assistant` roles,
# internal handling converts a message with the `system` role into the `system` inference parameter
# for Claude, which makes the code more portable across generators (see the commented sketch below).
messages = [
    ChatMessage.from_system(system_prompt),
    ChatMessage.from_user("Which service should I use to train custom Machine Learning models?"),
]
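
# A minimal sketch of that portability, assuming the core OpenAI chat generator is available
# and `OPENAI_API_KEY` is set (illustrative only, so it is kept commented out):
# from haystack.components.generators.chat import OpenAIChatGenerator
# OpenAIChatGenerator().run(messages)  # the same list, system message included, works unchanged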

results = generator.run(messages)
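# `replies` is a list of ChatMessage objects generated by the model.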
results["replies"]
@@ -0,0 +1,74 @@
# To run this example, you will need to
# 1) set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_DEFAULT_REGION` environment variables
# 2) enable access to the selected model in Amazon Bedrock
# Note: if you change the model, update the model-specific inference parameters.

from haystack import Document, Pipeline
from haystack.components.builders import PromptBuilder
from haystack.components.retrievers.in_memory import InMemoryEmbeddingRetriever
from haystack.document_stores.in_memory import InMemoryDocumentStore

from haystack_integrations.components.embedders.amazon_bedrock import (
    AmazonBedrockDocumentEmbedder,
    AmazonBedrockTextEmbedder,
)
from haystack_integrations.components.generators.amazon_bedrock import AmazonBedrockGenerator

generator_model_name = "amazon.titan-text-lite-v1"
embedder_model_name = "amazon.titan-embed-text-v1"

prompt_template = """
Context:
{% for document in documents %}
{{ document.content }}
{% endfor %}
Given the context above, answer the question.
Write a full detailed answer.
Provide an explanation of why the answer is relevant to the question.
If you cannot answer the question, output "I do not know".
Question: {{ question }}?
"""

docs = [
    Document(content="User ABC is using Amazon SageMaker to train ML models."),
    Document(content="User XYZ is using Amazon EC2 instances to train ML models."),
]


doc_embedder = AmazonBedrockDocumentEmbedder(model=embedder_model_name)
docs_with_embeddings = doc_embedder.run(docs)["documents"]
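# The document embedder returns the same Document objects with a vector stored in each
# document's `embedding` field, ready to be written to the document store.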

doc_store = InMemoryDocumentStore()
doc_store.write_documents(docs_with_embeddings)


pipe = Pipeline()
pipe.add_component("text_embedder", AmazonBedrockTextEmbedder(embedder_model_name))
pipe.add_component("retriever", InMemoryEmbeddingRetriever(doc_store, top_k=1))
pipe.add_component("prompt_builder", PromptBuilder(prompt_template))
pipe.add_component(
    "generator",
    AmazonBedrockGenerator(
        model=generator_model_name,
        # model-specific inference parameters
        generation_kwargs={
            "maxTokenCount": 1024,
            "temperature": 0.0,
        },
    ),
)
pipe.connect("text_embedder", "retriever")
pipe.connect("retriever", "prompt_builder")
pipe.connect("prompt_builder", "generator")


question = "Which user is using IaaS services for Machine Learning?"
results = pipe.run(
    {
        "text_embedder": {"text": question},
        "prompt_builder": {"question": question},
    }
)
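# `replies` from AmazonBedrockGenerator is a list of generated strings.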
results["generator"]["replies"]
