add function calling and execute test, it works!
lbux committed May 11, 2024
1 parent 788b311 commit 40bf82b
Showing 2 changed files with 73 additions and 12 deletions.
@@ -1,4 +1,3 @@
-import json
 import logging
 from typing import Any, Dict, List, Optional
 
@@ -25,7 +24,7 @@ class LlamaCppChatGenerator:
     generator = LlamaCppGenerator(model="zephyr-7b-beta.Q4_0.gguf", n_ctx=2048, n_batch=512)
     print(generator.run(user_message, generation_kwargs={"max_tokens": 128}))
-    # {'replies': [ChatMessage(content='John Cusack', role=<ChatRole.ASSISTANT: 'assistant'>, name=None, meta={...}]}
+    # {"replies": [ChatMessage(content="John Cusack", role=<ChatRole.ASSISTANT: "assistant">, name=None, meta={...}]}
     ```
     """
@@ -55,9 +54,9 @@ def __init__(
         model_kwargs = model_kwargs or {}
         generation_kwargs = generation_kwargs or {}
 
-        if 'hf_tokenizer_path' in model_kwargs:
-            tokenizer = LlamaHFTokenizer.from_pretrained(model_kwargs['hf_tokenizer_path'])
-            model_kwargs['tokenizer'] = tokenizer
+        if "hf_tokenizer_path" in model_kwargs:
+            tokenizer = LlamaHFTokenizer.from_pretrained(model_kwargs["hf_tokenizer_path"])
+            model_kwargs["tokenizer"] = tokenizer
 
         # check if the model_kwargs contain the essential parameters
         # otherwise, populate them with values from init parameters
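The `hf_tokenizer_path` branch above lets a caller hand the underlying model a Hugging Face tokenizer, which llama-cpp-python's function-calling chat handlers rely on for prompt formatting. A minimal sketch of how a caller might use it, assuming the integration's usual import path; the model file, tokenizer repo, and `chat_format` value are illustrative assumptions, not taken from this commit:

```python
# Sketch only: model path, tokenizer repo, and chat_format are assumptions.
from haystack_integrations.components.generators.llama_cpp import LlamaCppChatGenerator

generator = LlamaCppChatGenerator(
    model="functionary-small-v2.4.Q4_0.gguf",  # hypothetical local GGUF file
    n_ctx=2048,
    n_batch=512,
    model_kwargs={
        # Resolved to a tokenizer via LlamaHFTokenizer.from_pretrained (see diff above)
        "hf_tokenizer_path": "meetkai/functionary-small-v2.4-GGUF",
        # llama-cpp-python chat handler that emits OpenAI-style tool_calls
        "chat_format": "chatml-function-calling",
    },
)
```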
76 changes: 69 additions & 7 deletions integrations/llama_cpp/tests/test_chat_generator.py
@@ -198,10 +198,12 @@ def test_run_rag_pipeline(self, generator):
         documents = [
             Document(content="There are over 7,000 languages spoken around the world today."),
             Document(
-                content="Elephants have been observed to behave in a way that indicates a high level of self-awareness, such as recognizing themselves in mirrors."
+                content="""Elephants have been observed to behave in a way that indicates a high
+                level of self-awareness, such as recognizing themselves in mirrors."""
             ),
             Document(
-                content="In certain parts of the world, like the Maldives, Puerto Rico, and San Diego, you can witness the phenomenon of bioluminescent waves."
+                content="""In certain parts of the world, like the Maldives, Puerto Rico,
+                and San Diego, you can witness the phenomenon of bioluminescent waves."""
             ),
         ]
         document_store.write_documents(documents=documents)
@@ -249,13 +251,24 @@ def test_run_rag_pipeline(self, generator):
             }
         )
 
-        replies = result['llm']['replies']
+        replies = result["llm"]["replies"]
         assert len(replies) > 0
         assert any("bioluminescent waves" in reply.content for reply in replies)
         assert all(reply.role == ChatRole.ASSISTANT for reply in replies)
 
 
 class TestLlamaCppChatGeneratorFunctionCalls:
+    def get_current_temperature(self, location):
+        """Get the current temperature in a given location"""
+        if "tokyo" in location.lower():
+            return json.dumps({"location": "Tokyo", "temperature": "10", "unit": "celsius"})
+        elif "san francisco" in location.lower():
+            return json.dumps({"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"})
+        elif "paris" in location.lower():
+            return json.dumps({"location": "Paris", "temperature": "22", "unit": "celsius"})
+        else:
+            return json.dumps({"location": location, "temperature": "unknown"})
+
     @pytest.fixture
     def generator(self, model_path, capsys):
         gguf_model_path = (
@@ -278,7 +291,7 @@ def generator(self, model_path, capsys):
         return generator
 
     @pytest.mark.integration
-    def test_function_call_scenario(self, generator):
+    def test_function_call(self, generator):
         tools = [
             {
                 "type": "function",
@@ -300,13 +313,62 @@
         messages = [
             ChatMessage.from_user("Get information for user john_doe"),
         ]
-        generation_kwargs = {"tools": tools, "tool_choice": tool_choice}
 
-        response = generator.run(messages=messages, generation_kwargs=generation_kwargs)
+        response = generator.run(messages=messages, generation_kwargs={"tools": tools, "tool_choice": tool_choice})
 
         assert "tool_calls" in response["replies"][0].meta
         tool_calls = response["replies"][0].meta["tool_calls"]
         assert len(tool_calls) > 0
         assert tool_calls[0]["function"]["name"] == "get_user_info"
         assert "username" in json.loads(tool_calls[0]["function"]["arguments"])
         assert response["replies"][0].role == ChatRole.ASSISTANT
+
+    def test_function_call_and_execute(self, generator):
+        messages = [ChatMessage.from_user("What's the weather like in San Francisco?")]
+        tools = [
+            {
+                "type": "function",
+                "function": {
+                    "name": "get_current_temperature",
+                    "description": "Get the current temperature in a given location",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "location": {
+                                "type": "string",
+                                "description": "The city and state, e.g. San Francisco, CA",
+                            },
+                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                        },
+                        "required": ["location"],
+                    },
+                },
+            }
+        ]
+
+        response = generator.run(messages=messages, generation_kwargs={"tools": tools})
+
+        available_functions = {
+            "get_current_temperature": self.get_current_temperature,
+        }
+
+        assert "replies" in response
+        assert len(response["replies"]) > 0
+
+        first_reply = response["replies"][0]
+        assert "tool_calls" in first_reply.meta
+        tool_calls = first_reply.meta["tool_calls"]
+
+        for tool_call in tool_calls:
+            function_name = tool_call["function"]["name"]
+            function_args = json.loads(tool_call["function"]["arguments"])
+            assert function_name in available_functions
+            function_response = available_functions[function_name](**function_args)
+            function_message = ChatMessage.from_function(function_response, function_name)
+            messages.append(function_message)
+
+        second_response = generator.run(messages=messages)
+        print(second_response)
+        assert "replies" in second_response
+        assert len(second_response["replies"]) > 0
+        assert any("current temperature" in reply.content for reply in second_response["replies"])
+        assert any("72" in reply.content for reply in second_response["replies"])

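The new test exercises the full tool-call round trip: the model emits a `tool_calls` entry, the test executes the matching Python function, and the result is fed back as a function message so the model can answer in prose. A condensed sketch of that loop outside pytest, assuming `generator`, `tools`, and `available_functions` are set up as in the diff above:

```python
# Condensed sketch of the tool-execution loop; `generator`, `tools`, and
# `available_functions` are assumed to exist as in the test above.
import json

from haystack.dataclasses import ChatMessage

messages = [ChatMessage.from_user("What's the weather like in San Francisco?")]

# First pass: the model decides whether (and how) to call a tool.
response = generator.run(messages=messages, generation_kwargs={"tools": tools})

for tool_call in response["replies"][0].meta.get("tool_calls", []):
    name = tool_call["function"]["name"]
    args = json.loads(tool_call["function"]["arguments"])  # arguments arrive as a JSON string
    result = available_functions[name](**args)
    messages.append(ChatMessage.from_function(result, name))  # feed the result back

# Second pass: the model turns the tool output into a natural-language answer.
final = generator.run(messages=messages)
print(final["replies"][0].content)
```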