function_calling.py
# Reference: https://platform.openai.com/docs/guides/function-calling
import json
import os

from qwen_agent.llm import get_chat_model


# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit='fahrenheit'):
    """Get the current weather in a given location"""
    if 'tokyo' in location.lower():
        return json.dumps({'location': 'Tokyo', 'temperature': '10', 'unit': 'celsius'})
    elif 'san francisco' in location.lower():
        return json.dumps({'location': 'San Francisco', 'temperature': '72', 'unit': 'fahrenheit'})
    elif 'paris' in location.lower():
        return json.dumps({'location': 'Paris', 'temperature': '22', 'unit': 'celsius'})
    else:
        return json.dumps({'location': location, 'temperature': 'unknown'})


def test(fncall_prompt_type: str = 'qwen'):
    llm = get_chat_model({
        # Use the model service provided by DashScope:
        'model': 'qwen2.5-72b-instruct',
        'model_server': 'dashscope',
        'api_key': os.getenv('DASHSCOPE_API_KEY'),
        'generate_cfg': {
            'fncall_prompt_type': fncall_prompt_type
        },

        # Use the OpenAI-compatible model service provided by DashScope:
        # 'model': 'qwen2.5-72b-instruct',
        # 'model_server': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
        # 'api_key': os.getenv('DASHSCOPE_API_KEY'),

        # Use the model service provided by Together.AI:
        # 'model': 'Qwen/qwen2.5-7b-instruct',
        # 'model_server': 'https://api.together.xyz',  # api_base
        # 'api_key': os.getenv('TOGETHER_API_KEY'),

        # Use your own model service compatible with OpenAI API:
        # 'model': 'Qwen/qwen2.5-7b-instruct',
        # 'model_server': 'http://localhost:8000/v1',  # api_base
        # 'api_key': 'EMPTY',
    })

    # Step 1: send the conversation and available functions to the model
    messages = [{'role': 'user', 'content': "What's the weather like in San Francisco?"}]
    functions = [{
        'name': 'get_current_weather',
        'description': 'Get the current weather in a given location',
        'parameters': {
            'type': 'object',
            'properties': {
                'location': {
                    'type': 'string',
                    'description': 'The city and state, e.g. San Francisco, CA',
                },
                'unit': {
                    'type': 'string',
                    'enum': ['celsius', 'fahrenheit']
                },
            },
            'required': ['location'],
        },
    }]
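    # The schema above follows the OpenAI-style function-calling format referenced at
    # the top of this file: a `name`, a `description`, and a JSON-Schema `parameters`
    # object describing each argument.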

    print('# Assistant Response 1:')
    responses = []
    for responses in llm.chat(
            messages=messages,
            functions=functions,
            stream=True,
            # Note: extra_generate_cfg is optional
            # extra_generate_cfg=dict(
            #     # Note: if function_choice='auto', let the model decide whether to call a function or not
            #     # function_choice='auto',  # 'auto' is the default if function_choice is not set
            #     # Note: set function_choice='get_current_weather' to force the model to call this function
            #     function_choice='get_current_weather',
            # ),
    ):
        print(responses)

    # If you do not need streaming output, you can either use the following trick:
    #   *_, responses = llm.chat(messages=messages, functions=functions, stream=True)
    # or use stream=False:
    #   responses = llm.chat(messages=messages, functions=functions, stream=False)
    messages.extend(responses)  # extend conversation with assistant's reply
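
    # For reference, the last streamed message is typically an assistant message that
    # carries a `function_call` dict, roughly of this shape (illustrative values only):
    #   {'role': 'assistant', 'content': '',
    #    'function_call': {'name': 'get_current_weather',
    #                      'arguments': '{"location": "San Francisco, CA"}'}}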

    # Step 2: check if the model wanted to call a function
    last_response = messages[-1]
    if last_response.get('function_call', None):
        # Step 3: call the function
        # Note: the JSON response may not always be valid; be sure to handle errors
        available_functions = {
            'get_current_weather': get_current_weather,
        }  # only one function in this example, but you can have multiple
        function_name = last_response['function_call']['name']
        function_to_call = available_functions[function_name]
        function_args = json.loads(last_response['function_call']['arguments'])
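        # As the note above says, the arguments string is model-generated and may be
        # malformed JSON. One possible way to guard the json.loads call, for example:
        #   try:
        #       function_args = json.loads(last_response['function_call']['arguments'])
        #   except json.JSONDecodeError:
        #       function_args = {}  # or report the error back to the model and retry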
        function_response = function_to_call(
            location=function_args.get('location'),
            unit=function_args.get('unit'),
        )
        print('# Function Response:')
        print(function_response)

        # Step 4: send the info for each function call and function response to the model
        messages.append({
            'role': 'function',
            'name': function_name,
            'content': function_response,
        })  # extend conversation with function response

        print('# Assistant Response 2:')
        for responses in llm.chat(
                messages=messages,
                functions=functions,
                stream=True,
        ):  # get a new response from the model where it can see the function response
            print(responses)


if __name__ == '__main__':
    # Run example of function calling with QwenFnCallPrompt
    test()

    # Run example of function calling with NousFnCallPrompt
    # test(fncall_prompt_type='nous')
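
# Usage sketch (assumptions: the `qwen-agent` package is installed, e.g. via
# `pip install -U qwen-agent`, and DASHSCOPE_API_KEY is set for the default
# DashScope config above):
#   export DASHSCOPE_API_KEY=...
#   python function_calling.py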