-
Notifications
You must be signed in to change notification settings - Fork 0
/
01_tool_use.py
120 lines (98 loc) · 4.06 KB
/
01_tool_use.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
import boto3
from time import strftime
def get_weather_tool_spec():
    """
    Build the Converse-API tool specification for get_current_weather.

    The spec declares a single required string input, "city", using JSON
    Schema. For details on the schema vocabulary, see
    https://json-schema.org/understanding-json-schema/reference.

    :return: A dict in the Bedrock "toolSpec" format describing the tool.
    """
    # Describe the one input the tool accepts.
    city_property = {
        "type": "string",
        "description": "The city to get weather for",
    }
    input_schema = {
        "type": "object",
        "properties": {"city": city_property},
        "required": ["city"],
    }
    return {
        "toolSpec": {
            "name": "get_current_weather",
            "description": "Get the current weather for a city",
            "inputSchema": {"json": input_schema},
        }
    }
def weather_tool(city):
    """
    Look up canned weather data for a city.

    :param city: City name; matching is case-insensitive and spaces are
        treated as underscores (e.g. "New York" matches "new_york").
    :return: A dict with "temperature" and "condition" keys, or None when
        the city is not in the demo data set.
    """
    key = city.lower().replace(" ", "_")
    known_cities = {
        "new_york": {"temperature": 40, "condition": "Partly cloudy"},
        "las_vegas": {"temperature": 71, "condition": "Sunny"},
    }
    return known_cities.get(key)
def process_response(follow_up_response, depth=0, max_depth=3):
    """
    Handle one Converse API response, recursing while the model requests tools.

    Appends the model's message (and any tool results) to the module-level
    ``messages`` list, prints text content blocks, and — for
    get_current_weather tool requests — invokes ``weather_tool`` and sends the
    result back to the model via ``client.converse``, recursing on the
    follow-up response.

    :param follow_up_response: A response dict from ``client.converse``.
    :param depth: Current recursion depth (starts at 0).
    :param max_depth: Maximum number of tool-use round trips allowed.
    """
    # Check recursion depth first so a tool-use loop cannot run unbounded.
    if depth >= max_depth:
        print(f"Maximum recursion depth ({max_depth}) reached")
        return
    # Append the model's response to the conversation
    messages.append(follow_up_response["output"]["message"])
    # Check each individual content block of the response
    for content_block in follow_up_response["output"]["message"]["content"]:
        # Display text responses in the console
        if "text" in content_block:
            # Bug fix: print THIS block's text, not content[0]["text"] — a
            # response can interleave text and toolUse blocks in any order,
            # and content[0] may not even be a text block.
            response_text = content_block["text"]
            print(f"\n{response_text}")
        # Process tool use requests
        elif "toolUse" in content_block:
            tool_use_request = content_block["toolUse"]
            if tool_use_request["name"] == "get_current_weather":
                tool_use_id = tool_use_request["toolUseId"]
                city = tool_use_request["input"]["city"]
                # May be None for unknown cities; the system prompt tells the
                # model to answer "don't know" in that case.
                weather_info = weather_tool(city)
                # Append the tool's response to the conversation
                messages.append({
                    "role": "user",
                    "content": [
                        {"toolResult": {
                            "toolUseId": tool_use_id,
                            "content": [{"json": weather_info}],
                        }}
                    ]
                })
                # Send the tool's response back to the model
                follow_up_response = client.converse(
                    modelId="anthropic.claude-3-haiku-20240307-v1:0",
                    toolConfig=tool_config,
                    system=system_prompt,
                    messages=messages
                )
                process_response(follow_up_response, depth + 1, max_depth)
# Bedrock runtime client for the demo region.
client = boto3.client("bedrock-runtime", region_name="us-east-1")

# Give the model today's date so "this month" style questions make sense.
today_text = strftime("%A %d %B %Y")

# System instructions: restrict the assistant to travel questions and force
# all weather answers through the get_current_weather tool.
system_prompt = [{
    "text": f"""
Today's date is {today_text}. You are a travel assistant.
You also have access to a tool get_current_weather.
With this in mind, answer the user's questions.
You MUST follow the rules below:
- ALWAYS use the get_current_weather to get current weather information.
- Don't rely on anything else for weather information.
- Don't make up weather information.
- If the tool doesn't return the weather, say that you don't know the answer.
- If the question is not related to travel, say that you don't know the answer.
"""
}]

# Seed the conversation with a single user question.
user_question = "Would it be a good time to visit Las Vegas this month?"
messages = [{
    "role": "user",
    "content": [{"text": user_question}]
}]

# Advertise the weather tool to the model.
tool_config = {"tools": [get_weather_tool_spec()]}

# First round trip; process_response handles any tool-use follow-ups.
initial_response = client.converse(
    modelId="anthropic.claude-3-haiku-20240307-v1:0",
    toolConfig=tool_config,
    system=system_prompt,
    messages=messages
)
process_response(initial_response)