Function Calling & Tool Use
OpenAI and Anthropic function calling, tool definitions, parallel tool calls, and structured agent outputs.
# Install dependencies
# !pip install openai anthropic
Defining Functions/Tools
# Function definitions for OpenAI


def _tool_spec(name, description, properties, required):
    """Assemble one OpenAI-style function tool definition.

    Wraps the JSON-Schema parameter spec in the "type": "function"
    envelope the chat-completions API expects.
    """
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
            },
        },
    }


tools = [
    _tool_spec(
        "get_weather",
        "Get the current weather for a location",
        {
            "location": {
                "type": "string",
                "description": "City name, e.g. 'San Francisco, CA'",
            },
            "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"],
                "description": "Temperature unit",
            },
        },
        ["location"],
    ),
    _tool_spec(
        "search_web",
        "Search the web for information",
        {
            "query": {
                "type": "string",
                "description": "Search query",
            },
            "num_results": {
                "type": "integer",
                "description": "Number of results to return",
                "default": 5,
            },
        },
        ["query"],
    ),
    _tool_spec(
        "calculate",
        "Perform mathematical calculations",
        {
            "expression": {
                "type": "string",
                "description": "Mathematical expression to evaluate",
            },
        },
        ["expression"],
    ),
]

print(f"Defined {len(tools)} tools")
for tool in tools:
    print(f" - {tool['function']['name']}")
Implementing Tool FunctionsΒΆ
import ast
import json
import operator
import random
from typing import Any, Dict
class ToolExecutor:
    """Execute the tool functions advertised to the model.

    Each public method mirrors one entry in the `tools` schema, and
    `execute` dispatches by function name so parsed tool calls can be
    routed generically. All methods return plain dicts so results can be
    JSON-serialized back into the conversation.
    """

    # Arithmetic AST operators permitted by the safe calculator.
    _SAFE_OPS = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def get_weather(self, location: str, unit: str = "fahrenheit") -> Dict[str, Any]:
        """Mock weather API.

        Returns a structured error dict (instead of raising KeyError)
        when `unit` is not one of the schema's enum values, matching the
        error style of the other tools.
        """
        # In production, call actual weather API
        temps = {"fahrenheit": random.randint(50, 90), "celsius": random.randint(10, 32)}
        if unit not in temps:
            return {"error": f"Unknown unit: {unit}"}
        conditions = ["sunny", "cloudy", "rainy", "partly cloudy"]
        return {
            "location": location,
            "temperature": temps[unit],
            "unit": unit,
            "condition": random.choice(conditions),
            "humidity": random.randint(30, 90),
        }

    def search_web(self, query: str, num_results: int = 5) -> Dict[str, Any]:
        """Mock web search returning `num_results` placeholder hits."""
        # In production, call actual search API (Google, Bing, etc.)
        return {
            "query": query,
            "results": [
                {"title": f"Result {i+1} for '{query}'", "url": f"https://example.com/{i}"}
                for i in range(num_results)
            ],
        }

    @classmethod
    def _eval_node(cls, node: ast.AST) -> Any:
        """Recursively evaluate an AST restricted to numeric literals and
        the whitelisted arithmetic operators. Raises ValueError on
        anything else (names, calls, attribute access, strings...)."""
        if isinstance(node, ast.Expression):
            return cls._eval_node(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in cls._SAFE_OPS:
            return cls._SAFE_OPS[type(node.op)](
                cls._eval_node(node.left), cls._eval_node(node.right)
            )
        if isinstance(node, ast.UnaryOp) and type(node.op) in cls._SAFE_OPS:
            return cls._SAFE_OPS[type(node.op)](cls._eval_node(node.operand))
        raise ValueError(f"Unsupported expression element: {type(node).__name__}")

    def calculate(self, expression: str) -> Dict[str, Any]:
        """Safe calculator.

        Evaluates arithmetic via an AST whitelist rather than eval():
        eval() with an empty __builtins__ is still escapable through
        attribute access (e.g. ().__class__.__bases__[0].__subclasses__()),
        which matters because the expression comes from model output.
        """
        try:
            result = self._eval_node(ast.parse(expression, mode="eval"))
            return {"expression": expression, "result": result}
        except Exception as e:
            return {"expression": expression, "error": str(e)}

    def execute(self, function_name: str, arguments: Dict[str, Any]) -> Any:
        """Dispatch a tool call by name.

        Only public methods are reachable; private helpers/dunders and
        unknown names yield a structured error dict.
        """
        if function_name.startswith("_") or not hasattr(self, function_name):
            return {"error": f"Unknown function: {function_name}"}
        return getattr(self, function_name)(**arguments)
# Test the executor: run one representative call per tool and pretty-print
# each JSON result.
executor = ToolExecutor()
print("Testing tools:")

demo_calls = [
    ("Weather", lambda: executor.get_weather("Paris", "celsius")),
    ("Search", lambda: executor.search_web("AI agents", 3)),
    ("Calculate", lambda: executor.calculate("(23 + 45) * 2")),
]
for step, (label, invoke) in enumerate(demo_calls, start=1):
    print(f"\n{step}. {label}:")
    print(json.dumps(invoke(), indent=2))
Real OpenAI Function Calling Example
Here's how to use actual OpenAI function calling (requires an API key):
# Example with OpenAI API (requires API key)
# NOTE: the triple-quoted string below is deliberately unexecuted sample
# code. It shows the standard two-round tool-calling loop: the model
# proposes tool calls, we execute them locally, append each result as a
# "tool" message, then ask the model for a final natural-language answer.
'''
from openai import OpenAI
import json
client = OpenAI() # Requires OPENAI_API_KEY env var
def run_conversation():
# Step 1: Send query and functions to model
messages = [{"role": "user", "content": "What's the weather in Paris?"}]
response = client.chat.completions.create(
model="gpt-4-turbo-preview",
messages=messages,
tools=tools,
tool_choice="auto"
)
response_message = response.choices[0].message
tool_calls = response_message.tool_calls
# Step 2: Check if model wants to call a function
if tool_calls:
# Execute function calls
messages.append(response_message)
for tool_call in tool_calls:
function_name = tool_call.function.name
function_args = json.loads(tool_call.function.arguments)
# Execute the function
function_response = executor.execute(function_name, function_args)
# Add function response to messages
messages.append({
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": json.dumps(function_response)
})
# Step 3: Get final response from model
second_response = client.chat.completions.create(
model="gpt-4-turbo-preview",
messages=messages
)
return second_response.choices[0].message.content
return response_message.content
# Usage:
# result = run_conversation()
# print(result)
'''
print("OpenAI function calling example (commented - requires API key)")
print("Uncomment and add your API key to run")
Parallel Function Calling
Modern LLMs can call multiple functions simultaneously:
from typing import List
import concurrent.futures
def handle_parallel_calls(tool_calls: List[Dict]) -> List[Dict]:
    """Run several tool invocations concurrently on a thread pool.

    Results are returned in the same order as `tool_calls`, each as a
    dict pairing the original call spec with its result.
    """
    runner = ToolExecutor()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        # Submit everything first so the calls overlap, keeping each
        # future paired with the call that produced it.
        pending = [
            (call, pool.submit(runner.execute, call['name'], call['arguments']))
            for call in tool_calls
        ]
        # future.result() blocks, so iterating in submission order
        # preserves the input ordering.
        return [{"call": call, "result": fut.result()} for call, fut in pending]
# Example: Multiple tool calls


def _call(name, **arguments):
    """Shape one tool-call spec the way handle_parallel_calls expects."""
    return {"name": name, "arguments": arguments}


parallel_calls = [
    _call("get_weather", location="Paris", unit="celsius"),
    _call("get_weather", location="London", unit="celsius"),
    _call("calculate", expression="25 * 4"),
]

print("Executing parallel tool calls...\n")
results = handle_parallel_calls(parallel_calls)
for seq, entry in enumerate(results, start=1):
    print(f"Call {seq}: {entry['call']['name']}")
    print(f"Result: {json.dumps(entry['result'], indent=2)}\n")
Best Practices
1. Function Descriptions
Be specific and clear
Include examples
Specify parameter formats
Document edge cases
2. Error Handling
Always handle function failures gracefully
Return structured error messages
Implement retries for transient failures
Log all function calls
3. Security
Validate all inputs
Sanitize function arguments
Limit function execution time
Restrict dangerous operations
Implement rate limiting
Key Takeaways
✅ Function calling enables LLMs to use external tools
✅ Define functions with clear descriptions and schemas
✅ Handle both single and parallel function calls
✅ Implement proper error handling and validation
✅ Always validate and sanitize inputs for security