Tool Calling#

Tool calling allows you to connect models to external tools and systems. This is useful for many things such as empowering AI assistants with capabilities, or building deep integrations between your applications and the models.

The Chat Completions API supports function calling. You can describe the functions and their arguments to make available to the LLM. The model will output a JSON object containing arguments to call one or many functions. After calling any invoked functions, you can provide those results back to the model in subsequent Chat Completions API calls.

Helper utilities for the examples

Some of the examples below use code from funcs.py. That code is shown here to help you better understand these examples.

# funcs.py

import json
import random

from datetime import datetime
from typing import Literal
from zoneinfo import ZoneInfo, available_timezones

import pytz
import requests

from langchain_core.utils.function_calling import convert_to_openai_tool


def get_current_weather(location, unit="fahrenheit"):
    """Get the weather for some location"""
    # Canned responses, matched case-insensitively as substrings of the
    # requested location. Order preserves the original match precedence.
    known = {
        "chicago": ("Chicago", "13"),
        "san francisco": ("San Francisco", "55"),
        "new york": ("New York", "11"),
    }
    needle = location.lower()
    for fragment, (city, temperature) in known.items():
        if fragment in needle:
            return json.dumps(
                {"location": city, "temperature": temperature, "unit": unit}
            )
    # Unknown location: no unit is reported, matching the stubbed contract.
    return json.dumps({"location": location, "temperature": "unknown"})


def get_weather_forecast(location: str) -> dict[str, str]:
    """Retrieves a simple weather forecast for a given location"""
    # wttr.in answers "<condition>,<temperature>" for this format string.
    response = requests.get(f"https://wttr.in/{location}?format=%C,%t")
    if response.status_code != 200:
        return {"error": "Unable to fetch weather data"}
    # The condition text may itself contain commas; the temperature is
    # always the last comma-separated field.
    *condition_parts, temperature = response.text.strip().split(",")
    return {
        "location": location,
        "forecast": " ".join(condition_parts),
        "temperature": temperature,
    }


def get_stock_price(symbol: str) -> float:
    """Retrieves the stock price for a given symbol"""
    # NOTE(review): placeholder key — replace with a real Alpha Vantage key.
    api_key = "your_stock_api_key"
    url = f"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={symbol}&apikey={api_key}"
    data = requests.get(url).json()

    # Missing "Global Quote" section (bad symbol, rate limit, ...): fall
    # back to a fixed dummy price so the tool-calling demos keep working.
    if "Global Quote" not in data:
        return 100.0

    return float(data["Global Quote"]["05. price"])


def get_random_number(min_value: int, max_value: int) -> int:
    """Returns a random number between min_value and max_value"""
    # Inclusive on both ends: randrange's stop is exclusive, hence + 1.
    return random.randrange(min_value, max_value + 1)


def get_current_time(time_zone: str | None = None) -> str:
    """Returns the current time in the specified time zone"""

    if time_zone is None or time_zone not in pytz.all_timezones:
        time_zone = "US/Pacific"

    format = "%Y-%m-%d %H:%M:%S"

    tz = pytz.timezone(time_zone)
    current_time = datetime.now(tz)
    return current_time.strftime(format)


def get_random_city() -> str:
    """Retrieves a random city from a list of cities"""
    # Fixed candidate pool; selection is uniform.
    return random.choice(
        [
            "Groningen",
            "Enschede",
            "Amsterdam",
            "Istanbul",
            "Baghdad",
            "Rio de Janeiro",
            "Tokyo",
            "Kampala",
        ]
    )


def get_user_location(accuracy: int) -> str:
    """
    Returns the user's location based on the public IP address and accuracy level.

    Parameters:
    accuracy (int): The level of detail for the location information.
        1 - Country only
        2 - City and country
        3 - City, region, and country

    Returns:
    str: The location information based on the specified accuracy level or an error message.
    """
    # Stubbed to a fixed location so the tool-calling examples are
    # deterministic and work offline. Swap in _get_user_location_from_ip
    # below for the real IP-geolocation behavior.
    # return json.dumps({"location": "Las Vegas, Nevada, United States"})
    return json.dumps({"location": "San Diego, California, United States"})


def _get_user_location_from_ip(accuracy: int) -> str:
    """Real IP-geolocation implementation (not called by the stub above).

    Previously this code sat unreachable after get_user_location's early
    return; it is kept here as a named helper instead of dead code.
    """
    try:
        # Retrieve public IP address
        ip_response = requests.get("https://api.ipify.org?format=json")
        ip_response.raise_for_status()
        ip_address = ip_response.json().get("ip")

        # Use public IP to get location data
        location_url = f"http://ip-api.com/json/{ip_address}"
        location_response = requests.get(location_url)
        location_response.raise_for_status()
        data = location_response.json()

        if data["status"] == "fail":
            return f"Error in get_user_location: {data.get('message', 'Unknown error')}"

        if accuracy == 1:
            return data.get("country", "Unknown country")
        elif accuracy == 2:
            return f"{data.get('city', 'Unknown city')}, {data.get('country', 'Unknown country')}"
        elif accuracy == 3:
            return f"{data.get('city', 'Unknown city')}, {data.get('regionName', 'Unknown region')}, {data.get('country', 'Unknown country')}"
        else:
            return "Invalid accuracy level. Please specify 1 (Country), 2 (City and Country), or 3 (City, Region, and Country)."
    except requests.RequestException as e:
        return f"Error: {e}"


# Registry of every available tool, keyed by its function name so that a
# model-produced tool-call name can be resolved back to the callable.
TOOL_MAPPING = {
    "get_current_weather": get_current_weather,
    "get_weather_forecast": get_weather_forecast,
    "get_stock_price": get_stock_price,
    "get_random_number": get_random_number,
    "get_current_time": get_current_time,
    "get_random_city": get_random_city,
    "get_user_location": get_user_location,
}


def get_tools_map(
    names: list[
        Literal[
            "get_current_weather",
            "get_weather_forecast",
            "get_stock_price",
            "get_random_number",
            "get_current_time",
            "get_user_location",
            "get_random_city",
        ]
    ],
):
    """Build a lookup of the requested tools plus an aggregate entry.

    Parameters:
    names: Tool names to include; each must be a key of TOOL_MAPPING.

    Returns:
    dict mapping each name to {"call": callable, "schema": OpenAI tool
    spec}, plus a special "__all__" entry whose "call" is the list of all
    requested callables and whose "schema" is the list of all their specs
    (the shape expected by the `tools=` argument of the chat API).
    """
    tools_map = {
        name: {
            "call": TOOL_MAPPING[name],
            "schema": convert_to_openai_tool(TOOL_MAPPING[name]),
        }
        for name in names
    }

    # Aggregate before inserting "__all__" so it only covers real tools.
    tools_map["__all__"] = {
        "call": [entry["call"] for entry in tools_map.values()],
        "schema": [entry["schema"] for entry in tools_map.values()],
    }

    return tools_map

Single Tool#

The following example shows how to use the tool calling capabilities of the chat method (based on the /chat/completions API endpoint).

from rich.pretty import pprint

import imagine


# Client configured with a single retry and verbose debug logging.
client = imagine.ImagineClient(max_retries=1, debug=True)

# Hand-written OpenAI-style tool schema: one function the model may call,
# with its parameters described as a JSON Schema object.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
            },
        },
    }
]

# Conversation seed: a system prompt announcing tool availability plus the
# user question the model should answer via the tool.
messages = [
    {
        "role": "system",
        "content": "You are a helpful assistant that can access external functions. The responses from these function calls will be appended to this dialogue. Please provide responses based on the information from these function calls.",
    },
    {"role": "user", "content": "What is the current temperature of New York?"},
]

# Because `tools` is supplied, the response may carry tool_calls instead of
# (or in addition to) plain text content.
response = client.chat(
    model="Llama-3.1-8B",
    messages=messages,
    tools=tools,
)

pprint(response)

Single Tool with helper functions#

The following example shows how to use tool calling capabilities of the chat method using helper functions to create the schema instead of explicitly defining it.

from langchain_core.utils.function_calling import convert_to_openai_tool
from rich.pretty import pprint

import imagine


# Client configured with a single retry and verbose debug logging.
client = imagine.ImagineClient(max_retries=1, debug=True)


def get_current_weather(location: str) -> dict[str, str]:
    """Retrieves current weather based on the location"""
    # Canned example payload; the conditions are hard-coded on purpose.
    fixed_report = {"forecast": "Sunny", "temperature": "35.5"}
    return {"location": location, **fixed_report}


# Conversation seed: system prompt announcing tool availability plus the
# user question the model should answer via the tool.
messages = [
    {
        "role": "system",
        "content": "You are a helpful assistant that can access external functions. The responses from these function calls will be appended to this dialogue. Please provide responses based on the information from these function calls.",
    },
    {"role": "user", "content": "What is the current temperature of New York?"},
]

# Derive the OpenAI-style tool schemas from the function signatures and
# docstrings instead of writing the JSON Schema by hand.
functions = [get_current_weather]
tools = [convert_to_openai_tool(t) for t in functions]


response = client.chat(model="Llama-3.1-8B", messages=messages, tools=tools)

pprint(response, expand_all=True)

Multi-turn tool calling#

This is a multi-turn tool calling example.

import json

import funcs

from rich.pretty import pprint

import imagine


# -------------------------------------------------------------------------------------
# Client configured with a single retry and verbose debug logging.
client = imagine.ImagineClient(max_retries=1, debug=True)

# Conversation seed; the user question requires three weather lookups, so
# the model is expected to emit multiple tool calls.
messages = [
    {
        "role": "system",
        "content": "You are a helpful assistant that can access external functions. The responses from these function calls will be appended to this dialogue. Please provide responses based on the information from these function calls.",
    },
    {
        "role": "user",
        "content": "What is the current temperature of New York, San Francisco and Chicago?",
    },
]

# Only expose the weather tool; see funcs.get_tools_map for the shape.
tools_map = funcs.get_tools_map(["get_current_weather"])

MODEL = "Llama-3.1-8B"

# --------------------------------------------------------------------------------------

# "__all__" aggregates the specs of every requested tool.
tools_spec = tools_map["__all__"]["schema"]

# Turn 1: the model decides which tool(s) to call.
response = client.chat(
    model=MODEL,
    messages=messages,
    tools=tools_spec,
)


# The assistant message (including its tool_calls) must be appended to the
# history before the tool results, so the model can pair them up.
messages.append(response.choices[0].message)

pprint(response, expand_all=True)

tool_calls = response.choices[0].message.tool_calls

if tool_calls:
    for tool_call in tool_calls:
        # Each tool call carries the function name and JSON-encoded args.
        function_call = tool_call.function
        name = function_call.name
        arguments = json.loads(function_call.arguments)

        # Resolve the name against the exposed callables and invoke it.
        for function in tools_map["__all__"]["call"]:
            if function.__name__ == name:
                print(f"Invoking tool call: {name} with args :: {arguments}")
                function_response = function(**arguments)
                print(f"Result of invocation {function_response}")

                # Feed the result back as a "tool" role message, keyed by
                # tool_call_id. get_current_weather already returns a JSON
                # string, so no extra serialization is needed here.
                messages.append(
                    {
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": name,
                        "content": function_response,
                    }
                )

    pprint(messages)

    # Turn 2: resend the enriched history (no tools) so the model can
    # compose a natural-language answer from the tool results.
    function_enriched_response = client.chat(
        model=MODEL,
        messages=messages,
    )

    pprint(function_enriched_response)

Multiple-tools tool calling#

This is a tool calling example where we pass in multiple tools.

import json

import funcs

from rich.pretty import pprint

import imagine


# --------------------------------------------------------------------------------------
# Client configured with a single retry and verbose debug logging.
client = imagine.ImagineClient(max_retries=1, debug=True)

# Conversation seed; the question only needs get_stock_price, but several
# tools are exposed so the model must pick the right one.
messages = [
    {
        "role": "system",
        "content": "You are a helpful assistant that can access external functions. The responses from these function calls will be appended to this dialogue. Please provide responses based on the information from these function calls.",
    },
    {"role": "user", "content": "Get the stock price of AAPL and GOOG"},
]

tools_map = funcs.get_tools_map(
    ["get_current_weather", "get_stock_price", "get_user_location", "get_random_city"]
)

MODEL = "Llama-3.1-8B"

# --------------------------------------------------------------------------------------

# "__all__" aggregates the specs of every requested tool.
tools_spec = tools_map["__all__"]["schema"]

# Turn 1: the model decides which tool(s) to call.
response = client.chat(
    model=MODEL,
    messages=messages,
    tools=tools_spec,
)


# The assistant message (including its tool_calls) must be appended to the
# history before the tool results, so the model can pair them up.
messages.append(response.choices[0].message)

pprint(response, expand_all=True)

tool_calls = response.choices[0].message.tool_calls

if tool_calls:
    for tool_call in tool_calls:
        # Each tool call carries the function name and JSON-encoded args.
        function_call = tool_call.function
        name = function_call.name
        arguments = json.loads(function_call.arguments)

        # Resolve the name against the exposed callables and invoke it.
        for function in tools_map["__all__"]["call"]:  # type: ignore
            if function.__name__ == name:
                print(f"Invoking tool call: {name} with args :: {arguments}")
                function_response = function(**arguments)
                print(f"Result of invocation {function_response}")

                # str() because tools here may return non-strings (e.g.
                # get_stock_price returns a float) and message content
                # must be text.
                messages.append(
                    {
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": name,
                        "content": str(function_response),
                    }
                )

    pprint(messages)

    # Turn 2: resend the enriched history (no tools) so the model can
    # compose a natural-language answer from the tool results.
    function_enriched_response = client.chat(
        model=MODEL,
        messages=messages,  # type: ignore
    )

    pprint(function_enriched_response)

Multiple-tools recursive tool calling#

This is a tool calling example showcasing the recursive multiple tool calling scenario.

import json

import funcs

from loguru import logger
from rich.pretty import pprint

import imagine


# --------------------------------------------------------------------------------------
# Client configured with a single retry and verbose debug logging.
client = imagine.ImagineClient(max_retries=1, debug=True)

# Conversation seed. NOTE(review): the prompt text reads "at that location
# user's location" — looks garbled; presumably "at the user's location".
# Also note this list is re-created again further below before the run.
messages = [
    {
        "role": "user",
        "content": "Get the user's location first. Get weather forecast at that location user's location. Call the functions one at a time sequentially without commenting or asking for confirmation",
    }
]


# Expose the tools the recursive loop may chain together.
tools_map = funcs.get_tools_map(
    ["get_weather_forecast", "get_user_location", "get_current_time"]
)

MODEL = "Llama-3.1-8B"

# --------------------------------------------------------------------------------------

# "__all__" aggregates the specs of every requested tool.
tools_spec = tools_map["__all__"]["schema"]


def run_inference(messages, tools):
    """Send one chat-completion request with the given history and tool specs.

    Uses the module-level `client` and `MODEL`; temperature is fixed at 0.8.
    """
    return client.chat(model=MODEL, messages=messages, tools=tools, temperature=0.8)


def recursive_tool_calling():
    """Loop model <-> tools until the model stops requesting tool calls.

    Reads and mutates the module-level `messages` list in place (each
    assistant message and each tool result is appended), and uses the
    module-level `tools_map` / `tools_spec`. Returns the final history.
    """
    while True:
        response = run_inference(messages, tools_spec)

        # Record the assistant turn (may contain tool_calls) in history.
        assistant_message = response.choices[0].message
        messages.append(assistant_message)

        logger.debug(f"Assistant Message: {assistant_message}")

        # No tool calls means the model produced its final answer.
        if not assistant_message.tool_calls:
            break

        for tool_call in assistant_message.tool_calls:
            # Each tool call carries the function name and JSON-encoded args.
            function_call = tool_call.function
            name = function_call.name
            arguments = json.loads(function_call.arguments)
            for function in tools_map["__all__"]["call"]:
                if function.__name__ == name:
                    logger.debug(f"Invoking tool call: {name}")
                    function_result = function(**arguments)
                    logger.debug(f"Result of invocation {function_result}")

                    # Message content must be text; serialize dict results.
                    if not isinstance(function_result, str):
                        function_result = json.dumps(function_result)

                    messages.append(
                        {
                            "name": name,
                            "role": "tool",
                            "content": function_result,
                            "tool_call_id": tool_call.id,
                        }
                    )
                    logger.debug(f"Tool Call Result: {function_result}")
                    # Stop at the first name match — invoke each call once.
                    break

    return messages


# Reset the conversation right before the run; recursive_tool_calling reads
# and mutates this module-level list. The prompt is fixed here: the original
# read "Get weather forecast at that location user's location", which was
# garbled.
messages = [
    {
        "role": "user",
        "content": "Get the user's location first. Get the weather forecast at the user's location. Call the functions one at a time sequentially without commenting or asking for confirmation",
    }
]

# Drive the loop to completion and show the full transcript, including every
# intermediate tool call and result.
messages = recursive_tool_calling()

pprint(messages)