LangChain Tools#

LangChain tools let a model interact with the outside world: you describe functions to the model, the model decides when to call them and with which arguments, and your code executes the calls.

Basic Example#

from langchain_core.messages import HumanMessage, SystemMessage
from pydantic import BaseModel, Field
from rich.pretty import pprint

from imagine.langchain import ImagineChat


# Reference for tool calling
# https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling/


llm = ImagineChat(model="Llama-3.1-8B", max_tokens=512)


class GetWeather(BaseModel):
    """Get the current weather in a given location"""

    location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


class GetPopulation(BaseModel):
    """Get the current population in a given location"""

    location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


model_with_tools = llm.bind_tools([GetWeather, GetPopulation])


messages = [
    SystemMessage(
        content="You are a helpful assistant that can access external functions. The responses from these function calls will be appended to this dialogue. Please provide responses based on the information from these function calls."
    ),
    # HumanMessage(content="What is the population of NY?"),
    HumanMessage(content="What is the current weather in NY?"),
]


response_message = model_with_tools.invoke(messages)
# response_tool_calls = model_with_tools.invoke(messages).tool_calls


pprint(response_message)
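
Invoking the bound model returns tool calls rather than tool results; executing each call and feeding the result back to the model is up to your code. A minimal sketch of that loop, with a hardcoded weather string standing in for a real lookup:

from langchain_core.messages import ToolMessage

# The model only proposes calls; each entry in .tool_calls is a dict
# with "name", "args" and "id" keys.
messages.append(response_message)
for tool_call in response_message.tool_calls:
    result = "15 degrees Celsius and sunny"  # stand-in for a real weather API
    messages.append(ToolMessage(content=result, tool_call_id=tool_call["id"]))

# With the tool results appended, the model can produce a final answer.
final_response = model_with_tools.invoke(messages)
pprint(final_response.content)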

Different ways to specify tools#

from pydantic import BaseModel, Field
from langchain_core.tools import tool
from rich.pretty import pprint

from imagine.langchain import ImagineChat


# Pydantic class
class multiply(BaseModel):
    """Return product of 'x' and 'y'."""

    x: float = Field(..., description="First factor")
    y: float = Field(..., description="Second factor")


# LangChain tool
@tool
def exponentiate(x: float, y: float) -> float:
    """Raise 'x' to the 'y'."""
    return x**y


# Function
def subtract(x: float, y: float) -> float:
    """Subtract 'x' from 'y'."""
    return y - x


# OpenAI-format dict
add = {
    "name": "add",
    "description": "Add 'x' and 'y'.",
    "parameters": {
        "type": "object",
        "properties": {
            "x": {"type": "number", "description": "First number to add"},
            "y": {"type": "number", "description": "Second number to add"},
        },
        "required": ["x", "y"],
    },
}

llm = ImagineChat(model="Llama-3.1-8B", temperature=0)


llm_with_tools = llm.bind_tools([multiply, exponentiate, add, subtract])

response = llm_with_tools.invoke(
    [
        ("system", "You're a helpful assistant"),
        # ("human", "what's 5 raised to the 2.743"),
        # ("human", "what's 5 minus 56"),
        ("human", "What is 100 into 35"),
    ]
)


pprint(response)
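
The model only selects a tool and fills in its arguments; mapping the call back to runnable Python is left to you. A sketch of one way to dispatch (the multiply and add lambdas are stand-ins, since those two tools have no implementation above):

# Map tool names to callables; tool_call["args"] is a dict of keyword arguments.
dispatch = {
    "multiply": lambda x, y: x * y,  # stand-in: the Pydantic class has no body
    "exponentiate": lambda x, y: x**y,  # or: exponentiate.invoke(tool_call["args"])
    "subtract": subtract,
    "add": lambda x, y: x + y,  # stand-in: 'add' is only a schema dict
}

for tool_call in response.tool_calls:
    fn = dispatch[tool_call["name"]]
    print(tool_call["name"], "=", fn(**tool_call["args"]))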

1. Agent Executor: iterating over steps#

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool

from imagine.langchain import ImagineChat
from langchain.agents import AgentExecutor, create_tool_calling_agent


@tool
def multiply(x: float, y: float) -> float:
    """Multiply 'x' times 'y'."""
    return x * y


@tool
def exponentiate(x: float, y: float) -> float:
    """Raise 'x' to the 'y'."""
    return x**y


@tool
def add(x: float, y: float) -> float:
    """Add 'x' and 'y'."""
    return x + y


prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "you're a helpful assistant"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)

tools = [multiply, exponentiate, add]


llm = ImagineChat(model="Llama-3.1-70B", temperature=0)


agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(
    agent=agent, tools=tools, verbose=True, stream_runnable=False
)

for step in agent_executor.iter(
    {"input": "what's 3 plus 5. Get me the 5th power of result"}
):
    print(step)
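
Each yielded step is a dict: intermediate steps expose the (action, observation) pair under an "intermediate_step" key, and the final step exposes the answer under "output". A sketch for printing only the final answer:

for step in agent_executor.iter(
    {"input": "what's 3 plus 5. Get me the 5th power of the result"}
):
    if "output" in step:
        print("Final answer:", step["output"])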

2. Agent Executor: single invocation#

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from rich.pretty import pprint

from imagine.langchain import ImagineChat
from langchain.agents import AgentExecutor, create_tool_calling_agent


model = ImagineChat(model="Llama-3.1-70B", temperature=0)

query = "what is the value of magic_function(3)?"

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "{input}"),
        # Placeholders fill up a **list** of messages
        ("placeholder", "{agent_scratchpad}"),
    ]
)


@tool
def magic_function(input: int) -> int:
    """Applies a magic function to an input."""
    return input + 2


tools = [magic_function]

agent = create_tool_calling_agent(model, tools, prompt=prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, stream_runnable=False)

response = agent_executor.invoke({"input": query})

pprint(response)
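
AgentExecutor.invoke returns a plain dict that echoes the input and carries the final answer under "output", so the answer can be read directly:

# e.g. {"input": "what is the value of magic_function(3)?", "output": "..."}
print(response["output"])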

Structured Output#

from pydantic import BaseModel, Field
from rich.pretty import pprint

from imagine.langchain import ImagineChat


llm = ImagineChat(model="Llama-3.1-70B", max_tokens=512)


class AnswerWithJustification(BaseModel):
    """An answer to the user question along with justification for the answer."""

    answer: str

    # If we provide default values and/or descriptions for fields, these will be passed
    # to the model. This is an important part of improving a model's ability to correctly return structured outputs.
    justification: str | None = Field(
        default=None, description="A justification for the answer."
    )


# ---------------------------

# include_raw is False
structured_model = llm.with_structured_output(AnswerWithJustification)
response = structured_model.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
pprint(response)
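
# With include_raw=False (the default), the return value is the parsed
# AnswerWithJustification instance itself, so its fields can be read directly:
print(response.answer)
print(response.justification)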

# ---------------------------

structured_model = llm.with_structured_output(AnswerWithJustification, include_raw=True)
response = structured_model.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
pprint(response)
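
# With include_raw=True the result is instead a dict that keeps the raw model
# message next to the parsed object, so parsing failures surface as values
# rather than exceptions (assuming ImagineChat follows the standard LangChain
# with_structured_output contract):
# {"raw": AIMessage, "parsed": AnswerWithJustification | None, "parsing_error": ...}
if response["parsing_error"] is None:
    print(response["parsed"].answer)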


# ---------------------------

oai_schema = {
    "name": "AnswerWithJustification",
    "description": "An answer to the user question along with justification for the answer.",
    "parameters": {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
            "justification": {
                "description": "A justification for the answer.",
                "type": "string",
            },
        },
        "required": ["answer"],
    },
}

structured_model = llm.with_structured_output(oai_schema)
response = structured_model.invoke(
    "What weighs more a pound of bricks or a pound of feathers"
)
pprint(response)
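
# With a dict schema there is no Pydantic model to validate against, so the
# parsed output is a plain dict shaped by the schema, e.g.
# {"answer": "...", "justification": "..."}:
print(response["answer"])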
