Reputation: 125
I have this reproducible code to set up a simple agent that answers a simple question.
The purpose of this example is to show that the parameter return_direct
does not work.
Whether it's True
or False
, the output of the tool is still passed to the LLM.
# Import necessary packages
import pandas as pd
import sys
import os
from dotenv import load_dotenv
from langchain_community.tools import StructuredTool
from typing import Literal
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI

sys.path.append("..")
from config import this_dir

load_dotenv(os.path.join(this_dir, ".env"))

# Define function to print final output of Agent
def print_stream(stream):
    for s in stream:
        message = s["messages"][-1]
        if isinstance(message, tuple):
            print(message)
        else:
            message.pretty_print()

# Define function that will go into the tool
def get_weather(city: Literal["nyc", "sf"]):
    """Use this to get weather information."""
    if city == "nyc":
        return "It might be cloudy in nyc"
    elif city == "sf":
        return "It's always sunny in sf"
    else:
        raise AssertionError("Unknown city")

# Define the tool
weather_tool = StructuredTool.from_function(
    name="get_weather_information",
    description="get weather information",
    func=get_weather,
    return_direct=True)

# Initialize the LLM model
model = ChatOpenAI(model="gpt-4o", temperature=0)

# Define toolkit (only 1 tool in this case)
tools = [weather_tool]

# Define simple system prompt
prompt = "Respond in Italian"

# Define the graph
graph = create_react_agent(model, tools=tools, state_modifier=prompt)

# Define user question
inputs = {"messages": [("user", "What's the weather in NYC?")]}

# Print messages
print_stream(graph.stream(inputs, stream_mode="values"))
Do you know how to force the agent to return the output of the tool as it is (i.e., without passing the output to an LLM)?
Upvotes: 0
Views: 549
Reputation: 125
I think I solved my issue.
Instead of setting the parameter return_direct in StructuredTool.from_function()
(which is not working in my case), I changed one line of code in the definition of the agent.
I created my agent using the following class. It is the most basic agent you can build, and it is introduced in a deeplearning.ai training course by Harrison Chase:
https://learn.deeplearning.ai/courses/ai-agents-in-langgraph/lesson/3/langgraph-components:
from typing import TypedDict, Annotated
import operator

from langchain_core.messages import AnyMessage, SystemMessage, ToolMessage
from langgraph.graph import StateGraph, END


class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], operator.add]


class Agent:
    def __init__(self, tools, checkpointer, system=""):
        self.system = system
        graph = StateGraph(AgentState)
        graph.add_node("llm", self.call_openai)
        graph.add_node("action", self.take_action)
        graph.add_conditional_edges(
            "llm",
            self.exists_action,
            {True: "action", False: END}
        )
        graph.add_edge("action", "llm")
        graph.set_entry_point("llm")
        self.graph = graph.compile(checkpointer=checkpointer)
        self.tools = {t.name: t for t in tools}
        self.model = model.bind_tools(tools)  # uses the `model` defined earlier

    def exists_action(self, state: AgentState):
        result = state['messages'][-1]
        return len(result.tool_calls) > 0

    def call_openai(self, state: AgentState):
        messages = state['messages']
        if self.system:
            messages = [SystemMessage(content=self.system)] + messages
        message = self.model.invoke(messages)
        return {'messages': [message]}

    def take_action(self, state: AgentState):
        tool_calls = state['messages'][-1].tool_calls
        results = []
        for t in tool_calls:
            print(f"Calling: {t}")
            if t['name'] not in self.tools:  # check for bad tool name from LLM
                print("\n ....bad tool name....")
                result = "bad tool name, retry"  # instruct LLM to retry if bad
            else:
                result = self.tools[t['name']].invoke(t['args'])
            results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))
        print("Back to the model!")
        return {'messages': results}
What I had to do was change the line graph.add_edge("action", "llm")
into graph.add_edge("action", END)
. This way, when the LLM calls a tool, the tool's output becomes the END of the agent run instead of being sent back to the LLM.
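To make the change explicit, here is a minimal sketch of the __init__ wiring with only that edge swapped (everything else in the class above stays the same):

        # inside Agent.__init__, same nodes and conditional edge as before ...
        # graph.add_edge("action", "llm")   # original: tool output goes back to the LLM
        graph.add_edge("action", END)       # changed: tool output is the final message
        graph.set_entry_point("llm")
        self.graph = graph.compile(checkpointer=checkpointer)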
It is working at the moment. I hope I didn't break anything else :D
Upvotes: 1
Reputation: 2294
This is not tested, but in theory it should work since you only have one tool.
First we create a customised version of the graph-creation function, since the weather
tool is a StructuredTool
.
# graph creation process to handle return_direct
def customized_create_react_agent(model, tools, state_modifier):
    # logic for returning the tool's output directly
    def custom_graph(inputs):
        # assuming we have a 'messages' key in the form of a dict
        user_message = inputs["messages"][0][1]
        for tool in tools:
            if tool.return_direct:
                # strip trailing punctuation so "NYC?" becomes "nyc"
                city = user_message.split()[-1].lower().strip("?!.,")
                tool_output = tool.func(city)
                return [{"messages": [("tool", tool_output)]}]
        # fall back to the original graph if return_direct is False
        return create_react_agent(model, tools=tools, state_modifier=state_modifier).stream(inputs, stream_mode="values")
    return custom_graph

graph = customized_create_react_agent(model, tools=tools, state_modifier=prompt)

# questions from user
inputs = {"messages": [("user", "What's the weather in NYC?")]}

# Print messages
print_stream(graph(inputs))
If this doesn't work, we would fall back to the Union[AgentAction, AgentFinish] approach:
from langchain.schema import AgentAction, AgentFinish
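The answer doesn't spell this out, but the idea would be a decision step that wraps the raw tool output in an AgentFinish so the run ends there instead of going back to the LLM. A rough sketch under that assumption (the function decide_next_step and its arguments are illustrative, not part of LangChain's API):

from langchain.schema import AgentAction, AgentFinish

def decide_next_step(tool_was_called: bool, tool_output: str = ""):
    if tool_was_called:
        # AgentFinish ends the run; return_values carries the final answer,
        # so the raw tool output is returned without another LLM call
        return AgentFinish(return_values={"output": tool_output},
                           log="returning tool output directly")
    # otherwise request a tool call
    return AgentAction(tool="get_weather_information",
                       tool_input={"city": "nyc"},
                       log="calling weather tool")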
Upvotes: 0