suribe06
suribe06

Reputation: 89

Parameter Injection Issue with Tool Calls in LangChain Agent

I'm encountering an issue with parameter injection when using a LangChain agent to call a tool. Although the parameter injection appears to succeed based on the AI message I print, the agent's tool call ultimately receives empty parameters, as if it’s still using the original (empty) message instead of the updated one with the injected parameters.

I'm using the following code structure to inject parameters dynamically into the api_tool. The relevant code snippets are as follows:

class APIToolInput(BaseModel):
    """Input schema for `api_tool`.

    The three fields marked with `InjectedToolArg` are intended to be
    supplied by the application at runtime rather than generated by the
    LLM; `params` and `headers` remain model-fillable.
    """

    swagger_file: Annotated[str, InjectedToolArg] = Field(..., description="The URL or file path to the OpenAPI (swagger) specification file, which describes the API's structure.")
    method: Annotated[str, InjectedToolArg] = Field(..., description="The HTTP method to use for the API call.")
    endpoint: Annotated[str, InjectedToolArg] = Field(..., description="The endpoint path to use for the API call.")
    # NOTE: pydantic copies field defaults per instance, so `default={}` does
    # not share one dict across instances the way a plain function default would.
    params: Optional[dict] = Field(description="A dictionary of query parameters to include in the API request.", default={}) 
    headers: Optional[dict] = Field(description="A dictionary of headers to include in the API request.", default={})

@tool(parse_docstring=False, args_schema=APIToolInput)
def api_tool(swagger_file: Annotated[str, InjectedToolArg],
             method: Annotated[str, InjectedToolArg],
             endpoint: Annotated[str, InjectedToolArg],
             params: Optional[dict] = None,
             headers: Optional[dict] = None) -> dict:
    """Call an HTTP endpoint of an API described by an OpenAPI (swagger) spec.

    Args:
        swagger_file: URL or file path of the OpenAPI spec; its base URL is
            used as the prefix for the request.
        method: HTTP method to use (e.g. "get", "post").
        endpoint: Endpoint path appended to the spec's base URL.
        params: Optional query parameters for the request.
        headers: Optional request headers.

    Returns:
        The decoded JSON response body on success, or
        ``{"error": ...}`` when the request fails or returns non-200.
    """
    # Use None sentinels instead of mutable `{}` defaults; a shared default
    # dict would leak state between calls if a caller ever mutated it.
    params = {} if params is None else params
    headers = {} if headers is None else headers

    spec = OpenAPISpec.from_url(swagger_file)
    full_url = f"{spec.base_url}{endpoint}"
    try:
        response_api = requests.request(
            method=method,
            url=full_url,
            params=params,
            headers=headers,
        )
    except Exception as e:
        # Network/transport failures are reported to the agent as data,
        # not raised, so the graph can continue.
        return {"error": f"Failed to call API: {str(e)}"}

    if response_api.status_code != 200:
        return {"error": f"Failed to call API: {response_api.text}"}

    return response_api.json()

@chain
def inject_parameters(ai_msg, **kwargs):
    """Return a copy of *ai_msg* with *kwargs* merged into every tool call.

    Both representations of the tool calls are updated: the raw OpenAI-style
    ``additional_kwargs['tool_calls'][i]['function']['arguments']`` JSON
    string, and the parsed ``tool_calls[i]['args']`` dicts.

    Args:
        ai_msg: An AIMessage carrying tool calls; it is not mutated.
        **kwargs: Parameter name/value pairs to inject.

    Returns:
        A deep copy of *ai_msg* with the parameters injected.
    """
    import json

    new_ai_msg = deepcopy(ai_msg)

    # Build the arguments string with the json module. Hand-concatenating
    # '"{key}":"{value}"' produces invalid JSON whenever a value contains
    # quotes/braces, and coerces every value to a string. Merge into any
    # existing arguments so both loops below behave consistently.
    for tool_call in new_ai_msg.additional_kwargs['tool_calls']:
        existing = json.loads(tool_call['function']['arguments'] or "{}")
        existing.update(kwargs)
        tool_call['function']['arguments'] = json.dumps(existing)

    # Mirror the injection into the parsed tool-call args.
    for tool_call in new_ai_msg.tool_calls:
        tool_call["args"].update(kwargs)

    return new_ai_msg

The agent and workflow setup injects parameters during a conditional step (should_continue) in a LangGraph workflow. Here’s how I call the agent and inject parameters:

def call_agent(state: State) -> State:
    """Invoke the agent on the conversation history and record its reply.

    The agent's response replaces the message list in *state*.
    """
    response = agent.invoke(state['messages'])
    state["messages"] = [response]
    return state

def should_continue(state: State) -> Literal["tools", END]:
    """Route to the "tools" node when the last message carries tool calls.

    Before routing, the application-controlled API parameters from
    ``state["intermediate_steps"]["api_selection"]`` are injected into the
    tool calls of the last AI message.
    """
    last_message = state['messages'][-1]
    if not last_message.tool_calls:
        return END

    api_selection = state["intermediate_steps"]["api_selection"]
    new_ai_msg = inject_parameters.invoke(
        last_message,
        swagger_file=api_selection["swagger_file"],
        method=api_selection["http_method"],
        endpoint=api_selection["endpoint_path"],
    )
    # NOTE(review): mutating state inside a conditional-edge path function is
    # presumably discarded by LangGraph (only node return values update the
    # checkpointed state) — confirm; the injection may belong in a node.
    state['messages'][-1] = new_ai_msg
    return "tools"
# Linear pipeline: preprocess the query, then decide how to answer it.
workflow.add_edge(START, "preprocess_query")
workflow.add_edge("preprocess_query", "decision")
# Branch: plain LLM answer vs. retrieval over the graph RAG store.
workflow.add_conditional_edges(source="decision", path=route_tools, path_map={"llm": "llm", "graph_rag": "graph_rag"})
workflow.add_edge("graph_rag", "format_rag_results")
workflow.add_edge("format_rag_results", "select_api")
workflow.add_edge("select_api", "agent")
# Agent loop: run tools while the agent emits tool calls, then finish.
workflow.add_conditional_edges(source="agent", path=should_continue)
workflow.add_edge("tools", 'agent')
workflow.add_edge("llm", END)

# In-memory checkpointing so each thread's state survives between steps.
checkpointer = MemorySaver()
app = workflow.compile(checkpointer=checkpointer)

After injecting the parameters, the printed AI message shows the parameters correctly injected:

[HumanMessage(content='What are the current bitcoin price?'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_KNwEt72ii9HJJkr3hRm7dQRG', 'function': {'arguments': '{"swagger_file":"https://api.apis.guru/v2/specs/interzoid.com/getcurrencyrate/1.0.0/openapi.json","method":"get","endpoint":"/getcurrencyrate"}', 'name': 'api_tool'}, 'type': 'function'}]}, tool_calls=[{'name': 'api_tool', 'args': {'swagger_file': 'https://api.apis.guru/v2/specs/interzoid.com/getcurrencyrate/1.0.0/openapi.json', 'method': 'get', 'endpoint': '/getcurrencyrate'}}])]

However, in LangSmith’s trace, the tool call appears empty, and this results in the following error:

ValidationError(model='APIToolInput', errors=[{'loc': ('swagger_file',), 'msg': 'field required', 'type': 'value_error.missing'}, {'loc': ('method',), 'msg': 'field required', 'type': 'value_error.missing'}, {'loc': ('endpoint',), 'msg': 'field required', 'type': 'value_error.missing'}])

Why would the parameter injection show as successful in my printed output, but still result in empty parameters during the tool call execution? Am I missing something in how the tool call processes injected parameters, or could there be an issue with LangSmith’s handling of updated messages?

Upvotes: 0

Views: 198

Answers (0)

Related Questions