I set up a very simple multi-agent system consisting of two agents that should communicate with each other. In my current example, one acts as the interviewer and the other as the interviewee (I previously used more complex examples but always ran into the same issue).
The problem is that the interviewee never returns any output, even when specifically prompted to do so by both the system prompt and the interviewer.
This is my code:
from langchain_ollama import ChatOllama
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AnyMessage, HumanMessage
from langgraph.graph.message import add_messages
from langgraph.graph import END, START, StateGraph
from typing import TypedDict, Annotated
import functools
# load Llama 3.1 from ollama
llm = ChatOllama(
    model="llama3.1:70b",
    temperature=0,
)
# function to create agent
def create_agent(llm, system_message: str):
    """Create an agent."""
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                """
{system_message}
If you reach the end of the interview, prefix your message with FINAL ANSWER.
Example Interview:
Interviewer: Hello, and thank you for joining me today. Let us start with an introduction. What is your name and how old are you?
Interviewee: Hi, I'm David and I'm 72 years old.
Interviewer: David, what is your profession?
Interviewee: I'm currently retired, but I worked as a carpenter for over 40 years.
...
""",
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )
    prompt = prompt.partial(system_message=system_message)
    # return the agent chain built from the prompt template and the LLM
    return prompt | llm
# create interviewer agent
interviewer_agent = create_agent(
    llm,
    system_message="""You are an experienced interviewer, specialised in interviewing others about their daily lives and struggles. Ask questions that an interviewee has to answer.
Always be respectful and neutral. Make sure your questions are concise and easy to understand.""",
)
# create interviewee agent
interviewee_agent = create_agent(
    llm,
    system_message="""You are a person being interviewed about your daily life and struggles. Answer each question truthfully and in a concise way.
Your background: You are Linda, 30 years old and a nurse working in the emergency room. Your job is very stressful and you are currently considering quitting. You are very fulfilled in your personal life,
with an amazing friend group and a loving cat named Archie.""",
)
# define the graph state that gets updated throughout the agent runtime
class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]
    sender: str
# helper function to create a node for a given agent
def agent_node(state, agent):
    messages = state["messages"]
    response = agent.invoke(messages)
    # return a list, because this will get added to the existing list
    return {"messages": [response]}
# interviewer node
interviewer_node = functools.partial(agent_node, agent=interviewer_agent)
# interviewee node
interviewee_node = functools.partial(agent_node, agent=interviewee_agent)
# define function that will be used to determine which conditional edge to use
def router(state):
    # decide which conditional edge to follow
    messages = state["messages"]
    last_message = messages[-1]
    if "FINAL ANSWER" in last_message.content:
        # one of the agents decided the work is done
        return "__end__"
    return "continue"
# define a new graph
workflow = StateGraph(AgentState)
# add the nodes to the graph
workflow.add_node("interviewer", interviewer_node)
workflow.add_node("interviewee", interviewee_node)
# set the entrypoint
workflow.add_edge(START, "interviewer")
# add the conditional edges
workflow.add_conditional_edges(
    "interviewer",
    router,
    {"continue": "interviewee", "__end__": END},
)
workflow.add_conditional_edges(
    "interviewee",
    router,
    {"continue": "interviewer", "__end__": END},
)
# compile the finished graph (to a LangChain runnable)
app = workflow.compile()
events = app.stream(
    {
        "messages": [HumanMessage(content="Conduct an interview.")],
    },
    # maximum number of steps to take in the graph
    {"recursion_limit": 150},
    # stream_mode="updates"
)
for event in events:
    print(event)
    print("----")
These are the first four responses as example output:
{'interviewer': {'messages': [AIMessage(content="Hello, and thank you for joining me today. Let's start with an introduction. What is your name and how old are you?\n\n(Please respond as the interviewee)", additional_kwargs={}, response_metadata={'model': 'llama3.1:70b', 'created_at': '2024-11-18T09:45:16.607630216Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 1141033512, 'load_duration': 16577368, 'prompt_eval_count': 168, 'prompt_eval_duration': 118000000, 'eval_count': 36, 'eval_duration': 1004000000}, id='run-17ee4221-f43c-4302-a264-9fd539b4afcd-0', usage_metadata={'input_tokens': 168, 'output_tokens': 36, 'total_tokens': 204})]}}
----
{'interviewee': {'messages': [AIMessage(content='', additional_kwargs={}, response_metadata={'model': 'llama3.1:70b', 'created_at': '2024-11-18T09:45:16.884003648Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 246128695, 'load_duration': 15786026, 'prompt_eval_count': 234, 'prompt_eval_duration': 160000000, 'eval_count': 1, 'eval_duration': 68000000}, id='run-24ba18e4-7e46-43c8-8ce1-164c42636836-0', usage_metadata={'input_tokens': 234, 'output_tokens': 1, 'total_tokens': 235})]}}
----
{'interviewer': {'messages': [AIMessage(content="(Note: I'll ask follow-up questions based on your response)", additional_kwargs={}, response_metadata={'model': 'llama3.1:70b', 'created_at': '2024-11-18T09:45:17.513548188Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 585746776, 'load_duration': 15973939, 'prompt_eval_count': 203, 'prompt_eval_duration': 149000000, 'eval_count': 14, 'eval_duration': 417000000}, id='run-5dbe2491-e634-4be0-918d-945243c0ccef-0', usage_metadata={'input_tokens': 203, 'output_tokens': 14, 'total_tokens': 217})]}}
----
{'interviewee': {'messages': [AIMessage(content='', additional_kwargs={}, response_metadata={'model': 'llama3.1:70b', 'created_at': '2024-11-18T09:45:17.870774429Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 247878438, 'load_duration': 15821298, 'prompt_eval_count': 247, 'prompt_eval_duration': 161000000, 'eval_count': 1, 'eval_duration': 68000000}, id='run-8a0bd6f2-0ecb-4b45-ae2a-321ed6c332cc-0', usage_metadata={'input_tokens': 247, 'output_tokens': 1, 'total_tokens': 248})]}}
No matter what I try, the content returned by the second agent (here, the interviewee) is always empty. This was also the case in my other, more complex examples, e.g. letting two agents discuss a topic.
I have already checked and confirmed that each message is correctly added to the state and that the list of messages the agents are invoked with is complete.
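For reference, this is roughly how I checked it, with a temporary debug print in agent_node (illustrative only):

def agent_node(state, agent):
    messages = state["messages"]
    # debug: print the type and content of every message the agent receives
    for m in messages:
        print(type(m).__name__, repr(m.content))
    response = agent.invoke(messages)
    # return a list, because this will get added to the existing list
    return {"messages": [response]}

This showed the full, expected message history before each invocation.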
I also started using LangSmith in the hope of finding the issue, but everything looks fine there as well.
I have also tried to improve my prompts, but is poor prompt engineering really the only issue here?
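For completeness, here is a minimal way to test the interviewee chain in isolation, outside of the graph (reusing the agents defined above; the question text is just an example):

# invoke the interviewee chain directly, bypassing LangGraph
reply = interviewee_agent.invoke(
    [HumanMessage(content="What is your name and how old are you?")]
)
print(repr(reply.content))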