ValueError: variable agent_scratchpad should be a list of base messages, got #22885
Replies: 2 comments
-
The error you're encountering is due to the `agent_scratchpad` variable not being a list of base messages. To resolve this, ensure that the `agent_scratchpad` is constructed as a list of message objects (e.g. `AIMessage` / `HumanMessage`).
Here’s an example of how you can adjust your code: from langchain_core.messages import AIMessage, HumanMessage
def format_log_to_messages(intermediate_steps):
    """Convert (action, observation) pairs into a flat list of messages.

    Each agent action contributes its raw log as an ``AIMessage``,
    immediately followed by a ``HumanMessage`` that reports the tool
    observation, so the agent can resume its reasoning from a plain
    list of base messages (the shape ``agent_scratchpad`` expects).
    """
    messages = []
    for step_action, step_observation in intermediate_steps:
        messages.extend([
            AIMessage(content=step_action.log),
            HumanMessage(content=f"Observation: {step_observation}"),
        ])
    return messages
# Ensure agent_scratchpad is formatted correctly
agent_scratchpad = format_log_to_messages([]) # Initialize with an empty list
# Build the structured-chat agent from the tools, LLM, and prompt defined above.
agent = create_structured_chat_agent(
tools=tools,
llm=llm,
prompt=prompt
)
# Windowed memory: keep only the last k=3 exchanges, returned as message
# objects (return_messages=True) so they slot into a MessagesPlaceholder.
memory = ConversationBufferWindowMemory(memory_key="chat_history", k=3, return_messages=True)
# Executor wires the agent to its tools; handle_parsing_errors retries on
# malformed LLM output instead of raising, and intermediate steps are
# returned alongside the final answer.
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
verbose=True,
handle_parsing_errors=True,
memory=memory,
max_iterations=100,
return_intermediate_steps=True,
)
# Simple REPL: forward each user turn to the agent, then echo the memory buffer.
while True:
user_input = input("User: ")
# NOTE(review): passing agent_scratchpad explicitly here may conflict with
# the scratchpad the AgentExecutor builds internally from intermediate
# steps — confirm against the LangChain docs (the second snippet below
# omits it entirely).
response = agent_executor.invoke({
"input": user_input,
"agent_scratchpad": agent_scratchpad,
})
memory_content = memory.buffer
print(f"Memory content: {memory_content}")
print("Agent:", response['output'])
This ensures that the `agent_scratchpad` is always a list of base messages.
Beta Was this translation helpful? Give feedback.
-
Hi @Itssshikhar, I was able to get it working by instantiating the messages like this, passing the agent_scratchpad as "ai":
or
and don't forget to add the `{agent_scratchpad}` placeholder at the end of your prompt so the agent can recurse on it:
I was following this documentation: Link
Beta Was this translation helpful? Give feedback.
Uh oh!
There was an error while loading. Please reload this page.
-
Checked other resources
Commit to Help
Example Code
STOP
This is my query="{input}". Write only the next step needed to solve it.
Your answer should be based on the previous tool executions, even if you think you know the answer.
Remember to add STOP after each snippet.
These were the previous steps given to solve this query and the information you already gathered:
"""
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain.memory import ConversationBufferWindowMemory
# Prompt layout: system instructions, an optional rolling chat history,
# then the human turn (which embeds input and agent_scratchpad).
system_message = SystemMessagePromptTemplate.from_template(
    input_variables=['tools'],
    template=SYSTEM_PROMPT_TEMPLATE,
)
history_placeholder = MessagesPlaceholder(variable_name='chat_history', optional=True)
human_message = HumanMessagePromptTemplate.from_template(
    input_variables=["input", "chat_history", "agent_scratchpad"],
    template=HUMAN_PROMPT_TEMPLATE,
)
prompt = ChatPromptTemplate.from_messages([system_message, history_placeholder, human_message])
# Build the structured-chat agent; stop_sequence and template_tool_response
# are left at their library defaults.
agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt)

# Keep only the last 3 exchanges, returned as message objects so they fit
# the chat_history MessagesPlaceholder.
memory = ConversationBufferWindowMemory(
    memory_key="chat_history",
    k=3,
    return_messages=True,
)

# Wire the agent to its tools; parsing errors are retried rather than
# raised, and intermediate steps come back with the final answer.
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
    memory=memory,
    max_iterations=100,
    return_intermediate_steps=True,
)
# Interactive loop: read a user turn, run the agent (the scratchpad is
# supplied internally by the AgentExecutor), then show memory and answer.
while True:
    query = input("User: ")
    result = agent_executor.invoke({"input": query})
    print(f"Memory content: {memory.buffer}")
    print("Agent:", result['output'])
Beta Was this translation helpful? Give feedback.
All reactions