Replies: 4 comments 1 reply
-
To resolve this issue, here is a revised version of your code snippet: from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.memory import ConversationBufferMemory
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from operator import itemgetter
# Define the system prompt
system_prompt = "Your system prompt here."
# Create the prompt template.
# NOTE(review): rendering agent_scratchpad through an ("ai", "{agent_scratchpad}")
# string slot coerces it to a single string; agents generally expect a list of
# messages here (MessagesPlaceholder("agent_scratchpad")) — presumably the root
# of the errors discussed in this thread. Verify against the agent's input spec.
prompt = ChatPromptTemplate.from_messages(
[
("system", system_prompt),
MessagesPlaceholder(variable_name="chat_history"),
("human", "{human_input}"),
("ai", "{agent_scratchpad}"),
]
)
# Initialize the memory.
# NOTE(review): ConversationBufferMemory is imported above from
# langchain_core.memory, which is not a real module — the working snippet later
# in this thread imports it from langchain.memory.buffer instead; confirm.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Create the SQL agent.
# NOTE(review): create_sql_agent is never imported in this snippet, and llm/db
# are never defined — as written this line raises NameError before anything runs.
agent_executor = create_sql_agent(llm, db=db, agent_type="openai-tools", verbose=True)
# Define the function to format intermediate steps
def format_to_openai_function_messages(intermediate_steps):
    """Coerce intermediate agent steps into chat messages for the
    ``agent_scratchpad`` prompt slot.

    Parameters
    ----------
    intermediate_steps : list
        Raw steps produced by the agent loop. Elements that are already
        ``BaseMessage`` instances pass through unchanged; anything else
        (e.g. plain strings) is wrapped in an ``AIMessage``.

    Returns
    -------
    list
        A list of ``BaseMessage`` instances.

    Raises
    ------
    TypeError
        If ``intermediate_steps`` is not a list.
    """
    # Validate before touching langchain so a bad argument fails fast.
    if not isinstance(intermediate_steps, list):
        raise TypeError("intermediate_steps must be a list of messages")
    # Local import keeps the snippet self-contained. The original code called
    # BaseMessage(content=step) directly, but BaseMessage is an abstract model
    # in langchain-core (it requires a concrete `type`), so instantiating it
    # raises — wrap plain values in the concrete AIMessage instead.
    from langchain_core.messages import AIMessage, BaseMessage
    return [
        step if isinstance(step, BaseMessage) else AIMessage(content=str(step))
        for step in intermediate_steps
    ]
# Create the LLM chain: a dict of runnables feeding the prompt, then the agent.
# NOTE(review): "human_input": RunnablePassthrough() forwards the ENTIRE input
# dict, not just the human_input string — verify that is intended.
llm_chain = {
"human_input": RunnablePassthrough(),
"chat_history": (
RunnableLambda(memory.load_memory_variables) |
itemgetter("chat_history")
),
"agent_scratchpad": lambda x: format_to_openai_function_messages(x["intermediate_steps"]),
} | prompt | agent_executor
# NOTE(review): piping the rendered prompt (a message list) into an
# AgentExecutor looks wrong — executors expect the raw input dict and build
# their own prompt internally; presumably this is what triggers the
# "string indices must be integers" TypeError reported below. Confirm.
# Example usage
input_data = {
"human_input": "What is the capital of France?",
"chat_history": memory.load_memory_variables({})["chat_history"],
"intermediate_steps": ["Step 1", "Step 2"] # Example intermediate steps
}
# Invoke the LLM chain
response = llm_chain.invoke(input_data)
print(response)
Key Points:
By following these steps, you should be able to resolve the error described above.
|
Beta Was this translation helpful? Give feedback.
-
@dosu still not resolved type error |
Beta Was this translation helpful? Give feedback.
-
Hi @milovate, here is working code — does it solve your problem? from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain.memory.buffer import ConversationBufferMemory
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_openai.chat_models import ChatOpenAI
from operator import itemgetter
from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain_community.utilities.sql_database import SQLDatabase
# Connection string for the SQL database the agent will query.
pg_uri = "DB_URL"
db = SQLDatabase.from_uri(pg_uri)
system_prompt = "Your system prompt here."
# Prompt template: system message, rolling chat history, the user's input,
# and a slot for the agent's scratchpad.
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
system_prompt,
),
MessagesPlaceholder(variable_name="chat_history"),
("human", "{input}"),
# MessagesPlaceholder("agent_scratchpad"),
("ai", "{agent_scratchpad}"),
]
)
# Conversation buffer memory will keep track of the conversation in the
# memory. It will use "chat_history" as the name of the key.
memory = ConversationBufferMemory(memory_key="chat_history",
return_messages=True) # return_messages=True yields message objects, not a string
# Build the SQL agent around the custom prompt; agent_type="openai-tools"
# selects the OpenAI tool-calling agent.
agent_executor = create_sql_agent(ChatOpenAI(),prompt=prompt, db=db, agent_type="openai-tools", verbose=True)
# Prepend the stored chat history to every input before it reaches the agent
# executor; RunnablePassthrough.assign preserves the original input keys.
# NOTE(review): nothing here calls memory.save_context, so chat_history stays
# empty across invocations — confirm whether saving is handled elsewhere.
chain = RunnablePassthrough.assign(
chat_history=RunnableLambda(memory.load_memory_variables) | itemgetter("chat_history")
) | agent_executor
print(chain.invoke({"input": "Hello, how are you?"})) |
Beta Was this translation helpful? Give feedback.
-
KeyError: "Input to PromptTemplate is missing variables {'instructions'}. Expected: ['agent_scratchpad', 'input', 'instructions'] Received: ['input', 'chat_history', 'intermediate_steps', 'agent_scratchpad']" here is my code ollama_model_name= "llama3.1" true_labels = [] predicts_analysis = [] class GoogleSearchInput(BaseModel): search_tool = StructuredTool( api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=1000) tools = [search_tool, wiki_tool, knowledge_retriever] prompt = hub.pull("hwchase17/openai-tools-agent") |
Beta Was this translation helpful? Give feedback.
Uh oh!
There was an error while loading. Please reload this page.
-
Checked other resources
Commit to Help
Example Code
Description
I am trying to add conversational memory to my chatbot application. I want to use create_sql_agent to execute my query, and I am stuck at this error:
KeyError: "Input to ChatPromptTemplate is missing variables {'agent_scratchpad'}. Expected: ['agent_scratchpad', 'chat_history', 'human_input'] Received: ['human_input', 'chat_history']"
to resolve this issue i tried to use
as agent_scratchpad needs BaseMessage
it further throws a type Error
TypeError: string indices must be integers
System Info
langchain==0.2.0
langchain-community==0.2.0
langchain-core==0.2.0
langchain-openai==0.1.1
langchain-text-splitters==0.2.0
Beta Was this translation helpful? Give feedback.
All reactions