How to input multiple inputs in RunnableWithMessageHistory of LangChain? #25466
-
Checked other resources
Commit to Help
Example Code:

# Import necessary modules from langchain
# FIX(review): ChatPromptTemplate and RunnableWithMessageHistory are not exported
# from `langchain.chains` — in langchain 0.2.x that import raises ImportError.
# They live in `langchain_core.prompts` and `langchain_core.runnables.history`.
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain.prompts import (
    SystemMessagePromptTemplate,
    MessagesPlaceholder,
    HumanMessagePromptTemplate
)
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
# NOTE(review): `langchain.memory.ChatMessageHistory` and
# `langchain.chat_models.ChatOpenAI` are deprecated re-exports in 0.2.x; the
# canonical homes are `langchain_community.chat_message_histories` and
# `langchain_openai` — they still work here, so left unchanged.
from langchain.memory import ChatMessageHistory
from langchain.chat_models import ChatOpenAI
# Setup the OpenAI chat model
llm = ChatOpenAI(
    model="gpt-4",
    temperature=0,  # deterministic output helps the structured (JSON) parser downstream
    max_tokens=None,  # no explicit cap on completion length
    timeout=None,
    max_retries=2,
    api_key="your_api_key",  # placeholder — prefer the OPENAI_API_KEY env var over hard-coding
    organization="your_organization"
    # base_url="...",
    # other params...
)
# Store for session histories, keyed by session id. Module-level and in-memory:
# fine for a demo, but not persistent and not safe across processes.
store = {}


def get_session_history(session_id):
    """Return the chat history for *session_id*, creating an empty one on first use."""
    history = store.get(session_id)
    if history is None:
        history = ChatMessageHistory()
        store[session_id] = history
    return history
# Define the output schemas: the model is asked to return a JSON object with
# one field per translated word.
response_schemas = [
    ResponseSchema(name="output1", description="translates first word to English"),
    ResponseSchema(name="output2", description="translates next word to English")
]
# Create the output parser (supplies JSON format instructions and parses the reply into a dict)
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
# Create the prompt template with system and human messages
prompt_template_1 = ChatPromptTemplate(
    messages=[
        SystemMessagePromptTemplate.from_template(
            "Output MUST be JSON format"
        ),
        # Slot that RunnableWithMessageHistory fills via history_messages_key="chat_history"
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template(
            "Translate {korean_word1} and {korean_word2} to English."
            "\n{format_instructions}"
        )
    ],
    input_variables=["korean_word1", "korean_word2"],
    # Pre-fill the parser's format instructions so callers only supply the two words
    partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
# Combine the prompt template and LLM into a chain: prompt -> model -> dict output
chain1 = prompt_template_1 | llm | output_parser
# Wrap the chain to handle message history
# NOTE(review): likely source of the reported error — when the input is a dict
# with more than one key, RunnableWithMessageHistory cannot infer which key holds
# the user's message, so `input_messages_key` (a single key, e.g. "korean_word1")
# presumably must be supplied; confirm against the langchain_core API reference.
# Also note chain1 ends with an output parser, so its output is a dict rather
# than a message/string, which the history wrapper may not be able to record as
# the AI turn — TODO confirm and, if so, move the parser outside the wrapper.
chain_with_history = RunnableWithMessageHistory(
    chain1,
    get_session_history,
    history_messages_key="chat_history",
)
# Define the input words
word1 = '미래'  # Korean for 'future'
word2 = '과거'  # Korean for 'past'
# Invoke the chain with history, using a specific session ID (used by get_session_history)
result = chain_with_history.invoke(
    {"korean_word1": word1, "korean_word2": word2},
    config={"configurable": {"session_id": "abc123"}},
)
# Print the result
print(result)

Description: I would like to ask about using LangChain for LLMs in Python. I need to pass multiple inputs together with the chat history to run the model, so I tried to use RunnableWithMessageHistory but got an error.
I also tried to use 'input_messages_key' in RunnableWithMessageHistory, but that failed as well, because input_messages_key allows only one key, not multiple keys.
Please help me write the correct code.

System Info: langchain==0.2.9, linux (ubuntu) |
Beta Was this translation helpful? Give feedback.
Replies: 1 comment 4 replies
-
Hey there, @JANGSOONMYUN! I'm here to help you out 🤖 To correctly handle multiple inputs with RunnableWithMessageHistory, you need to configure it with the right keys. Here is an example of how to set it up and invoke it:

from typing import List
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage, AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory, ConfigurableFieldSpec
from langchain_community.chat_models import ChatAnthropic
# Define an in-memory chat message history
class InMemoryHistory(BaseChatMessageHistory):
    """Minimal chat history that keeps messages in a plain Python list."""

    def __init__(self) -> None:
        # Start every history empty.
        self.messages: List[BaseMessage] = []

    def add_messages(self, messages: List[BaseMessage]) -> None:
        """Append the incoming messages to the history, preserving order."""
        for message in messages:
            self.messages.append(message)

    def clear(self) -> None:
        """Discard all stored messages."""
        self.messages = []
# Store for chat histories, keyed by (user_id, conversation_id) pairs.
store = {}


def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:
    """Return the history for this (user, conversation) pair, creating it on first use."""
    key = (user_id, conversation_id)
    if key not in store:
        store[key] = InMemoryHistory()
    return store[key]
# Define the prompt template
prompt = ChatPromptTemplate.from_messages([
    ("system", "You're an assistant who's good at {ability}"),
    # Filled in by RunnableWithMessageHistory via history_messages_key="history"
    MessagesPlaceholder(variable_name="history"),
    ("human", "{question}"),
])
# Define the chain: prompt -> model
chain = prompt | ChatAnthropic(model="claude-2")
# Wrap the chain with message history management
chain_with_history = RunnableWithMessageHistory(
    chain,
    get_session_history=get_session_history,
    # "question" names the dict key whose value is recorded as the user turn;
    # the other input keys (e.g. "ability") are still passed through to the prompt.
    input_messages_key="question",
    history_messages_key="history",
    # Each ConfigurableFieldSpec id must match a parameter name of get_session_history.
    history_factory_config=[
        ConfigurableFieldSpec(
            id="user_id",
            annotation=str,
            name="User ID",
            description="Unique identifier for the user.",
            default="",
            is_shared=True,
        ),
        ConfigurableFieldSpec(
            id="conversation_id",
            annotation=str,
            name="Conversation ID",
            description="Unique identifier for the conversation.",
            default="",
            is_shared=True,
        ),
    ],
)
# Invoke the chain with history management; the configurable values are routed
# to get_session_history via history_factory_config.
result = chain_with_history.invoke(
    {"ability": "math", "question": "What does cosine mean?"},
    config={"configurable": {"user_id": "123", "conversation_id": "1"}}
)
print(result)

In this example:
This setup ensures that multiple inputs are handled correctly, and the chat message history is managed appropriately [1][2]. |
Beta Was this translation helpful? Give feedback.
Hey there, @JANGSOONMYUN! I'm here to help you out 🤖
To correctly handle multiple inputs with `RunnableWithMessageHistory` in LangChain, you need to ensure that the `RunnableWithMessageHistory` is configured properly with the necessary keys for input messages, history messages, and any additional configuration required by the session history factory. Here is an example of how to set up and invoke `RunnableWithMessageHistory` with multiple inputs: