diff --git a/examples/langgraph-agent/agent.ipynb b/examples/langgraph-agent/agent.ipynb
index 4c8ff1e3..f44518d7 100644
--- a/examples/langgraph-agent/agent.ipynb
+++ b/examples/langgraph-agent/agent.ipynb
@@ -16,7 +16,7 @@
     "\n",
     "## Install dependencies\n",
     "```shell\n",
-    "pip install zep-cloud langchain-openai langgraph ipywidgets\n",
+    "pip install zep-cloud langchain-openai langgraph ipywidgets dotenv\n",
     "```"
    ]
   },
@@ -236,6 +236,21 @@
     "        {memory.context}\"\"\"\n",
     "    )\n",
     "\n",
+    "    # Truncate the chat history to keep the state from growing unbounded\n",
+    "    # In this example, we're going to keep the state small for demonstration purposes\n",
+    "    # We'll use Zep's Facts to maintain conversation context\n",
+    "    state[\"messages\"] = trim_messages(\n",
+    "        state[\"messages\"],\n",
+    "        strategy=\"last\",\n",
+    "        token_counter=len,\n",
+    "        max_tokens=3,\n",
+    "        start_on=\"human\",\n",
+    "        end_on=(\"human\", \"tool\"),\n",
+    "        include_system=True,\n",
+    "    )\n",
+    "\n",
+    "    logger.info(f\"Messages in state: {state['messages']}\")\n",
+    "\n",
     "    messages = [system_message] + state[\"messages\"]\n",
     "\n",
     "    response = await llm.ainvoke(messages)\n",
@@ -255,21 +270,6 @@
     "        messages=messages_to_save,\n",
     "    )\n",
     "\n",
-    "    # Truncate the chat history to keep the state from growing unbounded\n",
-    "    # In this example, we going to keep the state small for demonstration purposes\n",
-    "    # We'll use Zep's Facts to maintain conversation context\n",
-    "    state[\"messages\"] = trim_messages(\n",
-    "        state[\"messages\"],\n",
-    "        strategy=\"last\",\n",
-    "        token_counter=len,\n",
-    "        max_tokens=3,\n",
-    "        start_on=\"human\",\n",
-    "        end_on=(\"human\", \"tool\"),\n",
-    "        include_system=True,\n",
-    "    )\n",
-    "\n",
-    "    logger.info(f\"Messages in state: {state['messages']}\")\n",
-    "\n",
     "    return {\"messages\": [response]}"
    ]
   },
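
Note: the moved block relies on LangChain's `trim_messages` helper, and in the new ordering the history is trimmed before `messages` is assembled, so the truncated window is what the LLM sees on the current turn rather than only on later ones. The standalone sketch below is a minimal illustration of how the same arguments behave; the message contents are hypothetical, and it assumes `langchain-core` is installed.

```python
# Minimal sketch of the trim_messages call used in the diff, run outside the
# graph. Only the trimming arguments mirror the notebook change; the history
# below is made up for illustration.
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)

history = [
    SystemMessage(content="You are a compassionate mental health bot."),
    HumanMessage(content="Hi, I've been feeling stressed lately."),
    AIMessage(content="I'm sorry to hear that. What's been weighing on you?"),
    HumanMessage(content="Mostly work deadlines."),
    AIMessage(content="That sounds draining. How have you been coping?"),
    HumanMessage(content="Not well, honestly."),
]

# token_counter=len treats each message as one "token", so max_tokens=3 keeps
# only the last few messages; include_system=True preserves the leading
# SystemMessage, and start_on="human" ensures the kept window opens on a human
# turn so the model never sees a dangling assistant or tool message first.
trimmed = trim_messages(
    history,
    strategy="last",
    token_counter=len,
    max_tokens=3,
    start_on="human",
    end_on=("human", "tool"),
    include_system=True,
)

for message in trimmed:
    print(type(message).__name__, "->", message.content)
```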