From 7c0d4e6bc707d09d1f4eb14e9dd232aea544c55c Mon Sep 17 00:00:00 2001 From: Hunter Lovell Date: Fri, 10 Oct 2025 20:13:34 -0700 Subject: [PATCH 1/2] feat: js v1 release notes + migration guide --- src/oss/javascript/migrate/langchain-v1.mdx | 765 ++++++++++++++++++- src/oss/javascript/releases/langchain-v1.mdx | 463 +++++------ src/oss/python/migrate/langchain-v1.mdx | 162 +++- src/oss/python/releases/langchain-v1.mdx | 22 +- 4 files changed, 1180 insertions(+), 232 deletions(-) diff --git a/src/oss/javascript/migrate/langchain-v1.mdx b/src/oss/javascript/migrate/langchain-v1.mdx index 888f70895..7da29c1ce 100644 --- a/src/oss/javascript/migrate/langchain-v1.mdx +++ b/src/oss/javascript/migrate/langchain-v1.mdx @@ -1,4 +1,767 @@ --- title: LangChain v1 migration guide -sidebarTitle: Migrate to v1 +sidebarTitle: Migration guide --- + +This migration guide outlines the major changes in LangChain v1. To learn more about the new features of v1, see the [introductory post](/oss/javascript/releases/langchain-v1). + +To upgrade, + + +```bash npm +npm install langchain@next @langchain/core@next +``` +```bash pnpm +pnpm install langchain@next @langchain/core@next +``` +```bash yarn +yarn add langchain@next @langchain/core@next +``` +```bash bun +bun add langchain@next @langchain/core@next +``` + + +## `createAgent` + +In v1, the react agent prebuilt is now in the langchain package. 
The table below outlines what functionality has changed: + +| Section | What changed | +|---------|--------------| +| [Import path](#import-path) | Package moved from `@langchain/langgraph/prebuilt` to `langchain/agents` | +| [Prompts](#prompts) | Parameter renamed to `systemPrompt`, dynamic prompts use middleware | +| [Pre-model hook](#pre-model-hook) | Replaced by middleware with `beforeModel` method | +| [Post-model hook](#post-model-hook) | Replaced by middleware with `afterModel` method | +| [Custom state](#custom-state) | Defined in middleware, Zod objects only | +| [Model](#model) | Dynamic selection via middleware, pre-bound models not supported | +| [Tools](#tools) | Tool error handling moved to middleware with `wrapToolCall` | +| [Structured output](#structured-output) | Prompted output removed, use `toolStrategy`/`providerStrategy` | +| [Streaming node name](#streaming-node-name-rename) | Node name changed from `"agent"` to `"model"` | +| [Runtime context](#runtime-context) | `context` property instead of `config.configurable` | +| [Namespace](#simplified-namespace) | Streamlined to focus on agent building blocks, legacy code moved to `@langchain/classic` | + +### Import path + +The import path for the ReAct agent prebuilt has changed from `@langchain/langgraph/prebuilt` to `langchain/agents`. 
The name of the function has changed from `createReactAgent` to `createAgent`: + +```typescript +import { createReactAgent } from "@langchain/langgraph/prebuilt"; // [!code --] +import { createAgent } from "langchain/agents"; // [!code ++] +``` + +### Prompts + +#### Static prompt rename + +The `prompt` parameter has been renamed to `systemPrompt`: + + +```typescript v1 (new) +import { createAgent } from "langchain/agents"; + +const agent = createAgent({ + model, + tools, + systemPrompt: "You are a helpful assistant.", // [!code highlight] +}); +``` +```typescript v0 (old) +import { createReactAgent } from "@langchain/langgraph/prebuilt"; + +const agent = createReactAgent({ + model, + tools, + prompt: "You are a helpful assistant.", // [!code highlight] +}); +``` + + +#### `SystemMessage` + +If using `SystemMessage` objects in the system prompt, the string content is now used directly: + + +```typescript v1 (new) +import { SystemMessage, createAgent } from "langchain"; + +const agent = createAgent({ + model, + tools, + systemPrompt: "You are a helpful assistant.", // [!code highlight] +}); +``` +```typescript v0 (old) +import { createReactAgent } from "@langchain/langgraph/prebuilt"; + +const agent = createReactAgent({ + model, + tools, + prompt: new SystemMessage("You are a helpful assistant."), // [!code highlight] +}); +``` + + +#### Dynamic prompts + +Dynamic prompts are a core context engineering pattern — they adapt what you tell the model based on the current conversation state. 
To do this, use `dynamicSystemPromptMiddleware`: + + +```typescript v1 (new) +import { createAgent, dynamicSystemPromptMiddleware } from "langchain"; +import { z } from "zod"; + +const contextSchema = z.object({ + userRole: z.enum(["expert", "beginner"]).default("user"), +}); + +const userRolePrompt = dynamicSystemPromptMiddleware((request) => { // [!code highlight] + const userRole = request.runtime.context.userRole; + const basePrompt = "You are a helpful assistant."; + + if (userRole === "expert") { + return `${basePrompt} Provide detailed technical responses.`; + } else if (userRole === "beginner") { + return `${basePrompt} Explain concepts simply and avoid jargon.`; + } + return basePrompt; // [!code highlight] +}); + +const agent = createAgent({ + model, + tools, + middleware: [userRolePrompt], + contextSchema, +}); + +await agent.invoke({ + messages: [new HumanMessage("Explain async programming")], + context: { + userRole: "expert", + }, +}) +``` + +```typescript v0 (old) +import { createReactAgent } from "@langchain/langgraph/prebuilts"; + +const contextSchema = z.object({ + userRole: z.enum(["expert", "beginner"]).default("user"), +}); + +const userRolePrompt = dynamicSystemPromptMiddleware((request) => { + const userRole = request.runtime.context.userRole; + const basePrompt = "You are a helpful assistant."; + + if (userRole === "expert") { + return `${basePrompt} Provide detailed technical responses.`; + } else if (userRole === "beginner") { + return `${basePrompt} Explain concepts simply and avoid jargon.`; + } + return basePrompt; +}); + +agent = createReactAgent({ + model, + tools, + prompt: (state) => { + const userRole = state.context.userRole; + const basePrompt = "You are a helpful assistant."; + + if (userRole === "expert") { + return `${basePrompt} Provide detailed technical responses.`; + } else if (userRole === "beginner") { + return `${basePrompt} Explain concepts simply and avoid jargon.`; + } + return basePrompt; + }, + contextSchema, +}); 
+ +// Use with context +await agent.invoke({ + messages: [new HumanMessage("Explain async programming")], + context: { userRole: "expert" }, +}); +``` + + + +### Pre-model hook + +Pre-model hooks are now implemented as middleware with the `beforeModel` method. This pattern is more extensible-- you can define multiple middlewares to run before the model is called and reuse them across agents. + +Common use cases include: +- Summarizing conversation history +- Trimming messages +- Input guardrails, like PII redaction + +v1 includes built-in summarization middleware: + + +```typescript v1 (new) +import { createAgent, summarizationMiddleware } from "langchain"; + +const agent = createAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools, + middleware: [ + summarizationMiddleware({ + model: "anthropic:claude-sonnet-4-5-20250929", + maxTokensBeforeSummary: 1000, + }), + ], +}); +``` +```typescript v0 (old) +import { createReactAgent } from "@langchain/langgraph/prebuilts"; + +function customSummarization(state) { + // Custom logic for message summarization +} + +const agent = createReactAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools, + preModelHook: customSummarization, +}); +``` + + +### Post-model hook + +Post-model hooks are now implemented as middleware with the `afterModel` method. This lets you compose multiple handlers after the model responds. 
+ +Common use cases include: +- Human-in-the-loop approval +- Output guardrails + +v1 includes a built-in human-in-the-loop middleware: + + +```typescript v1 (new) +import { createAgent, humanInTheLoopMiddleware } from "langchain"; + +const agent = createAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools: [readEmail, sendEmail], + middleware: [ + humanInTheLoopMiddleware({ + interruptOn: { + sendEmail: { allowedDecisions: ["approve", "edit", "reject"] }, + }, + }), + ], +}); +``` +```typescript v0 (old) +import { createReactAgent } from "@langchain/langgraph/prebuilts"; + +function customHumanInTheLoopHook(state) { + // Custom approval logic +} + +const agent = createReactAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools: [readEmail, sendEmail], + postModelHook: customHumanInTheLoopHook, +}); +``` + + +### Custom state + +Custom state is now defined in middleware using the `stateSchema` property. Use Zod to declare additional state fields that are carried through the agent run. 
+ + +```typescript v1 (new) +import { z } from "zod"; +import { createAgent, createMiddleware, tool } from "langchain"; + +const UserState = z.object({ + userName: z.string(), +}); + +const userState = createMiddleware({ + name: "UserState", + stateSchema: UserState, + beforeModel: (state) => { + // Access custom state properties + const name = state.userName; + // Optionally modify messages/system prompt based on state + return; + }, +}); + +const greet = tool( + async () => { + return "Hello!"; + }, + { + name: "greet", + description: "Greet the user", + schema: z.object({}), + } +); + +const agent = createAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools: [greet], + middleware: [userState], +}); + +await agent.invoke({ + messages: [{ role: "user", content: "Hi" }], + userName: "Ada", +}); +``` +```typescript v0 (old) +import { getCurrentTaskInput } from "@langchain/langgraph"; +import { createReactAgent } from "@langchain/langgraph/prebuilts"; +import { z } from "zod"; + +const UserState = z.object({ + userName: z.string(), +}); + +const greet = tool( + async () => { + const state = await getCurrentTaskInput(); + const userName = state.userName; + return `Hello ${userName}!`; + }, +); + +// Custom state was provided via agent-level state schema or accessed ad hoc in hooks +const agent = createReactAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools: [greet], + stateSchema: UserState, +}); +``` + + +### Model + +Dynamic model selection now happens via middleware. Use `wrapModelCall` to swap models (and tools) based on state or runtime context. In `createReactAgent`, this was done via a function passed to the `model` parameter. + +This functionality has been ported to the middleware interface in v1. 
+ +#### Dynamic model selection + + +```typescript v1 (new) +import { createAgent, createMiddleware } from "langchain"; + +const dynamicModel = createMiddleware({ + name: "DynamicModel", + wrapModelCall: (request, handler) => { + const messageCount = request.state.messages.length; + const model = messageCount > 10 ? "openai:gpt-5" : "openai:gpt-5-nano"; + return handler({ ...request, model }); + }, +}); + +const agent = createAgent({ + model: "openai:gpt-5-nano", + tools, + middleware: [dynamicModel], +}); +``` +```typescript v0 (old) +import { createReactAgent } from "@langchain/langgraph/prebuilts"; + +function selectModel(state) { + return state.messages.length > 10 ? "openai:gpt-5" : "openai:gpt-5-nano"; +} + +const agent = createReactAgent({ + model: selectModel, + tools, +}); +``` + + +#### Pre-bound models + +To better support structured output, `createAgent` should receive a plain model (string or instance) and a separate `tools` list. Avoid passing models pre-bound with tools when using structured output. + +```typescript +// No longer supported +// const modelWithTools = new ChatOpenAI({ model: "gpt-4o-mini" }).bindTools([someTool]); +// const agent = createAgent({ model: modelWithTools, tools: [] }); + +// Use instead +const agent = createAgent({ model: "openai:gpt-4o-mini", tools: [someTool] }); +``` + +### Tools + +The `tools` argument to `createAgent` accepts: + +- Functions created with `tool` +- LangChain tool instances +- Objects that represent built-in provider tools + +Use middleware `wrapToolCall` to centralize error handling and logging for tools. 
+ + +```typescript v1 (new) +import { createAgent, createMiddleware } from "langchain"; + +const errorHandling = createMiddleware({ + name: "ToolErrors", + wrapToolCall: (request, handler) => { + try { + return handler(request); + } catch (err) { + return `Error executing ${request.toolName}: ${String(err)}`; + } + }, +}); + +const agent = createAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools: [checkWeather, searchWeb], + middleware: [errorHandling], +}); +``` +```typescript v0 (old) +import { createReactAgent } from "@langchain/langgraph/prebuilts"; + +const agent = createReactAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools: [checkWeather, searchWeb], + // Error handling commonly implemented inside tool code or post hooks +}); +``` + + +### Structured output + +#### Node changes + +Structured output used to be generated in a separate node from the main agent. This is no longer the case. Structured output is generated in the main loop (no extra LLM call), reducing cost and latency. 
+ +#### Tool and provider strategies + +In v1, there are two strategies: + +- `toolStrategy` uses artificial tool calling to generate structured output +- `providerStrategy` uses provider-native structured output generation + + +```typescript v1 (new) +import { createAgent, toolStrategy } from "langchain"; +import { z } from "zod"; + +const OutputSchema = z.object({ + summary: z.string(), + sentiment: z.string(), +}); + +const agent = createAgent({ + model: "openai:gpt-4o-mini", + tools, + // explicitly using tool strategy + responseFormat: toolStrategy(OutputSchema), // [!code highlight] +}); +``` +```typescript v0 (old) +import { createReactAgent } from "@langchain/langgraph/prebuilts"; +import { z } from "zod"; + +const OutputSchema = z.object({ + summary: z.string(), + sentiment: z.string(), +}); + +const agent = createReactAgent({ + model: "openai:gpt-4o-mini", + tools, + // Structured output was driven primarily via tool-calling with fewer options + responseFormat: OutputSchema, +}); +``` + + +#### Prompted output removed + +Prompted output via custom instructions in `responseFormat` is removed in favor of the above strategies. + +### Streaming node name rename + +When streaming events from agents, the node name was changed from `"agent"` to `"model"` to better reflect the node's purpose. + +### Runtime context + +When invoking an agent, pass static, read-only configuration via the `context` config argument. This replaces patterns that used `config.configurable`. 
+ + +```typescript v1 (new) +import { createAgent, HumanMessage } from "langchain"; +import { z } from "zod"; + +const agent = createAgent({ + model: "openai:gpt-4o", + tools, + contextSchema: z.object({ userId: z.string(), sessionId: z.string() }), +}); + +const result = await agent.invoke( + { messages: [new HumanMessage("Hello")] }, + { context: { userId: "123", sessionId: "abc" } }, // [!code highlight] +); +``` +```typescript v0 (old) +import { createReactAgent, HumanMessage } from "@langchain/langgraph/prebuilts"; + +const agent = createReactAgent({ model, tools }); + +// Pass context via config.configurable +const result = await agent.invoke( + { messages: [new HumanMessage("Hello")] }, + { + config: { // [!code highlight] + configurable: { userId: "123", sessionId: "abc" }, // [!code highlight] + }, // [!code highlight] + } +); +``` + + + + The old `config.configurable` pattern still works for backward compatibility, but using the new `context` parameter is recommended for new applications or applications migrating to v1. + + +--- + +## Standard content + +In v1, messages gain provider-agnostic standard content blocks. Access them via `message.contentBlocks` for a consistent, typed view across providers. The existing `message.content` field remains unchanged for strings or provider-native structures. + +### What changed + +- New `contentBlocks` property on messages for normalized content. +- New TypeScript types under `ContentBlock` for strong typing. +- Optional serialization of standard blocks into `content` via `LC_OUTPUT_VERSION=v1` or `outputVersion: "v1"`. 
+ +### Read standardized content + + +```typescript v1 (new) +import { initChatModel } from "langchain"; + +const model = await initChatModel("openai:gpt-5-nano"); +const response = await model.invoke("Explain AI"); + +for (const block of response.contentBlocks) { + if (block.type === "reasoning") { + console.log(block.reasoning); + } else if (block.type === "text") { + console.log(block.text); + } +} +``` +```typescript v0 (old) +// Provider-native formats vary; you needed per-provider handling. +const response = await model.invoke("Explain AI"); +for (const item of response.content as any[]) { + if (item.type === "reasoning") { + // OpenAI-style reasoning + } else if (item.type === "thinking") { + // Anthropic-style thinking + } else if (item.type === "text") { + // Text + } +} +``` + + +### Create multimodal messages + + +```typescript v1 (new) +import { HumanMessage } from "langchain"; + +const message = new HumanMessage({ + contentBlocks: [ + { type: "text", text: "Describe this image." }, + { type: "image", url: "https://example.com/image.jpg" }, + ], +}); +const res = await model.invoke([message]); +``` +```typescript v0 (old) +import { HumanMessage } from "langchain"; + +const message = new HumanMessage({ + // Provider-native structure + content: [ + { type: "text", text: "Describe this image." }, + { type: "image_url", image_url: { url: "https://example.com/image.jpg" } }, + ], +}); +const res = await model.invoke([message]); +``` + + +### Example block types + +```typescript +import { ContentBlock } from "langchain"; + +const textBlock: ContentBlock.Text = { + type: "text", + text: "Hello world", +}; + +const imageBlock: ContentBlock.Multimodal.Image = { + type: "image", + url: "https://example.com/image.png", + mimeType: "image/png", +}; +``` + +See the content blocks [reference](/oss/langchain/messages#content-block-reference) for more details. 
+ +### Serialize standard content + +Standard content blocks are **not serialized** into the `content` attribute by default. If you need to access standard content blocks in the `content` attribute (e.g., when sending messages to a client), you can opt-in to serializing them into `content`. + + +```bash +export LC_OUTPUT_VERSION=v1 +``` +```typescript +import { initChatModel } from "langchain"; + +const model = await initChatModel("openai:gpt-5-nano", { + outputVersion: "v1", +}); +``` + + + + Learn more: [Messages](/oss/langchain/messages#content) and [Standard content blocks](/oss/langchain/messages#standard-content-blocks). See [Multimodal](/oss/langchain/messages#multimodal) for input examples. + + +--- + +## Simplified package + +The `langchain` package namespace is streamlined to focus on agent building blocks. Legacy functionality has moved to `@langchain/classic`. The new package exposes only the most useful and relevant functionality. + +### Exports + +The v1 package includes: + +| Module | What's available | Notes | +|--------|------------------|-------| +| Agents | `createAgent`, `AgentState` | Core agent creation functionality | +| Messages | Message types, content blocks, `trimMessages` | Re-exported from `@langchain/core` | +| Tools | `tool`, `BaseTool`, injection helpers | Re-exported from `@langchain/core` | +| Chat models | `initChatModel`, `BaseChatModel` | Unified model initialization | +| Embeddings | `Embeddings`, `initEmbeddings` | Embedding models | + +### `@langchain/classic` + +If you use legacy chains, the indexing API, or functionality previously re-exported from `@langchain/community`, install `@langchain/classic` and update imports: + + +```bash npm +npm install @langchain/classic +``` +```bash pnpm +pnpm install @langchain/classic +``` +```bash yarn +yarn add @langchain/classic +``` +```bash bun +bun add @langchain/classic +``` + + +```typescript +// v1 (new) +import { ... } from "@langchain/classic"; +import { ... 
} from "@langchain/classic/chains"; + +// v0 (old) +import { ... } from "langchain"; +import { ... } from "langchain/chains"; +``` + +--- + +## Breaking changes + +### Dropped Node 18 support + +All LangChain packages now require **Node.js 20 or higher**. Node.js 18 reached [end of life](https://nodejs.org/en/about/releases/) in April 2025. + +### New build outputs + +Builds for all LangChain packages now use a bundler-based approach instead of using raw TypeScript outputs. If you were importing files from the `dist/` directory (which is not recommended), you will need to update your imports to use the new module system. + +### Legacy code moved to `@langchain/classic` + +Legacy functionality outside the focus of standard interfaces and agents has been moved to the [`@langchain/classic`](https://www.npmjs.com/package/@langchain/classic) package. See the [Simplified package](#simplified-package) section for details on what's available in the core `langchain` package and what moved to `@langchain/classic`. + +### Removal of deprecated APIs + +Methods, functions, and other objects that were already deprecated and slated for removal in 1.0 have been deleted. 
+ + + +The following deprecated APIs have been removed in v1: + +#### Core functionality +- `TraceGroup` - Use LangSmith tracing instead +- `BaseDocumentLoader.loadAndSplit` - Use `.load()` followed by a text splitter +- `RemoteRunnable` - No longer supported + +#### Prompts +- `BasePromptTemplate.serialize` and `.deserialize` - Use JSON serialization directly +- `ChatPromptTemplate.fromPromptMessages` - Use `ChatPromptTemplate.fromMessages` + +#### Retrievers +- `BaseRetrieverInterface.getRelevantDocuments` - Use `.invoke()` instead + +#### Runnables +- `Runnable.bind` - Use `.bindTools()` or other specific binding methods +- `Runnable.map` - Use `.batch()` instead +- `RunnableBatchOptions.maxConcurrency` - Use `maxConcurrency` in the config object + +#### Chat models +- `BaseChatModel.predictMessages` - Use `.invoke()` instead +- `BaseChatModel.predict` - Use `.invoke()` instead +- `BaseChatModel.serialize` - Use JSON serialization directly +- `BaseChatModel.callPrompt` - Use `.invoke()` instead +- `BaseChatModel.call` - Use `.invoke()` instead + +#### LLMs +- `BaseLLMParams.concurrency` - Use `maxConcurrency` in the config object +- `BaseLLM.call` - Use `.invoke()` instead +- `BaseLLM.predict` - Use `.invoke()` instead +- `BaseLLM.predictMessages` - Use `.invoke()` instead +- `BaseLLM.serialize` - Use JSON serialization directly + +#### Streaming +- `createChatMessageChunkEncoderStream` - Use `.stream()` method directly + +#### Tracing +- `BaseTracer.runMap` - Use LangSmith tracing APIs +- `getTracingCallbackHandler` - Use LangSmith tracing +- `getTracingV2CallbackHandler` - Use LangSmith tracing +- `LangChainTracerV1` - Use LangSmith tracing + +#### Memory and storage +- `BaseListChatMessageHistory.addAIChatMessage` - Use `.addMessage()` with `AIMessage` +- `BaseStoreInterface` - Use specific store implementations + +#### Utilities +- `getRuntimeEnvironmentSync` - Use async `getRuntimeEnvironment()` + + + diff --git 
a/src/oss/javascript/releases/langchain-v1.mdx b/src/oss/javascript/releases/langchain-v1.mdx index e78e66a2a..83f127499 100644 --- a/src/oss/javascript/releases/langchain-v1.mdx +++ b/src/oss/javascript/releases/langchain-v1.mdx @@ -1,300 +1,327 @@ --- -title: LangChain JavaScript v1.0 -sidebarTitle: v1.0 +title: What's new in v1 +sidebarTitle: Release notes --- import AlphaCallout from '/snippets/alpha-lc-callout.mdx'; - - 1.0 Alpha releases are available for the following packages: - - - `langchain` - - `@langchain/core` - - `@langchain/anthropic` - - `@langchain/openai` - - Broader support will be rolled out during the alpha period. - - -## New features - -### Core exports from `langchain` - -The `langchain` package now exports key primitives like `tool`, message types, `ToolNode`, `createAgent`, and more directly from the root package. - -```ts -import { tool, HumanMessage, createAgent } from "langchain"; +**LangChain v1 is a focused, production-ready foundation for building agents.** We've streamlined the framework around three core improvements: + + + + A new standard way to build agents in LangChain, replacing `createReactAgent` from LangGraph with a cleaner, more powerful API. + + + A new `contentBlocks` property that provides unified access to modern LLM features across all providers. + + + The `langchain` package has been streamlined to focus on essential building blocks for agents, with legacy functionality moved to `@langchain/classic`. + + + +To upgrade, + + +```bash npm +npm install langchain@next @langchain/core@next ``` +```bash pnpm +pnpm install langchain@next @langchain/core@next +``` +```bash yarn +yarn add langchain@next @langchain/core@next +``` +```bash bun +bun add langchain@next @langchain/core@next +``` + -### `createAgent` in core `langchain` +For a complete list of changes, see the [migration guide](/oss/javascript/migrate/langchain-v1). -The [React-style agent](/oss/langchain/agents) is now part of the core `langchain` project. 
Import directly from `langchain`: +## `createAgent` -```ts -import { createAgent, HumanMessage, tool } from "langchain"; -import { z } from "zod"; +`createAgent` is the standard way to build agents in LangChain 1.0. It provides a simpler interface than `createReactAgent` while offering greater customization potential by using middleware. -const getWeather = tool(async ({ city }) => `Sunny in ${city}`, { - name: "getWeather", - description: "Get current weather by city", - schema: z.object({ city: z.string() }), -}); +```ts +import { createAgent } from "langchain"; -const agent = await createAgent({ - // New: pass a model by name - model: "openai:gpt-4o-mini", +const agent = createAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", tools: [getWeather], - responseFormat: z.object({ answer: z.string() }), + systemPrompt: "You are a helpful assistant.", }); -const res = await agent.invoke({ - messages: [new HumanMessage("Weather in SF?")], +const result = await agent.invoke({ + messages: [ + { role: "user", content: "What is the weather in Tokyo?" }, + ], }); -console.log(res.structuredResponse.answer); + +console.log(result.content); ``` -You can now pass a **"model as string"** option to define the model to use. This requires you to have the specific model provider package installed (e.g. `@langchain/openai` for `openai:gpt-4o-mini`). +For more information, see [Agents](/oss/langchain/agents). -### `ToolNode` exported from `langchain` +### Middleware -Build agent graphs that execute tools as a node. This makes tool execution composable within graph workflows. +Middleware is the defining feature of `createAgent`. It makes `createAgent` highly customizable, raising the ceiling for what you can build. -```ts -import { StateGraph } from "@langchain/langgraph"; -import { ToolNode, tool } from "langchain"; +Great agents require [context engineering](/oss/langchain/context-engineering): getting the right information to the model at the right time. 
Middleware helps you control dynamic prompts, conversation summarization, selective tool access, state management, and guardrails through a composable abstraction. -const tools = [tool(async ({ query }) => `Results for: ${query}`, { - name: "search", schema: z.object({ query: z.string() }) -})]; +#### Prebuilt middleware -const graph = new StateGraph({ channels: { messages: [] } }) - .addNode("tools", new ToolNode(tools)) - .addEdge("__start__", "tools"); +LangChain provides a few prebuilt middlewares for common patterns, including: -const result = await graph.compile().invoke({ - messages: [/* tool call messages */] +- `summarizationMiddleware`: Condense conversation history when it gets too long +- `humanInTheLoopMiddleware`: Require approval for sensitive tool calls +- `piiRedactionMiddleware`: Redact sensitive information before sending to the model + +```ts +import { + createAgent, + summarizationMiddleware, + humanInTheLoopMiddleware, + piiRedactionMiddleware, +} from "langchain"; + +const agent = createAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools: [readEmail, sendEmail], + middleware: [ + piiRedactionMiddleware({ patterns: ["email", "phone", "ssn"] }), + summarizationMiddleware({ + model: "anthropic:claude-sonnet-4-5-20250929", + maxTokensBeforeSummary: 500, + }), + humanInTheLoopMiddleware({ + interruptOn: { + sendEmail: { + allowedDecisions: ["approve", "edit", "reject"], + }, + }, + }), + ], }); ``` -### Default tool-error handling in agents +#### Custom middleware -When `handleToolErrors` is **`true` (default)**, tool exceptions are caught and converted into a **`ToolMessage`** so the agent can recover. Set it to `false` to surface raw errors for strict workflows. +You can also build custom middleware to fit your specific needs. 
+ +Build custom middleware by implementing any of these hooks using the `createMiddleware` function: + +| Hook | When it runs | Use cases | +|-------------------|--------------------------|-----------------------------------------| +| `beforeAgent` | Before calling the agent | Load memory, validate input | +| `beforeModel` | Before each LLM call | Update prompts, trim messages | +| `wrapModelCall` | Around each LLM call | Intercept and modify requests/responses | +| `wrapToolCall` | Around each tool call | Intercept and modify tool execution | +| `afterModel` | After each LLM response | Validate output, apply guardrails | +| `afterAgent` | After agent completes | Save results, cleanup | + +![Middleware flow diagram](/images/middleware_final.png) + +Example custom middleware: ```ts -import { ToolNode, ToolMessage, tool } from "langchain"; - -const tools = [ - tool(/* ... */), - // ... -]; - -// default: handleToolErrors: true → returns a ToolMessage with error text -const forgiving = new ToolNode(tools); - -// strict: throw on tool error -const strict = new ToolNode(tools, { handleToolErrors: false }); - -// dynamic: custom error handling -const dynamic = new ToolNode(tools, { - handleToolErrors: (error, toolCall) => { - if (error instanceof Error && error.message.includes("Fetch Failed")) { - return new ToolMessage({ - content: "Fetch Failed. 
Please try again.", - tool_call_id: toolCall.id!, - }); +import { createMiddleware } from "langchain"; + +const contextSchema = z.object({ + userExpertise: z.enum(["beginner", "expert"]).default("beginner"), +}) + +const expertiseBasedToolMiddleware = createMiddleware({ + wrapModelCall: async (request, handler) => { + const userLevel = request.runtime.context.userExpertise; + if (userLevel === "expert") { + const tools = [advancedSearch, dataAnalysis]; + return handler( + request.replace("openai:gpt-5", tools) + ); } - - throw error; + const tools = [simpleSearch, basicCalculator]; + return handler( + request.replace("openai:gpt-5-nano", tools) + ); }, }); + +const agent = createAgent({ + model: "anthropic:claude-sonnet-4-5-20250929", + tools: [simpleSearch, advancedSearch, basicCalculator, dataAnalysis], + middleware: [expertiseBasedToolMiddleware], + contextSchema, +}); ``` -### Standard typed message content +For more information, see [the complete middleware guide](/oss/langchain/middleware). -`@langchain/core` features standard, typed message content. This includes standard types for reasoning, citations, server-side tool calls, and other modern LLM features. +### Built on LangGraph -There are no breaking changes associated with existing message content. 
The standard content can be lazily-parsed off of existing v0 messages using the `contentBlocks` property: +Because `createAgent` is built on LangGraph, you automatically get built in support for long running and reliable agents via: -```ts -import { AIMessage } from "@langchain/core"; + + + Conversations automatically persist across sessions with built-in checkpointing + + + Stream tokens, tool calls, and reasoning traces in real-time + + + Pause agent execution for human approval before sensitive actions + + + Rewind conversations to any point and explore alternate paths and prompts + + -new AIMessage("Hello, world").contentBlocks -``` +You don't need to learn LangGraph to use these features—they work out of the box. -## Breaking changes +### Structured output - - LangChain packages now require Node 20+. Update your engines and CI accordingly. +`createAgent` has improved structured output generation: - ```json - // package.json - { "engines": { "node": ">=20" } } - ``` - +- **Main loop integration**: Structured output is now generated in the main loop instead of requiring an additional LLM call +- **Structured output strategy**: Models can choose between calling tools or using provider-side structured output generation +- **Cost reduction**: Eliminates extra expense from additional LLM calls - - Several legacy export paths have been removed to create a cleaner surface area: +```ts +import { createAgent } from "langchain"; +import { z } from "zod"; - **All `langchain/schema/*` exports removed** +const weatherSchema = z.object({ + temperature: z.number(), + condition: z.string(), +}); - - `langchain/schema/prompt_template` → use `langchain/prompts` - ```diff - - import { PromptTemplate } from "langchain/schema/prompt_template"; - + import { PromptTemplate } from "langchain/prompts"; - ``` +const agent = createAgent({ + model: "openai:gpt-4o-mini", + tools: [getWeather], + responseFormat: weatherSchema, +}); - - `langchain/schema/query_constructor` → use 
`langchain/chains/query_constructor` - ```diff - - import type { AttributeInfo } from "langchain/schema/query_constructor"; - + import type { AttributeInfo } from "langchain/chains/query_constructor"; - ``` +const result = await agent.invoke({ + messages: [ + { role: "user", content: "What is the weather in Tokyo?" }, + ], +}); - **`langchain/runnables/remote` export removed** +console.log(result.structuredResponse); +``` - If you used this entrypoint, migrate off this subpath (no 1:1 replacement export in `langchain` package). +**Error handling**: Control error handling via the `handleErrors` parameter to `ToolStrategy`: +- **Parsing errors**: Model generates data that doesn't match desired structure +- **Multiple tool calls**: Model generates 2+ tool calls for structured output schemas - **`langchain/smith` export path removed** +--- - Use the separate `langsmith` package and opt-in (see fixes section below). - +## Standard content blocks - - The `./callbacks` entrypoint is gone. Prefer LCEL observability (runnables), agent/tool messages, or provider-native hooks instead. + + 1.0 Alpha releases are available for most packages. Only the following currently support new content blocks: - ```diff - - import { ... } from "langchain/callbacks"; - // Replace with runnable config / tracing via LangSmith (optional) or agent nodes. - ``` - + - `langchain` + - `@langchain/core` + - `@langchain/anthropic` + - `@langchain/openai` - - All `./agents` imports have been removed from the `langchain` package. There is no direct replacement - use `createAgent` instead for building agents. + Broader support for content blocks will be rolled out during the alpha period and following stable release. 
+ - ```diff - - import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents"; - + import { createAgent } from "langchain"; +### Benefits - // Replace legacy agent patterns with createAgent - - const agent = createOpenAIFunctionsAgent({ llm, tools, prompt }); - - const agentExecutor = new AgentExecutor({ agent, tools }); - + const agent = createAgent({ model: "openai:gpt-4o-mini", tools }); - ``` - +- **Provider agnostic**: Access reasoning traces, citations, built-in tools (web search, code interpreters, etc.), and other features using the same API regardless of provider +- **Type safe**: Full type hints for all content block types +- **Backward compatible**: Standard content can be [loaded lazily](/oss/langchain/messages#standard-content-blocks), so there are no associated breaking changes - - The entire `@langchain/azure-openai` package has been removed as all its primitives were deprecated. +For more information, see our guide on [content blocks](/oss/langchain/messages#content) - **Migration:** Use `@langchain/openai` with Azure OpenAI configuration instead: +--- - ```ts - import { AzureChatOpenAI } from "@langchain/openai"; +## Simplified package - const model = new AzureChatOpenAI({ - azureOpenAIApiKey: "your-key", - azureOpenAIApiInstanceName: "your-instance", - azureOpenAIApiDeploymentName: "your-deployment", - azureOpenAIApiVersion: "2023-05-15", - }); - ``` - +LangChain v1 streamlines the `langchain` package namespace to focus on essential building blocks for agents. The package exposes only the most useful and relevant functionality: -## Migration guide +Most of these are re-exported from `@langchain/core` for convenience, which gives you a focused API surface for building agents. -Follow these steps to migrate your JavaScript/TypeScript code to LangChain v1.0: +### Exports -### 1. 
Node version +The v1 package includes: -Set `engines.node` to `>=20` and update CI runners: +| Module | What's available | Notes | +|--------|------------------|-------| +| `langchain/agents` | `create_agent`, `AgentState` | Core agent creation functionality | +| `langchain/messages` | Message types, content blocks, `trim_messages` | Re-exported from `@langchain/core` | +| `langchain/tools` | `tool`, `BaseTool`, injection helpers | Re-exported from `@langchain/core` | +| `langchain/chat_models` | `init_chat_model`, `BaseChatModel` | Unified model initialization | +| `langchain/embeddings` | `Embeddings`, `init_embeddings` | Embedding models | -```jsonc -// package.json -{ "engines": { "node": ">=20" } } -``` +### `@langchain/classic` -### 2. Import path updates +Legacy functionality has moved to [`@langchain/classic`](https://www.npmjs.com/package/@langchain/classic) to keep the core package lean and focused. -**All `langchain/schema/*` exports removed:** -```diff -- import { PromptTemplate } from "langchain/schema/prompt_template"; -+ import { PromptTemplate } from "langchain/prompts"; +#### What's in `@langchain/classic` -- import type { AttributeInfo } from "langchain/schema/query_constructor"; -+ import type { AttributeInfo } from "langchain/chains/query_constructor"; -``` +- Legacy chains and chain implementations +- The indexing API +- [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) exports +- Other deprecated functionality -**Remove unsupported imports:** -```diff -- import { ... } from "langchain/runnables/remote"; // No longer exported -- import { ... } from "langchain/smith"; // Use separate langsmith package -- import { ... } from "langchain/callbacks"; // Use LCEL observability instead -- import { ... 
} from "langchain/agents"; // Use createAgent instead -``` +If you use any of this functionality, install [`@langchain/classic`](https://www.npmjs.com/package/@langchain/classic): -**Azure OpenAI package removed:** -```diff -- import { AzureChatOpenAI } from "@langchain/azure-openai"; -+ import { AzureChatOpenAI } from "@langchain/openai"; -// Configure with Azure endpoints in ChatOpenAI constructor + +```bash npm +npm install @langchain/classic ``` - -### 3. Agent migration - -**Replace legacy agent imports with `createAgent`:** - -```diff -- import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents"; -- import { createReactAgent } from "@langchain/langgraph/prebuilt"; -+ import { createAgent } from "langchain"; +```bash pnpm +pnpm install @langchain/classic ``` - -**Migrate from legacy agent patterns:** - -```diff -// Old pattern -- const agent = createOpenAIFunctionsAgent({ llm, tools, prompt }); -- const agentExecutor = new AgentExecutor({ agent, tools }); -- const result = await agentExecutor.invoke({ input: "Hello" }); - -// New pattern -+ const agent = createAgent({ -+ model: "openai:gpt-4o-mini", -+ tools, -+ responseFormat: z.object({ answer: z.string() }) -+ }); -+ const result = await agent.invoke({ -+ messages: [new HumanMessage("Hello")] -+ }); +```bash yarn +yarn add @langchain/classic ``` - -You can now pass **`model` as a model name string** to `createAgent`. This requires you to have the specific model provider package installed (e.g. `@langchain/openai` for `openai:gpt-4o-mini`). - -Use `ToolNode` to encapsulate tool execution in graphs: - -```ts -import { ToolNode } from "langchain"; - -const toolNode = new ToolNode([/* tools */]); +```bash bun +bun add @langchain/classic ``` + -### 4. 
Error handling configuration - -Decide on error policy for `ToolNode`: +Then update your imports: -```ts -// Default: soft-handling (converts errors to ToolMessage) -const forgiving = new ToolNode([/* tools */], { handleToolErrors: true }); +```typescript +import { ... } from "langchain"; // [!code --] +import { ... } from "@langchain/classic"; // [!code ++] -// Strict: throw on tool error -const strict = new ToolNode([/* tools */], { handleToolErrors: false }); +import { ... } from "langchain/chains"; // [!code --] +import { ... } from "@langchain/classic/chains"; // [!code ++] ``` -## Reporting issues - -Please report any issues discovered with 1.0 on -[GitHub](https://github.com/langchain-ai/langchainjs/issues) using the -[`'v1'` label](https://github.com/langchain-ai/langchainjs/issues?q=state%3Aopen%20label%3Av1). +## Reporting issues + +Please report any issues discovered with 1.0 on [GitHub](https://github.com/langchain-ai/langchainjs/issues) using the [`'v1'` label](https://github.com/langchain-ai/langchainjs/issues?q=state%3Aopen%20label%3Av1). + +## Additional resources + + + + Read the announcement + + + Deep dive into middleware + + + Full agent documentation + + + New content blocks API + + + How to migrate to LangChain v1 + + + Report issues or contribute + + ## See also diff --git a/src/oss/python/migrate/langchain-v1.mdx b/src/oss/python/migrate/langchain-v1.mdx index bc0f5735e..656036a8a 100644 --- a/src/oss/python/migrate/langchain-v1.mdx +++ b/src/oss/python/migrate/langchain-v1.mdx @@ -1,6 +1,6 @@ --- title: LangChain v1 migration guide -sidebarTitle: Migrate to v1 +sidebarTitle: Migration guide --- This guide outlines the major changes between LangChain v1 and previous versions. @@ -76,14 +76,10 @@ The table below outlines what functionality has changed from `create_react_agent The import path for the agent prebuilt has changed from `langgraph.prebuilt` to `langchain.agents`. 
The name of the function has changed from `create_react_agent` to `create_agent`: - -```python v1 (new) -from langchain.agents import create_agent -``` -```python v0 (old) -from langgraph.prebuilt import create_react_agent +```python +from langgraph.prebuilt import create_react_agent # [!code --] +from langchain.agents import create_agent # [!code ++] ``` - For more information, see [Agents](/oss/python/langchain/agents). @@ -665,6 +661,156 @@ result = agent.invoke( --- +## Standard content + +In v1, messages gain provider-agnostic standard content blocks. Access them via `message.content_blocks` for a consistent, typed view across providers. The existing `message.content` field remains unchanged for strings or provider-native structures. + +### What changed + +- New `content_blocks` property on messages for normalized content +- Standardized block shapes, documented in [Messages](/oss/langchain/messages#standard-content-blocks) +- Optional serialization of standard blocks into `content` via `LC_OUTPUT_VERSION=v1` or `output_version="v1"` + +### Read standardized content + + +```python v1 (new) +from langchain.chat_models import init_chat_model + +model = init_chat_model("openai:gpt-5-nano") +response = model.invoke("Explain AI") + +for block in response.content_blocks: + if block["type"] == "reasoning": + print(block.get("reasoning")) + elif block["type"] == "text": + print(block.get("text")) +``` +```python v0 (old) +# Provider-native formats vary; you needed per-provider handling +response = model.invoke("Explain AI") +for item in response.content: + if item.get("type") == "reasoning": + ... # OpenAI-style reasoning + elif item.get("type") == "thinking": + ... # Anthropic-style thinking + elif item.get("type") == "text": + ... 
# Text +``` + + +### Create multimodal messages + + +```python v1 (new) +from langchain.messages import HumanMessage + +message = HumanMessage(content_blocks=[ + {"type": "text", "text": "Describe this image."}, + {"type": "image", "url": "https://example.com/image.jpg"}, +]) +res = model.invoke([message]) +``` +```python v0 (old) +from langchain.messages import HumanMessage + +message = HumanMessage(content=[ + # Provider-native structure + {"type": "text", "text": "Describe this image."}, + {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}}, +]) +res = model.invoke([message]) +``` + + +### Example block shapes + +```python +# Text block +text_block = { + "type": "text", + "text": "Hello world", +} + +# Image block +image_block = { + "type": "image", + "url": "https://example.com/image.png", + "mime_type": "image/png", +} +``` + +See the content blocks [reference](/oss/langchain/messages#content-block-reference) for more details. + +### Serialize standard content + +Standard content blocks are **not serialized** into the `content` attribute by default. If you need to access standard content blocks in the `content` attribute (e.g., when sending messages to a client), you can opt-in to serializing them into `content`. + + +```bash +export LC_OUTPUT_VERSION=v1 +``` +```python +from langchain.chat_models import init_chat_model + +model = init_chat_model( + "openai:gpt-5-nano", + output_version="v1", +) +``` + + + + Learn more: [Messages](/oss/langchain/messages#content), [Standard content blocks](/oss/langchain/messages#standard-content-blocks), and [Multimodal](/oss/langchain/messages#multimodal). + + +--- + +## Simplified package + +The `langchain` package namespace has been significantly reduced in v1 to focus on essential building blocks for agents. The streamlined package makes it easier to discover and use the core functionality. 
+ +### Namespace + +| Module | What's available | Notes | +|--------|------------------|-------| +| `langchain.agents` | `create_agent`, `AgentState` | Core agent creation functionality | +| `langchain.messages` | Message types, content blocks, `trim_messages` | Re-exported from `langchain-core` | +| `langchain.tools` | `tool`, `BaseTool`, injection helpers | Re-exported from `langchain-core` | +| `langchain.chat_models` | `init_chat_model`, `BaseChatModel` | Unified model initialization | +| `langchain.embeddings` | `Embeddings`, `init_embeddings`, | Embedding models | + +### `langchain-classic` + +If you were using any of the following from the `langchain` package, you'll need to install `langchain-classic` and update your imports: + +- Legacy chains (`LLMChain`, `ConversationChain`, etc.) +- The indexing API +- `langchain-community` re-exports +- Other deprecated functionality + + +```python v1 (new) +# For legacy chains +from langchain_classic.chains import LLMChain + +# For indexing +from langchain_classic.indexes import ... +``` + +```python v0 (old) +from langchain.chains import LLMChain +from langchain.indexes import ... 
+``` + + +**Installation**: +```bash +uv pip install langchain-classic +``` + +--- + ## Breaking changes ### Dropped Python 3.9 support diff --git a/src/oss/python/releases/langchain-v1.mdx b/src/oss/python/releases/langchain-v1.mdx index 863b0527b..2bdc1897b 100644 --- a/src/oss/python/releases/langchain-v1.mdx +++ b/src/oss/python/releases/langchain-v1.mdx @@ -1,13 +1,13 @@ --- title: What's new in v1 -sidebarTitle: What's new +sidebarTitle: Release notes --- import AlphaCallout from '/snippets/alpha-lc-callout.mdx'; -**LangChain v1 is a focused, production-ready foundation for building agentic applications.** We've streamlined the framework around three core improvements: +**LangChain v1 is a focused, production-ready foundation for building agents.** We've streamlined the framework around three core improvements: @@ -21,6 +21,18 @@ import AlphaCallout from '/snippets/alpha-lc-callout.mdx'; +To upgrade, + + +```bash pip +pip install --pre -U langchain +``` +```bash uv +uv add --prerelease=allow langchain +``` + + +For a complete list of changes, see the [migration guide](/oss/python/migrate/langchain-v1). ## `create_agent` @@ -177,7 +189,7 @@ For more information, see [the complete middleware guide](/oss/langchain/middlew ### Built on LangGraph Because `create_agent` is built on LangGraph, you automatically get built in support -for long running, reliable agents via: +for long running and reliable agents via: @@ -253,7 +265,7 @@ print(repr(result["structured_response"])) Broader support for content blocks will be rolled out during the alpha period and following stable release. 
-The new `content_blocks` property provides unified access to modern LLM features across all providers: +The new `content_blocks` property introduces a standard representation for message content that works across providers: ```python from langchain_anthropic import ChatAnthropic @@ -297,7 +309,7 @@ The v1 namespace includes: | `langchain.chat_models` | `init_chat_model`, `BaseChatModel` | Unified model initialization | | `langchain.embeddings` | `Embeddings`, `init_embeddings` | Embedding models | -Most of these exports are re-exported from `langchain-core` for convenience, giving you a focused API surface for building agents. +Most of these are re-exported from `langchain-core` for convenience, which gives you a focused API surface for building agents. ```python # Agent building From bd90cb8549e7aff7f407d99aca2d1f7e940cae22 Mon Sep 17 00:00:00 2001 From: Hunter Lovell Date: Tue, 14 Oct 2025 09:42:01 -0700 Subject: [PATCH 2/2] cr --- src/oss/javascript/migrate/langchain-v1.mdx | 7 +++--- src/oss/javascript/releases/langchain-v1.mdx | 26 ++++++++++---------- src/oss/python/releases/langchain-v1.mdx | 2 +- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/oss/javascript/migrate/langchain-v1.mdx b/src/oss/javascript/migrate/langchain-v1.mdx index 7da29c1ce..0627e37bf 100644 --- a/src/oss/javascript/migrate/langchain-v1.mdx +++ b/src/oss/javascript/migrate/langchain-v1.mdx @@ -410,9 +410,9 @@ import { createAgent, createMiddleware } from "langchain"; const errorHandling = createMiddleware({ name: "ToolErrors", - wrapToolCall: (request, handler) => { + wrapToolCall: async (request, handler) => { try { - return handler(request); + return await handler(request); } catch (err) { return `Error executing ${request.toolName}: ${String(err)}`; } @@ -659,9 +659,8 @@ The v1 package includes: |--------|------------------|-------| | Agents | `createAgent`, `AgentState` | Core agent creation functionality | | Messages | Message types, content blocks, 
`trimMessages` | Re-exported from `@langchain/core` | -| Tools | `tool`, `BaseTool`, injection helpers | Re-exported from `@langchain/core` | +| Tools | `tool`, tool classes | Re-exported from `@langchain/core` | | Chat models | `initChatModel`, `BaseChatModel` | Unified model initialization | -| Embeddings | `Embeddings`, `initEmbeddings` | Embedding models | ### `@langchain/classic` diff --git a/src/oss/javascript/releases/langchain-v1.mdx b/src/oss/javascript/releases/langchain-v1.mdx index 83f127499..49fcf1598 100644 --- a/src/oss/javascript/releases/langchain-v1.mdx +++ b/src/oss/javascript/releases/langchain-v1.mdx @@ -62,6 +62,17 @@ const result = await agent.invoke({ console.log(result.content); ``` +Under the hood, `createAgent` is built on the basic agent loop -- calling a model, letting it choose tools to execute, and then finishing when it calls no more tools: + +
+ Core agent loop diagram +
+ For more information, see [Agents](/oss/langchain/agents). ### Middleware @@ -72,7 +83,7 @@ Great agents require [context engineering](/oss/langchain/context-engineering): #### Prebuilt middleware -LangChain provides a few prebuilt middlewares for common patterns, including: +LangChain provides a few [prebuilt middlewares](/oss/python/langchain/middleware#built-in-middleware) for common patterns, including: - `summarizationMiddleware`: Condense conversation history when it gets too long - `humanInTheLoopMiddleware`: Require approval for sensitive tool calls @@ -102,7 +113,7 @@ const agent = createAgent({ }, }, }), - ], + ] as const, }); ``` @@ -246,17 +257,6 @@ LangChain v1 streamlines the `langchain` package namespace to focus on essential Most of these are re-exported from `@langchain/core` for convenience, which gives you a focused API surface for building agents. -### Exports - -The v1 package includes: - -| Module | What's available | Notes | -|--------|------------------|-------| -| `langchain/agents` | `create_agent`, `AgentState` | Core agent creation functionality | -| `langchain/messages` | Message types, content blocks, `trim_messages` | Re-exported from `@langchain/core` | -| `langchain/tools` | `tool`, `BaseTool`, injection helpers | Re-exported from `@langchain/core` | -| `langchain/chat_models` | `init_chat_model`, `BaseChatModel` | Unified model initialization | -| `langchain/embeddings` | `Embeddings`, `init_embeddings` | Embedding models | ### `@langchain/classic` diff --git a/src/oss/python/releases/langchain-v1.mdx b/src/oss/python/releases/langchain-v1.mdx index 2bdc1897b..13da50e51 100644 --- a/src/oss/python/releases/langchain-v1.mdx +++ b/src/oss/python/releases/langchain-v1.mdx @@ -75,7 +75,7 @@ Great agents require [context engineering](/oss/langchain/context-engineering): #### Prebuilt middleware -LangChain provides a few prebuilt middlewares for common patterns, including: +LangChain provides a few [prebuilt 
middlewares](/oss/python/langchain/middleware#built-in-middleware) for common patterns, including: - `PIIRedactionMiddleware`: Redact sensitive information before sending to the model - `SummarizationMiddleware`: Condense conversation history when it gets too long