1
1
![MCP Toolbox Logo](https://raw.githubusercontent.com/googleapis/genai-toolbox/main/logo.png)
2
- # MCP Toolbox LlamaIndex SDK
2
+ # MCP Toolbox LangChain SDK
3
3
4
4
This SDK allows you to seamlessly integrate the functionalities of
5
- [ Toolbox] ( https://github.com/googleapis/genai-toolbox ) into your LlamaIndex LLM
5
+ [ Toolbox] ( https://github.com/googleapis/genai-toolbox ) into your LangChain LLM
6
6
applications, enabling advanced orchestration and interaction with GenAI models.
7
7
8
8
<!-- TOC ignore:true -->
@@ -15,7 +15,10 @@ applications, enabling advanced orchestration and interaction with GenAI models.
15
15
- [ Loading Tools] ( #loading-tools )
16
16
- [ Load a toolset] ( #load-a-toolset )
17
17
- [ Load a single tool] ( #load-a-single-tool )
18
- - [ Use with LlamaIndex] ( #use-with-llamaindex )
18
+ - [ Use with LangChain] ( #use-with-langchain )
19
+ - [ Use with LangGraph] ( #use-with-langgraph )
20
+ - [ Represent Tools as Nodes] ( #represent-tools-as-nodes )
21
+ - [ Connect Tools with LLM] ( #connect-tools-with-llm )
19
22
- [ Manual usage] ( #manual-usage )
20
23
- [ Authenticating Tools] ( #authenticating-tools )
21
24
- [ Supported Authentication Mechanisms] ( #supported-authentication-mechanisms )
@@ -35,48 +38,41 @@ applications, enabling advanced orchestration and interaction with GenAI models.
35
38
## Installation
36
39
37
40
``` bash
38
- pip install toolbox-llamaindex
41
+ pip install toolbox-langchain
39
42
```
40
43
41
44
## Quickstart
42
45
43
46
Here's a minimal example to get you started using
44
- # TODO: add link
45
- [ LlamaIndex] ( ) :
47
+ [ LangGraph] ( https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.chat_agent_executor.create_react_agent ) :
46
48
47
49
``` py
48
- import asyncio
49
-
50
- from llama_index.llms.google_genai import GoogleGenAI
51
- from llama_index.core.agent.workflow import AgentWorkflow
50
+ from toolbox_langchain import ToolboxClient
51
+ from langchain_google_vertexai import ChatVertexAI
52
+ from langgraph.prebuilt import create_react_agent
52
53
53
- from toolbox_llamaindex import ToolboxClient
54
+ toolbox = ToolboxClient(" http://127.0.0.1:5000" )
55
+ tools = toolbox.load_toolset()
54
56
55
- async def run_agent ():
56
- toolbox = ToolboxClient(" http://127.0.0.1:5000" )
57
- tools = toolbox.load_toolset()
57
+ model = ChatVertexAI(model = " gemini-1.5-pro-002" )
58
+ agent = create_react_agent(model, tools)
58
59
59
- vertex_model = GoogleGenAI(
60
- model = " gemini-1.5-pro" ,
61
- vertexai_config = {" project" : " project-id" , " location" : " us-central1" },
62
- )
63
- agent = AgentWorkflow.from_tools_or_functions(
64
- tools,
65
- llm = vertex_model,
66
- system_prompt = " You are a helpful assistant." ,
67
- )
68
- response = await agent.run(user_msg = " Get some response from the agent." )
69
- print (response)
60
+ prompt = " How's the weather today?"
70
61
71
- asyncio.run(run_agent())
62
+ for s in agent.stream({" messages" : [(" user" , prompt)]}, stream_mode = " values" ):
63
+ message = s[" messages" ][- 1 ]
64
+ if isinstance (message, tuple ):
65
+ print (message)
66
+ else :
67
+ message.pretty_print()
72
68
```
73
69
74
70
## Usage
75
71
76
72
Import and initialize the toolbox client.
77
73
78
74
``` py
79
- from toolbox_llamaindex import ToolboxClient
75
+ from toolbox_langchain import ToolboxClient
80
76
81
77
# Replace with your Toolbox service's URL
82
78
toolbox = ToolboxClient(" http://127.0.0.1:5000" )
@@ -106,63 +102,85 @@ tool = toolbox.load_tool("my-tool")
106
102
Loading individual tools gives you finer-grained control over which tools are
107
103
available to your LLM agent.
108
104
109
- ## Use with LlamaIndex
105
+ ## Use with LangChain
110
106
111
107
LangChain's agents can dynamically choose and execute tools based on user
112
108
input. Include tools loaded from the Toolbox SDK in the agent's toolkit:
113
109
114
110
``` py
115
- from llama_index.llms.google_genai import GoogleGenAI
116
- from llama_index.core.agent.workflow import AgentWorkflow
111
+ from langchain_google_vertexai import ChatVertexAI
117
112
118
- vertex_model = GoogleGenAI(
119
- model = " gemini-1.5-pro" ,
120
- vertexai_config = {" project" : " project-id" , " location" : " us-central1" },
121
- )
113
+ model = ChatVertexAI(model = " gemini-1.5-pro-002" )
122
114
123
115
# Initialize agent with tools
124
- agent = AgentWorkflow.from_tools_or_functions(
125
- tools,
126
- llm = vertex_model,
127
- system_prompt = " You are a helpful assistant." ,
128
- )
129
-
130
- # Query the agent
131
- response = await agent.run(user_msg = " Get some response from the agent." )
132
- print (response)
116
+ agent = model.bind_tools(tools)
117
+
118
+ # Run the agent
119
+ result = agent.invoke(" Do something with the tools" )
133
120
```
134
121
135
- ### Maintain state
122
+ ## Use with LangGraph
136
123
137
- To maintain state for the agent, add context as follows:
124
+ Integrate the Toolbox SDK with LangGraph to use Toolbox service tools within a
125
+ graph-based workflow. Follow the [ official
126
+ guide] ( https://langchain-ai.github.io/langgraph/ ) with minimal changes.
127
+
128
+ ### Represent Tools as Nodes
129
+
130
+ Represent each tool as a LangGraph node, encapsulating the tool's execution within the node's functionality:
138
131
139
132
``` py
140
- from llama_index.core.agent.workflow import AgentWorkflow
141
- from llama_index.core.workflow import Context
142
- from llama_index.llms.google_genai import GoogleGenAI
143
-
144
- vertex_model = GoogleGenAI(
145
- model = " gemini-1.5-pro" ,
146
- vertexai_config = {" project" : " twisha-dev" , " location" : " us-central1" },
147
- )
148
- agent = AgentWorkflow.from_tools_or_functions(
149
- tools,
150
- llm = vertex_model,
151
- system_prompt = " You are a helpful assistant" ,
152
- )
153
-
154
- # Save memory in agent context
155
- ctx = Context(agent)
156
- response = await agent.run(user_msg = " Give me some response." , ctx = ctx)
157
- print (response)
133
+ from toolbox_langchain import ToolboxClient
134
+ from langgraph.graph import StateGraph, MessagesState
135
+ from langgraph.prebuilt import ToolNode
136
+
137
+ # Define the function that calls the model
138
+ def call_model (state : MessagesState):
139
+ messages = state[' messages' ]
140
+ response = model.invoke(messages)
141
+ return {" messages" : [response]} # Return a list to add to existing messages
142
+
143
+ model = ChatVertexAI(model = " gemini-1.5-pro-002" )
144
+ builder = StateGraph(MessagesState)
145
+ tool_node = ToolNode(tools)
146
+
147
+ builder.add_node(" agent" , call_model)
148
+ builder.add_node(" tools" , tool_node)
149
+ ```
150
+
151
+ ### Connect Tools with LLM
152
+
153
+ Connect tool nodes with LLM nodes. The LLM decides which tool to use based on
154
+ input or context. Tool output can be fed back into the LLM:
155
+
156
+ ``` py
157
+ from typing import Literal
158
+ from langgraph.graph import END , START
159
+ from langchain_core.messages import HumanMessage
160
+
161
+ # Define the function that determines whether to continue or not
162
+ def should_continue (state : MessagesState) -> Literal[" tools" , END ]:
163
+ messages = state[' messages' ]
164
+ last_message = messages[- 1 ]
165
+ if last_message.tool_calls:
166
+ return " tools" # Route to "tools" node if LLM makes a tool call
167
+ return END # Otherwise, stop
168
+
169
+ builder.add_edge(START , " agent" )
170
+ builder.add_conditional_edges(" agent" , should_continue)
171
+ builder.add_edge(" tools" , ' agent' )
172
+
173
+ graph = builder.compile()
174
+
175
+ graph.invoke({" messages" : [HumanMessage(content = " Do something with the tools" )]})
158
176
```
159
177
160
178
## Manual usage
161
179
162
- Execute a tool manually using the ` call ` method:
180
+ Execute a tool manually using the ` invoke ` method:
163
181
164
182
``` py
165
- result = tools[0 ].call ({" name" : " Alice" , " age" : 30 })
183
+ result = tools[0 ].invoke ({" name" : " Alice" , " age" : 30 })
166
184
```
167
185
168
186
This is useful for testing tools or when you need precise control over tool
@@ -232,7 +250,7 @@ auth_tools = toolbox.load_toolset(auth_tokens={"my_auth": get_auth_token})
232
250
233
251
``` py
234
252
import asyncio
235
- from toolbox_llamaindex import ToolboxClient
253
+ from toolbox_langchain import ToolboxClient
236
254
237
255
async def get_auth_token ():
238
256
# ... Logic to retrieve ID token (e.g., from local storage, OAuth flow)
@@ -243,7 +261,7 @@ toolbox = ToolboxClient("http://127.0.0.1:5000")
243
261
tool = toolbox.load_tool(" my-tool" )
244
262
245
263
auth_tool = tool.add_auth_token(" my_auth" , get_auth_token)
246
- result = auth_tool.call ({" input" : " some input" })
264
+ result = auth_tool.invoke ({" input" : " some input" })
247
265
print (result)
248
266
```
249
267
@@ -311,7 +329,7 @@ use the asynchronous interfaces of the `ToolboxClient`.
311
329
312
330
``` py
313
331
import asyncio
314
- from toolbox_llamaindex import ToolboxClient
332
+ from toolbox_langchain import ToolboxClient
315
333
316
334
async def main ():
317
335
toolbox = ToolboxClient(" http://127.0.0.1:5000" )
@@ -321,4 +339,4 @@ async def main():
321
339
322
340
if __name__ == " __main__" :
323
341
asyncio.run(main())
324
- ```
342
+ ```