
Commit 35ae8b4

feat: add mcp tool type

2 parents 70de258 + f86b87c, commit 35ae8b4

6 files changed: +41 -29 lines

python/dify_plugin/entities/tool.py

Lines changed: 1 addition & 0 deletions
@@ -394,6 +394,7 @@ class ToolProviderType(Enum):
     API = "api"
     APP = "app"
     DATASET_RETRIEVAL = "dataset-retrieval"
+    MCP = "mcp"

     @classmethod
     def value_of(cls, value: str) -> "ToolProviderType":
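A quick sketch of what the new member enables, separate from the diff: resolving the provider type from its string value. This assumes value_of keeps its usual behavior of matching an enum member by its value; the hunk only shows the method signature.

from dify_plugin.entities.tool import ToolProviderType

# Resolve the provider type a plugin might receive as a plain string.
provider = ToolProviderType.value_of("mcp")  # assumed lookup-by-value behavior
assert provider is ToolProviderType.MCP
assert provider.value == "mcp"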

python/examples/agent/README.md

Lines changed: 1 addition & 1 deletion
@@ -21,4 +21,4 @@ ReAct alternates between the LLM reasoning about the situation and taking action
 #### Pros:
 - **Leverages External Information:** Effectively uses external tools to gather information for tasks the model cannot handle alone.
 - **Explainable Reasoning:** Interwoven reasoning and action steps allow some tracking of the Agent's process.
-- **Wide Applicability:** Suitable for tasks requiring external knowledge or specific actions, such as Q&A, information retrieval, and task execution.
+- **Wide Applicability:** Suitable for tasks requiring external knowledge or specific actions, such as Q&A, information retrieval, and task execution.
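The hunk itself only touches the end of the README, but the context line above summarizes what ReAct does: the model alternates between a reasoning step and a tool action until it can answer. A minimal, generic sketch of that loop follows; it is illustrative only and not the repository's ReActAgentStrategy. The llm object, parse step, and tools mapping are hypothetical stand-ins.

def react_loop(question: str, llm, tools: dict, max_steps: int = 5) -> str:
    """Toy ReAct loop: reason, act with a tool, feed the observation back."""
    transcript = f"Question: {question}"
    for _ in range(max_steps):
        # Reasoning step: the model proposes a thought plus an action to take.
        thought, action, action_input = llm.reason(transcript)
        transcript += f"\nThought: {thought}"
        if action == "final_answer":
            return action_input
        # Action step: run the chosen tool and append the observation.
        observation = tools[action](action_input)
        transcript += f"\nAction: {action}\nObservation: {observation}"
    return "No answer within the step budget."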

python/examples/agent/manifest.yaml

Lines changed: 2 additions & 2 deletions
@@ -1,11 +1,11 @@
-version: 0.0.10
+version: 0.0.15
 type: plugin
 author: "langgenius"
 name: "agent"
 label:
   en_US: "Dify Agent Strategies"
   zh_Hans: "Dify Agent 策略"
-created_at: "2024-07-12T08:03:44.658609186Z"
+created_at: "2025-01-08T15:22:00.000000000Z"
 icon: icon.svg
 description:
   en_US: Dify official Agent strategies collection
Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-dify_plugin==0.0.1b65
+dify_plugin==0.2.1

python/examples/agent/strategies/ReAct.py

Lines changed: 2 additions & 2 deletions
@@ -50,8 +50,8 @@ class AgentPromptEntity(BaseModel):


 class ReActAgentStrategy(AgentStrategy):
-    def __init__(self, session):
-        super().__init__(session)
+    def __init__(self, session, runtime):
+        super().__init__(session, runtime)
         self.query = ""
         self.instruction = ""
         self.history_prompt_messages = []
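For plugin authors tracking this change: custom strategies built against dify_plugin 0.2.x apparently need to accept and forward the runtime object as well as the session. A minimal sketch mirroring the constructor above; EchoStrategy is hypothetical, and the base class's exact parameter order is not shown in this diff.

from dify_plugin.interfaces.agent import AgentStrategy

class EchoStrategy(AgentStrategy):  # hypothetical example, not part of the commit
    def __init__(self, session, runtime):
        # Forward both objects, mirroring the updated ReActAgentStrategy above.
        super().__init__(session, runtime)
        self.query = ""

    def _invoke(self, parameters):
        # Stub; a real strategy would yield AgentInvokeMessage items here.
        yield from ()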

python/examples/agent/strategies/function_calling.py

Lines changed: 34 additions & 23 deletions
@@ -23,7 +23,12 @@
     UserPromptMessage,
 )
 from dify_plugin.entities.tool import LogMetadata, ToolInvokeMessage, ToolProviderType
-from dify_plugin.interfaces.agent import AgentModelConfig, AgentStrategy, ToolEntity, ToolInvokeMeta
+from dify_plugin.interfaces.agent import (
+    AgentModelConfig,
+    AgentStrategy,
+    ToolEntity,
+    ToolInvokeMeta,
+)


 class FunctionCallingParams(BaseModel):
@@ -35,8 +40,8 @@ class FunctionCallingParams(BaseModel):


 class FunctionCallingAgentStrategy(AgentStrategy):
-    def __init__(self, session):
-        super().__init__(session)
+    def __init__(self, runtime, session):
+        super().__init__(runtime, session)
         self.query = ""
         self.instruction = ""

@@ -59,6 +64,8 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
         self.query = query
         self.instruction = fc_params.instruction
         history_prompt_messages = fc_params.model.history_prompt_messages
+        history_prompt_messages.insert(0, self._system_prompt_message)
+        history_prompt_messages.append(self._user_prompt_message)

         # convert tool messages
         tools = fc_params.tools
@@ -96,7 +103,8 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
             )
             yield round_log

-            if iteration_step == max_iteration_steps:
+            # If max_iteration_steps=1, need to execute tool calls
+            if iteration_step == max_iteration_steps and max_iteration_steps > 1:
                 # the last iteration, remove all tools
                 prompt_messages_tools = []

@@ -195,7 +203,7 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
                 data={
                     "output": response,
                     "tool_name": tool_call_names,
-                    "tool_input": {tool_call[1]: tool_call[2] for tool_call in tool_calls},
+                    "tool_input": [{"name": tool_call[1], "args": tool_call[2]} for tool_call in tool_calls],
                 },
                 metadata={
                     LogMetadata.STARTED_AT: model_started_at,
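A standalone illustration of why the log payload changes shape here (my reading: the old dict comprehension keyed by tool name silently drops repeated calls to the same tool, while the new list keeps every call in order). The tool data below is invented.

tool_calls = [
    ("id-1", "search", {"q": "dify"}),
    ("id-2", "search", {"q": "mcp"}),   # second call to the same tool
]

old_shape = {call[1]: call[2] for call in tool_calls}
assert old_shape == {"search": {"q": "mcp"}}            # first call is lost

new_shape = [{"name": call[1], "args": call[2]} for call in tool_calls]
assert len(new_shape) == 2                              # both calls preserved, in order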
@@ -208,28 +216,30 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
                 },
             )
             assistant_message = AssistantPromptMessage(content="", tool_calls=[])
-            if tool_calls:
-                assistant_message.tool_calls = [
-                    AssistantPromptMessage.ToolCall(
-                        id=tool_call[0],
-                        type="function",
-                        function=AssistantPromptMessage.ToolCall.ToolCallFunction(
-                            name=tool_call[1],
-                            arguments=json.dumps(tool_call[2], ensure_ascii=False),
-                        ),
-                    )
-                    for tool_call in tool_calls
-                ]
-            else:
+            if not tool_calls:
                 assistant_message.content = response
-
-            current_thoughts.append(assistant_message)
+                current_thoughts.append(assistant_message)

             final_answer += response + "\n"

             # call tools
             tool_responses = []
             for tool_call_id, tool_call_name, tool_call_args in tool_calls:
+                current_thoughts.append(
+                    AssistantPromptMessage(
+                        content="",
+                        tool_calls=[
+                            AssistantPromptMessage.ToolCall(
+                                id=tool_call_id,
+                                type="function",
+                                function=AssistantPromptMessage.ToolCall.ToolCallFunction(
+                                    name=tool_call_name,
+                                    arguments=json.dumps(tool_call_args, ensure_ascii=False),
+                                ),
+                            )
+                        ],
+                    )
+                )
                 tool_instance = tool_instances[tool_call_name]
                 tool_call_started_at = time.perf_counter()
                 tool_call_log = self.create_log_message(
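Previously one assistant message carried every tool call for the round; after this hunk each pending call is appended to current_thoughts as its own single-call AssistantPromptMessage right before the tool runs. A standalone sketch of the new per-call construction with invented tool data; the import path is assumed from the file's other imports, which this hunk does not show.

import json
from dify_plugin.entities.model.message import AssistantPromptMessage  # path assumed

tool_calls = [("id-1", "search", {"q": "dify"}), ("id-2", "weather", {"city": "Kyoto"})]
current_thoughts = []
for tool_call_id, tool_call_name, tool_call_args in tool_calls:
    # One assistant message per pending tool call, built the same way as the hunk above.
    current_thoughts.append(
        AssistantPromptMessage(
            content="",
            tool_calls=[
                AssistantPromptMessage.ToolCall(
                    id=tool_call_id,
                    type="function",
                    function=AssistantPromptMessage.ToolCall.ToolCallFunction(
                        name=tool_call_name,
                        arguments=json.dumps(tool_call_args, ensure_ascii=False),
                    ),
                )
            ],
        )
    )
# current_thoughts now holds one assistant message per pending tool call.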
@@ -341,8 +351,11 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
                     LogMetadata.TOTAL_TOKENS: current_llm_usage.total_tokens if current_llm_usage else 0,
                 },
             )
+            # If max_iteration_steps=1, need to return tool responses
+            if tool_responses and max_iteration_steps == 1:
+                for resp in tool_responses:
+                    yield self.create_text_message(resp["tool_response"])
             iteration_step += 1
-
         yield self.create_json_message(
             {
                 "execution_metadata": {
@@ -452,8 +465,6 @@ def _organize_prompt_messages(
         current_thoughts: list[PromptMessage],
         history_prompt_messages: list[PromptMessage],
     ) -> list[PromptMessage]:
-        history_prompt_messages.insert(0, self._system_prompt_message)
-        history_prompt_messages.append(self._user_prompt_message)
         prompt_messages = [
             *history_prompt_messages,
             *current_thoughts,
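The two lines removed here are the counterpart of the two added in the earlier _invoke hunk: the system and user prompts are now attached to the history once, up front, instead of on every call to _organize_prompt_messages. My reading is that the old placement mutated the shared history list each time the helper ran, duplicating those prompts across iterations. A generic sketch of that failure mode with plain strings, not the repository's classes:

def organize_old(history: list, thoughts: list) -> list:
    history.insert(0, "SYSTEM")   # mutates the caller's list on every call
    history.append("USER")
    return [*history, *thoughts]

history = ["earlier turn"]
organize_old(history, [])
organize_old(history, [])
assert history == ["SYSTEM", "SYSTEM", "earlier turn", "USER", "USER"]  # duplicated prompts

# New arrangement: attach once, keep the helper read-only.
history = ["earlier turn"]
history.insert(0, "SYSTEM")
history.append("USER")

def organize_new(history: list, thoughts: list) -> list:
    return [*history, *thoughts]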
