Skip to content

feat: hitl models support #22

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 28, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "uipath-llamaindex"
version = "0.0.19"
version = "0.0.20"
description = "UiPath LlamaIndex SDK"
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.10"
Expand Down
1 change: 1 addition & 0 deletions samples/action-center-hitl-agent/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
OPENAI_API_KEY=xxx
35 changes: 35 additions & 0 deletions samples/action-center-hitl-agent/agent.mermaid
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
flowchart TD
step__done["_done"]:::stepStyle
step_aggregate_tool_results["aggregate_tool_results"]:::stepStyle
step_call_tool["call_tool"]:::stepStyle
step_init_run["init_run"]:::stepStyle
step_parse_agent_output["parse_agent_output"]:::stepStyle
step_run_agent_step["run_agent_step"]:::stepStyle
step_setup_agent["setup_agent"]:::stepStyle
event_StopEvent([<p>StopEvent</p>]):::stopEventStyle
event_ToolCallResult([<p>ToolCallResult</p>]):::defaultEventStyle
event_AgentInput([<p>AgentInput</p>]):::defaultEventStyle
event_ToolCall([<p>ToolCall</p>]):::defaultEventStyle
event_AgentWorkflowStartEvent([<p>AgentWorkflowStartEvent</p>]):::defaultEventStyle
event_AgentOutput([<p>AgentOutput</p>]):::defaultEventStyle
event_AgentSetup([<p>AgentSetup</p>]):::defaultEventStyle
event_StopEvent --> step__done
step_aggregate_tool_results --> event_AgentInput
step_aggregate_tool_results --> event_StopEvent
event_ToolCallResult --> step_aggregate_tool_results
step_call_tool --> event_ToolCallResult
event_ToolCall --> step_call_tool
step_init_run --> event_AgentInput
event_AgentWorkflowStartEvent --> step_init_run
step_parse_agent_output --> event_StopEvent
step_parse_agent_output --> event_ToolCall
event_AgentOutput --> step_parse_agent_output
step_run_agent_step --> event_AgentOutput
event_AgentSetup --> step_run_agent_step
step_setup_agent --> event_AgentSetup
event_AgentInput --> step_setup_agent
classDef stepStyle fill:#f2f0ff,line-height:1.2
classDef externalStyle fill:#f2f0ff,line-height:1.2
classDef defaultEventStyle fill-opacity:0
classDef stopEventStyle fill:#bfb6fc
classDef inputRequiredStyle fill:#f2f0ff,line-height:1.2
7 changes: 7 additions & 0 deletions samples/action-center-hitl-agent/llama_index.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"dependencies": ["."],
"workflows": {
"agent": "main.py:workflow"
},
"env": ".env"
}
58 changes: 58 additions & 0 deletions samples/action-center-hitl-agent/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
import json

from dotenv import load_dotenv
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.workflow import (
Context,
HumanResponseEvent,
)
from llama_index.llms.openai import OpenAI

from uipath_llamaindex.models import CreateActionEvent

load_dotenv()

llm = OpenAI(model="gpt-4o-mini")


async def may_research_company(ctx: Context, company_name: str) -> bool:
    """Ask a human, via a UiPath Action Center escalation, whether a company may be researched.

    Args:
        ctx (Context): The context in which this function is called (autopopulated).
        company_name (str): Name of the company to be researched.
    Returns:
        bool: True if the human explicitly approved the research, False otherwise.
    """
    agent_name = "Company researcher"
    # Publish an event on the external stream; the UiPath runtime captures it
    # and creates an Action Center task for a human to act on.
    escalation = CreateActionEvent(
        prefix="hitl escalation to research company",
        app_name="generic_escalation_app",
        title=f"Action required for {agent_name}",
        data={
            "AgentOutput": f"May I perform a research on company {company_name}?",
            "AgentName": agent_name,
        },
        app_version=1,
        app_folder_path="Shared",
        # assignee="(optional)<assignee email>"
    )
    ctx.write_event_to_stream(escalation)

    # Suspend until the human's answer arrives as a HumanResponseEvent.
    human_reply = await ctx.wait_for_event(HumanResponseEvent)
    answer = json.loads(human_reply.response)["Answer"]
    # Approve only on an explicit boolean True; `is True` already guarantees
    # the value is a bool, so no separate isinstance check is needed.
    return answer is True


# example user input {"user_msg": "research UiPath company"}
# BUG FIX: pass the tool *function* itself, not the result of calling it.
# `may_research_company()` would evaluate to a coroutine object (and lose the
# required ctx/company_name arguments), so AgentWorkflow would never receive
# a usable tool. Compare the sibling samples, which pass the bare function.
workflow = AgentWorkflow.from_tools_or_functions(
    [may_research_company],
    llm=llm,
    system_prompt="You are a helpful assistant that can use tools to perform actions requested by user",
)
11 changes: 11 additions & 0 deletions samples/action-center-hitl-agent/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
[project]
name = "llama-action-center"
version = "0.0.1"
description = "UiPath LlamaIndex Simple HITL Agent"
authors = [{ name = "John Doe", email = "john.doe@myemail.com" }]
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.10"
dependencies = [
"uipath-llamaindex==0.0.20",
"llama-index-llms-openai>=0.2.2"
]
27 changes: 27 additions & 0 deletions samples/action-center-hitl-agent/uipath.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
{
"entryPoints": [
{
"filePath": "agent",
"uniqueId": "ca9074cb-d758-483c-a88d-4346b3214fb7",
"type": "agent",
"input": {
"type": "object",
"properties": {
"hack": {
"type": "object"
}
},
"required": []
},
"output": {
"type": "object",
"properties": {},
"required": []
}
}
],
"bindings": {
"version": "2.0",
"resources": []
}
}
1 change: 1 addition & 0 deletions samples/multi-agent/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
OPENAI_API_KEY=xxx
35 changes: 35 additions & 0 deletions samples/multi-agent/agent.mermaid
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
flowchart TD
step__done["_done"]:::stepStyle
step_aggregate_tool_results["aggregate_tool_results"]:::stepStyle
step_call_tool["call_tool"]:::stepStyle
step_init_run["init_run"]:::stepStyle
step_parse_agent_output["parse_agent_output"]:::stepStyle
step_run_agent_step["run_agent_step"]:::stepStyle
step_setup_agent["setup_agent"]:::stepStyle
event_StopEvent([<p>StopEvent</p>]):::stopEventStyle
event_ToolCallResult([<p>ToolCallResult</p>]):::defaultEventStyle
event_AgentInput([<p>AgentInput</p>]):::defaultEventStyle
event_ToolCall([<p>ToolCall</p>]):::defaultEventStyle
event_AgentWorkflowStartEvent([<p>AgentWorkflowStartEvent</p>]):::defaultEventStyle
event_AgentOutput([<p>AgentOutput</p>]):::defaultEventStyle
event_AgentSetup([<p>AgentSetup</p>]):::defaultEventStyle
event_StopEvent --> step__done
step_aggregate_tool_results --> event_AgentInput
step_aggregate_tool_results --> event_StopEvent
event_ToolCallResult --> step_aggregate_tool_results
step_call_tool --> event_ToolCallResult
event_ToolCall --> step_call_tool
step_init_run --> event_AgentInput
event_AgentWorkflowStartEvent --> step_init_run
step_parse_agent_output --> event_StopEvent
step_parse_agent_output --> event_ToolCall
event_AgentOutput --> step_parse_agent_output
step_run_agent_step --> event_AgentOutput
event_AgentSetup --> step_run_agent_step
step_setup_agent --> event_AgentSetup
event_AgentInput --> step_setup_agent
classDef stepStyle fill:#f2f0ff,line-height:1.2
classDef externalStyle fill:#f2f0ff,line-height:1.2
classDef defaultEventStyle fill-opacity:0
classDef stopEventStyle fill:#bfb6fc
classDef inputRequiredStyle fill:#f2f0ff,line-height:1.2
7 changes: 7 additions & 0 deletions samples/multi-agent/llama_index.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"dependencies": ["."],
"workflows": {
"agent": "main.py:workflow"
},
"env": ".env"
}
49 changes: 49 additions & 0 deletions samples/multi-agent/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import json

from dotenv import load_dotenv
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.workflow import (
Context,
HumanResponseEvent,
)
from llama_index.llms.openai import OpenAI

from uipath_llamaindex.models import InvokeProcessEvent

load_dotenv()

llm = OpenAI(model="gpt-4o-mini")


async def may_research_company(ctx: Context, company_name: str) -> str:
    """Research a company by delegating to another UiPath process.

    Emits an InvokeProcessEvent that starts the "my-first-uipath-agent"
    process with the company name as its "topic" input, then suspends until
    the result comes back on the stream as a HumanResponseEvent.

    Args:
        ctx (Context): The context in which this function is called (autopopulated).
        company_name (str): Name of the company to be researched.
    Returns:
        str: The research report (the "report" field of the invoked
            process's JSON response).
    Raises:
        KeyError: If the response JSON has no "report" field.
    """
    # emit an event to the external stream to be captured
    ctx.write_event_to_stream(
        InvokeProcessEvent(
            prefix="invoke langgraph researcher event",
            name="my-first-uipath-agent",
            # process_folder_path="(optional)<process-folder-path>",
            input_arguments={
                "topic": company_name,
            },
        )
    )

    # wait until we see a HumanResponseEvent
    hitl_response = await ctx.wait_for_event(HumanResponseEvent)
    feedback = json.loads(hitl_response.response)
    # act on the input from the event
    return feedback["report"]


# Entry-point workflow: wraps the research-delegation tool in an agent loop.
# NOTE(review): the system prompt says the assistant "decides whether a
# company can be researched", but the tool above actually invokes a research
# process and returns a report string — the prompt looks stale; confirm the
# intended wording against the action-center sample.
workflow = AgentWorkflow.from_tools_or_functions(
    [may_research_company],
    llm=llm,
    system_prompt="You are a helpful assistant that can decide whether a company can be researched or not.",
)
11 changes: 11 additions & 0 deletions samples/multi-agent/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
[project]
name = "llama-multi-agent"
version = "0.0.1"
description = "UiPath LlamaIndex Simple HITL Agent"
authors = [{ name = "John Doe", email = "john.doe@myemail.com" }]
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.10"
dependencies = [
"uipath-llamaindex==0.0.20",
"llama-index-llms-openai>=0.2.2"
]
27 changes: 27 additions & 0 deletions samples/multi-agent/uipath.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
{
"entryPoints": [
{
"filePath": "agent",
"uniqueId": "ca9074cb-d758-483c-a88d-4346b3214fb8",
"type": "agent",
"input": {
"type": "object",
"properties": {
"hack": {
"type": "object"
}
},
"required": []
},
"output": {
"type": "object",
"properties": {},
"required": []
}
}
],
"bindings": {
"version": "2.0",
"resources": []
}
}
22 changes: 15 additions & 7 deletions samples/simple-hitl-agent/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,21 @@
llm = OpenAI(model="gpt-4o-mini")


async def research_company(ctx: Context) -> str:
"""Research a company."""
async def may_research_company(ctx: Context, company_name: str) -> bool:
"""Find whether a company may be researched.
Args:
ctx (Context): The context in which this function is called (autopopulated).
company_name (str): Name of the company to be researched.
Returns:
bool: True if the company can be researched, False otherwise.
"""
print("Researching company...")

# emit an event to the external stream to be captured
ctx.write_event_to_stream(
InputRequiredEvent(prefix="Are you sure you want to proceed?")
InputRequiredEvent(
prefix=f"May I perform a research on company {company_name}? \n (yes/no)"
)
)

# wait until we see a HumanResponseEvent
Expand All @@ -24,13 +32,13 @@ async def research_company(ctx: Context) -> str:

# act on the input from the event
if response.response.strip().lower() == "yes":
return "Research completed successfully."
return True
else:
return "Research task aborted."
return False


workflow = AgentWorkflow.from_tools_or_functions(
[research_company],
[may_research_company],
llm=llm,
system_prompt="You are a helpful assistant that can research companies.",
system_prompt="You are a helpful assistant that can decide whether a company can be researched or not.",
)
4 changes: 2 additions & 2 deletions samples/simple-hitl-agent/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
[project]
name = "llama-simple-hitl-agent"
version = "0.0.8"
version = "0.0.1"
description = "UiPath LlamaIndex Simple HITL Agent"
authors = [{ name = "John Doe", email = "john.doe@myemail.com" }]
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.10"
dependencies = [
"uipath-llamaindex>=0.0.18",
"uipath-llamaindex==0.0.20",
"llama-index-llms-openai>=0.2.2"
]
2 changes: 1 addition & 1 deletion samples/simple-hitl-agent/uipath.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"entryPoints": [
{
"filePath": "agent",
"uniqueId": "ca9074cb-d758-483c-a88d-4346b3214fb6",
"uniqueId": "ca9074cb-d758-483c-a88d-4346b3214fb7",
"type": "agent",
"input": {
"type": "object",
Expand Down
Loading