Skip to content

Commit 494ce8e

Browse files
radugheoionmincu
authored and committed
feat: add bedrock chat models
1 parent 42603d3 commit 494ce8e

9 files changed

Lines changed: 748 additions & 0 deletions

File tree

pyproject.toml

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,13 @@ maintainers = [
2424
{ name = "Cristian Pufu", email = "cristian.pufu@uipath.com" }
2525
]
2626

27+
[project.optional-dependencies]
28+
bedrock = [
29+
"llama-index-llms-bedrock>=0.3.0",
30+
"llama-index-llms-bedrock-converse>=0.3.0",
31+
"boto3>=1.28.0",
32+
]
33+
2734
[project.entry-points."uipath.middlewares"]
2835
register = "uipath_llamaindex.middlewares:register_middleware"
2936

Lines changed: 203 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,203 @@
1+
import logging
2+
import os
3+
from typing import Optional
4+
5+
from uipath.utils import EndpointManager
6+
7+
from .supported_models import BedrockModels
8+
9+
logger = logging.getLogger(__name__)
10+
11+
12+
def _check_bedrock_dependencies() -> None:
13+
"""Check if required dependencies for UiPath Bedrock LLMs are installed."""
14+
import importlib.util
15+
16+
missing_packages = []
17+
18+
if importlib.util.find_spec("llama_index.llms.bedrock") is None:
19+
missing_packages.append("llama-index-llms-bedrock")
20+
21+
if importlib.util.find_spec("llama_index.llms.bedrock_converse") is None:
22+
missing_packages.append("llama-index-llms-bedrock-converse")
23+
24+
if importlib.util.find_spec("boto3") is None:
25+
missing_packages.append("boto3")
26+
27+
if missing_packages:
28+
packages_str = ", ".join(missing_packages)
29+
raise ImportError(
30+
f"The following packages are required to use UiPath Bedrock LLMs: {packages_str}\n"
31+
"Please install them using one of the following methods:\n\n"
32+
" # Using pip:\n"
33+
f" pip install uipath-llamaindex[bedrock]\n\n"
34+
" # Using uv:\n"
35+
f" uv add 'uipath-llamaindex[bedrock]'\n\n"
36+
)
37+
38+
39+
# Fail fast with an actionable install message before attempting the
# third-party imports below; they are only available when the optional
# "bedrock" extra is installed.
_check_bedrock_dependencies()

import boto3
from llama_index.llms.bedrock import Bedrock
from llama_index.llms.bedrock_converse import BedrockConverse
44+
45+
46+
class AwsBedrockCompletionsPassthroughClient:
    """boto3 request interceptor that reroutes Bedrock runtime calls to the
    UiPath LLM Gateway, attaching UiPath authentication and routing headers.
    """

    def __init__(
        self,
        model: str,
        token: str,
        api_flavor: str,
    ):
        self.model = model
        self.token = token
        self.api_flavor = api_flavor
        # Vendor segment used when formatting the gateway endpoint.
        self._vendor = "awsbedrock"
        # Gateway URL, built lazily and cached after the first computation.
        self._url: Optional[str] = None

    @property
    def endpoint(self) -> str:
        """Gateway endpoint path for this vendor/model combination."""
        template = EndpointManager.get_vendor_endpoint()
        return template.format(vendor=self._vendor, model=self.model)

    def _build_base_url(self) -> str:
        """Return the full gateway URL, building and caching it on first use."""
        if self._url:
            return self._url

        base_url = os.getenv("UIPATH_URL")
        if not base_url:
            raise ValueError("UIPATH_URL environment variable is required")

        self._url = f"{base_url.rstrip('/')}/{self.endpoint}"
        return self._url

    def get_client(self):
        """Create a bedrock-runtime client whose outgoing requests are
        rewritten by :meth:`_modify_request` before being sent."""
        # Credentials are placeholders: real authentication happens via the
        # Bearer token injected in _modify_request, not AWS credentials.
        # NOTE(review): verify=False disables TLS certificate verification —
        # confirm the gateway actually requires this.
        runtime_client = boto3.client(
            "bedrock-runtime",
            region_name="us-east-1",
            aws_access_key_id="none",
            aws_secret_access_key="none",
            verify=False,
        )
        runtime_client.meta.events.register(
            "before-send.bedrock-runtime.*", self._modify_request
        )
        return runtime_client

    def _modify_request(self, request, **kwargs):
        """Intercept boto3 request and redirect to LLM Gateway"""
        # Streaming is inferred from the original URL suffix:
        #   converse-stream / invoke-with-response-stream -> streaming
        #   converse / invoke                             -> non-streaming
        is_streaming = request.url.endswith("-stream")
        request.url = self._build_base_url()

        extra_headers = {
            "Authorization": f"Bearer {self.token}",
            "X-UiPath-LlmGateway-ApiFlavor": self.api_flavor,
            "X-UiPath-Streaming-Enabled": "true" if is_streaming else "false",
        }

        # Propagate job/process correlation keys when running inside UiPath.
        if job_key := os.getenv("UIPATH_JOB_KEY"):
            extra_headers["X-UiPath-JobKey"] = job_key
        if process_key := os.getenv("UIPATH_PROCESS_KEY"):
            extra_headers["X-UiPath-ProcessKey"] = process_key

        request.headers.update(extra_headers)
114+
115+
116+
class UiPathChatBedrockConverse(BedrockConverse):
    """BedrockConverse LLM whose traffic is routed through the UiPath LLM
    Gateway using the "converse" API flavor.

    Credentials fall back to UiPath environment variables when the
    corresponding parameters are omitted.
    """

    def __init__(
        self,
        org_id: Optional[str] = None,
        tenant_id: Optional[str] = None,
        token: Optional[str] = None,
        model: str = BedrockModels.anthropic_claude_haiku_4_5,
        **kwargs,
    ):
        # Parameters win over environment variables.
        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
        token = token or os.getenv("UIPATH_ACCESS_TOKEN")

        for value, message in (
            (org_id, "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"),
            (tenant_id, "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"),
            (token, "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"),
        ):
            if not value:
                raise ValueError(message)

        gateway_client = AwsBedrockCompletionsPassthroughClient(
            model=model,
            token=token,
            api_flavor="converse",
        )

        # Region/credentials are placeholders; the passthrough client
        # rewrites every request to the UiPath gateway.
        super().__init__(
            model=model,
            client=gateway_client.get_client(),
            region_name="us-east-1",
            aws_access_key_id="none",
            aws_secret_access_key="none",
            **kwargs,
        )
158+
159+
160+
class UiPathChatBedrock(Bedrock):
    """Bedrock LLM whose traffic is routed through the UiPath LLM Gateway
    using the "invoke" API flavor.

    Credentials fall back to UiPath environment variables when the
    corresponding parameters are omitted.
    """

    def __init__(
        self,
        org_id: Optional[str] = None,
        tenant_id: Optional[str] = None,
        token: Optional[str] = None,
        model: str = BedrockModels.anthropic_claude_haiku_4_5,
        context_size: int = 200000,
        **kwargs,
    ):
        # Parameters win over environment variables.
        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
        token = token or os.getenv("UIPATH_ACCESS_TOKEN")

        for value, message in (
            (org_id, "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"),
            (tenant_id, "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"),
            (token, "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"),
        ):
            if not value:
                raise ValueError(message)

        gateway_client = AwsBedrockCompletionsPassthroughClient(
            model=model,
            token=token,
            api_flavor="invoke",
        )

        # Region/credentials are placeholders; the passthrough client
        # rewrites every request to the UiPath gateway.
        super().__init__(
            model=model,
            client=gateway_client.get_client(),
            context_size=context_size,
            aws_access_key_id="none",
            aws_secret_access_key="none",
            region_name="us-east-1",
            **kwargs,
        )
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
class OpenAIModels:
    """Supported OpenAI model identifiers.

    Attribute names encode the model name plus its dated snapshot
    (dots and dashes replaced with underscores); values are the exact
    identifier strings passed through to the model API.
    """

    # GPT-4o models
    gpt_4o_2024_05_13 = "gpt-4o-2024-05-13"
    gpt_4o_2024_08_06 = "gpt-4o-2024-08-06"
    gpt_4o_2024_11_20 = "gpt-4o-2024-11-20"
    gpt_4o_mini_2024_07_18 = "gpt-4o-mini-2024-07-18"

    # GPT-4.1 models
    gpt_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
    gpt_4_1_mini_2025_04_14 = "gpt-4.1-mini-2025-04-14"
    gpt_4_1_nano_2025_04_14 = "gpt-4.1-nano-2025-04-14"

    # GPT-5 models
    gpt_5_2025_08_07 = "gpt-5-2025-08-07"
    gpt_5_chat_2025_08_07 = "gpt-5-chat-2025-08-07"
    gpt_5_mini_2025_08_07 = "gpt-5-mini-2025-08-07"
    gpt_5_nano_2025_08_07 = "gpt-5-nano-2025-08-07"

    # GPT-5.1 models
    gpt_5_1_2025_11_13 = "gpt-5.1-2025-11-13"
23+
24+
25+
class GeminiModels:
    """Supported Google Gemini model identifiers.

    Values are the exact identifier strings passed through to the model API.
    """

    gemini_2_5_pro = "gemini-2.5-pro"
    gemini_2_5_flash = "gemini-2.5-flash"
    gemini_2_0_flash_001 = "gemini-2.0-flash-001"
31+
32+
33+
class BedrockModels:
    """Supported AWS Bedrock model identifiers.

    Values follow the Bedrock "vendor.model-name-version:revision" format
    and are passed through to the gateway unchanged.
    """

    # Claude 3.7 models
    anthropic_claude_3_7_sonnet = "anthropic.claude-3-7-sonnet-20250219-v1:0"

    # Claude 4 models
    anthropic_claude_sonnet_4 = "anthropic.claude-sonnet-4-20250514-v1:0"
    anthropic_claude_sonnet_4_5 = "anthropic.claude-sonnet-4-5-20250929-v1:0"
    anthropic_claude_haiku_4_5 = "anthropic.claude-haiku-4-5-20251001-v1:0"

testcases/chat-models/input.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
{
2+
"prompt": "Count from 1 to 5."
3+
}
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
{
2+
"workflows": {
3+
"agent": "src/main.py:agent"
4+
}
5+
}
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
[project]
2+
name = "chat-models-agent"
3+
version = "0.0.1"
4+
description = "Chat models testing agent for UiPath LlamaIndex"
5+
authors = [{ name = "John Doe", email = "john.doe@myemail.com" }]
6+
dependencies = [
7+
"python-dotenv>=1.0.1",
8+
"uipath-llamaindex",
9+
"pydantic>=2.10.6",
10+
"typing-extensions>=4.12.2",
11+
]
12+
requires-python = ">=3.11"
13+
14+
[tool.uv.sources]
15+
uipath-llamaindex = { path = "../../", editable = true }

testcases/chat-models/run.sh

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
#!/bin/bash
# End-to-end driver for the chat-models test agent: syncs dependencies,
# authenticates against UiPath, packs the agent, and runs it twice
# (once normally, once with an empty UIPATH_JOB_KEY).
set -e

echo "Syncing dependencies..."
uv sync

echo "Adding UiPath LlamaIndex Chat Models Extras..."
# Quote the extras spec: unquoted, [bedrock] is a shell glob character
# class and could expand to a matching filename.
uv add 'uipath-llamaindex[bedrock]'

# uv add 'uipath-llamaindex[vertex]'

echo "Authenticating with UiPath..."
uv run uipath auth --client-id="$CLIENT_ID" --client-secret="$CLIENT_SECRET" --base-url="$BASE_URL"

echo "Initializing the project..."
uv run uipath init

echo "Packing agent..."
uv run uipath pack

echo "Running agent..."
uv run uipath run agent --file input.json

echo "Running agent again with empty UIPATH_JOB_KEY..."
# Empty (not unset) on purpose: exercises the code path where the job key
# env var exists but is falsy.
export UIPATH_JOB_KEY=""
uv run uipath run agent --trace-file .uipath/traces.jsonl --file input.json >> local_run_output.log
Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
import json
import os

# Validates the artifacts produced by run.sh: the packed NuGet package,
# the agent's structured output, and the local run log.
print("Checking chat models test agent output...")

# The pack step must have produced a NuGet package under .uipath/.
uipath_dir = ".uipath"
assert os.path.exists(uipath_dir), "NuGet package directory (.uipath) not found"

nupkg_files = [f for f in os.listdir(uipath_dir) if f.endswith(".nupkg")]
assert nupkg_files, "NuGet package file (.nupkg) not found in .uipath directory"

print(f"NuGet package found: {nupkg_files[0]}")

# The agent run writes its result JSON here.
output_file = "__uipath/output.json"
assert os.path.isfile(output_file), "Agent output file not found"

print("Agent output file found")

with open(output_file, "r", encoding="utf-8") as f:
    output_data = json.load(f)

status = output_data.get("status")
assert status == "successful", f"Agent execution failed with status: {status}"

print("Agent execution status: successful")

assert "output" in output_data, "Missing 'output' field in agent response"

output_content = output_data["output"]

assert "success" in output_content, "Missing 'success' field in output"
success = output_content["success"]

assert "result_summary" in output_content, "Missing 'result_summary' field in output"
result_summary = output_content["result_summary"]
# A whitespace-only summary counts as empty.
assert result_summary and result_summary.strip(), "Result summary is empty"

print("\nTest Results:")
print(f" Success: {success}")
print(f" Summary:\n{result_summary}")

if not success:
    # Echo per-test failure details before the hard assert below.
    print("\n" + "=" * 80)
    print("TEST FAILURES")
    print("=" * 80)
    for line in result_summary.split("\n"):
        if line.strip():
            print(line)
    print("=" * 80)

assert success is True, "Test did not succeed. See detailed results above."

assert "chunks_received" in output_content, "Missing 'chunks_received' field"
chunks_received = output_content["chunks_received"]
assert chunks_received is not None and chunks_received > 0, (
    f"Expected positive chunks_received, got: {chunks_received}"
)
print(f"Chunks received: {chunks_received}")

assert "content_length" in output_content, "Missing 'content_length' field"
content_length = output_content["content_length"]
assert content_length is not None and content_length > 0, (
    f"Expected positive content_length, got: {content_length}"
)
print(f"Content length: {content_length}")

assert "tool_calls_count" in output_content, "Missing 'tool_calls_count' field"
tool_calls_count = output_content["tool_calls_count"]
assert tool_calls_count is not None and tool_calls_count > 0, (
    f"Expected positive tool_calls_count, got: {tool_calls_count}"
)
print(f"Tool calls count: {tool_calls_count}")

with open("local_run_output.log", "r", encoding="utf-8") as f:
    local_run_output = f.read()

assert "Successful execution." in local_run_output, (
    f"Response does not contain 'Successful execution.'. Actual response: {local_run_output}"
)

print("All validations passed successfully!")

0 commit comments

Comments
 (0)