
Commit dc9ca33

fix: add async session to BedrockConverse client
1 parent 0bff8d4 commit dc9ca33

8 files changed

Lines changed: 37 additions & 16 deletions


playground.py

Lines changed: 2 additions & 1 deletion
@@ -1,5 +1,6 @@
 from llama_index.core.llms import ChatMessage
-from uipath_llamaindex.llms import UiPathVertex, GeminiModels
+
+from uipath_llamaindex.llms import GeminiModels, UiPathVertex


 def test_all_methods():

pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -29,6 +29,7 @@ bedrock = [
     "llama-index-llms-bedrock>=0.3.0",
     "llama-index-llms-bedrock-converse>=0.3.0",
     "boto3>=1.28.0",
+    "aiobotocore>=2.5.0",
 ]
 vertex = [
     "llama-index-llms-vertex>=0.4.0",

src/uipath_llamaindex/llms/__init__.py

Lines changed: 1 addition & 3 deletions
@@ -2,14 +2,12 @@
     OpenAIModel,
     UiPathOpenAI,
 )
-
-from .vertex import UiPathVertex
-
 from .supported_models import (
     BedrockModels,
     GeminiModels,
     OpenAIModels,
 )
+from .vertex import UiPathVertex

 __all__ = [
     "UiPathOpenAI",

src/uipath_llamaindex/llms/bedrock.py

Lines changed: 17 additions & 2 deletions
@@ -24,6 +24,9 @@ def _check_bedrock_dependencies() -> None:
     if importlib.util.find_spec("boto3") is None:
         missing_packages.append("boto3")

+    if importlib.util.find_spec("aiobotocore") is None:
+        missing_packages.append("aiobotocore")
+
     if missing_packages:
         packages_str = ", ".join(missing_packages)
         raise ImportError(
@@ -89,6 +92,16 @@ def get_client(self):
         )
         return client

+    def get_session(self):
+        """Get aiobotocore session for async operations with custom event handlers."""
+        from aiobotocore.session import get_session
+
+        session = get_session()
+        session.get_component("event_emitter").register(
+            "before-send.bedrock-runtime.*", self._modify_request
+        )
+        return session
+
     def _modify_request(self, request, **kwargs):
         """Intercept boto3 request and redirect to LLM Gateway"""
         # Detect streaming based on URL suffix:
@@ -146,14 +159,16 @@ def __init__(
         )

         client = passthrough_client.get_client()
+        botocore_session = passthrough_client.get_session()

         super().__init__(
             model=model,
             client=client,
+            botocore_session=botocore_session,
             region_name="us-east-1",
             aws_access_key_id="none",
             aws_secret_access_key="none",
-            **kwargs
+            **kwargs,
         )


@@ -199,5 +214,5 @@ def __init__(
             aws_access_key_id="none",
             aws_secret_access_key="none",
             region_name="us-east-1",
-            **kwargs
+            **kwargs,
         )
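
Note on the async path: the new get_session() mirrors the existing get_client() hook. botocore fires a "before-send" event just before a request goes on the wire, and a registered handler may rewrite the request or return a response to short-circuit it; because aiobotocore's session carries the same event emitter, every async client created from it inherits the handler, which is what routes achat/astream_chat through the gateway as well. Below is a minimal, self-contained sketch of the registration, with a hypothetical handler and gateway URL standing in for _modify_request:

from aiobotocore.session import get_session


def redirect_to_gateway(request, **kwargs):
    # Hypothetical stand-in for _modify_request: point the prepared
    # request at a gateway instead of AWS. Returning None lets the
    # (rewritten) request proceed normally.
    request.url = "https://gateway.example.com/bedrock"  # assumed URL
    return None


session = get_session()  # aiobotocore's async-capable session
session.get_component("event_emitter").register(
    "before-send.bedrock-runtime.*", redirect_to_gateway
)
# Passing this session to BedrockConverse via botocore_session=... (as the
# change above does) makes the async clients it creates use the handler.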

src/uipath_llamaindex/llms/vertex.py

Lines changed: 6 additions & 2 deletions
@@ -67,7 +67,9 @@ def redirected_request(method, url, **kwargs_inner):
             is_streaming = kwargs_inner.get("stream", False)
             headers["X-UiPath-Streaming-Enabled"] = "true" if is_streaming else "false"

-            return original_request(method, self.llmgw_url, headers=headers, **kwargs_inner)
+            return original_request(
+                method, self.llmgw_url, headers=headers, **kwargs_inner
+            )

         self._session.request = redirected_request  # type: ignore[method-assign]

@@ -199,7 +201,9 @@ def _build_base_url(self) -> str:
         env_uipath_url = os.getenv("UIPATH_URL")

         if env_uipath_url:
-            self._uipath_url = f"{env_uipath_url.rstrip('/')}/{self._uipath_endpoint}"
+            self._uipath_url = (
+                f"{env_uipath_url.rstrip('/')}/{self._uipath_endpoint}"
+            )
         else:
             raise ValueError("UIPATH_URL environment variable is required")

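
Both hunks above are pure line-wrapping, but the surrounding code shows the redirection trick UiPathVertex relies on: it replaces the session's request method with a wrapper that ignores the caller's URL, tags streaming calls with an X-UiPath-Streaming-Enabled header, and sends everything to the LLM Gateway. A minimal sketch of that monkey-patch on a plain requests.Session, with a hypothetical gateway_url:

import requests

session = requests.Session()
original_request = session.request  # keep the real method in a closure
gateway_url = "https://example.com/llmgateway"  # assumed endpoint


def redirected_request(method, url, **kwargs):
    headers = kwargs.pop("headers", None) or {}
    # Mirror the streaming flag set in the code above.
    is_streaming = kwargs.get("stream", False)
    headers["X-UiPath-Streaming-Enabled"] = "true" if is_streaming else "false"
    # Drop the caller's url and route the call to the gateway instead.
    return original_request(method, gateway_url, headers=headers, **kwargs)


session.request = redirected_request  # type: ignore[method-assign]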

testcases/chat-models/src/assert.py

Lines changed: 2 additions & 2 deletions
@@ -35,7 +35,7 @@
 result_summary = output_content["result_summary"]
 assert result_summary and result_summary.strip() != "", "Result summary is empty"

-print(f"\nTest Results:")
+print("\nTest Results:")
 print(f" Success: {success}")
 print(f" Summary:\n{result_summary}")

@@ -49,7 +49,7 @@
     print(line)
 print("=" * 80)

-assert success is True, f"Test did not succeed. See detailed results above."
+assert success is True, "Test did not succeed. See detailed results above."

 assert "chunks_received" in output_content, "Missing 'chunks_received' field"
 chunks_received = output_content["chunks_received"]

testcases/chat-models/src/main.py

Lines changed: 6 additions & 6 deletions
@@ -228,7 +228,7 @@ async def test_single_model_all(
             model_results[method_name] = "⊘ Not available"

     # Test tool calling with predict_and_call
-    logger.info(f" Testing tool_calling...")
+    logger.info(" Testing tool_calling...")
     try:
         if hasattr(model, "apredict_and_call"):
             response = await model.apredict_and_call(
@@ -241,17 +241,17 @@ async def test_single_model_all(
                 logger.info(f" Tool calls detected: {tool_calls_count}")
                 model_results["tool_calling"] = f"✓ ({tool_calls_count} calls)"
             else:
-                logger.warning(f" No tool calls detected")
+                logger.warning(" No tool calls detected")
                 model_results["tool_calling"] = "✗ No tool calls detected"
         else:
-            logger.warning(f" Tool calling not available")
+            logger.warning(" Tool calling not available")
             model_results["tool_calling"] = "⊘ Not available"
     except Exception as e:
         logger.error(f" Tool calling failed: {e}")
         model_results["tool_calling"] = f"✗ {format_error_message(str(e))}"

     # Test structured output
-    logger.info(f" Testing structured_output...")
+    logger.info(" Testing structured_output...")
     try:
         if hasattr(model, "as_structured_llm"):
             sllm = model.as_structured_llm(PersonInfo)
@@ -269,7 +269,7 @@ async def test_single_model_all(
                 logger.warning(f" Response is not PersonInfo: {type(response)}")
                 model_results["structured_output"] = f"✗ Wrong type: {type(response)}"
         else:
-            logger.warning(f" Structured output not available")
+            logger.warning(" Structured output not available")
             model_results["structured_output"] = "⊘ Not available"
     except Exception as e:
         logger.error(f" Structured output failed: {e}")
@@ -357,7 +357,7 @@ async def run_tests(self, ev: TestInput) -> TestOutput:
         logger.info("TEST RESULTS")
         logger.info("=" * 80)
         logger.info(f"Success: {not has_failures}")
-        logger.info(f"Summary:\n" + "\n".join(summary_lines))
+        logger.info("Summary:\n" + "\n".join(summary_lines))
         logger.info(f"Chunks Received: {total_result.chunks}")
         logger.info(f"Content Length: {total_result.content_length}")
         logger.info(f"Tool Calls: {total_result.tool_calls}")

uv.lock

Lines changed: 2 additions & 0 deletions
Some generated files are not rendered by default.
