Skip to content

Commit 8450aa8

Browse files
authored
Merge pull request #587 from UiPath/fix/httpx
fix(httpx): fix HTTP 415 unsupported media type
2 parents 9e949a2 + 8a7df69 commit 8450aa8

7 files changed

Lines changed: 76 additions & 88 deletions

File tree

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath"
3-
version = "2.1.50"
3+
version = "2.1.51"
44
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.10"

src/uipath/_cli/_evals/progress_reporter.py

Lines changed: 36 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ async def create_eval_set_run(self):
6565
method=spec.method,
6666
url=spec.endpoint,
6767
params=spec.params,
68-
content=spec.content,
68+
json=spec.json,
6969
headers=spec.headers,
7070
)
7171
self._eval_set_run_id = json.loads(response.content)["id"]
@@ -84,7 +84,7 @@ async def create_eval_run(self, eval_item: dict[str, Any]):
8484
method=spec.method,
8585
url=spec.endpoint,
8686
params=spec.params,
87-
content=spec.content,
87+
json=spec.json,
8888
headers=spec.headers,
8989
)
9090
return json.loads(response.content)["id"]
@@ -116,7 +116,7 @@ async def update_eval_run(
116116
method=spec.method,
117117
url=spec.endpoint,
118118
params=spec.params,
119-
content=spec.content,
119+
json=spec.json,
120120
headers=spec.headers,
121121
)
122122

@@ -127,7 +127,7 @@ async def update_eval_set_run(self):
127127
method=spec.method,
128128
url=spec.endpoint,
129129
params=spec.params,
130-
content=spec.content,
130+
json=spec.json,
131131
headers=spec.headers,
132132
)
133133

@@ -201,18 +201,16 @@ def _update_eval_run_spec(
201201
endpoint=Endpoint(
202202
f"agentsruntime_/api/execution/agents/{self._project_id}/evalRun"
203203
),
204-
content=json.dumps(
205-
{
206-
"evalRunId": eval_run_id,
207-
"status": EvaluationStatus.COMPLETED.value,
208-
"result": {
209-
"output": {"content": {**actual_output}},
210-
"evaluatorScores": evaluator_scores,
211-
},
212-
"completionMetrics": {"duration": int(execution_time)},
213-
"assertionRuns": assertion_runs,
214-
}
215-
),
204+
json={
205+
"evalRunId": eval_run_id,
206+
"status": EvaluationStatus.COMPLETED.value,
207+
"result": {
208+
"output": {"content": {**actual_output}},
209+
"evaluatorScores": evaluator_scores,
210+
},
211+
"completionMetrics": {"duration": int(execution_time)},
212+
"assertionRuns": assertion_runs,
213+
},
216214
headers=self._tenant_header(),
217215
)
218216

@@ -222,18 +220,16 @@ def _create_eval_run_spec(self, eval_item: dict[str, Any]) -> RequestSpec:
222220
endpoint=Endpoint(
223221
f"agentsruntime_/api/execution/agents/{self._project_id}/evalRun"
224222
),
225-
content=json.dumps(
226-
{
227-
"evalSetRunId": self._eval_set_run_id,
228-
"evalSnapshot": {
229-
"id": eval_item["id"],
230-
"name": eval_item["name"],
231-
"inputs": eval_item.get("inputs"),
232-
"expectedOutput": eval_item.get("expectedOutput", {}),
233-
},
234-
"status": EvaluationStatus.IN_PROGRESS.value,
235-
}
236-
),
223+
json={
224+
"evalSetRunId": self._eval_set_run_id,
225+
"evalSnapshot": {
226+
"id": eval_item["id"],
227+
"name": eval_item["name"],
228+
"inputs": eval_item.get("inputs"),
229+
"expectedOutput": eval_item.get("expectedOutput", {}),
230+
},
231+
"status": EvaluationStatus.IN_PROGRESS.value,
232+
},
237233
headers=self._tenant_header(),
238234
)
239235

@@ -247,15 +243,13 @@ def _create_eval_set_run_spec(
247243
endpoint=Endpoint(
248244
f"agentsruntime_/api/execution/agents/{self._project_id}/evalSetRun"
249245
),
250-
content=json.dumps(
251-
{
252-
"agentId": self._project_id,
253-
"evalSetId": self._eval_set_id,
254-
"agentSnapshot": agent_snapshot_dict,
255-
"status": EvaluationStatus.IN_PROGRESS.value,
256-
"numberOfEvalsExecuted": self._no_of_evals,
257-
}
258-
),
246+
json={
247+
"agentId": self._project_id,
248+
"evalSetId": self._eval_set_id,
249+
"agentSnapshot": agent_snapshot_dict,
250+
"status": EvaluationStatus.IN_PROGRESS.value,
251+
"numberOfEvalsExecuted": self._no_of_evals,
252+
},
259253
headers=self._tenant_header(),
260254
)
261255

@@ -293,13 +287,11 @@ def _update_eval_set_run_spec(
293287
endpoint=Endpoint(
294288
f"agentsruntime_/api/execution/agents/{self._project_id}/evalSetRun"
295289
),
296-
content=json.dumps(
297-
{
298-
"evalSetRunId": self._eval_set_run_id,
299-
"status": EvaluationStatus.COMPLETED.value,
300-
"evaluatorScores": evaluator_scores,
301-
}
302-
),
290+
json={
291+
"evalSetRunId": self._eval_set_run_id,
292+
"status": EvaluationStatus.COMPLETED.value,
293+
"evaluatorScores": evaluator_scores,
294+
},
303295
headers=self._tenant_header(),
304296
)
305297

src/uipath/_services/context_grounding_service.py

Lines changed: 19 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import json
21
from typing import Any, List, Optional, Tuple, Union
32

43
import httpx
@@ -353,7 +352,7 @@ def search(
353352
response = self.request(
354353
spec.method,
355354
spec.endpoint,
356-
content=spec.content,
355+
json=spec.json,
357356
)
358357

359358
return TypeAdapter(List[ContextGroundingQueryResponse]).validate_python(
@@ -403,7 +402,7 @@ async def search_async(
403402
response = await self.request_async(
404403
spec.method,
405404
spec.endpoint,
406-
content=spec.content,
405+
json=spec.json,
407406
)
408407

409408
return TypeAdapter(List[ContextGroundingQueryResponse]).validate_python(
@@ -592,21 +591,19 @@ def _create_spec(
592591
return RequestSpec(
593592
method="POST",
594593
endpoint=Endpoint("/ecs_/v2/indexes/create"),
595-
content=json.dumps(
596-
{
597-
"name": name,
598-
"description": description,
599-
"dataSource": {
600-
"@odata.type": ORCHESTRATOR_STORAGE_BUCKET_DATA_SOURCE,
601-
"folder": storage_bucket_folder_path,
602-
"bucketName": storage_bucket_name,
603-
"fileNameGlob": file_name_glob
604-
if file_name_glob is not None
605-
else "*",
606-
"directoryPath": "/",
607-
},
608-
}
609-
),
594+
json={
595+
"name": name,
596+
"description": description,
597+
"dataSource": {
598+
"@odata.type": ORCHESTRATOR_STORAGE_BUCKET_DATA_SOURCE,
599+
"folder": storage_bucket_folder_path,
600+
"bucketName": storage_bucket_name,
601+
"fileNameGlob": file_name_glob
602+
if file_name_glob is not None
603+
else "*",
604+
"directoryPath": "/",
605+
},
606+
},
610607
headers={
611608
**header_folder(folder_key, None),
612609
},
@@ -657,12 +654,10 @@ def _search_spec(
657654
return RequestSpec(
658655
method="POST",
659656
endpoint=Endpoint("/ecs_/v1/search"),
660-
content=json.dumps(
661-
{
662-
"query": {"query": query, "numberOfResults": number_of_results},
663-
"schema": {"name": name},
664-
}
665-
),
657+
json={
658+
"query": {"query": query, "numberOfResults": number_of_results},
659+
"schema": {"name": name},
660+
},
666661
headers={
667662
**header_folder(folder_key, None),
668663
},

src/uipath/_services/jobs_service.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import json
21
import os
32
import shutil
43
import tempfile
@@ -81,7 +80,7 @@ def resume(
8180
spec.method,
8281
url=spec.endpoint,
8382
headers=spec.headers,
84-
content=spec.content,
83+
json=spec.json,
8584
)
8685

8786
async def resume_async(
@@ -142,7 +141,7 @@ async def main(): # noqa: D103
142141
spec.method,
143142
url=spec.endpoint,
144143
headers=spec.headers,
145-
content=spec.content,
144+
json=spec.json,
146145
)
147146

148147
@property
@@ -412,7 +411,7 @@ def _resume_spec(
412411
endpoint=Endpoint(
413412
f"/orchestrator_/api/JobTriggers/DeliverPayload/{inbox_id}"
414413
),
415-
content=json.dumps({"payload": payload}),
414+
json={"payload": payload},
416415
headers={
417416
**header_folder(folder_key, folder_path),
418417
},

src/uipath/_services/llm_gateway_service.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616
UiPathLlmChatService: Service using UiPath's normalized API format
1717
"""
1818

19-
import json
2019
from typing import Any, Dict, List, Optional, Union
2120

2221
from pydantic import BaseModel
@@ -195,7 +194,7 @@ async def embeddings(
195194
response = await self.request_async(
196195
"POST",
197196
endpoint,
198-
content=json.dumps({"input": input}),
197+
json={"input": input},
199198
params={"api-version": API_VERSION},
200199
headers=DEFAULT_LLM_HEADERS,
201200
)
@@ -322,7 +321,7 @@ class Country(BaseModel):
322321
response = await self.request_async(
323322
"POST",
324323
endpoint,
325-
content=json.dumps(request_body),
324+
json=request_body,
326325
params={"api-version": API_VERSION},
327326
headers=DEFAULT_LLM_HEADERS,
328327
)
@@ -534,7 +533,7 @@ class Country(BaseModel):
534533
response = await self.request_async(
535534
"POST",
536535
endpoint,
537-
content=json.dumps(request_body),
536+
json=request_body,
538537
params={"api-version": NORMALIZED_API_VERSION},
539538
headers=headers,
540539
)

tests/sdk/services/test_jobs_service.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,8 @@ def test_resume_with_inbox_id(
214214
sent_request.url
215215
== f"{base_url}{org}{tenant}/orchestrator_/api/JobTriggers/DeliverPayload/{inbox_id}"
216216
)
217-
assert json.loads(sent_request.content) == {"payload": payload}
217+
218+
assert json.loads(sent_request.content.decode()) == {"payload": payload}
218219

219220
assert HEADER_USER_AGENT in sent_request.headers
220221
assert (
@@ -255,7 +256,8 @@ def test_resume_with_job_id(
255256
sent_requests[1].url
256257
== f"{base_url}{org}{tenant}/orchestrator_/api/JobTriggers/DeliverPayload/{inbox_id}"
257258
)
258-
assert json.loads(sent_requests[1].content) == {"payload": payload}
259+
260+
assert json.loads(sent_requests[1].content.decode()) == {"payload": payload}
259261

260262
assert HEADER_USER_AGENT in sent_requests[1].headers
261263
assert (
@@ -289,7 +291,8 @@ async def test_resume_async_with_inbox_id(
289291
sent_request.url
290292
== f"{base_url}{org}{tenant}/orchestrator_/api/JobTriggers/DeliverPayload/{inbox_id}"
291293
)
292-
assert json.loads(sent_request.content) == {"payload": payload}
294+
295+
assert json.loads(sent_request.content.decode()) == {"payload": payload}
293296

294297
assert HEADER_USER_AGENT in sent_request.headers
295298
assert (
@@ -331,7 +334,8 @@ async def test_resume_async_with_job_id(
331334
sent_requests[1].url
332335
== f"{base_url}{org}{tenant}/orchestrator_/api/JobTriggers/DeliverPayload/{inbox_id}"
333336
)
334-
assert json.loads(sent_requests[1].content) == {"payload": payload}
337+
338+
assert json.loads(sent_requests[1].content.decode()) == {"payload": payload}
335339

336340
assert HEADER_USER_AGENT in sent_requests[1].headers
337341
assert (

tests/sdk/services/test_uipath_llm_integration.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import json
21
import os
32
from unittest.mock import MagicMock, patch
43

@@ -280,9 +279,9 @@ async def test_basic_chat_completions_mocked(self, mock_request, llm_service):
280279
# Verify the correct endpoint and payload
281280
args, kwargs = mock_request.call_args
282281
assert "/llmgateway_/api/chat/completions" in args[1]
283-
assert json.loads(kwargs["content"])["messages"] == messages
284-
assert json.loads(kwargs["content"])["max_tokens"] == 50
285-
assert json.loads(kwargs["content"])["temperature"] == 0
282+
assert kwargs["json"]["messages"] == messages
283+
assert kwargs["json"]["max_tokens"] == 50
284+
assert kwargs["json"]["temperature"] == 0
286285

287286
@pytest.mark.asyncio
288287
@patch.object(UiPathLlmChatService, "request_async")
@@ -509,6 +508,6 @@ async def test_no_tools_mocked(self, mock_request, llm_service):
509508

510509
# Verify the correct payload was sent
511510
args, kwargs = mock_request.call_args
512-
assert json.loads(kwargs["content"])["messages"] == messages
513-
assert json.loads(kwargs["content"])["max_tokens"] == 100
514-
assert json.loads(kwargs["content"])["temperature"] == 0.7
511+
assert kwargs["json"]["messages"] == messages
512+
assert kwargs["json"]["max_tokens"] == 100
513+
assert kwargs["json"]["temperature"] == 0.7

0 commit comments

Comments (0)