1717"""
1818
1919import json
20- from typing import Any , Dict , List , Optional
20+ from typing import Any , Dict , List , Optional , Union
21+
22+ from pydantic import BaseModel
2123
2224from .._config import Config
2325from .._execution_context import ExecutionContext
@@ -76,6 +78,67 @@ class EmbeddingModels(object):
7678 text_embedding_ada_002 = "text-embedding-ada-002"
7779
7880
def _cleanup_schema(model_class: type[BaseModel]) -> Dict[str, Any]:
    """Clean up a Pydantic model schema for use with LLM Gateway.

    Converts a Pydantic model's JSON schema to a format compatible with the
    LLM Gateway's ``response_format`` requirements by stripping ``title``
    metadata (which can trip strict schema validation) at every nesting level,
    forcing ``additionalProperties`` to ``False``, and preserving ``$defs`` so
    that ``$ref`` entries produced for nested models remain resolvable.

    Args:
        model_class (type[BaseModel]): A Pydantic BaseModel class to convert
            to a JSON schema.

    Returns:
        Dict[str, Any]: A cleaned JSON schema dictionary suitable for the LLM
        Gateway ``response_format`` payload.

    Examples:
        ```python
        from pydantic import BaseModel
        from typing import List

        class Country(BaseModel):
            name: str
            capital: str
            languages: List[str]

        schema = _cleanup_schema(Country)
        # Returns a clean schema without titles and unnecessary metadata
        ```
    """
    schema = model_class.model_json_schema()

    def _strip_titles(node: Any) -> Any:
        """Recursively remove ``title`` keys from a schema fragment.

        A ``properties`` mapping is handled specially: its keys are *field
        names*, so a field that happens to be called ``title`` must be kept —
        only the ``title`` metadata key inside definition dicts is dropped.
        """
        if isinstance(node, dict):
            cleaned: Dict[str, Any] = {}
            for key, value in node.items():
                if key == "title":
                    continue  # metadata key — drop it
                if key == "properties" and isinstance(value, dict):
                    # Keys here are field names; clean only their definitions.
                    cleaned[key] = {
                        name: _strip_titles(definition)
                        for name, definition in value.items()
                    }
                else:
                    cleaned[key] = _strip_titles(value)
            return cleaned
        if isinstance(node, list):
            return [_strip_titles(item) for item in node]
        return node

    clean_schema: Dict[str, Any] = {
        "type": "object",
        "properties": {
            name: _strip_titles(definition)
            for name, definition in schema.get("properties", {}).items()
        },
        "required": schema.get("required", []),
        "additionalProperties": False,
    }

    # Nested Pydantic models are emitted as "$ref" entries pointing into
    # "$defs"; keep (cleaned) definitions so those references stay valid.
    # Flat models produce no "$defs", so their output is unchanged.
    if "$defs" in schema:
        clean_schema["$defs"] = _strip_titles(schema["$defs"])

    return clean_schema
140+
141+
79142class UiPathOpenAIService (BaseService ):
80143 """Service for calling UiPath's LLM Gateway using OpenAI-compatible API.
81144
@@ -146,7 +209,7 @@ async def chat_completions(
146209 model : str = ChatModels .gpt_4o_mini_2024_07_18 ,
147210 max_tokens : int = 50 ,
148211 temperature : float = 0 ,
149- response_format : Optional [Dict [str , Any ]] = None ,
212+ response_format : Optional [Union [ Dict [str , Any ], type [ BaseModel ] ]] = None ,
150213 api_version : str = API_VERSION ,
151214 ):
152215 """Generate chat completions using UiPath's LLM Gateway service.
@@ -168,9 +231,11 @@ async def chat_completions(
168231 temperature (float, optional): Temperature for sampling, between 0 and 1.
169232 Lower values (closer to 0) make output more deterministic and focused,
170233 higher values make it more creative and random. Defaults to 0.
171- response_format (Optional[Dict[str, Any]], optional): An object specifying the format
172- that the model must output. Used to enable JSON mode or other structured outputs.
173- Defaults to None.
234+ response_format (Optional[Union[Dict[str, Any], type[BaseModel]]], optional):
235+ An object specifying the format that the model must output. Can be either:
236+ - A dictionary with response format configuration (traditional format)
237+ - A Pydantic BaseModel class (automatically converted to JSON schema)
238+ Used to enable JSON mode or other structured outputs. Defaults to None.
174239 api_version (str, optional): The API version to use. Defaults to API_VERSION.
175240
176241 Returns:
@@ -198,11 +263,31 @@ async def chat_completions(
198263 max_tokens=200,
199264 temperature=0.3
200265 )
266+
267+ # Using Pydantic model for structured response
268+ from pydantic import BaseModel
269+ from typing import List
270+
271+ class Country(BaseModel):
272+ name: str
273+ capital: str
274+ languages: List[str]
275+
276+ response = await service.chat_completions(
277+ messages=[
278+ {"role": "system", "content": "You are a helpful assistant. Respond with structured JSON."},
279+ {"role": "user", "content": "Tell me about Canada."}
280+ ],
281+ response_format=Country, # Pass BaseModel directly
282+ max_tokens=1000
283+ )
201284 ```
202285
203286 Note:
204287 The conversation history can be included to provide context to the model.
205288 Each message should have both 'role' and 'content' keys.
289+ When using a Pydantic BaseModel as response_format, it will be automatically
290+ converted to the appropriate JSON schema format for the LLM Gateway.
206291 """
207292 endpoint = EndpointManager .get_passthrough_endpoint ().format (
208293 model = model , api_version = api_version
@@ -215,9 +300,24 @@ async def chat_completions(
215300 "temperature" : temperature ,
216301 }
217302
218- # Add response_format if provided
303+ # Handle response_format - convert BaseModel to schema if needed
219304 if response_format :
220- request_body ["response_format" ] = response_format
305+ if isinstance (response_format , type ) and issubclass (
306+ response_format , BaseModel
307+ ):
308+ # Convert Pydantic model to JSON schema format
309+ cleaned_schema = _cleanup_schema (response_format )
310+ request_body ["response_format" ] = {
311+ "type" : "json_schema" ,
312+ "json_schema" : {
313+ "name" : response_format .__name__ .lower (),
314+ "strict" : True ,
315+ "schema" : cleaned_schema ,
316+ },
317+ }
318+ else :
319+ # Use provided dictionary format directly
320+ request_body ["response_format" ] = response_format
221321
222322 response = await self .request_async (
223323 "POST" ,
@@ -258,7 +358,7 @@ async def chat_completions(
258358 top_p : float = 1 ,
259359 tools : Optional [List [ToolDefinition ]] = None ,
260360 tool_choice : Optional [ToolChoice ] = None ,
261- response_format : Optional [Dict [str , Any ]] = None ,
361+ response_format : Optional [Union [ Dict [str , Any ], type [ BaseModel ] ]] = None ,
262362 api_version : str = NORMALIZED_API_VERSION ,
263363 ):
264364 """Generate chat completions using UiPath's normalized LLM Gateway API.
@@ -295,9 +395,11 @@ async def chat_completions(
295395 tool_choice (Optional[ToolChoice], optional): Controls which tools the model can call.
296396 Can be "auto" (model decides), "none" (no tools), or a specific tool choice.
297397 Defaults to None.
298- response_format (Optional[Dict[str, Any]], optional): An object specifying the format
299- that the model must output. Used to enable JSON mode or other structured outputs.
300- Defaults to None.
398+ response_format (Optional[Union[Dict[str, Any], type[BaseModel]]], optional):
399+ An object specifying the format that the model must output. Can be either:
400+ - A dictionary with response format configuration (traditional format)
401+ - A Pydantic BaseModel class (automatically converted to JSON schema)
402+ Used to enable JSON mode or other structured outputs. Defaults to None.
301403 api_version (str, optional): The normalized API version to use.
302404 Defaults to NORMALIZED_API_VERSION.
303405
@@ -349,6 +451,25 @@ async def chat_completions(
349451 presence_penalty=0.2,
350452 n=3 # Generate 3 alternative responses
351453 )
454+
455+ # Using Pydantic model for structured response
456+ from pydantic import BaseModel
457+ from typing import List
458+
459+ class Country(BaseModel):
460+ name: str
461+ capital: str
462+ languages: List[str]
463+
464+ response = await service.chat_completions(
465+ messages=[
466+ {"role": "system", "content": "You are a helpful assistant. Respond with structured JSON."},
467+ {"role": "user", "content": "Tell me about Canada."}
468+ ],
469+ response_format=Country, # Pass BaseModel directly
470+ max_tokens=1000
471+ )
352473 ```
353474
354475 Note:
@@ -370,9 +491,24 @@ async def chat_completions(
370491 "top_p" : top_p ,
371492 }
372493
373- # Add response_format if provided
494+ # Handle response_format - convert BaseModel to schema if needed
374495 if response_format :
375- request_body ["response_format" ] = response_format
496+ if isinstance (response_format , type ) and issubclass (
497+ response_format , BaseModel
498+ ):
499+ # Convert Pydantic model to JSON schema format
500+ cleaned_schema = _cleanup_schema (response_format )
501+ request_body ["response_format" ] = {
502+ "type" : "json_schema" ,
503+ "json_schema" : {
504+ "name" : response_format .__name__ .lower (),
505+ "strict" : True ,
506+ "schema" : cleaned_schema ,
507+ },
508+ }
509+ else :
510+ # Use provided dictionary format directly
511+ request_body ["response_format" ] = response_format
376512
377513 # Add tools if provided - convert to UiPath format
378514 if tools :
0 commit comments