17 changes: 13 additions & 4 deletions src/google/adk/models/gemini_llm_connection.py
@@ -105,12 +105,21 @@ async def send_content(self, content: types.Content):
       # All parts have to be function responses.
       function_responses = [part.function_response for part in content.parts]
       logger.debug('Sending LLM function response: %s', function_responses)
-      await self._gemini_session.send(
-          input=types.LiveClientToolResponse(
-              function_responses=function_responses
-          ),
+      await self._gemini_session.send_tool_response(
+          function_responses=function_responses
       )
     else:
+      # 3.1 models reject LiveClientContent for mid-conversation text
+      if self._model_version and '3.1' in self._model_version:
+        text_parts = [p.text for p in content.parts if p.text]
+        if text_parts:
+          combined = ' '.join(text_parts)
+          logger.debug(
+              'Sending text via realtime input for 3.1 model: %s',
+              combined[:100],
+          )
+          await self._gemini_session.send_realtime_input(text=combined)
+          return
       logger.debug('Sending LLM new content %s', content)
       is_gemini_31 = model_name_utils.is_gemini_3_1_flash_live(
           self._model_version
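Taken together, the new send_content logic has three exits. Below is a minimal sketch of that control flow (illustrative only, not ADK code): it assumes a google-genai live session object, route_outgoing_content is a hypothetical name, and the final send_client_content call stands in for the non-3.1 branch that the diff truncates.

from google.genai import types


async def route_outgoing_content(
    session, content: types.Content, model_version: str
):
  """Sketch of the three send paths in send_content after this change."""
  if content.parts and content.parts[0].function_response:
    # Tool results now go through the dedicated send_tool_response API
    # rather than a raw send() call wrapping LiveClientToolResponse.
    await session.send_tool_response(
        function_responses=[p.function_response for p in content.parts]
    )
    return
  if model_version and '3.1' in model_version:
    # 3.1 live models reject LiveClientContent mid-conversation, so text
    # parts are joined and pushed over the realtime input channel instead.
    combined = ' '.join(p.text for p in content.parts if p.text)
    if combined:
      await session.send_realtime_input(text=combined)
      return
  # Fallback for other models; the diff cuts this branch off, but it
  # presumably ends in an ordinary client-content send.
  await session.send_client_content(turns=content, turn_complete=True)

Note the early return after send_realtime_input: a 3.1 model with text parts never reaches the client-content path, which is exactly the behavior the added diff lines encode.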
10 changes: 10 additions & 0 deletions src/google/adk/models/google_llm.py
@@ -415,6 +415,16 @@ async def connect(self, llm_request: LlmRequest) -> BaseLlmConnection:
             ' backend. Please use Vertex AI backend.'
         )
       llm_request.live_connect_config.tools = llm_request.config.tools
+
+    # 3.1 models require history_config for session history to work
+    model_name = llm_request.model or ''
+    if '3.1' in model_name and not getattr(
+        llm_request.live_connect_config, 'history_config', None
+    ):
+      llm_request.live_connect_config.history_config = types.HistoryConfig(
+          initial_history_in_client_content=True
+      )
+
     logger.debug('Connecting to live with llm_request:%s', llm_request)
     logger.debug('Live connect config: %s', llm_request.live_connect_config)
     async with self._live_api_client.aio.live.connect(
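The guard added here is easy to exercise in isolation. A hedged sketch, using types.HistoryConfig exactly as the diff does; ensure_history_config is an illustrative helper, not ADK API.

from google.genai import types


def ensure_history_config(
    model_name: str, config: types.LiveConnectConfig
) -> types.LiveConnectConfig:
  """Sketch of the connect-time guard for 3.1 live models."""
  # Only 3.1 models get the default; a history_config the caller has
  # already set is left untouched, mirroring the getattr check above.
  if '3.1' in (model_name or '') and not getattr(
      config, 'history_config', None
  ):
    config.history_config = types.HistoryConfig(
        initial_history_in_client_content=True
    )
  return config

Because of the getattr check, applying the guard twice, or to a config the caller already populated, is a no-op, so user-supplied history settings always win over the 3.1 default.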