diff --git a/src/google/adk/models/gemini_llm_connection.py b/src/google/adk/models/gemini_llm_connection.py
index 8d8ed5d6dc..fe9938b1e3 100644
--- a/src/google/adk/models/gemini_llm_connection.py
+++ b/src/google/adk/models/gemini_llm_connection.py
@@ -109,6 +109,22 @@ async def send_content(self, content: types.Content):
           function_responses=function_responses
       )
     else:
+      # 3.1 models reject LiveClientContent for mid-conversation text, so
+      # pure-text turns are routed through realtime input instead. Mixed
+      # turns (text plus non-text parts, e.g. inline data) fall through to
+      # the normal path below so non-text parts are not silently dropped.
+      if self._model_version and model_name_utils.is_gemini_3_1_flash_live(
+          self._model_version
+      ):
+        text_parts = [p.text for p in content.parts if p.text]
+        if text_parts and len(text_parts) == len(content.parts):
+          combined = ' '.join(text_parts)
+          logger.debug(
+              'Sending text via realtime input for 3.1 model: %s',
+              combined[:100],
+          )
+          await self._gemini_session.send_realtime_input(text=combined)
+          return
       logger.debug('Sending LLM new content %s', content)
       is_gemini_31 = model_name_utils.is_gemini_3_1_flash_live(
           self._model_version
diff --git a/src/google/adk/models/google_llm.py b/src/google/adk/models/google_llm.py
index 0114d73a82..8e289e2c7c 100644
--- a/src/google/adk/models/google_llm.py
+++ b/src/google/adk/models/google_llm.py
@@ -415,6 +415,19 @@ async def connect(self, llm_request: LlmRequest) -> BaseLlmConnection:
           ' backend. Please use Vertex AI backend.'
       )
     llm_request.live_connect_config.tools = llm_request.config.tools
+
+    # 3.1 models require history_config for session history to work.
+    # NOTE(review): substring match will also hit any model id containing
+    # '3.1'; switch to the shared model-name helper if it is importable
+    # here. A user-supplied history_config is never overwritten.
+    model_name = llm_request.model or ''
+    if '3.1' in model_name and not getattr(
+        llm_request.live_connect_config, 'history_config', None
+    ):
+      llm_request.live_connect_config.history_config = types.HistoryConfig(
+          initial_history_in_client_content=True
+      )
+
     logger.debug('Connecting to live with llm_request:%s', llm_request)
     logger.debug('Live connect config: %s', llm_request.live_connect_config)
     async with self._live_api_client.aio.live.connect(