diff --git a/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/pyproject.toml b/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/pyproject.toml
new file mode 100644
index 0000000000..6f7e8aaa08
--- /dev/null
+++ b/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/pyproject.toml
@@ -0,0 +1,235 @@
+[project]
+# Project metadata. Available keys are documented at:
+# https://packaging.python.org/en/latest/specifications/declaring-project-metadata
+
+name = "google-adk"
+description = "Agent Development Kit"
+readme = "README.md"
+requires-python = ">=3.10"
+license = { file = "LICENSE" }
+authors = [{ name = "Google LLC", email = "googleapis-packages@google.com" }]
+classifiers = [ # List of https://pypi.org/classifiers/
+ "Typing :: Typed",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
+ "Operating System :: OS Independent",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "License :: OSI Approved :: Apache Software License",
+]
+dependencies = [
+ # go/keep-sorted start
+ "PyYAML>=6.0.2, <7.0.0", # For APIHubToolset.
+ "aiosqlite>=0.21.0", # For SQLite database
+ "anyio>=4.9.0, <5.0.0", # For MCP Session Manager
+ "authlib>=1.6.6, <2.0.0", # For RestAPI Tool
+ "click>=8.1.8, <9.0.0", # For CLI tools
+ "fastapi>=0.124.1, <1.0.0", # FastAPI framework
+ "google-api-python-client>=2.157.0, <3.0.0", # Google API client discovery
+ "google-auth[pyopenssl]>=2.47.0", # Google Auth library
+ "google-cloud-aiplatform[agent_engines]>=1.132.0, <2.0.0", # For VertexAI integrations, e.g. example store.
+ "google-cloud-bigquery-storage>=2.0.0",
+ "google-cloud-bigquery>=2.2.0",
+ "google-cloud-bigtable>=2.32.0", # For Bigtable database
+ "google-cloud-dataplex>=1.7.0,<3.0.0", # For Dataplex Catalog Search tool
+ "google-cloud-discoveryengine>=0.13.12, <0.14.0", # For Discovery Engine Search Tool
+ "google-cloud-pubsub>=2.0.0, <3.0.0", # For Pub/Sub Tool
+ "google-cloud-secret-manager>=2.22.0, <3.0.0", # Fetching secrets in RestAPI Tool
+ "google-cloud-spanner>=3.56.0, <4.0.0", # For Spanner database
+ "google-cloud-speech>=2.30.0, <3.0.0", # For Audio Transcription
+ "google-cloud-storage>=2.18.0, <4.0.0", # For GCS Artifact service
+ "google-genai>=1.64.0, <2.0.0", # Google GenAI SDK
+ "graphviz>=0.20.2, <1.0.0", # Graphviz for graph rendering
+ "httpx>=0.27.0, <1.0.0", # HTTP client library
+ "jsonschema>=4.23.0, <5.0.0", # Agent Builder config validation
+ "mcp>=1.23.0, <2.0.0", # For MCP Toolset
+ "nh3>=0.2.0, <1.0.0", # For SVG/HTML sanitization (XSS prevention)
+ "opentelemetry-api>=1.36.0, <1.39.0", # OpenTelemetry - keep below 1.39.0 due to current agent_engines exporter constraints.
+ "opentelemetry-exporter-gcp-logging>=1.9.0a0, <2.0.0",
+ "opentelemetry-exporter-gcp-monitoring>=1.9.0a0, <2.0.0",
+ "opentelemetry-exporter-gcp-trace>=1.9.0, <2.0.0",
+ "opentelemetry-exporter-otlp-proto-http>=1.36.0",
+ "opentelemetry-resourcedetector-gcp>=1.9.0a0, <2.0.0",
+ "opentelemetry-sdk>=1.36.0, <1.39.0",
+ "pyarrow>=14.0.0",
+ "pydantic>=2.12.0, <3.0.0", # For data validation/models
+ "python-dateutil>=2.9.0.post0, <3.0.0", # For Vertex AI Session Service
+ "python-dotenv>=1.0.0, <2.0.0", # To manage environment variables
+ "requests>=2.32.4, <3.0.0",
+ "sqlalchemy-spanner>=1.14.0", # Spanner database session service
+ "sqlalchemy>=2.0, <3.0.0", # SQL database ORM
+ "starlette>=0.49.1, <1.0.0", # For FastAPI CLI
+ "tenacity>=9.0.0, <10.0.0", # For Retry management
+ "typing-extensions>=4.5, <5",
+ "tzlocal>=5.3, <6.0", # Time zone utilities
+ "uvicorn>=0.34.0, <1.0.0", # ASGI server for FastAPI
+ "watchdog>=6.0.0, <7.0.0", # For file change detection and hot reload
+ "websockets>=15.0.1, <16.0.0", # For BaseLlmFlow
+ # go/keep-sorted end
+]
+dynamic = ["version"]
+
+[project.urls]
+homepage = "https://google.github.io/adk-docs/"
+repository = "https://github.com/google/adk-python"
+changelog = "https://github.com/google/adk-python/blob/main/CHANGELOG.md"
+documentation = "https://google.github.io/adk-docs/"
+
+[project.scripts]
+adk = "google.adk.cli:main"
+
+[project.optional-dependencies]
+
+dev = [
+ # go/keep-sorted start
+ "flit>=3.10.0",
+ "isort>=6.0.0",
+ "mypy>=1.15.0",
+ "pyink>=25.12.0",
+ "pylint>=2.6.0",
+ # go/keep-sorted end
+]
+
+a2a = [
+ # go/keep-sorted start
+ "a2a-sdk>=0.3.4,<0.4.0",
+ # go/keep-sorted end
+]
+
+community = [
+ # go/keep-sorted start
+ "google-adk-community",
+ # go/keep-sorted end
+]
+
+eval = [
+ # go/keep-sorted start
+ "Jinja2>=3.1.4,<4.0.0", # For eval template rendering
+ "gepa>=0.1.0",
+ "google-cloud-aiplatform[evaluation]>=1.143.0",
+ "pandas>=2.2.3",
+ "rouge-score>=0.1.2",
+ "tabulate>=0.9.0",
+ # go/keep-sorted end
+]
+
+test = [
+ # go/keep-sorted start
+ "a2a-sdk>=0.3.0,<0.4.0",
+ "anthropic>=0.43.0", # For anthropic model tests
+ "crewai[tools];python_version>='3.11' and python_version<'3.12'", # For CrewaiTool tests; chromadb/pypika fail on 3.12+
+ "google-cloud-firestore>=2.11.0",
+ "google-cloud-parametermanager>=0.4.0, <1.0.0",
+ "kubernetes>=29.0.0", # For GkeCodeExecutor
+ "langchain-community>=0.3.17",
+ "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
+ "litellm>=1.75.5, <=1.82.6", # For LiteLLM tests. Upper bound pinned: versions 1.82.7+ compromised in supply chain attack.
+ "llama-index-readers-file>=0.4.0", # For retrieval tests
+ "openai>=1.100.2", # For LiteLLM
+ "opentelemetry-instrumentation-google-genai>=0.3b0, <1.0.0",
+ "pypika>=0.50.0", # For crewai->chromadb dependency
+ "pytest-asyncio>=0.25.0",
+ "pytest-mock>=3.14.0",
+ "pytest-xdist>=3.6.1",
+ "pytest>=9.0.0,<10.0.0",
+ "python-multipart>=0.0.9",
+ "rouge-score>=0.1.2",
+ "slack-bolt>=1.22.0",
+ "tabulate>=0.9.0",
+ # go/keep-sorted end
+]
+
+docs = [
+ "autodoc_pydantic",
+ "furo",
+ "myst-parser",
+ "sphinx<9.0.0",
+ "sphinx-autodoc-typehints",
+ "sphinx-rtd-theme",
+]
+
+# Optional extensions
+extensions = [
+ "anthropic>=0.43.0", # For anthropic model support
+ "beautifulsoup4>=3.2.2", # For load_web_page tool.
+ "crewai[tools];python_version>='3.11' and python_version<'3.12'", # For CrewaiTool; chromadb/pypika fail on 3.12+
+ "docker>=7.0.0", # For ContainerCodeExecutor
+ "google-cloud-firestore>=2.11.0", # For Firestore services
+ "google-cloud-parametermanager>=0.4.0, <1.0.0",
+ "kubernetes>=29.0.0", # For GkeCodeExecutor
+ "k8s-agent-sandbox>=0.1.1.post3", # For GkeCodeExecutor sandbox mode
+ "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
+ "litellm>=1.75.5, <=1.82.6", # For LiteLlm class. Upper bound pinned: versions 1.82.7+ compromised in supply chain attack.
+ "llama-index-readers-file>=0.4.0", # For retrieval using LlamaIndex.
+ "llama-index-embeddings-google-genai>=0.3.0", # For files retrieval using LlamaIndex.
+ "lxml>=5.3.0", # For load_web_page tool.
+ "pypika>=0.50.0", # For crewai->chromadb dependency
+ "toolbox-adk>=1.0.0, <2.0.0", # For tools.toolbox_toolset.ToolboxToolset
+]
+
+otel-gcp = ["opentelemetry-instrumentation-google-genai>=0.6b0, <1.0.0"]
+
+toolbox = ["toolbox-adk>=1.0.0, <2.0.0"]
+
+slack = ["slack-bolt>=1.22.0"]
+
+[tool.pyink]
+# Format py files following Google style-guide
+line-length = 80
+unstable = true
+pyink-indentation = 2
+pyink-use-majority-quotes = true
+pyink-annotation-pragmas = [
+ "noqa",
+ "pylint:",
+ "type: ignore",
+ "pytype:",
+ "mypy:",
+ "pyright:",
+ "pyre-",
+]
+
+
+[build-system]
+# Build system specify which backend is used to build/install the project (flit,
+# poetry, setuptools,...). All backends are supported by `pip install`
+requires = ["flit_core >=3.8,<4"]
+build-backend = "flit_core.buildapi"
+
+
+[tool.flit.sdist]
+include = ['src/**/*', 'README.md', 'pyproject.toml', 'LICENSE']
+exclude = ['src/**/*.sh']
+
+
+[tool.flit.module]
+name = "google.adk"
+include = ["py.typed"]
+
+
+[tool.isort]
+profile = "google"
+single_line_exclusions = []
+line_length = 200 # Prevent line wrap flickering.
+known_third_party = ["google.adk"]
+
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+asyncio_default_fixture_loop_scope = "function"
+asyncio_mode = "auto"
+
+
+[tool.mypy]
+python_version = "3.10"
+exclude = ["tests/", "contributing/samples/"]
+plugins = ["pydantic.mypy"]
+strict = true
+disable_error_code = ["import-not-found", "import-untyped", "unused-ignore"]
+follow_imports = "skip"
diff --git a/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/src/google/adk/tools/load_artifacts_tool.py b/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/src/google/adk/tools/load_artifacts_tool.py
new file mode 100644
index 0000000000..71a2dea017
--- /dev/null
+++ b/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/src/google/adk/tools/load_artifacts_tool.py
@@ -0,0 +1,307 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import base64
+import binascii
+import json
+import logging
+from typing import Any
+from typing import TYPE_CHECKING
+
+import nh3
+from google.genai import types
+from typing_extensions import override
+
+from ..features import FeatureName
+from ..features import is_feature_enabled
+from .base_tool import BaseTool
+
+# MIME types Gemini accepts for inline data in requests.
+_GEMINI_SUPPORTED_INLINE_MIME_PREFIXES = (
+ 'image/',
+ 'audio/',
+ 'video/',
+)
+_GEMINI_SUPPORTED_INLINE_MIME_TYPES = frozenset({'application/pdf'})
+_TEXT_LIKE_MIME_TYPES = frozenset({
+ 'application/csv',
+ 'application/json',
+ 'application/xml',
+ 'image/svg+xml',
+})
+
+# SVG sanitization allowlist for XSS prevention
+ALLOWED_SVG_TAGS = {
+ 'svg', 'g', 'path', 'circle', 'rect', 'ellipse', 'line', 'polyline', 'polygon',
+ 'text', 'tspan', 'defs', 'symbol', 'use', 'image', 'title', 'desc',
+ 'marker', 'linearGradient', 'radialGradient', 'stop', 'clipPath', 'mask',
+}
+
+ALLOWED_SVG_ATTRS = {
+ 'fill', 'stroke', 'stroke-width', 'opacity', 'x', 'y', 'width', 'height',
+ 'cx', 'cy', 'r', 'rx', 'ry', 'x1', 'y1', 'x2', 'y2', 'd', 'points',
+ 'transform', 'viewBox', 'preserveAspectRatio', 'id', 'class', 'href', 'xlink:href',
+ 'font-size', 'font-family', 'text-anchor', 'font-weight', 'fill-opacity',
+ 'stroke-opacity', 'stroke-linecap', 'stroke-linejoin', 'stroke-dasharray',
+}
+
+if TYPE_CHECKING:
+ from ..models.llm_request import LlmRequest
+ from .tool_context import ToolContext
+
+logger = logging.getLogger('google_adk.' + __name__)
+
+
+def _normalize_mime_type(mime_type: str | None) -> str | None:
+ """Returns the normalized MIME type, without parameters like charset."""
+ if not mime_type:
+ return None
+ return mime_type.split(';', 1)[0].strip()
+
+
+def _is_inline_mime_type_supported(mime_type: str | None) -> bool:
+ """Returns True if Gemini accepts this MIME type as inline data."""
+ normalized = _normalize_mime_type(mime_type)
+ if not normalized:
+ return False
+ if normalized == 'image/svg+xml':
+ return False
+ return normalized.startswith(_GEMINI_SUPPORTED_INLINE_MIME_PREFIXES) or (
+ normalized in _GEMINI_SUPPORTED_INLINE_MIME_TYPES
+ )
+
+
+def _maybe_base64_to_bytes(data: str) -> bytes | None:
+ """Best-effort base64 decode for both std and urlsafe formats."""
+ try:
+ return base64.b64decode(data, validate=True)
+ except (binascii.Error, ValueError):
+ try:
+ return base64.urlsafe_b64decode(data)
+ except (binascii.Error, ValueError):
+ return None
+
+
+def _sanitize_svg_strict(svg_str: str) -> str | None:
+ """Sanitize SVG using nh3 strict allowlist to prevent XSS.
+
+ Uses strict allowlist approach to prevent all XSS vectors including:
+ - Event handlers (100+ variations)
+ - CDATA sections
+ - XML namespace tricks
+ - JavaScript URLs
+ """
+ try:
+ sanitized = nh3.clean(
+ svg_str,
+ tags=ALLOWED_SVG_TAGS,
+ attributes={'*': ALLOWED_SVG_ATTRS},
+ )
+ return sanitized if sanitized.strip() else None
+ except Exception as e:
+ logger.warning(f'SVG sanitization failed: {e}')
+ return None
+
+
+def _as_safe_part_for_llm(
+ artifact: types.Part, artifact_name: str
+) -> types.Part:
+ """Returns a Part that is safe to send to Gemini."""
+ inline_data = artifact.inline_data
+ if inline_data is None:
+ return artifact
+
+ if _is_inline_mime_type_supported(inline_data.mime_type):
+ return artifact
+
+ mime_type = _normalize_mime_type(inline_data.mime_type) or (
+ 'application/octet-stream'
+ )
+ data = inline_data.data
+ if data is None:
+ return types.Part.from_text(
+ text=(
+ f'[Artifact: {artifact_name}, type: {mime_type}. '
+ 'No inline data was provided.]'
+ )
+ )
+
+ if isinstance(data, str):
+ decoded = _maybe_base64_to_bytes(data)
+ if decoded is None:
+ return types.Part.from_text(text=data)
+ data = decoded
+
+ if mime_type.startswith('text/') or mime_type in _TEXT_LIKE_MIME_TYPES:
+ text_content = None
+ try:
+ text_content = data.decode('utf-8')
+ except UnicodeDecodeError:
+ text_content = data.decode('utf-8', errors='replace')
+
+ if mime_type == 'image/svg+xml':
+ sanitized_svg = _sanitize_svg_strict(text_content)
+ if sanitized_svg is not None:
+ text_content = sanitized_svg
+ else:
+ return types.Part.from_text(
+ text=f'[Artifact: {artifact_name}, SVG sanitization failed]'
+ )
+
+ return types.Part.from_text(text=text_content)
+
+ size_kb = len(data) / 1024
+ return types.Part.from_text(
+ text=(
+ f'[Binary artifact: {artifact_name}, '
+ f'type: {mime_type}, size: {size_kb:.1f} KB. '
+ 'Content cannot be displayed inline.]'
+ )
+ )
+
+
+class LoadArtifactsTool(BaseTool):
+ """A tool that loads the artifacts and adds them to the session."""
+
+ def __init__(self):
+ super().__init__(
+ name='load_artifacts',
+ description=("""Loads artifacts into the session for this request.
+
+NOTE: Call when you need access to artifacts (for example, uploads saved by the
+web UI)."""),
+ )
+
+ def _get_declaration(self) -> types.FunctionDeclaration | None:
+ if is_feature_enabled(FeatureName.JSON_SCHEMA_FOR_FUNC_DECL):
+ return types.FunctionDeclaration(
+ name=self.name,
+ description=self.description,
+ parameters_json_schema={
+ 'type': 'object',
+ 'properties': {
+ 'artifact_names': {
+ 'type': 'array',
+ 'items': {'type': 'string'},
+ },
+ },
+ },
+ )
+ return types.FunctionDeclaration(
+ name=self.name,
+ description=self.description,
+ parameters=types.Schema(
+ type=types.Type.OBJECT,
+ properties={
+ 'artifact_names': types.Schema(
+ type=types.Type.ARRAY,
+ items=types.Schema(
+ type=types.Type.STRING,
+ ),
+ )
+ },
+ ),
+ )
+
+ @override
+ async def run_async(
+ self, *, args: dict[str, Any], tool_context: ToolContext
+ ) -> Any:
+ artifact_names: list[str] = args.get('artifact_names', [])
+ return {
+ 'artifact_names': artifact_names,
+ 'status': (
+ 'artifact contents temporarily inserted and removed. to access'
+ ' these artifacts, call load_artifacts tool again.'
+ ),
+ }
+
+ @override
+ async def process_llm_request(
+ self, *, tool_context: ToolContext, llm_request: LlmRequest
+ ) -> None:
+ await super().process_llm_request(
+ tool_context=tool_context,
+ llm_request=llm_request,
+ )
+ await self._append_artifacts_to_llm_request(
+ tool_context=tool_context, llm_request=llm_request
+ )
+
+ async def _append_artifacts_to_llm_request(
+ self, *, tool_context: ToolContext, llm_request: LlmRequest
+ ):
+ artifact_names = await tool_context.list_artifacts()
+ if not artifact_names:
+ return
+
+ # Tell the model about the available artifacts.
+ llm_request.append_instructions([f"""You have a list of artifacts:
+ {json.dumps(artifact_names)}
+
+ When the user asks questions about any of the artifacts, you should call the
+ `load_artifacts` function to load the artifact. Always call load_artifacts
+ before answering questions related to the artifacts, regardless of whether the
+ artifacts have been loaded before. Do not depend on prior answers about the
+ artifacts.
+ """])
+
+ # Attach the content of the artifacts if the model requests them.
+ # This only adds the content to the model request, instead of the session.
+ if llm_request.contents and llm_request.contents[-1].parts:
+ function_response = llm_request.contents[-1].parts[0].function_response
+ if function_response and function_response.name == 'load_artifacts':
+ response = function_response.response or {}
+ artifact_names = response.get('artifact_names', [])
+ for artifact_name in artifact_names:
+ # Try session-scoped first (default behavior)
+ artifact = await tool_context.load_artifact(artifact_name)
+
+ # If not found and name doesn't already have user: prefix,
+ # try cross-session artifacts with user: prefix
+ if artifact is None and not artifact_name.startswith('user:'):
+ prefixed_name = f'user:{artifact_name}'
+ artifact = await tool_context.load_artifact(prefixed_name)
+
+ if artifact is None:
+ logger.warning('Artifact "%s" not found, skipping', artifact_name)
+ continue
+
+ artifact_part = _as_safe_part_for_llm(artifact, artifact_name)
+ if artifact_part is not artifact:
+ mime_type = (
+ artifact.inline_data.mime_type if artifact.inline_data else None
+ )
+ logger.debug(
+ 'Converted artifact "%s" (mime_type=%s) to text Part',
+ artifact_name,
+ mime_type,
+ )
+
+ llm_request.contents.append(
+ types.Content(
+ role='user',
+ parts=[
+ types.Part.from_text(
+ text=f'Artifact {artifact_name} is:'
+ ),
+ artifact_part,
+ ],
+ )
+ )
+
+
+load_artifacts_tool = LoadArtifactsTool()
diff --git a/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/tests/unittests/tools/test_load_artifacts_tool.py b/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/tests/unittests/tools/test_load_artifacts_tool.py
new file mode 100644
index 0000000000..cc77a5b1fa
--- /dev/null
+++ b/Desktop/claude/zero_hunter/platform_12/sourcecode/adk-python-main/adk-python-main/tests/unittests/tools/test_load_artifacts_tool.py
@@ -0,0 +1,291 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+
+from google.adk.features import FeatureName
+from google.adk.features._feature_registry import temporary_feature_override
+from google.adk.models.llm_request import LlmRequest
+from google.adk.tools.load_artifacts_tool import _maybe_base64_to_bytes
+from google.adk.tools.load_artifacts_tool import _sanitize_svg_strict
+from google.adk.tools.load_artifacts_tool import load_artifacts_tool
+from google.genai import types
+from pytest import mark
+
+
+class _StubToolContext:
+ """Minimal ToolContext stub for LoadArtifactsTool tests."""
+
+ def __init__(self, artifacts_by_name: dict[str, types.Part]):
+ self._artifacts_by_name = artifacts_by_name
+
+ async def list_artifacts(self) -> list[str]:
+ return list(self._artifacts_by_name.keys())
+
+ async def load_artifact(self, name: str) -> types.Part | None:
+ return self._artifacts_by_name.get(name)
+
+
+@mark.asyncio
+async def test_load_artifacts_converts_unsupported_mime_to_text():
+ """Unsupported inline MIME types are converted to text parts."""
+ artifact_name = 'test.csv'
+ csv_bytes = b'col1,col2\n1,2\n'
+ artifact = types.Part(
+ inline_data=types.Blob(data=csv_bytes, mime_type='application/csv')
+ )
+
+ tool_context = _StubToolContext({artifact_name: artifact})
+ llm_request = LlmRequest(
+ contents=[
+ types.Content(
+ role='user',
+ parts=[
+ types.Part(
+ function_response=types.FunctionResponse(
+ name='load_artifacts',
+ response={'artifact_names': [artifact_name]},
+ )
+ )
+ ],
+ )
+ ]
+ )
+
+ await load_artifacts_tool.process_llm_request(
+ tool_context=tool_context, llm_request=llm_request
+ )
+
+ assert llm_request.contents[-1].parts[0].text == (
+ f'Artifact {artifact_name} is:'
+ )
+ artifact_part = llm_request.contents[-1].parts[1]
+ assert artifact_part.inline_data is None
+ assert artifact_part.text == csv_bytes.decode('utf-8')
+
+
+@mark.asyncio
+async def test_load_artifacts_converts_base64_unsupported_mime_to_text():
+ """Unsupported base64 string data is converted to text parts."""
+ artifact_name = 'test.csv'
+ csv_bytes = b'col1,col2\n1,2\n'
+ csv_base64 = base64.b64encode(csv_bytes).decode('ascii')
+ artifact = types.Part(
+ inline_data=types.Blob(data=csv_base64, mime_type='application/csv')
+ )
+
+ tool_context = _StubToolContext({artifact_name: artifact})
+ llm_request = LlmRequest(
+ contents=[
+ types.Content(
+ role='user',
+ parts=[
+ types.Part(
+ function_response=types.FunctionResponse(
+ name='load_artifacts',
+ response={'artifact_names': [artifact_name]},
+ )
+ )
+ ],
+ )
+ ]
+ )
+
+ await load_artifacts_tool.process_llm_request(
+ tool_context=tool_context, llm_request=llm_request
+ )
+
+ artifact_part = llm_request.contents[-1].parts[1]
+ assert artifact_part.inline_data is None
+ assert artifact_part.text == csv_bytes.decode('utf-8')
+
+
+@mark.asyncio
+async def test_load_artifacts_keeps_supported_mime_types():
+ """Supported inline MIME types are passed through unchanged."""
+ artifact_name = 'test.pdf'
+ artifact = types.Part(
+ inline_data=types.Blob(data=b'%PDF-1.4', mime_type='application/pdf')
+ )
+
+ tool_context = _StubToolContext({artifact_name: artifact})
+ llm_request = LlmRequest(
+ contents=[
+ types.Content(
+ role='user',
+ parts=[
+ types.Part(
+ function_response=types.FunctionResponse(
+ name='load_artifacts',
+ response={'artifact_names': [artifact_name]},
+ )
+ )
+ ],
+ )
+ ]
+ )
+
+ await load_artifacts_tool.process_llm_request(
+ tool_context=tool_context, llm_request=llm_request
+ )
+
+ artifact_part = llm_request.contents[-1].parts[1]
+ assert artifact_part.inline_data is not None
+ assert artifact_part.inline_data.mime_type == 'application/pdf'
+
+
+def test_maybe_base64_to_bytes_decodes_standard_base64():
+ """Standard base64 encoded strings are decoded correctly."""
+ original = b'hello world'
+ encoded = base64.b64encode(original).decode('ascii')
+ assert _maybe_base64_to_bytes(encoded) == original
+
+
+def test_maybe_base64_to_bytes_decodes_urlsafe_base64():
+ """URL-safe base64 encoded strings are decoded correctly."""
+ original = b'\xfb\xff\xfe' # bytes that produce +/ in std but -_ in urlsafe
+ encoded = base64.urlsafe_b64encode(original).decode('ascii')
+ assert _maybe_base64_to_bytes(encoded) == original
+
+
+def test_maybe_base64_to_bytes_returns_none_for_invalid():
+ """Invalid base64 strings return None."""
+ # Single character is invalid (base64 requires length % 4 == 0 after padding)
+ assert _maybe_base64_to_bytes('x') is None
+
+
+def test_get_declaration_with_json_schema_feature_enabled():
+ """Test that _get_declaration uses parameters_json_schema when feature is enabled."""
+ with temporary_feature_override(FeatureName.JSON_SCHEMA_FOR_FUNC_DECL, True):
+ declaration = load_artifacts_tool._get_declaration()
+
+ assert declaration.name == 'load_artifacts'
+ assert declaration.parameters is None
+ assert declaration.parameters_json_schema == {
+ 'type': 'object',
+ 'properties': {
+ 'artifact_names': {
+ 'type': 'array',
+ 'items': {'type': 'string'},
+ },
+ },
+ }
+
+
+# SVG XSS Prevention Tests
+@mark.parametrize(
+ 'payload,dangerous_patterns',
+ [
+ # Test 1: CDATA Bypass
+ (
+ '<svg><foreignObject><![CDATA[<script>alert(1)</script>]]></foreignObject></svg>',
+ ['foreignObject', 'CDATA'],
+ ),
+ # Test 2: xlink:href JavaScript Protocol
+ (
+ '<svg><a xlink:href="javascript:alert(1)"><text x="10" y="10">click</text></a></svg>',
+ ['javascript:', 'alert'],
+ ),
+ # Test 3: Namespace Injection
+ (
+ '<svg xmlns:foo="http://www.w3.org/1999/xhtml"><foo:script>alert(1)</foo:script></svg>',
+ ['script', 'foo:script', 'xmlns:foo'],
+ ),
+ # Test 4: Event Handler - onanimationend
+ (
+ '<svg><rect width="10" height="10" onanimationend="alert(1)"/></svg>',
+ ['onanimationend'],
+ ),
+ # Test 5: Event Handler - ontransitionend
+ (
+ '<svg><rect width="10" height="10" ontransitionend="alert(1)"/></svg>',
+ ['ontransitionend'],
+ ),
+ # Test 6: Event Handler - onpointerenter
+ (
+ '<svg><circle cx="5" cy="5" r="4" onpointerenter="alert(1)"/></svg>',
+ ['onpointerenter'],
+ ),
+ # Test 7: foreignObject + onerror (Original PoC)
+ (
+ '<svg><foreignObject><img src="x" onerror="alert(1)"/></foreignObject></svg>',
+ ['foreignObject', 'onerror'],
+ ),
+ ],
+)
+def test_sanitize_svg_xss_vectors(payload: str, dangerous_patterns: list[str]):
+ """Test that all XSS vectors are blocked by SVG sanitization."""
+ result = _sanitize_svg_strict(payload)
+ assert result is not None
+ for pattern in dangerous_patterns:
+ assert pattern not in result, f'Dangerous pattern "{pattern}" found in sanitized SVG'
+
+
+def test_sanitize_svg_preserves_safe_content():
+ """Test that safe SVG content is preserved after sanitization."""
+ svg = '<svg><circle cx="50" cy="50" r="40" fill="blue"/></svg>'
+ result = _sanitize_svg_strict(svg)
+ assert result is not None
+ assert 'circle' in result
+ assert 'cx="50"' in result or 'cx=50' in result
+ assert 'fill="blue"' in result or 'fill=blue' in result
+
+
+def test_sanitize_svg_handles_malformed_input():
+ """Test that malformed SVG is handled gracefully."""
+ malformed_svgs = [
+ ''
+ result = _sanitize_svg_content(svg)
+ assert '' in result
+ assert 'Safe content' in result
+ assert '