From 6388ba3b2054e60d218eae6ec8abc621ed0a1139 Mon Sep 17 00:00:00 2001 From: George Weale Date: Wed, 10 Dec 2025 10:07:39 -0800 Subject: [PATCH 1/3] fix: Avoid false positive "App name mismatch" warnings in Runner When users instantiate LlmAgent directly (not subclassed), the origin inference incorrectly detected ADK's internal google/adk/agents/ path as a mismatch. Use metadata from AgentLoader when available Skip inference for google.adk.* module Close #3143 Co-authored-by: George Weale PiperOrigin-RevId: 842774292 --- src/google/adk/runners.py | 29 +++++++ tests/unittests/test_runners.py | 140 ++++++++++++++++++++++++++++++++ 2 files changed, 169 insertions(+) diff --git a/src/google/adk/runners.py b/src/google/adk/runners.py index 33187ff0c3..7e1a2e5d02 100644 --- a/src/google/adk/runners.py +++ b/src/google/adk/runners.py @@ -245,9 +245,38 @@ def _validate_runner_params( def _infer_agent_origin( self, agent: BaseAgent ) -> tuple[Optional[str], Optional[Path]]: + """Infer the origin app name and directory from an agent's module location. + + Returns: + A tuple of (origin_app_name, origin_path): + - origin_app_name: The inferred app name (directory name containing the + agent), or None if inference is not possible/applicable. + - origin_path: The directory path where the agent is defined, or None + if the path cannot be determined. + + Both values are None when: + - The agent has no associated module + - The agent is defined in google.adk.* (ADK internal modules) + - The module has no __file__ attribute + """ + # First, check for metadata set by AgentLoader (most reliable source). + # AgentLoader sets these attributes when loading agents. + origin_app_name = getattr(agent, '_adk_origin_app_name', None) + origin_path = getattr(agent, '_adk_origin_path', None) + if origin_app_name is not None and origin_path is not None: + return origin_app_name, origin_path + + # Fall back to heuristic inference for programmatic usage. module = inspect.getmodule(agent.__class__) if not module: return None, None + + # Skip ADK internal modules. When users instantiate LlmAgent directly + # (not subclassed), inspect.getmodule() returns the ADK module. This + # could falsely match 'agents' in 'google/adk/agents/' path. + if module.__name__.startswith('google.adk.'): + return None, None + module_file = getattr(module, '__file__', None) if not module_file: return None, None diff --git a/tests/unittests/test_runners.py b/tests/unittests/test_runners.py index 7c4de3ecce..d692f7e380 100644 --- a/tests/unittests/test_runners.py +++ b/tests/unittests/test_runners.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import importlib from pathlib import Path +import sys import textwrap from typing import Optional from unittest.mock import AsyncMock @@ -898,5 +900,143 @@ def test_should_append_event_other_event(self): assert self.runner._should_append_event(event, is_live_call=True) is True +@pytest.fixture +def user_agent_module(tmp_path, monkeypatch): + """Fixture that creates a temporary user agent module for testing. + + Yields a callable that creates an agent module with the given name and + returns the loaded agent. 
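+ The fixture also inserts tmp_path on sys.path so the generated package can
+ be imported; on teardown it removes that path entry and purges any imported
+ "agents.*" modules from sys.modules.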
+ """ + created_modules = [] + original_path = None + + def _create_agent(agent_dir_name: str): + nonlocal original_path + agent_dir = tmp_path / "agents" / agent_dir_name + agent_dir.mkdir(parents=True, exist_ok=True) + (tmp_path / "agents" / "__init__.py").write_text("", encoding="utf-8") + (agent_dir / "__init__.py").write_text("", encoding="utf-8") + + agent_source = f"""\ +from google.adk.agents.llm_agent import LlmAgent + +class MyAgent(LlmAgent): + pass + +root_agent = MyAgent(name="{agent_dir_name}", model="gemini-2.0-flash") +""" + (agent_dir / "agent.py").write_text(agent_source, encoding="utf-8") + + monkeypatch.chdir(tmp_path) + if original_path is None: + original_path = str(tmp_path) + sys.path.insert(0, original_path) + + module_name = f"agents.{agent_dir_name}.agent" + module = importlib.import_module(module_name) + created_modules.append(module_name) + return module.root_agent + + yield _create_agent + + # Cleanup + if original_path and original_path in sys.path: + sys.path.remove(original_path) + for mod_name in list(sys.modules.keys()): + if mod_name.startswith("agents"): + del sys.modules[mod_name] + + +class TestRunnerInferAgentOrigin: + """Tests for Runner._infer_agent_origin method.""" + + def setup_method(self): + """Set up test fixtures.""" + self.session_service = InMemorySessionService() + self.artifact_service = InMemoryArtifactService() + + def test_infer_agent_origin_uses_adk_metadata_when_available(self): + """Test that _infer_agent_origin uses _adk_origin_* metadata when set.""" + agent = MockLlmAgent("test_agent") + # Simulate metadata set by AgentLoader + agent._adk_origin_app_name = "my_app" + agent._adk_origin_path = Path("/workspace/agents/my_app") + + runner = Runner( + app_name="my_app", + agent=agent, + session_service=self.session_service, + artifact_service=self.artifact_service, + ) + + origin_name, origin_path = runner._infer_agent_origin(agent) + assert origin_name == "my_app" + assert origin_path == Path("/workspace/agents/my_app") + + def test_infer_agent_origin_no_false_positive_for_direct_llm_agent(self): + """Test that using LlmAgent directly doesn't trigger mismatch warning. + + Regression test for GitHub issue #3143: Users who instantiate LlmAgent + directly and run from a directory that is a parent of the ADK installation + were getting false positive 'App name mismatch' warnings. + + This also verifies that _infer_agent_origin returns None for ADK internal + modules (google.adk.*). 
+ """ + agent = LlmAgent( + name="my_custom_agent", + model="gemini-2.0-flash", + ) + + runner = Runner( + app_name="my_custom_agent", + agent=agent, + session_service=self.session_service, + artifact_service=self.artifact_service, + ) + + # Should return None for ADK internal modules + origin_name, _ = runner._infer_agent_origin(agent) + assert origin_name is None + # No mismatch warning should be generated + assert runner._app_name_alignment_hint is None + + def test_infer_agent_origin_with_subclassed_agent_in_user_code( + self, user_agent_module + ): + """Test that subclassed agents in user code still trigger origin inference.""" + agent = user_agent_module("my_agent") + + runner = Runner( + app_name="my_agent", + agent=agent, + session_service=self.session_service, + artifact_service=self.artifact_service, + ) + + # Should infer origin correctly from user's code + origin_name, origin_path = runner._infer_agent_origin(agent) + assert origin_name == "my_agent" + assert runner._app_name_alignment_hint is None + + def test_infer_agent_origin_detects_mismatch_for_user_agent( + self, user_agent_module + ): + """Test that mismatched app_name is detected for user-defined agents.""" + agent = user_agent_module("actual_name") + + runner = Runner( + app_name="wrong_name", # Intentionally wrong + agent=agent, + session_service=self.session_service, + artifact_service=self.artifact_service, + ) + + # Should detect the mismatch + assert runner._app_name_alignment_hint is not None + assert "wrong_name" in runner._app_name_alignment_hint + assert "actual_name" in runner._app_name_alignment_hint + + if __name__ == "__main__": pytest.main([__file__]) From 51a638b6b85943d4aaec4ee37c95a55386ebac90 Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Wed, 10 Dec 2025 10:30:36 -0800 Subject: [PATCH 2/3] chore: Introduce `build_function_declaration_with_json_schema` to use pydantic to generate json schema for FunctionTool Co-authored-by: Xuan Yang PiperOrigin-RevId: 842783745 --- .../adk/tools/_function_tool_declarations.py | 230 +++++ .../tools/test_function_tool_declarations.py | 842 ++++++++++++++++++ 2 files changed, 1072 insertions(+) create mode 100644 src/google/adk/tools/_function_tool_declarations.py create mode 100644 tests/unittests/tools/test_function_tool_declarations.py diff --git a/src/google/adk/tools/_function_tool_declarations.py b/src/google/adk/tools/_function_tool_declarations.py new file mode 100644 index 0000000000..7b37390856 --- /dev/null +++ b/src/google/adk/tools/_function_tool_declarations.py @@ -0,0 +1,230 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Function tool declaration builder using Pydantic's JSON schema generation. + +This module provides a streamlined approach to building FunctionDeclaration +objects by leveraging Pydantic's `create_model` and `model_json_schema()` +capabilities instead of manual type parsing. 
+ +The GenAI SDK supports `parameters_json_schema` which accepts raw JSON schema, +allowing us to delegate schema generation complexity to Pydantic. +""" + +from __future__ import annotations + +import inspect +import logging +from typing import Any +from typing import Callable +from typing import get_type_hints +from typing import Optional +from typing import Type + +from google.genai import types +import pydantic +from pydantic import create_model +from pydantic import fields as pydantic_fields + + +def _get_function_fields( + func: Callable[..., Any], + ignore_params: Optional[list[str]] = None, +) -> dict[str, tuple[type[Any], Any]]: + """Extract function parameters as Pydantic field definitions. + + Args: + func: The callable to extract parameters from. + ignore_params: List of parameter names to exclude from the schema. + + Returns: + A dictionary mapping parameter names to (type, default) tuples suitable + for Pydantic's create_model. + """ + if ignore_params is None: + ignore_params = [] + + sig = inspect.signature(func) + fields: dict[str, tuple[type[Any], Any]] = {} + + # Get type hints with forward reference resolution + try: + type_hints = get_type_hints(func) + except TypeError: + # Can happen with mock objects or complex annotations + type_hints = {} + + for name, param in sig.parameters.items(): + if name in ignore_params: + continue + + if param.kind not in ( + inspect.Parameter.POSITIONAL_OR_KEYWORD, + inspect.Parameter.KEYWORD_ONLY, + inspect.Parameter.POSITIONAL_ONLY, + ): + continue + + # Get annotation, preferring resolved type hints + if name in type_hints: + ann = type_hints[name] + elif param.annotation is not inspect._empty: + ann = param.annotation + else: + ann = Any + + if param.default is inspect._empty: + default = pydantic_fields.PydanticUndefined + else: + default = param.default + + fields[name] = (ann, default) + + return fields + + +def _build_parameters_json_schema( + func: Callable[..., Any], + ignore_params: Optional[list[str]] = None, +) -> Optional[dict[str, Any]]: + """Build JSON schema for function parameters using Pydantic. + + Args: + func: The callable to generate schema for. + ignore_params: List of parameter names to exclude. + + Returns: + A JSON schema dict, or None if the function has no parameters. + """ + fields = _get_function_fields(func, ignore_params) + if not fields: + return None + + # Create a Pydantic model dynamically + func_name = getattr(func, '__name__', 'Callable') + model = create_model( + f'{func_name}Params', + **fields, # type: ignore[arg-type] + ) + + return model.model_json_schema() + + +def _build_response_json_schema( + func: Callable[..., Any], +) -> Optional[dict[str, Any]]: + """Build JSON schema for function return type using Pydantic. + + Args: + func: The callable to generate return schema for. + + Returns: + A JSON schema dict for the return type, or None if no return annotation. 
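+ None is also returned if Pydantic fails to generate a schema for the
+ annotation; the failure is logged and the declaration falls back to an
+ untyped response.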
+ """ + return_annotation = inspect.signature(func).return_annotation + + if return_annotation is inspect._empty: + return None + + # Handle string annotations (forward references) + if isinstance(return_annotation, str): + try: + type_hints = get_type_hints(func) + return_annotation = type_hints.get('return', return_annotation) + except TypeError: + pass + + try: + adapter = pydantic.TypeAdapter( + return_annotation, + config=pydantic.ConfigDict(arbitrary_types_allowed=True), + ) + return adapter.json_schema() + except Exception: + logging.warning( + 'Failed to build response JSON schema for %s', + func.__name__, + exc_info=True, + ) + # Fall back to untyped response + return None + + +def build_function_declaration_with_json_schema( + func: Callable[..., Any] | Type[pydantic.BaseModel], + ignore_params: Optional[list[str]] = None, +) -> types.FunctionDeclaration: + """Build a FunctionDeclaration using Pydantic's JSON schema generation. + + This function provides a simplified approach compared to manual type parsing. + It uses Pydantic's `create_model` to dynamically create a model from function + parameters, then uses `model_json_schema()` to generate the JSON schema. + + The generated schema is passed to `parameters_json_schema` which the GenAI + SDK supports natively. + + Args: + func: The callable or Pydantic model to generate declaration for. + ignore_params: List of parameter names to exclude from the schema. + + Returns: + A FunctionDeclaration with the function's schema. + + Example: + >>> from enum import Enum + >>> from typing import List, Optional + >>> + >>> class Color(Enum): + ... RED = "red" + ... GREEN = "green" + ... + >>> def paint_room( + ... color: Color, + ... rooms: List[str], + ... dry_time_hours: Optional[int] = None, + ... ) -> str: + ... '''Paint rooms with the specified color.''' + ... return f"Painted {len(rooms)} rooms {color.value}" + >>> + >>> decl = build_function_declaration_with_json_schema(paint_room) + >>> decl.name + 'paint_room' + """ + # Handle Pydantic BaseModel classes + if isinstance(func, type) and issubclass(func, pydantic.BaseModel): + schema = func.model_json_schema() + description = inspect.cleandoc(func.__doc__) if func.__doc__ else None + return types.FunctionDeclaration( + name=func.__name__, + description=description, + parameters_json_schema=schema, + ) + + # Handle Callable functions + description = inspect.cleandoc(func.__doc__) if func.__doc__ else None + func_name = getattr(func, '__name__', 'Callable') + declaration = types.FunctionDeclaration( + name=func_name, + description=description, + ) + + parameters_schema = _build_parameters_json_schema(func, ignore_params) + if parameters_schema: + declaration.parameters_json_schema = parameters_schema + + response_schema = _build_response_json_schema(func) + if response_schema: + declaration.response_json_schema = response_schema + + return declaration diff --git a/tests/unittests/tools/test_function_tool_declarations.py b/tests/unittests/tools/test_function_tool_declarations.py new file mode 100644 index 0000000000..252bc9868c --- /dev/null +++ b/tests/unittests/tools/test_function_tool_declarations.py @@ -0,0 +1,842 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the Pydantic-based function declaration builder. + +These tests verify that the simplified Pydantic approach generates correct +JSON schemas for various function signatures, including edge cases. +""" + +from __future__ import annotations + +from collections.abc import Sequence +from enum import Enum +from typing import Any +from typing import Literal +from typing import Optional + +from absl.testing import parameterized +from google.adk.tools._function_tool_declarations import build_function_declaration_with_json_schema +from google.adk.tools.tool_context import ToolContext +from pydantic import BaseModel +from pydantic import Field +from pydantic.dataclasses import dataclass as pyd_dataclass + + +class Color(Enum): + """A simple enum for testing.""" + + RED = "red" + GREEN = "green" + BLUE = "blue" + + +class Priority(Enum): + """An integer enum for testing.""" + + LOW = 1 + MEDIUM = 2 + HIGH = 3 + + +class Address(BaseModel): + """A Pydantic model for nested object testing.""" + + street: str = Field(..., description="Street address") + city: str = Field(..., description="City name") + zip_code: str = Field(..., pattern=r"^\d{5}$", description="US ZIP code") + + +class Person(BaseModel): + """A Pydantic model with nested model.""" + + name: str + age: int + address: Optional[Address] = None + + +@pyd_dataclass +class Window: + """A Pydantic dataclass for testing.""" + + width: int + height: int + + +class TestBasicTypes(parameterized.TestCase): + """Tests for basic Python types.""" + + @parameterized.named_parameters( + ( + "string", + lambda name: f"Hello, {name}!", + {"name": {"title": "Name", "type": "string"}}, + {"type": "string"}, + ), + ( + "integer", + lambda n: n * 2, + {"n": {"title": "N", "type": "integer"}}, + {"type": "integer"}, + ), + ( + "float", + lambda x: x * x, + {"x": {"title": "X", "type": "number"}}, + {"type": "number"}, + ), + ( + "boolean", + lambda enabled: not enabled, + {"enabled": {"title": "Enabled", "type": "boolean"}}, + {"type": "boolean"}, + ), + ) + def test_basic_parameter_types( + self, func, expected_param_props, expected_response_schema + ): + """Test functions with single basic type parameters.""" + # We need to define the functions within the test or use types from typing + # to properly capture annotations. For simplicity, we'll define them here. 
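+ # The lambda's first parameter name tells us which annotated wrapper
+ # function to rebuild below.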
+ if func.__code__.co_varnames[0] == "name": + + def test_func(name: str) -> str: + return func(name) + + elif func.__code__.co_varnames[0] == "n": + + def test_func(n: int) -> int: + return func(n) + + elif func.__code__.co_varnames[0] == "x": + + def test_func(x: float) -> float: + return func(x) + + elif func.__code__.co_varnames[0] == "enabled": + + def test_func(enabled: bool) -> bool: + return func(enabled) + + else: + raise ValueError("Unexpected function signature") + + decl = build_function_declaration_with_json_schema(test_func) + + self.assertIsNotNone(decl.parameters_json_schema) + schema = decl.parameters_json_schema + + self.assertEqual(schema["properties"], expected_param_props) + self.assertEqual(decl.response_json_schema, expected_response_schema) + self.assertEqual(set(schema["required"]), set(expected_param_props.keys())) + + def test_string_parameter_details(self): + """Test function with string parameter details.""" + + def greet(name: str) -> str: + """Greet someone by name.""" + return f"Hello, {name}!" + + decl = build_function_declaration_with_json_schema(greet) + + self.assertEqual(decl.name, "greet") + self.assertEqual(decl.description, "Greet someone by name.") + self.assertEqual( + decl.parameters_json_schema, + { + "type": "object", + "properties": { + "name": { + "title": "Name", + "type": "string", + } + }, + "required": ["name"], + "title": "greetParams", + }, + ) + + self.assertEqual(decl.response_json_schema, {"type": "string"}) + + def test_multiple_parameters(self): + """Test function with multiple parameters of different types.""" + + def create_user(name: str, age: int, active: bool) -> str: + """Create a new user.""" + return f"Created {name}" + + decl = build_function_declaration_with_json_schema(create_user) + schema = decl.parameters_json_schema + + self.assertLen(schema["properties"], 3) + self.assertEqual(schema["properties"]["name"]["type"], "string") + self.assertEqual(schema["properties"]["age"]["type"], "integer") + self.assertEqual(schema["properties"]["active"]["type"], "boolean") + self.assertEqual(set(schema["required"]), {"name", "age", "active"}) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + +class TestDefaultValues(parameterized.TestCase): + """Tests for parameters with default values.""" + + def test_string_with_default(self): + """Test string parameter with default value.""" + + def greet(name: str = "World") -> str: + """Greet someone.""" + return f"Hello, {name}!" 
+ + decl = build_function_declaration_with_json_schema(greet) + schema = decl.parameters_json_schema + + assert schema["properties"]["name"]["default"] == "World" + self.assertNotIn("name", schema.get("required", [])) + assert decl.response_json_schema == { + "type": "string", + } + + def test_int_with_default(self): + """Test integer parameter with default value.""" + + def repeat(text: str, times: int = 3) -> str: + """Repeat text.""" + return text * times + + decl = build_function_declaration_with_json_schema(repeat) + schema = decl.parameters_json_schema + + # times should have default, text should be required + assert "text" in schema["required"] + assert schema["properties"]["times"]["default"] == 3 + self.assertNotIn("times", schema.get("required", [])) + assert decl.response_json_schema == { + "type": "string", + } + + def test_none_default(self): + """Test parameter with None as default.""" + + def search(query: str, limit: Optional[int] = None) -> str: + """Search for something.""" + return query + + decl = build_function_declaration_with_json_schema(search) + schema = decl.parameters_json_schema + + assert "query" in schema["required"] + # limit should not be required since it has default None + self.assertNotIn("limit", schema.get("required", [])) + assert schema["properties"]["limit"]["default"] is None + assert decl.response_json_schema == { + "type": "string", + } + + +class TestCollectionTypes(parameterized.TestCase): + """Tests for list, dict, and other collection types.""" + + @parameterized.named_parameters( + ( + "strings", + ", ".join, + "items", + str, + "string", + "string", + ), + ( + "integers", + sum, + "numbers", + int, + "integer", + "integer", + ), + ) + def test_list_parameters( + self, + func_impl, + param_name, + item_type, + expected_item_schema_type, + expected_response_schema_type, + ): + """Test list parameters with different item types.""" + + if item_type == str: + + def test_func(items: list[str]) -> str: + return func_impl(items) + + test_func.__name__ = "join_strings" + elif item_type == int: + + def test_func(numbers: list[int]) -> int: + return func_impl(numbers) + + test_func.__name__ = "sum_numbers" + else: + raise ValueError("Unsupported item type") + + decl = build_function_declaration_with_json_schema(test_func) + schema = decl.parameters_json_schema + + self.assertEqual(schema["properties"][param_name]["type"], "array") + self.assertEqual( + schema["properties"][param_name]["items"]["type"], + expected_item_schema_type, + ) + self.assertEqual( + decl.response_json_schema, + { + "type": expected_response_schema_type, + }, + ) + + def test_dict_parameter(self): + """Test dict[str, Any] parameter.""" + + def process_data(data: dict[str, Any]) -> str: + """Process a dictionary.""" + return str(data) + + decl = build_function_declaration_with_json_schema(process_data) + schema = decl.parameters_json_schema + + self.assertEqual(schema["properties"]["data"]["type"], "object") + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_dict_with_typed_values(self): + """Test dict[str, int] parameter.""" + + def sum_scores(scores: dict[str, int]) -> int: + """Sum all scores.""" + return sum(scores.values()) + + decl = build_function_declaration_with_json_schema(sum_scores) + schema = decl.parameters_json_schema + + self.assertEqual(schema["properties"]["scores"]["type"], "object") + # additionalProperties should specify int type + self.assertEqual( + schema["properties"]["scores"] + .get("additionalProperties", {}) 
+ .get("type"), + "integer", + ) + self.assertEqual( + decl.response_json_schema, + { + "type": "integer", + }, + ) + + def test_sequence_type(self): + """Test Sequence[str] parameter (from collections.abc).""" + + def process_items(items: Sequence[str]) -> int: + """Process items and return count.""" + return len(list(items)) + + decl = build_function_declaration_with_json_schema(process_items) + schema = decl.parameters_json_schema + + self.assertEqual(schema["properties"]["items"]["type"], "array") + self.assertEqual(schema["properties"]["items"]["items"]["type"], "string") + self.assertEqual( + decl.response_json_schema, + { + "type": "integer", + }, + ) + + def test_tuple_fixed_length(self): + """Test tuple[int, int] parameter (fixed length).""" + + def add_point(coords: tuple[int, int]) -> int: + """Add coordinates.""" + x, y = coords + return x + y + + decl = build_function_declaration_with_json_schema(add_point) + schema = decl.parameters_json_schema + + # Fixed-length tuples use prefixItems + coords_schema = schema["properties"]["coords"] + self.assertEqual(coords_schema["type"], "array") + self.assertIn("prefixItems", coords_schema) + self.assertLen(coords_schema["prefixItems"], 2) + self.assertEqual( + decl.response_json_schema, + { + "type": "integer", + }, + ) + + +class TestEnumAndLiteral(parameterized.TestCase): + """Tests for Enum and Literal types.""" + + def test_string_enum(self): + """Test Enum parameter with string values.""" + + def set_color(color: Color) -> str: + """Set the color.""" + return color.value + + decl = build_function_declaration_with_json_schema(set_color) + schema = decl.parameters_json_schema + + self.assertIn("$defs", schema) + self.assertIn("color", schema["properties"]) + color_schema = schema["properties"]["color"] + self.assertIn("$ref", color_schema) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_literal_type(self): + """Test Literal type parameter.""" + + def set_mode(mode: Literal["fast", "slow", "auto"]) -> str: + """Set the mode.""" + return mode + + decl = build_function_declaration_with_json_schema(set_mode) + schema = decl.parameters_json_schema + + mode_schema = schema["properties"]["mode"] + self.assertEqual(mode_schema.get("enum"), ["fast", "slow", "auto"]) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_literal_with_default(self): + """Test Literal type with default value.""" + + def configure(mode: Literal["on", "off"] = "on") -> str: + """Configure something.""" + return mode + + decl = build_function_declaration_with_json_schema(configure) + schema = decl.parameters_json_schema + + self.assertEqual(schema["properties"]["mode"]["default"], "on") + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + +class TestOptionalAndUnion(parameterized.TestCase): + """Tests for Optional and Union types.""" + + def test_optional_string(self): + """Test Optional[str] parameter.""" + + def greet(name: Optional[str] = None) -> str: + """Greet someone.""" + return f"Hello, {name or 'World'}!" 
+ + decl = build_function_declaration_with_json_schema(greet) + schema = decl.parameters_json_schema + + # Optional should be represented with anyOf including null + name_schema = schema["properties"]["name"] + self.assertIn("anyOf", name_schema) + self.assertLen(name_schema["anyOf"], 2) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_union_of_primitives(self): + """Test Union[int, str] parameter.""" + + def process(value: int | str) -> str: + """Process a value.""" + return str(value) + + decl = build_function_declaration_with_json_schema(process) + schema = decl.parameters_json_schema + + value_schema = schema["properties"]["value"] + self.assertIn("anyOf", value_schema) + self.assertLen(value_schema["anyOf"], 2) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_complex_union(self): + """Test Union[int, str, dict[str, float]] parameter.""" + + def flexible_input( + payload: int | str | dict[str, float] = 0, + ) -> str: + """Accept flexible input.""" + return str(payload) + + decl = build_function_declaration_with_json_schema(flexible_input) + schema = decl.parameters_json_schema + + payload_schema = schema["properties"]["payload"] + self.assertIn("anyOf", payload_schema) + self.assertLen(payload_schema["anyOf"], 3) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + +class TestNestedObjects(parameterized.TestCase): + """Tests for nested Pydantic models and dataclasses.""" + + def test_pydantic_model_parameter(self): + """Test parameter that is a Pydantic model.""" + + def save_address(address: Address) -> str: + """Save an address.""" + return f"Saved address in {address.city}" + + decl = build_function_declaration_with_json_schema(save_address) + schema = decl.parameters_json_schema + + # Should have $defs for the nested model + self.assertIn("address", schema["properties"]) + self.assertIn("$ref", schema["properties"]["address"]) + + address_def = schema["$defs"]["Address"] + self.assertEqual(address_def["type"], "object") + self.assertIn("street", address_def["properties"]) + self.assertEqual( + address_def["properties"]["zip_code"]["pattern"], r"^\d{5}$" + ) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_nested_pydantic_model(self): + """Test Pydantic model with nested model.""" + + def save_person(person: Person) -> str: + """Save a person.""" + return f"Saved {person.name}" + + decl = build_function_declaration_with_json_schema(save_person) + schema = decl.parameters_json_schema + + # Should handle nested Address model + self.assertIn("$defs", schema) + person_defs = schema["$defs"]["Person"] + self.assertEqual(person_defs["type"], "object") + self.assertIn("address", person_defs["properties"]) + self.assertIn("person", schema["properties"]) + self.assertIn("$ref", schema["properties"]["person"]) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_pydantic_dataclass_parameter(self): + """Test parameter that is a Pydantic dataclass.""" + + def resize_window(window: Window) -> str: + """Resize a window.""" + return f"Resized to {window.width}x{window.height}" + + decl = build_function_declaration_with_json_schema(resize_window) + schema = decl.parameters_json_schema + + self.assertIn("window", schema["properties"]) + self.assertIn("$ref", schema["properties"]["window"]) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def 
test_list_of_pydantic_models(self): + """Test list of Pydantic models.""" + + def save_addresses(addresses: list[Address]) -> int: + """Save multiple addresses.""" + return len(addresses) + + decl = build_function_declaration_with_json_schema(save_addresses) + schema = decl.parameters_json_schema + + addr_schema = schema["properties"]["addresses"] + self.assertEqual(addr_schema["type"], "array") + self.assertEqual( + decl.response_json_schema, + { + "type": "integer", + }, + ) + + +class TestSpecialCases(parameterized.TestCase): + """Tests for special cases and edge cases.""" + + def test_no_parameters(self): + """Test function with no parameters.""" + + def get_time() -> str: + """Get current time.""" + return "12:00" + + decl = build_function_declaration_with_json_schema(get_time) + + self.assertEqual(decl.name, "get_time") + self.assertIsNone(decl.parameters_json_schema) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_no_type_annotations(self): + """Test function with no type annotations.""" + + def legacy_function(x, y): + """A legacy function without types.""" + return x + y + + decl = build_function_declaration_with_json_schema(legacy_function) + schema = decl.parameters_json_schema + + # Should still generate schema, with Any type + self.assertIn("x", schema["properties"]) + self.assertIsNone(schema["properties"]["x"].get("type")) + self.assertIn("y", schema["properties"]) + self.assertIsNone(schema["properties"]["y"].get("type")) + # No return type annotation, so response schema should be None + self.assertIsNone(decl.response_json_schema) + + def test_any_type_parameter(self): + """Test parameter with Any type.""" + + def process_any(data: Any) -> str: + """Process any data.""" + return str(data) + + decl = build_function_declaration_with_json_schema(process_any) + schema = decl.parameters_json_schema + + # Any type should be represented somehow + self.assertIn("data", schema["properties"]) + self.assertIsNone(schema["properties"]["data"].get("type")) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_tool_context_ignored_via_ignore_params(self): + """Test that tool_context parameter is ignored when passed in ignore_params.""" + + def my_tool(query: str, tool_context: ToolContext) -> str: + """A tool that uses context.""" + return query + + decl = build_function_declaration_with_json_schema( + my_tool, ignore_params=["tool_context"] + ) + schema = decl.parameters_json_schema + + self.assertIn("query", schema["properties"]) + self.assertNotIn("tool_context", schema["properties"]) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_ignore_params(self): + """Test ignoring specific parameters.""" + + def complex_func(a: str, b: int, c: float, internal: str) -> str: + """A function with internal parameter.""" + return a + + decl = build_function_declaration_with_json_schema( + complex_func, ignore_params=["internal"] + ) + schema = decl.parameters_json_schema + + self.assertIn("a", schema["properties"]) + self.assertIn("b", schema["properties"]) + self.assertIn("c", schema["properties"]) + self.assertNotIn("internal", schema["properties"]) + self.assertEqual( + decl.response_json_schema, + { + "type": "string", + }, + ) + + def test_docstring_preserved(self): + """Test that docstring is preserved as description.""" + + def well_documented(x: int) -> int: + """This is a well-documented function. + + It does something useful. 
+ + Args: + x: The number to square. + + Returns: + The squared number. + """ + return x + + decl = build_function_declaration_with_json_schema(well_documented) + + self.assertIn("well-documented function", decl.description) + self.assertIn("something useful", decl.description) + self.assertEqual( + decl.response_json_schema, + { + "type": "integer", + }, + ) + + def test_no_docstring(self): + """Test function without docstring.""" + + def undocumented(x: int) -> int: + return x + + decl = build_function_declaration_with_json_schema(undocumented) + + self.assertIsNone(decl.description) + self.assertEqual( + decl.response_json_schema, + { + "type": "integer", + }, + ) + + +class TestComplexFunction(parameterized.TestCase): + """Test the complex function from the user's prototype.""" + + def test_complex_function_schema(self): + """Test the complex function with many type variations.""" + + def complex_fn( + color: Color, + tags: list[str], + mode: Literal["fast", "slow"] = "fast", + count: Optional[int] = None, + address: Optional[Address] = None, + window: Optional[Window] = None, + payload: int | str | dict[str, float] = 0, + colors: Optional[list[Color]] = None, + ) -> None: + """A complex function with many parameter types.""" + del color, tags, mode, count, address, window, payload, colors + + decl = build_function_declaration_with_json_schema(complex_fn) + + self.assertEqual(decl.name, "complex_fn") + self.assertIsNotNone(decl.parameters_json_schema) + + schema = decl.parameters_json_schema + props = schema["properties"] + + # Verify all parameters are present + self.assertIn("color", props) + self.assertIn("tags", props) + self.assertIn("mode", props) + self.assertIn("count", props) + self.assertIn("address", props) + self.assertIn("window", props) + self.assertIn("payload", props) + self.assertIn("colors", props) + + # tags should be array of strings + self.assertEqual(props["tags"]["type"], "array") + + # mode should have enum + self.assertEqual(props["mode"].get("enum"), ["fast", "slow"]) + # Return type is None, which maps to JSON schema null type + self.assertEqual( + decl.response_json_schema, + { + "type": "null", + }, + ) + + +class TestPydanticModelAsFunction(parameterized.TestCase): + """Tests for using Pydantic BaseModel directly.""" + + def test_base_model_class(self): + """Test passing a Pydantic BaseModel class directly.""" + + class CreateUserRequest(BaseModel): + """Request to create a user.""" + + name: str + email: str + age: Optional[int] = None + + decl = build_function_declaration_with_json_schema(CreateUserRequest) + + self.assertEqual(decl.name, "CreateUserRequest") + self.assertIsNotNone(decl.parameters_json_schema) + + schema = decl.parameters_json_schema + self.assertIn("name", schema["properties"]) + self.assertIn("email", schema["properties"]) + self.assertIn("age", schema["properties"]) + # When passing a BaseModel, there is no function return, so response schema + # is None + self.assertIsNone(decl.response_json_schema) From b23deeb9abffbcdaf64e518ad62d70070bbc7c0a Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Wed, 10 Dec 2025 10:44:30 -0800 Subject: [PATCH 3/3] docs: Update adk docs update workflow to invoke the updater agent once per suggestion Co-authored-by: Xuan Yang PiperOrigin-RevId: 842789476 --- .../adk_docs_updater/main.py | 84 ++++++++++++++++--- .../samples/adk_documentation/utils.py | 46 ++++++++++ 2 files changed, 119 insertions(+), 11 deletions(-) diff --git a/contributing/samples/adk_documentation/adk_docs_updater/main.py 
b/contributing/samples/adk_documentation/adk_docs_updater/main.py index 32d75047ed..3c3839fb61 100644 --- a/contributing/samples/adk_documentation/adk_docs_updater/main.py +++ b/contributing/samples/adk_documentation/adk_docs_updater/main.py @@ -24,13 +24,14 @@ from adk_documentation.settings import DOC_REPO from adk_documentation.tools import get_issue from adk_documentation.utils import call_agent_async +from adk_documentation.utils import parse_suggestions from google.adk.cli.utils import logs from google.adk.runners import InMemoryRunner APP_NAME = "adk_docs_updater" USER_ID = "adk_docs_updater_user" -logs.setup_adk_logger(level=logging.DEBUG) +logs.setup_adk_logger(level=logging.INFO) def process_arguments(): @@ -68,23 +69,84 @@ async def main(): print(f"Failed to get issue {issue_number}: {get_issue_response}\n") return issue = get_issue_response["issue"] + issue_title = issue.get("title", "") + issue_body = issue.get("body", "") + + # Parse numbered suggestions from issue body + suggestions = parse_suggestions(issue_body) + + if not suggestions: + print(f"No numbered suggestions found in issue #{issue_number}.") + print("Falling back to processing the entire issue as a single task.") + suggestions = [(1, issue_body)] + + print(f"Found {len(suggestions)} suggestion(s) in issue #{issue_number}.") + print("=" * 80) runner = InMemoryRunner( agent=agent.root_agent, app_name=APP_NAME, ) - session = await runner.session_service.create_session( - app_name=APP_NAME, - user_id=USER_ID, - ) - response = await call_agent_async( - runner, - USER_ID, - session.id, - f"Please update the ADK docs according to the following issue:\n{issue}", + results = [] + for suggestion_num, suggestion_text in suggestions: + print(f"\n>>> Processing suggestion #{suggestion_num}...") + print("-" * 80) + + # Create a new session for each suggestion to avoid context interference + session = await runner.session_service.create_session( + app_name=APP_NAME, + user_id=USER_ID, + ) + + prompt = f""" + Please update the ADK docs according to suggestion #{suggestion_num} from issue #{issue_number}. + + Issue title: {issue_title} + + Suggestion to process: + {suggestion_text} + + Note: Focus only on this specific suggestion. Create exactly one pull request for this suggestion. 
+ """ + + try: + response = await call_agent_async( + runner, + USER_ID, + session.id, + prompt, + ) + results.append({ + "suggestion_num": suggestion_num, + "status": "success", + "response": response, + }) + print(f"<<<< Suggestion #{suggestion_num} completed.") + except Exception as e: + results.append({ + "suggestion_num": suggestion_num, + "status": "error", + "error": str(e), + }) + print(f"<<<< Suggestion #{suggestion_num} failed: {e}") + + print("-" * 80) + + # Print summary + print("\n" + "=" * 80) + print("SUMMARY") + print("=" * 80) + successful = [r for r in results if r["status"] == "success"] + failed = [r for r in results if r["status"] == "error"] + print( + f"Total: {len(results)}, Success: {len(successful)}, Failed:" + f" {len(failed)}" ) - print(f"<<<< Agent Final Output: {response}\n") + if failed: + print("\nFailed suggestions:") + for r in failed: + print(f" - Suggestion #{r['suggestion_num']}: {r['error']}") if __name__ == "__main__": diff --git a/contributing/samples/adk_documentation/utils.py b/contributing/samples/adk_documentation/utils.py index 22b04cb9cf..1fd2efbf4a 100644 --- a/contributing/samples/adk_documentation/utils.py +++ b/contributing/samples/adk_documentation/utils.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re from typing import Any from typing import Dict from typing import List +from typing import Tuple from adk_documentation.settings import GITHUB_TOKEN from google.adk.agents.run_config import RunConfig @@ -96,3 +98,47 @@ async def call_agent_async( final_response_text += text return final_response_text + + +def parse_suggestions(issue_body: str) -> List[Tuple[int, str]]: + """Parse numbered suggestions from issue body. + + Supports multiple formats: + - Format A (markdown headers): "### 1. Title" + - Format B (numbered list with bold): "1. **Title**" + + Args: + issue_body: The body text of the GitHub issue. + + Returns: + A list of tuples, where each tuple contains: + - The suggestion number (1-based) + - The full text of that suggestion + """ + # Try different patterns in order of preference + patterns = [ + # Format A: "### 1. Title" (markdown header with number) + (r"(?=^###\s+\d+\.)", r"^###\s+(\d+)\."), + # Format B: "1. **Title**" (numbered list with bold) + (r"(?=^\d+\.\s+\*\*)", r"^(\d+)\.\s+\*\*"), + ] + + for split_pattern, match_pattern in patterns: + parts = re.split(split_pattern, issue_body, flags=re.MULTILINE) + + suggestions = [] + for part in parts: + part = part.strip() + if not part: + continue + + match = re.match(match_pattern, part) + if match: + suggestion_num = int(match.group(1)) + suggestions.append((suggestion_num, part)) + + # If we found suggestions with this pattern, return them + if suggestions: + return suggestions + + return []