From a5f0d333d7f26f2966ed511d5d9def7a1933f0c2 Mon Sep 17 00:00:00 2001
From: Xuan Yang
Date: Tue, 13 Jan 2026 09:50:26 -0800
Subject: [PATCH 1/5] feat: Use json schema for RestApiTool declaration when
 feature enabled

Co-authored-by: Xuan Yang
PiperOrigin-RevId: 855767527
---
 .../openapi_spec_parser/rest_api_tool.py      | 17 ++++++--
 .../openapi_spec_parser/test_rest_api_tool.py | 41 +++++++++++++++++++
 2 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/src/google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py b/src/google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py
index 27c6acdaeb..a2340b951d 100644
--- a/src/google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py
+++ b/src/google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py
@@ -33,6 +33,8 @@
 from ....agents.readonly_context import ReadonlyContext
 from ....auth.auth_credential import AuthCredential
 from ....auth.auth_schemes import AuthScheme
+from ....features import FeatureName
+from ....features import is_feature_enabled
 from ..._gemini_schema_util import _to_gemini_schema
 from ..._gemini_schema_util import _to_snake_case
 from ...base_tool import BaseTool
@@ -221,10 +223,17 @@ def from_parsed_operation_str(
   def _get_declaration(self) -> FunctionDeclaration:
     """Returns the function declaration in the Gemini Schema format."""
     schema_dict = self._operation_parser.get_json_schema()
-    parameters = _to_gemini_schema(schema_dict)
-    function_decl = FunctionDeclaration(
-        name=self.name, description=self.description, parameters=parameters
-    )
+    if is_feature_enabled(FeatureName.JSON_SCHEMA_FOR_FUNC_DECL):
+      function_decl = FunctionDeclaration(
+          name=self.name,
+          description=self.description,
+          parameters_json_schema=schema_dict,
+      )
+    else:
+      parameters = _to_gemini_schema(schema_dict)
+      function_decl = FunctionDeclaration(
+          name=self.name, description=self.description, parameters=parameters
+      )
     return function_decl
 
   def configure_auth_scheme(
diff --git a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_rest_api_tool.py b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_rest_api_tool.py
index ddf09aeb4a..309d7c3774 100644
--- a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_rest_api_tool.py
+++ b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_rest_api_tool.py
@@ -29,6 +29,8 @@
 from google.adk.auth.auth_credential import AuthCredentialTypes
 from google.adk.auth.auth_credential import HttpAuth
 from google.adk.auth.auth_credential import HttpCredentials
+from google.adk.features import FeatureName
+from google.adk.features._feature_registry import temporary_feature_override
 from google.adk.sessions.state import State
 from google.adk.tools.openapi_tool.auth.auth_helpers import token_to_scheme_credential
 from google.adk.tools.openapi_tool.common.common import ApiParameter
@@ -204,6 +206,45 @@ def test_get_declaration(
     assert declaration.description == "Test description"
     assert isinstance(declaration.parameters, Schema)
 
+  def test_get_declaration_with_json_schema_feature_enabled(
+      self, sample_endpoint, sample_operation
+  ):
+    """Test that _get_declaration uses parameters_json_schema when feature is enabled."""
+    mock_parser = MagicMock(spec=OperationParser)
+    mock_parser.get_json_schema.return_value = {
+        "type": "object",
+        "properties": {
+            "test_param": {"type": "string"},
+        },
+        "required": ["test_param"],
+    }
+
+    tool = RestApiTool(
+        name="test_tool",
+        description="Test description",
+        endpoint=sample_endpoint,
+        operation=sample_operation,
+        should_parse_operation=False,
+    )
+    tool._operation_parser = mock_parser
+
+    with temporary_feature_override(
+        FeatureName.JSON_SCHEMA_FOR_FUNC_DECL, True
+    ):
+      declaration = tool._get_declaration()
+
+    assert isinstance(declaration, FunctionDeclaration)
+    assert declaration.name == "test_tool"
+    assert declaration.description == "Test description"
+    assert declaration.parameters is None
+    assert declaration.parameters_json_schema == {
+        "type": "object",
+        "properties": {
+            "test_param": {"type": "string"},
+        },
+        "required": ["test_param"],
+    }
+
   @patch(
       "google.adk.tools.openapi_tool.openapi_spec_parser.rest_api_tool.requests.request"
   )

From f668a5de44ce3f4b5bf6991f5386ecd8970bb6cd Mon Sep 17 00:00:00 2001
From: "Xiang (Sean) Zhou"
Date: Tue, 13 Jan 2026 10:36:49 -0800
Subject: [PATCH 2/5] chore: Update comments about why we can return upon
 flushing audio caches

Co-authored-by: Xiang (Sean) Zhou
PiperOrigin-RevId: 855788674
---
 src/google/adk/flows/llm_flows/base_llm_flow.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py
index 3e850ae207..72997ad9f9 100644
--- a/src/google/adk/flows/llm_flows/base_llm_flow.py
+++ b/src/google/adk/flows/llm_flows/base_llm_flow.py
@@ -633,6 +633,12 @@ async def _postprocess_live(
     for event in flushed_events:
       yield event
     if flushed_events:
+      # NOTE: returning here is OK for now because we currently only flush
+      # events on interrupted or turn_complete. turn_complete is a pure
+      # control event, and while an interrupted response may carry content,
+      # that content is ignorable because the model has already been
+      # interrupted. If we ever flush events that are not pure control
+      # events, we should not return here.
       return
 
     # Builds the event.
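
A condensed sketch of the flush path that this comment documents may help readers who are not looking at the full `base_llm_flow.py`. This is a simplified, hypothetical reconstruction stitched together from the context lines above, not the actual method body:

```python
async def _postprocess_live_flush_sketch(self, invocation_context, llm_response):
  # Caches are flushed only for interrupted or turn_complete responses,
  # which are control signals rather than content-bearing model output.
  flushed_events = await self._handle_control_event_flush(
      invocation_context, llm_response
  )
  for event in flushed_events:
    yield event
  if flushed_events:
    # Returning here is safe: any content attached to an interrupted
    # response is ignorable because the model was already cut off.
    return
  # Otherwise, fall through and build the regular model event.
```
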
From ab62b1bffd7ad2df5809d430ad1823872b8bd67a Mon Sep 17 00:00:00 2001
From: "Xiang (Sean) Zhou"
Date: Tue, 13 Jan 2026 10:38:04 -0800
Subject: [PATCH 3/5] fix: Use the agent name as the author of the audio event

Co-authored-by: Xiang (Sean) Zhou
PiperOrigin-RevId: 855789317
---
 .../flows/llm_flows/audio_cache_manager.py    |  8 ++-
 .../llm_flows/test_audio_cache_manager.py     | 51 +++++++++++++++++++
 2 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/src/google/adk/flows/llm_flows/audio_cache_manager.py b/src/google/adk/flows/llm_flows/audio_cache_manager.py
index a6308b3fe6..f5d74c899c 100644
--- a/src/google/adk/flows/llm_flows/audio_cache_manager.py
+++ b/src/google/adk/flows/llm_flows/audio_cache_manager.py
@@ -181,10 +181,16 @@ async def _flush_cache_to_services(
     artifact_ref = f'artifact://{invocation_context.app_name}/{invocation_context.user_id}/{invocation_context.session.id}/_adk_live/{filename}#{revision_id}'
 
     # Create event with file data reference to add to session
+    # For model events, author should be the agent name, not the role
+    author = (
+        invocation_context.agent.name
+        if audio_cache[0].role == 'model'
+        else audio_cache[0].role
+    )
     audio_event = Event(
         id=Event.new_id(),
         invocation_id=invocation_context.invocation_id,
-        author=audio_cache[0].role,
+        author=author,
         content=types.Content(
             role=audio_cache[0].role,
             parts=[
diff --git a/tests/unittests/flows/llm_flows/test_audio_cache_manager.py b/tests/unittests/flows/llm_flows/test_audio_cache_manager.py
index 28d9b6849a..fddf0fe869 100644
--- a/tests/unittests/flows/llm_flows/test_audio_cache_manager.py
+++ b/tests/unittests/flows/llm_flows/test_audio_cache_manager.py
@@ -387,3 +387,54 @@ async def test_filename_uses_first_chunk_timestamp(self):
     assert filename.startswith(
         f'adk_live_audio_storage_input_audio_{expected_timestamp_ms}'
     )
+
+  @pytest.mark.asyncio
+  async def test_flush_event_author_for_user_audio(self):
+    """Test that flushed user audio events have 'user' as author."""
+    invocation_context = await testing_utils.create_invocation_context(
+        testing_utils.create_test_agent()
+    )
+
+    # Set up mock artifact service
+    mock_artifact_service = AsyncMock()
+    mock_artifact_service.save_artifact.return_value = 123
+    invocation_context.artifact_service = mock_artifact_service
+
+    # Cache user input audio
+    input_blob = types.Blob(data=b'user_audio_data', mime_type='audio/pcm')
+    self.manager.cache_audio(invocation_context, input_blob, 'input')
+
+    # Flush cache and get events
+    events = await self.manager.flush_caches(
+        invocation_context, flush_user_audio=True, flush_model_audio=False
+    )
+
+    # Verify event author is 'user' for user audio
+    assert len(events) == 1
+    assert events[0].author == 'user'
+    assert events[0].content.role == 'user'
+
+  @pytest.mark.asyncio
+  async def test_flush_event_author_for_model_audio(self):
+    """Test that flushed model audio events have agent name as author, not 'model'."""
+    agent = testing_utils.create_test_agent(name='my_test_agent')
+    invocation_context = await testing_utils.create_invocation_context(agent)
+
+    # Set up mock artifact service
+    mock_artifact_service = AsyncMock()
+    mock_artifact_service.save_artifact.return_value = 123
+    invocation_context.artifact_service = mock_artifact_service
+
+    # Cache model output audio
+    output_blob = types.Blob(data=b'model_audio_data', mime_type='audio/wav')
+    self.manager.cache_audio(invocation_context, output_blob, 'output')
+
+    # Flush cache and get events
+    events = await self.manager.flush_caches(
+        invocation_context, flush_user_audio=False, flush_model_audio=True
+    )
+
+    # Verify event author is agent name (not 'model') for model audio
+    assert len(events) == 1
+    assert events[0].author == 'my_test_agent'  # Agent name, not 'model'
+    assert events[0].content.role == 'model'  # Role is still 'model'

From 277084e31368302e6338b69d456affd35d5fedfe Mon Sep 17 00:00:00 2001
From: Google Team Member
Date: Tue, 13 Jan 2026 10:59:22 -0800
Subject: [PATCH 4/5] refactor(tools): Update `ToolboxToolset` to wrap
 `toolbox-adk`

### Description of Change

**Problem:** The `ToolboxToolset` was implemented directly within
`adk-python`, leading to code duplication and potential drift from the core
`toolbox-adk` implementation.

**Solution:** Refactored `ToolboxToolset` to act as a thin wrapper around the
`toolbox-adk` package, delegating all functionality to
`toolbox_adk.ToolboxToolset`.

### Testing Plan

**Unit Tests:**
- [x] I have added or updated unit tests for my change.
- [x] All unit tests pass locally.

Summary:
- Verified initialization flows through to `toolbox-adk`.
- Verified `auth_token_getters` are correctly propagated.
- Verified type hints are static-analysis friendly.

**Manual End-to-End (E2E) Tests:**

Manually verified standard toolbox loading and execution with the new wrapper:

```python
from google.adk.tools import ToolboxToolset
from toolbox_adk import CredentialStrategy

# Loading with toolset_name
ts = ToolboxToolset(
    server_url='http://localhost:8080',
    toolset_name='calculator',
    credentials=CredentialStrategy.toolbox_identity()
)
tools = await ts.get_tools()
print(f'Loaded {len(tools)} tools')
```

### Checklist
- [x] I have read the [CONTRIBUTING.md](https://github.com/google/adk-python/blob/main/CONTRIBUTING.md) document.
- [x] I have performed a self-review of my own code.
- [x] I have commented my code, particularly in hard-to-understand areas.
- [x] I have added tests that prove my fix is effective or that my feature works.
- [x] New and existing unit tests pass locally with my changes.
- [x] I have manually tested my changes end-to-end.
- [x] Any dependent changes have been merged and published in downstream modules.

PiperOrigin-RevId: 855798474
---
 pyproject.toml                          |   2 +-
 src/google/adk/tools/toolbox_toolset.py | 104 +++++++++++-----------
 2 files changed, 56 insertions(+), 50 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index b17c29ff21..f612ef4df2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -157,7 +157,7 @@ extensions = [
   "llama-index-readers-file>=0.4.0", # For retrieval using LlamaIndex.
   "llama-index-embeddings-google-genai>=0.3.0", # For files retrieval using LlamaIndex.
   "lxml>=5.3.0", # For load_web_page tool.
-  "toolbox-core>=0.1.0", # For tools.toolbox_toolset.ToolboxToolset
+  "toolbox-adk>=0.1.0", # For tools.toolbox_toolset.ToolboxToolset
 ]
 
 otel-gcp = ["opentelemetry-instrumentation-google-genai>=0.3b0, <1.0.0"]
diff --git a/src/google/adk/tools/toolbox_toolset.py b/src/google/adk/tools/toolbox_toolset.py
index 51c50d194d..73f27f3fc2 100644
--- a/src/google/adk/tools/toolbox_toolset.py
+++ b/src/google/adk/tools/toolbox_toolset.py
@@ -12,29 +12,41 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 from typing import Any
 from typing import Callable
 from typing import List
 from typing import Mapping
 from typing import Optional
+from typing import TYPE_CHECKING
 from typing import Union
 
-import toolbox_core as toolbox
 from typing_extensions import override
 
 from ..agents.readonly_context import ReadonlyContext
 from .base_tool import BaseTool
 from .base_toolset import BaseToolset
-from .function_tool import FunctionTool
+
+if TYPE_CHECKING:
+  from toolbox_adk import CredentialConfig
 
 
 class ToolboxToolset(BaseToolset):
   """A class that provides access to toolbox toolsets.
 
+  This class acts as a bridge to the `toolbox-adk` package.
+  You must install `toolbox-adk` to use this class.
+
   Example:
     ```python
-    toolbox_toolset = ToolboxToolset("http://127.0.0.1:5000",
-        toolset_name="my-toolset")
+    from toolbox_adk import CredentialStrategy
+
+    toolbox_toolset = ToolboxToolset(
+        server_url="http://127.0.0.1:5000",
+        # Either toolset_name or tool_names must be provided.
+        toolset_name="my-toolset",
+        credentials=CredentialStrategy.toolbox_identity(),
+    )
     ```
   """
@@ -44,64 +56,58 @@ def __init__(
       self,
       server_url: str,
       toolset_name: Optional[str] = None,
       tool_names: Optional[List[str]] = None,
-      auth_token_getters: Optional[dict[str, Callable[[], str]]] = None,
+      auth_token_getters: Optional[Mapping[str, Callable[[], str]]] = None,
       bound_params: Optional[
           Mapping[str, Union[Callable[[], Any], Any]]
       ] = None,
+      credentials: Optional[CredentialConfig] = None,
+      additional_headers: Optional[Mapping[str, str]] = None,
+      **kwargs,
   ):
     """Args:
-      server_url: The URL of the toolbox server.
-      toolset_name: The name of the toolbox toolset to load.
-      tool_names: The names of the tools to load.
-      auth_token_getters: A mapping of authentication service names to
-        callables that return the corresponding authentication token. see:
-        https://github.com/googleapis/mcp-toolbox-sdk-python/tree/main/packages/toolbox-core#authenticating-tools
-        for details.
-      bound_params: A mapping of parameter names to bind to specific values or
-        callables that are called to produce values as needed. see:
-        https://github.com/googleapis/mcp-toolbox-sdk-python/tree/main/packages/toolbox-core#binding-parameter-values
-        for details.
-      The resulting ToolboxToolset will contain both tools loaded by tool_names
-      and toolset_name.
+      server_url: The URL of the toolbox server.
+      toolset_name: The name of the toolbox toolset to load.
+      tool_names: The names of the tools to load.
+      auth_token_getters: (Deprecated) Map of auth token getters.
+      bound_params: Parameters to bind to the tools.
+      credentials: (Optional) toolbox_adk.CredentialConfig object.
+      additional_headers: (Optional) Static headers dictionary.
+      **kwargs: Additional arguments passed to the underlying
+        toolbox_adk.ToolboxToolset.
     """
-    if not tool_names and not toolset_name:
-      raise ValueError("tool_names and toolset_name cannot both be None")
+    if not toolset_name and not tool_names:
+      raise ValueError(
+          "Either 'toolset_name' or 'tool_names' must be provided."
+      )
+
+    try:
+      from toolbox_adk import ToolboxToolset as RealToolboxToolset  # pylint: disable=import-outside-toplevel
+    except ImportError as exc:
+      raise ImportError(
+          "ToolboxToolset requires the 'toolbox-adk' package. "
+          "Please install it using `pip install toolbox-adk`."
+      ) from exc
+
     super().__init__()
-    self._server_url = server_url
-    self._toolbox_client = toolbox.ToolboxClient(server_url)
-    self._toolset_name = toolset_name
-    self._tool_names = tool_names
-    self._auth_token_getters = auth_token_getters or {}
-    self._bound_params = bound_params or {}
+
+    self._delegate = RealToolboxToolset(
+        server_url=server_url,
+        toolset_name=toolset_name,
+        tool_names=tool_names,
+        credentials=credentials,
+        additional_headers=additional_headers,
+        bound_params=bound_params,
+        auth_token_getters=auth_token_getters,
+        **kwargs,
+    )
 
   @override
   async def get_tools(
       self, readonly_context: Optional[ReadonlyContext] = None
   ) -> list[BaseTool]:
-    tools = []
-    if self._toolset_name:
-      tools.extend([
-          FunctionTool(tool)
-          for tool in await self._toolbox_client.load_toolset(
-              self._toolset_name,
-              auth_token_getters=self._auth_token_getters,
-              bound_params=self._bound_params,
-          )
-      ])
-    if self._tool_names:
-      tools.extend([
-          FunctionTool(
-              await self._toolbox_client.load_tool(
-                  tool_name,
-                  auth_token_getters=self._auth_token_getters,
-                  bound_params=self._bound_params,
-              )
-          )
-          for tool_name in self._tool_names
-      ])
-    return tools
+    return await self._delegate.get_tools(readonly_context)
 
   @override
   async def close(self):
-    self._toolbox_client.close()
+    await self._delegate.close()

From 1bedffe4570b4954a36a621c396951ce3f0b70fd Mon Sep 17 00:00:00 2001
From: "Xiang (Sean) Zhou"
Date: Tue, 13 Jan 2026 12:27:54 -0800
Subject: [PATCH 5/5] chore: Remove dead code for flushing model audio when
 generation completes

LlmResponse doesn't expose a generation_complete signal yet, so remove the
dead code.

Co-authored-by: Xiang (Sean) Zhou
PiperOrigin-RevId: 855835802
---
 src/google/adk/flows/llm_flows/base_llm_flow.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py
index 72997ad9f9..91b57cb873 100644
--- a/src/google/adk/flows/llm_flows/base_llm_flow.py
+++ b/src/google/adk/flows/llm_flows/base_llm_flow.py
@@ -974,13 +974,8 @@ async def _handle_control_event_flush(
           flush_user_audio=True,
           flush_model_audio=True,
       )
-    elif getattr(llm_response, 'generation_complete', False):
-      # model generation complete so we can flush model audio
-      return await self.audio_cache_manager.flush_caches(
-          invocation_context,
-          flush_user_audio=False,
-          flush_model_audio=True,
-      )
+    # TODO: Once generation_complete is surfaced on LlmResponse, we can flush
+    # model audio here (flush_user_audio=False, flush_model_audio=True).
     return []
 
   async def _run_and_handle_error(
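
Taken together with the comment added in [PATCH 2/5], the flush helper after this change is expected to look roughly like the sketch below. The guarding condition is not visible in these hunks, so the `interrupted` / `turn_complete` check is an assumption based on that comment rather than the actual source:

```python
async def _handle_control_event_flush(self, invocation_context, llm_response):
  # Flush both user and model audio only for pure control events; see the
  # NOTE added to _postprocess_live in PATCH 2/5.
  if llm_response.interrupted or llm_response.turn_complete:
    return await self.audio_cache_manager.flush_caches(
        invocation_context,
        flush_user_audio=True,
        flush_model_audio=True,
    )
  # TODO: Once generation_complete is surfaced on LlmResponse, we can flush
  # model audio here (flush_user_audio=False, flush_model_audio=True).
  return []
```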