2 changes: 1 addition & 1 deletion contributing/samples/api_registry_agent/agent.py
@@ -26,7 +26,7 @@
     mcp_server_name=MCP_SERVER_NAME,
 )
 root_agent = LlmAgent(
-    model="gemini-2.0-flash",
+    model="gemini-2.5-flash",
     name="bigquery_assistant",
     instruction=f"""
 You are a helpful data analyst assistant with access to BigQuery. The project ID is: {PROJECT_ID}

@@ -7,7 +7,7 @@ This sample demonstrates how to use the `ApplicationIntegrationToolset` within a
 ## Prerequisites
 
 1. **Set up Integration Connection:**
-    * You need an existing [Integration connection](https://cloud.google.com/integration-connectors/docs/overview) configured to interact with your Jira instance. Follow the [documentation](https://google.github.io/adk-docs/tools/google-cloud-tools/#use-integration-connectors) to provision the Integration Connector in Google Cloud and then use this [documentation](https://cloud.google.com/integration-connectors/docs/connectors/jiracloud/configure) to create an Jira connection. Note the `Connection Name`, `Project ID`, and `Location` of your connection.
+    * You need an existing [Integration connection](https://cloud.google.com/integration-connectors/docs/overview) configured to interact with your Jira instance. Follow the [documentation](https://google.github.io/adk-docs/tools/google-cloud-tools/#use-integration-connectors) to provision the Integration Connector in Google Cloud and then use this [documentation](https://cloud.google.com/integration-connectors/docs/connectors/jiracloud/configure) to create a Jira connection. Note the `Connection Name`, `Project ID`, and `Location` of your connection.
     *
 
 2. **Configure Environment Variables:**

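To make the prerequisites above more concrete, here is a minimal sketch of wiring such a Jira Integration connection into an ADK agent via `ApplicationIntegrationToolset`. The environment variable names, the `Issues` entity and its operations, and the agent/model names are illustrative assumptions rather than the sample's actual configuration:

import os

from google.adk.agents import LlmAgent
from google.adk.tools.application_integration_tool.application_integration_toolset import (
    ApplicationIntegrationToolset,
)

# Assumed variable names; the sample's README defines its own set.
connection_project = os.environ["CONNECTION_PROJECT_ID"]
connection_location = os.environ["CONNECTION_LOCATION"]
connection_name = os.environ["CONNECTION_NAME"]

# Expose a subset of the Jira connection's entities as tools (the entity and
# operation names here are assumptions for illustration).
jira_toolset = ApplicationIntegrationToolset(
    project=connection_project,
    location=connection_location,
    connection=connection_name,
    entity_operations={"Issues": ["GET", "LIST"]},
)

root_agent = LlmAgent(
    model="gemini-2.5-flash",
    name="jira_assistant",
    instruction="Help the user look up Jira issues using the available tools.",
    tools=[jira_toolset],
)

The `Connection Name`, `Project ID`, and `Location` noted in step 1 are the values these environment variables would carry.
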
2 changes: 1 addition & 1 deletion contributing/samples/mcp_dynamic_header_agent/agent.py
@@ -18,7 +18,7 @@
 from google.adk.tools.mcp_tool.mcp_toolset import McpToolset
 
 root_agent = LlmAgent(
-    model='gemini-2.0-flash',
+    model='gemini-2.5-flash',
     name='tenant_agent',
     instruction="""You are a helpful assistant that helps users get tenant
     information. Call the get_tenant_data tool when the user asks for tenant data.""",

2 changes: 1 addition & 1 deletion contributing/samples/mcp_postgres_agent/agent.py
@@ -31,7 +31,7 @@
 )
 
 root_agent = LlmAgent(
-    model="gemini-2.0-flash",
+    model="gemini-2.5-flash",
     name="postgres_agent",
     instruction=(
         "You are a PostgreSQL database assistant. "

2 changes: 1 addition & 1 deletion contributing/samples/mcp_service_account_agent/agent.py
@@ -29,7 +29,7 @@
 SCOPES = {"https://www.googleapis.com/auth/cloud-platform": ""}
 
 root_agent = LlmAgent(
-    model="gemini-2.0-flash",
+    model="gemini-2.5-flash",
     name="enterprise_assistant",
     instruction="""
 Help the user with the tools available to you.

2 changes: 1 addition & 1 deletion contributing/samples/mcp_sse_agent/agent.py
@@ -28,7 +28,7 @@
 )
 
 root_agent = LlmAgent(
-    model='gemini-2.0-flash',
+    model='gemini-2.5-flash',
     name='enterprise_assistant',
     instruction=McpInstructionProvider(
         connection_params=connection_params,

2 changes: 1 addition & 1 deletion contributing/samples/mcp_stdio_notion_agent/agent.py
@@ -29,7 +29,7 @@
 })
 
 root_agent = LlmAgent(
-    model="gemini-2.0-flash",
+    model="gemini-2.5-flash",
     name="notion_agent",
     instruction=(
         "You are my workspace assistant. "

2 changes: 1 addition & 1 deletion contributing/samples/mcp_stdio_server_agent/agent.py
@@ -23,7 +23,7 @@
 _allowed_path = os.path.dirname(os.path.abspath(__file__))
 
 root_agent = LlmAgent(
-    model='gemini-2.0-flash',
+    model='gemini-2.5-flash',
     name='enterprise_assistant',
     instruction=f"""\
 Help user accessing their file systems.

2 changes: 1 addition & 1 deletion contributing/samples/mcp_streamablehttp_agent/agent.py
@@ -22,7 +22,7 @@
 _allowed_path = os.path.dirname(os.path.abspath(__file__))
 
 root_agent = LlmAgent(
-    model='gemini-2.0-flash',
+    model='gemini-2.5-flash',
     name='enterprise_assistant',
     instruction=f"""\
 Help user accessing their file systems.

2 changes: 1 addition & 1 deletion contributing/samples/multi_agent_seq_config/README.md
@@ -6,7 +6,7 @@ The whole process is:
 
 1. An agent backed by a cheap and fast model to write initial version.
 2. An agent backed by a smarter and a little more expensive to review the code.
-3. An final agent backed by the smartest and slowest model to write the final revision.
+3. A final agent backed by the smartest and slowest model to write the final revision.
 
 Sample queries:
 

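For readers skimming this diff, a minimal Python sketch of the three-stage pipeline the README describes, using `SequentialAgent`; the actual sample wires the agents up through its config files, and the model names below are only assumptions about which models play the "cheap", "smarter", and "smartest" roles:

from google.adk.agents import LlmAgent, SequentialAgent

# Stage 1: a cheap, fast model drafts the initial version.
initial_writer = LlmAgent(
    model="gemini-2.5-flash-lite",  # assumed "cheap and fast" model
    name="initial_writer",
    instruction="Write a first draft of the code the user asks for.",
    output_key="draft",
)

# Stage 2: a stronger model reviews the draft.
code_reviewer = LlmAgent(
    model="gemini-2.5-flash",  # assumed mid-tier model
    name="code_reviewer",
    instruction="Review the code in {draft} and list concrete improvements.",
    output_key="review",
)

# Stage 3: the strongest (and slowest) model writes the final revision.
final_writer = LlmAgent(
    model="gemini-2.5-pro",  # assumed "smartest and slowest" model
    name="final_writer",
    instruction="Rewrite the code in {draft}, applying the feedback in {review}.",
)

# Sub-agents run in order; each stage reads earlier outputs from session state.
root_agent = SequentialAgent(
    name="code_pipeline",
    sub_agents=[initial_writer, code_reviewer, final_writer],
)

Each stage publishes its result to session state via `output_key`, which is how the `{draft}` and `{review}` placeholders in later instructions get filled.
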
2 changes: 1 addition & 1 deletion contributing/samples/spanner_rag_agent/README.md
@@ -181,7 +181,7 @@ type.
 
 ## 💬 Sample prompts
 
-* I'd like to buy a starter bike for my 3 year old child, can you show me the recommendation?
+* I'd like to buy a starter bike for my 3-year-old child, can you show me the recommendation?
 
 ![Spanner RAG Sample Agent](Spanner_RAG_Sample_Agent.png)
 

4 changes: 2 additions & 2 deletions pyproject.toml
@@ -123,7 +123,7 @@ test = [
   "kubernetes>=29.0.0", # For GkeCodeExecutor
   "langchain-community>=0.3.17",
   "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
-  "litellm>=1.75.5, <2.0.0", # For LiteLLM tests
+  "litellm>=1.75.5, <1.81.0", # For LiteLLM tests
   "llama-index-readers-file>=0.4.0", # For retrieval tests
   "openai>=1.100.2", # For LiteLLM
   "pytest-asyncio>=0.25.0",
@@ -153,7 +153,7 @@ extensions = [
   "docker>=7.0.0", # For ContainerCodeExecutor
   "kubernetes>=29.0.0", # For GkeCodeExecutor
   "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
-  "litellm>=1.75.5", # For LiteLlm class. Currently has OpenAI limitations. TODO: once LiteLlm fix it
+  "litellm>=1.75.5, <1.81.0", # For LiteLlm class. Currently has OpenAI limitations. TODO: once LiteLlm fix it
   "llama-index-readers-file>=0.4.0", # For retrieval using LlamaIndex.
   "llama-index-embeddings-google-genai>=0.3.0", # For files retrieval using LlamaIndex.
   "lxml>=5.3.0", # For load_web_page tool.

38 changes: 26 additions & 12 deletions src/google/adk/flows/llm_flows/base_llm_flow.py
@@ -38,8 +38,6 @@
 from ...agents.run_config import StreamingMode
 from ...agents.transcription_entry import TranscriptionEntry
 from ...events.event import Event
-from ...features import FeatureName
-from ...features import is_feature_enabled
 from ...models.base_llm_connection import BaseLlmConnection
 from ...models.llm_request import LlmRequest
 from ...models.llm_response import LlmResponse
@@ -274,6 +272,25 @@ async def _send_to_model(
         await llm_connection.send_realtime(live_request.blob)
 
       if live_request.content:
+        content = live_request.content
+        # Persist user text content to session (similar to non-live mode)
+        # Skip function responses - they are already handled separately
+        is_function_response = content.parts and any(
+            part.function_response for part in content.parts
+        )
+        if not is_function_response:
+          if not content.role:
+            content.role = 'user'
+          user_content_event = Event(
+              id=Event.new_id(),
+              invocation_id=invocation_context.invocation_id,
+              author='user',
+              content=content,
+          )
+          await invocation_context.session_service.append_event(
+              session=invocation_context.session,
+              event=user_content_event,
+          )
         await llm_connection.send_content(live_request.content)
 
   async def _receive_from_model(
@@ -393,8 +410,8 @@ async def _run_one_step_async(
         current_invocation=True, current_branch=True
     )
 
-    # Long-running tool calls should have been handled before this point.
-    # If there are still long-running tool calls, it means the agent is paused
+    # Long running tool calls should have been handled before this point.
+    # If there are still long running tool calls, it means the agent is paused
     # before, and its branch hasn't been resumed yet.
     if (
         invocation_context.is_resumable
@@ -551,14 +568,11 @@ async def _postprocess_async(
     # Handles function calls.
     if model_response_event.get_function_calls():
 
-      if is_feature_enabled(FeatureName.PROGRESSIVE_SSE_STREAMING):
-        # In progressive SSE streaming mode stage 1, we skip partial FC events
-        # Only execute FCs in the final aggregated event (partial=False)
-        if (
-            invocation_context.run_config.streaming_mode == StreamingMode.SSE
-            and model_response_event.partial
-        ):
-          return
+      # Skip partial function call events - they should not trigger execution
+      # since partial events are not saved to session (see runners.py).
+      # Only execute function calls in the non-partial events.
+      if model_response_event.partial:
+        return
 
       async with Aclosing(
           self._postprocess_handle_function_calls_async(

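The new block in `_send_to_model` persists user text arriving over the live request queue to the session, mirroring what already happens in non-live runs, while skipping function responses that are handled elsewhere. A rough sketch of the observable effect, assuming an already-constructed `runner` and `session` (placeholders, not part of this diff) and an agent that supports live mode:

from google.adk.agents.live_request_queue import LiveRequestQueue
from google.adk.agents.run_config import RunConfig, StreamingMode
from google.genai import types


async def check_user_text_is_persisted(runner, session):
    """`runner` and `session` are assumed to exist already."""
    queue = LiveRequestQueue()
    queue.send_content(
        types.Content(role="user", parts=[types.Part(text="hello")])
    )
    queue.close()  # end the live turn so run_live can finish

    async for _ in runner.run_live(
        user_id=session.user_id,
        session_id=session.id,
        live_request_queue=queue,
        run_config=RunConfig(streaming_mode=StreamingMode.BIDI),
    ):
        pass

    refreshed = await runner.session_service.get_session(
        app_name=session.app_name,
        user_id=session.user_id,
        session_id=session.id,
    )
    # After this change, the user's text should appear in session history
    # as an event authored by 'user', just like in non-live runs.
    assert any(e.author == "user" for e in refreshed.events)
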
1 change: 1 addition & 0 deletions src/google/adk/runners.py
@@ -815,6 +815,7 @@ async def _exec_with_plugin(
             await self.session_service.append_event(
                 session=session, event=buffered_event
             )
+            yield buffered_event  # yield buffered events to caller
           buffered_events = []
         else:
           # non-transcription event or empty transcription event, for

110 changes: 0 additions & 110 deletions src/google/adk/tools/mcp_tool/mcp_auth_utils.py

This file was deleted.
