Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
# Changelog


## [1.22.1](https://github.com/google/adk-python/compare/v1.22.0...v1.22.1) (2026-01-09)

### Bug Fixes
* Add back `adk migrate session` CLI ([8fb2be2](https://github.com/google/adk-python/commit/8fb2be216f11dabe7fa361a0402e5e6316878ad8)).
* Escape database reserved keyword ([94d48fc](https://github.com/google/adk-python/commit/94d48fce32a1f07cef967d50e82f2b1975b4abd9)).


## [1.22.0](https://github.com/google/adk-python/compare/v1.21.0...v1.22.0) (2026-01-08)

### Features
Expand Down
32 changes: 30 additions & 2 deletions src/google/adk/a2a/utils/agent_card_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ async def _build_llm_agent_skills(agent: LlmAgent) -> List[AgentSkill]:
id=agent.name,
name='model',
description=agent_description,
examples=agent_examples,
examples=_extract_inputs_from_examples(agent_examples),
input_modes=_get_input_modes(agent),
output_modes=_get_output_modes(agent),
tags=['llm'],
Expand Down Expand Up @@ -239,7 +239,7 @@ async def _build_non_llm_agent_skills(agent: BaseAgent) -> List[AgentSkill]:
id=agent.name,
name=agent_name,
description=agent_description,
examples=agent_examples,
examples=_extract_inputs_from_examples(agent_examples),
input_modes=_get_input_modes(agent),
output_modes=_get_output_modes(agent),
tags=[agent_type],
Expand Down Expand Up @@ -350,6 +350,7 @@ def _build_llm_agent_description_with_instructions(agent: LlmAgent) -> str:

def _replace_pronouns(text: str) -> str:
"""Replace pronouns and conjugate common verbs for agent description.

(e.g., "You are" -> "I am", "your" -> "my").
"""
pronoun_map = {
Expand Down Expand Up @@ -460,6 +461,33 @@ def _get_default_description(agent: BaseAgent) -> str:
return 'A custom agent'


def _extract_inputs_from_examples(examples: Optional[list[dict]]) -> list[str]:
"""Extracts only the input strings so they can be added to an AgentSkill."""
if examples is None:
return []

extracted_inputs = []
for example in examples:
example_input = example.get('input')
if not example_input:
continue

parts = example_input.get('parts')
if parts is not None:
part_texts = []
for part in parts:
text = part.get('text')
if text is not None:
part_texts.append(text)
extracted_inputs.append('\n'.join(part_texts))
else:
text = example_input.get('text')
if text is not None:
extracted_inputs.append(text)

return extracted_inputs


async def _extract_examples_from_agent(
agent: BaseAgent,
) -> Optional[List[Dict]]:
Expand Down
1 change: 1 addition & 0 deletions src/google/adk/auth/auth_credential.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ class HttpAuth(BaseModelWithConfig):
# Examples: 'basic', 'bearer'
scheme: str
credentials: HttpCredentials
additional_headers: Optional[Dict[str, str]] = None


class OAuth2Auth(BaseModelWithConfig):
Expand Down
1 change: 1 addition & 0 deletions src/google/adk/cli/cli_tools_click.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
from . import cli_deploy
from .. import version
from ..evaluation.constants import MISSING_EVAL_DEPENDENCIES_MESSAGE
from ..sessions.migration import migration_runner
from .cli import run_cli
from .fast_api import get_fast_api_app
from .utils import envs
Expand Down
1 change: 0 additions & 1 deletion src/google/adk/flows/llm_flows/base_llm_flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -303,7 +303,6 @@ def get_author_for_event(llm_response):
else:
return invocation_context.agent.name

assert invocation_context.live_request_queue
try:
while True:
async with Aclosing(llm_connection.receive()) as agen:
Expand Down
78 changes: 75 additions & 3 deletions src/google/adk/models/lite_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,45 @@ def _infer_mime_type_from_uri(uri: str) -> Optional[str]:
return None


def _looks_like_openai_file_id(file_uri: str) -> bool:
"""Returns True when file_uri resembles an OpenAI/Azure file id."""
return file_uri.startswith("file-")


def _redact_file_uri_for_log(
file_uri: str, *, display_name: str | None = None
) -> str:
"""Returns a privacy-preserving identifier for logs."""
if display_name:
return display_name
if _looks_like_openai_file_id(file_uri):
return "file-<redacted>"
try:
parsed = urlparse(file_uri)
except ValueError:
return "<unparseable>"
if not parsed.scheme:
return "<unknown>"
segments = [segment for segment in parsed.path.split("/") if segment]
tail = segments[-1] if segments else ""
if tail:
return f"{parsed.scheme}://<redacted>/{tail}"
return f"{parsed.scheme}://<redacted>"


def _requires_file_uri_fallback(
    provider: str, model: str, file_uri: str
) -> bool:
  """Returns True when `file_uri` should not be sent as a file content block.

  Args:
    provider: The LLM provider name (e.g., "openai", "anthropic").
    model: The LiteLLM model string, used for provider-specific checks.
    file_uri: The file URI under consideration.

  Returns:
    True when the URI must be downgraded to a text fallback instead of a
    file content block for this provider/model combination.
  """
  # Providers that only accept uploaded file ids reject arbitrary URIs.
  if provider in _FILE_ID_REQUIRED_PROVIDERS:
    return not _looks_like_openai_file_id(file_uri)
  # Anthropic never accepts raw file URIs.
  if provider == "anthropic":
    return True
  # Vertex AI supports file URIs only for Gemini models.
  return provider == "vertex_ai" and not _is_litellm_gemini_model(model)


def _decode_inline_text_data(raw_bytes: bytes) -> str:
"""Decodes inline file bytes that represent textual content."""
try:
Expand Down Expand Up @@ -447,6 +486,7 @@ async def _content_to_message_param(
content: types.Content,
*,
provider: str = "",
model: str = "",
) -> Union[Message, list[Message]]:
"""Converts a types.Content to a litellm Message or list of Messages.

Expand All @@ -456,6 +496,7 @@ async def _content_to_message_param(
Args:
content: The content to convert.
provider: The LLM provider name (e.g., "openai", "azure").
model: The LiteLLM model string, used for provider-specific behavior.

Returns:
A litellm Message, a list of litellm Messages.
Expand Down Expand Up @@ -499,7 +540,9 @@ async def _content_to_message_param(

if role == "user":
user_parts = [part for part in content.parts if not part.thought]
message_content = await _get_content(user_parts, provider=provider) or None
message_content = (
await _get_content(user_parts, provider=provider, model=model) or None
)
return ChatCompletionUserMessage(role="user", content=message_content)
else: # assistant/model
tool_calls = []
Expand All @@ -523,7 +566,7 @@ async def _content_to_message_param(
content_parts.append(part)

final_content = (
await _get_content(content_parts, provider=provider)
await _get_content(content_parts, provider=provider, model=model)
if content_parts
else None
)
Expand Down Expand Up @@ -620,6 +663,7 @@ async def _get_content(
parts: Iterable[types.Part],
*,
provider: str = "",
model: str = "",
) -> OpenAIMessageContent:
"""Converts a list of parts to litellm content.

Expand All @@ -629,6 +673,8 @@ async def _get_content(
Args:
parts: The parts to convert.
provider: The LLM provider name (e.g., "openai", "azure").
model: The LiteLLM model string (e.g., "openai/gpt-4o",
"vertex_ai/gemini-2.5-flash").

Returns:
The litellm content.
Expand Down Expand Up @@ -709,6 +755,32 @@ async def _get_content(
f"{part.inline_data.mime_type}."
)
elif part.file_data and part.file_data.file_uri:
if (
provider in _FILE_ID_REQUIRED_PROVIDERS
and _looks_like_openai_file_id(part.file_data.file_uri)
):
content_objects.append({
"type": "file",
"file": {"file_id": part.file_data.file_uri},
})
continue

if _requires_file_uri_fallback(provider, model, part.file_data.file_uri):
logger.debug(
"File URI %s not supported for provider %s, using text fallback",
_redact_file_uri_for_log(
part.file_data.file_uri,
display_name=part.file_data.display_name,
),
provider,
)
identifier = part.file_data.display_name or part.file_data.file_uri
content_objects.append({
"type": "text",
"text": f'[File reference: "{identifier}"]',
})
continue

file_object: ChatCompletionFileUrlObject = {
"file_id": part.file_data.file_uri,
}
Expand Down Expand Up @@ -1363,7 +1435,7 @@ async def _get_completion_inputs(
messages: List[Message] = []
for content in llm_request.contents or []:
message_param_or_list = await _content_to_message_param(
content, provider=provider
content, provider=provider, model=model
)
if isinstance(message_param_or_list, list):
messages.extend(message_param_or_list)
Expand Down
12 changes: 6 additions & 6 deletions src/google/adk/telemetry/tracing.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ def trace_tool_call(
_safe_json_serialize(args),
)
else:
span.set_attribute('gcp.vertex.agent.tool_call_args', {})
span.set_attribute('gcp.vertex.agent.tool_call_args', '{}')

# Tracing tool response
tool_call_id = '<not specified>'
Expand Down Expand Up @@ -179,7 +179,7 @@ def trace_tool_call(
_safe_json_serialize(tool_response),
)
else:
span.set_attribute('gcp.vertex.agent.tool_response', {})
span.set_attribute('gcp.vertex.agent.tool_response', '{}')


def trace_merged_tool_calls(
Expand Down Expand Up @@ -219,7 +219,7 @@ def trace_merged_tool_calls(
function_response_event_json,
)
else:
span.set_attribute('gcp.vertex.agent.tool_response', {})
span.set_attribute('gcp.vertex.agent.tool_response', '{}')
# Setting empty llm request and response (as UI expect these) while not
# applicable for tool_response.
span.set_attribute('gcp.vertex.agent.llm_request', '{}')
Expand Down Expand Up @@ -265,7 +265,7 @@ def trace_call_llm(
_safe_json_serialize(_build_llm_request_for_trace(llm_request)),
)
else:
span.set_attribute('gcp.vertex.agent.llm_request', {})
span.set_attribute('gcp.vertex.agent.llm_request', '{}')
# Consider removing once GenAI SDK provides a way to record this info.
if llm_request.config:
if llm_request.config.top_p:
Expand All @@ -290,7 +290,7 @@ def trace_call_llm(
llm_response_json,
)
else:
span.set_attribute('gcp.vertex.agent.llm_response', {})
span.set_attribute('gcp.vertex.agent.llm_response', '{}')

if llm_response.usage_metadata is not None:
span.set_attribute(
Expand Down Expand Up @@ -346,7 +346,7 @@ def trace_send_data(
]),
)
else:
span.set_attribute('gcp.vertex.agent.data', {})
span.set_attribute('gcp.vertex.agent.data', '{}')


def _build_llm_request_for_trace(llm_request: LlmRequest) -> dict[str, Any]:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,14 +74,18 @@ def exchange_credential(

try:
if auth_credential.service_account.use_default_credential:
credentials, _ = google.auth.default(
credentials, project_id = google.auth.default(
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
quota_project_id = (
getattr(credentials, "quota_project_id", None) or project_id
)
else:
config = auth_credential.service_account
credentials = service_account.Credentials.from_service_account_info(
config.service_account_credential.model_dump(), scopes=config.scopes
)
quota_project_id = None

credentials.refresh(Request())

Expand All @@ -90,6 +94,11 @@ def exchange_credential(
http=HttpAuth(
scheme="bearer",
credentials=HttpCredentials(token=credentials.token),
additional_headers={
"x-goog-user-project": quota_project_id,
}
if quota_project_id
else None,
),
)
return updated_credential
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -320,6 +320,13 @@ def _prepare_request_params(
user_agent = f"google-adk/{adk_version} (tool: {self.name})"
header_params["User-Agent"] = user_agent

if (
self.auth_credential
and self.auth_credential.http
and self.auth_credential.http.additional_headers
):
header_params.update(self.auth_credential.http.additional_headers)

params_map: Dict[str, ApiParameter] = {p.py_name: p for p in parameters}

# Fill in path, query, header and cookie parameters to the request
Expand Down
2 changes: 1 addition & 1 deletion src/google/adk/version.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@
# limitations under the License.

# version: major.minor.patch
__version__ = "1.22.0"
__version__ = "1.22.1"
Loading
Loading