From 986270333fcc2c691b9592d609bf53671c47b997 Mon Sep 17 00:00:00 2001
From: Florian-BACHO
Date: Thu, 6 Nov 2025 14:35:48 +0100
Subject: [PATCH 1/3] Add GoogleGenAI token usage information in additional_kwargs as required by MLFlow Tracing

---
 .../llama_index/llms/google_genai/utils.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/llama-index-integrations/llms/llama-index-llms-google-genai/llama_index/llms/google_genai/utils.py b/llama-index-integrations/llms/llama-index-llms-google-genai/llama_index/llms/google_genai/utils.py
index 3b0f3f0a79..19f8f556d4 100644
--- a/llama-index-integrations/llms/llama-index-llms-google-genai/llama_index/llms/google_genai/utils.py
+++ b/llama-index-integrations/llms/llama-index-llms-google-genai/llama_index/llms/google_genai/utils.py
@@ -145,15 +145,23 @@ def chat_from_gemini_response(
         **response_feedback,
     }
     thought_tokens: Optional[int] = None
+    additional_kwargs: Dict[str, Any] = {"thought_signatures": []}
     if response.usage_metadata:
         raw["usage_metadata"] = response.usage_metadata.model_dump()
+
+        # Set token usage information as required by MLFlow Tracing
+        additional_kwargs["prompt_tokens"] = response.usage_metadata.prompt_token_count
+        additional_kwargs["completion_tokens"] = (
+            response.usage_metadata.candidates_token_count
+        )
+        additional_kwargs["total_tokens"] = response.usage_metadata.total_token_count
+
         if response.usage_metadata.thoughts_token_count:
             thought_tokens = response.usage_metadata.thoughts_token_count
 
     if hasattr(response, "cached_content") and response.cached_content:
         raw["cached_content"] = response.cached_content
 
-    additional_kwargs: Dict[str, Any] = {"thought_signatures": []}
     content_blocks = []
     if (
         len(response.candidates) > 0

From 1e272e3e7aed5cb402073fc31aee7fce8b62f44a Mon Sep 17 00:00:00 2001
From: Florian-BACHO
Date: Thu, 6 Nov 2025 15:10:50 +0100
Subject: [PATCH 2/3] Bump version

---
 .../llms/llama-index-llms-google-genai/pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml
index 3f07a8fce9..ed1facc7cd 100644
--- a/llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml
@@ -27,7 +27,7 @@ dev = [
 
 [project]
 name = "llama-index-llms-google-genai"
-version = "0.7.1"
+version = "0.7.2"
 description = "llama-index llms google genai integration"
 authors = [{name = "Your Name", email = "you@example.com"}]
 requires-python = ">=3.9,<4.0"

From 7b32303993ce82564f270b0a46cc415afd7c0c69 Mon Sep 17 00:00:00 2001
From: Logan Markewich
Date: Sun, 9 Nov 2025 15:34:43 -0600
Subject: [PATCH 3/3] vbump

---
 .../llms/llama-index-llms-google-genai/pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml
index ed1facc7cd..92678305e0 100644
--- a/llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml
@@ -27,7 +27,7 @@ dev = [
 
 [project]
 name = "llama-index-llms-google-genai"
-version = "0.7.2"
+version = "0.7.3"
 description = "llama-index llms google genai integration"
 authors = [{name = "Your Name", email = "you@example.com"}]
 requires-python = ">=3.9,<4.0"
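
Note (not part of the patches above): a minimal sketch of how the token counts
introduced in PATCH 1/3 can be read back on the LlamaIndex side. The model
name, the prompt, and the assumption that additional_kwargs lands on the
returned ChatMessage (as the surrounding chat_from_gemini_response code
suggests) are illustrative, not confirmed by this diff.

    from llama_index.core.llms import ChatMessage
    from llama_index.llms.google_genai import GoogleGenAI

    # Assumptions: GOOGLE_API_KEY is set in the environment, and
    # "gemini-2.0-flash" is an illustrative model name.
    llm = GoogleGenAI(model="gemini-2.0-flash")
    response = llm.chat([ChatMessage(role="user", content="Hello")])

    # With PATCH 1/3 applied, usage metadata is mirrored into
    # additional_kwargs, which is where MLflow Tracing picks it up.
    usage = response.message.additional_kwargs
    print(
        usage.get("prompt_tokens"),
        usage.get("completion_tokens"),
        usage.get("total_tokens"),
    )

Previously this information was only available under raw["usage_metadata"];
mirroring it into additional_kwargs matches the keys MLflow's tracing
integration expects.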