diff --git a/actions/setup/js/parse_gemini_log.cjs b/actions/setup/js/parse_gemini_log.cjs
index 3d2760ce38..30b38a061d 100644
--- a/actions/setup/js/parse_gemini_log.cjs
+++ b/actions/setup/js/parse_gemini_log.cjs
@@ -1,7 +1,7 @@
// @ts-check
///
-const { createEngineLogParser } = require("./log_parser_shared.cjs");
+const { createEngineLogParser, generateConversationMarkdown, generateInformationSection, formatInitializationSummary, formatToolUse } = require("./log_parser_shared.cjs");
const main = createEngineLogParser({
parserName: "Gemini",
@@ -10,8 +10,13 @@ const main = createEngineLogParser({
});
/**
- * Parse Gemini CLI streaming JSON log output and format as markdown.
- * Gemini CLI outputs one JSON object per line when using --output-format stream-json (JSONL).
+ * Parse Gemini CLI JSONL log output and format as markdown.
+ * Gemini CLI outputs one JSON object per line (JSONL) with typed entries:
+ * - type "init": session initialization with model and session_id
+ * - type "message": user/assistant messages, assistant uses delta:true for streaming chunks
+ * - type "tool_use": tool invocations with tool_name, tool_id, and parameters
+ * - type "tool_result": tool responses with tool_id, status, and output
+ * - type "result": final stats with token usage, duration, and tool call count
* @param {string} logContent - The raw log content to parse
* @returns {{markdown: string, logEntries: Array, mcpFailures: Array, maxTurnsHit: boolean}} Parsed log data
*/
@@ -25,74 +30,161 @@ function parseGeminiLog(logContent) {
};
}
- let markdown = "";
- let totalInputTokens = 0;
- let totalOutputTokens = 0;
- let lastResponse = "";
-
- const lines = logContent.split("\n");
- for (const line of lines) {
+ // Parse JSONL lines
+ /** @type {Array} */
+ const rawEntries = [];
+ for (const line of logContent.split("\n")) {
const trimmed = line.trim();
- if (!trimmed) {
+ if (!trimmed || !trimmed.startsWith("{")) {
continue;
}
-
- // Try to parse each line as a JSON object (Gemini --output-format json output)
try {
- const parsed = JSON.parse(trimmed);
-
- if (parsed.response) {
- lastResponse = parsed.response;
- }
-
- // Aggregate token usage from stats
- if (parsed.stats && parsed.stats.models) {
- for (const modelStats of Object.values(parsed.stats.models)) {
- if (modelStats && typeof modelStats === "object") {
- if (typeof modelStats.input_tokens === "number") {
- totalInputTokens += modelStats.input_tokens;
- }
- if (typeof modelStats.output_tokens === "number") {
- totalOutputTokens += modelStats.output_tokens;
- }
- }
- }
- }
+ rawEntries.push(JSON.parse(trimmed));
} catch (_e) {
- // Not JSON - skip non-JSON lines
+ // Skip non-JSON lines
}
}
- // Build markdown output
- if (lastResponse) {
- markdown += "## 🤖 Reasoning\n\n";
- markdown += lastResponse + "\n\n";
+ if (rawEntries.length === 0) {
+ return {
+ markdown: "## 🤖 Gemini\n\nLog format not recognized as Gemini JSONL.\n\n",
+ logEntries: [],
+ mcpFailures: [],
+ maxTurnsHit: false,
+ };
}
- markdown += "## 📊 Information\n\n";
- const totalTokens = totalInputTokens + totalOutputTokens;
- if (totalTokens > 0) {
- markdown += `**Total Tokens Used:** ${totalTokens.toLocaleString()}\n\n`;
- if (totalInputTokens > 0) {
- markdown += `**Input Tokens:** ${totalInputTokens.toLocaleString()}\n\n`;
- }
- if (totalOutputTokens > 0) {
- markdown += `**Output Tokens:** ${totalOutputTokens.toLocaleString()}\n\n`;
- }
+ // Transform Gemini JSONL entries into canonical logEntries format
+ const logEntries = transformGeminiEntries(rawEntries);
+
+ // Extract the final result entry for stats
+ const resultEntry = rawEntries.find(e => e.type === "result");
+
+ // Generate conversation markdown using shared function
+ const conversationResult = generateConversationMarkdown(logEntries, {
+ formatToolCallback: (toolUse, toolResult) => formatToolUse(toolUse, toolResult, { includeDetailedParameters: false }),
+ formatInitCallback: initEntry => formatInitializationSummary(initEntry, { includeSlashCommands: false }),
+ });
+
+ let markdown = conversationResult.markdown;
+
+ // Add Information section using Gemini-specific stats from the result entry
+ if (resultEntry && resultEntry.stats) {
+ const stats = resultEntry.stats;
+ const syntheticEntry = {
+ usage: {
+ input_tokens: stats.input_tokens || 0,
+ output_tokens: stats.output_tokens || 0,
+ cache_read_input_tokens: stats.cached || 0,
+ },
+ duration_ms: stats.duration_ms || 0,
+      num_turns: stats.tool_calls || 0, // NOTE(review): Gemini reports tool_calls, not turns — confirm generateInformationSection labels this appropriately
+ };
+ markdown += generateInformationSection(syntheticEntry);
+ } else {
+ markdown += generateInformationSection(null);
}
return {
markdown,
- logEntries: [],
+ logEntries,
mcpFailures: [],
maxTurnsHit: false,
};
}
+/**
+ * Checks whether a canonical log entry is an assistant text entry eligible for merging
+ * with a subsequent streaming delta chunk.
+ * @param {any} entry - The candidate last entry
+ * @returns {boolean} True when the entry is a mergeable assistant text entry
+ */
+function isConsecutiveDeltaEntry(entry) {
+ return entry && entry.type === "assistant" && entry.message && Array.isArray(entry.message.content) && entry.message.content.length === 1 && entry.message.content[0].type === "text";
+}
+
+/**
+ * Transforms raw Gemini JSONL entries into the canonical logEntries format
+ * used by the shared generateConversationMarkdown function.
+ *
+ * Gemini entry types and their canonical mappings:
+ * - "init" → {type:"system", subtype:"init", model, session_id}
+ * - "message" (assistant, delta:true) → merged into {type:"assistant", message:{content:[{type:"text"}]}}
+ * - "tool_use" → {type:"assistant", message:{content:[{type:"tool_use", id, name, input}]}}
+ * - "tool_result" → {type:"user", message:{content:[{type:"tool_result", tool_use_id, content, is_error}]}}
+ *
+ * @param {Array} rawEntries - Raw parsed JSONL entries
+ * @returns {Array} Canonical log entries for generateConversationMarkdown
+ */
+function transformGeminiEntries(rawEntries) {
+ /** @type {Array} */
+ const entries = [];
+
+ for (const raw of rawEntries) {
+ if (raw.type === "init") {
+ entries.push({
+ type: "system",
+ subtype: "init",
+ model: raw.model,
+ session_id: raw.session_id,
+ });
+ } else if (raw.type === "message" && raw.role === "assistant") {
+ const text = raw.content || "";
+ if (!text.trim()) {
+ continue;
+ }
+      // Merge consecutive streaming delta chunks into one assistant text entry. NOTE(review): whitespace-only chunks are skipped above, which can lose inter-word spaces if Gemini streams a bare " " chunk — confirm the CLI never splits on whitespace boundaries
+ const last = entries[entries.length - 1];
+ if (raw.delta === true && isConsecutiveDeltaEntry(last)) {
+ last.message.content[0].text += text;
+ } else {
+ entries.push({
+ type: "assistant",
+ message: {
+ content: [{ type: "text", text }],
+ },
+ });
+ }
+ } else if (raw.type === "tool_use") {
+ entries.push({
+ type: "assistant",
+ message: {
+ content: [
+ {
+ type: "tool_use",
+ id: raw.tool_id,
+ name: raw.tool_name,
+ input: raw.parameters || {},
+ },
+ ],
+ },
+ });
+ } else if (raw.type === "tool_result") {
+      const output = typeof raw.output === "string" ? raw.output : JSON.stringify(raw.output ?? "");
+ entries.push({
+ type: "user",
+ message: {
+ content: [
+ {
+ type: "tool_result",
+ tool_use_id: raw.tool_id,
+ content: output,
+ is_error: raw.status !== "success",
+ },
+ ],
+ },
+ });
+ }
+ }
+
+ return entries;
+}
+
// Export for testing
if (typeof module !== "undefined" && module.exports) {
module.exports = {
main,
parseGeminiLog,
+ transformGeminiEntries,
};
}
diff --git a/actions/setup/js/parse_gemini_log.test.cjs b/actions/setup/js/parse_gemini_log.test.cjs
new file mode 100644
index 0000000000..4197e4e54d
--- /dev/null
+++ b/actions/setup/js/parse_gemini_log.test.cjs
@@ -0,0 +1,262 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
+
+describe("parse_gemini_log.cjs", () => {
+ let mockCore;
+ let parseGeminiLog, transformGeminiEntries;
+
+ beforeEach(async () => {
+ mockCore = {
+ debug: vi.fn(),
+ info: vi.fn(),
+ warning: vi.fn(),
+ error: vi.fn(),
+ setFailed: vi.fn(),
+ setOutput: vi.fn(),
+ summary: {
+ addRaw: vi.fn().mockReturnThis(),
+ write: vi.fn().mockResolvedValue(),
+ },
+ };
+ global.core = mockCore;
+
+ const module = await import("./parse_gemini_log.cjs?" + Date.now());
+ parseGeminiLog = module.parseGeminiLog;
+ transformGeminiEntries = module.transformGeminiEntries;
+ });
+
+ afterEach(() => {
+ delete global.core;
+ });
+
+ describe("parseGeminiLog function", () => {
+ it("should return a default message for empty input", () => {
+ const result = parseGeminiLog("");
+
+ expect(result.markdown).toContain("No log content provided");
+ expect(result.logEntries).toEqual([]);
+ expect(result.mcpFailures).toEqual([]);
+ expect(result.maxTurnsHit).toBe(false);
+ });
+
+ it("should return error message for null input", () => {
+ const result = parseGeminiLog(null);
+
+ expect(result.markdown).toContain("No log content provided");
+ });
+
+ it("should return unrecognized format message for non-JSON content", () => {
+ const result = parseGeminiLog("plain text log content\nnot json at all");
+
+ expect(result.markdown).toContain("Log format not recognized as Gemini JSONL");
+ });
+
+ it("should parse init entry and show model in initialization section", () => {
+ const logContent = [JSON.stringify({ type: "init", timestamp: "2026-01-01T00:00:00Z", session_id: "sess-123", model: "gemini-2.0-flash" })].join("\n");
+
+ const result = parseGeminiLog(logContent);
+
+ expect(result.markdown).toContain("## 🚀 Initialization");
+ expect(result.markdown).toContain("gemini-2.0-flash");
+ expect(result.markdown).toContain("sess-123");
+ });
+
+ it("should merge consecutive assistant delta messages into one reasoning block", () => {
+ const logContent = [JSON.stringify({ type: "message", role: "assistant", content: "I will analyze", delta: true }), JSON.stringify({ type: "message", role: "assistant", content: " the repository.", delta: true })].join("\n");
+
+ const result = parseGeminiLog(logContent);
+
+ expect(result.markdown).toContain("## 🤖 Reasoning");
+ expect(result.markdown).toContain("I will analyze the repository.");
+ });
+
+ it("should render tool use with success status", () => {
+ const logContent = [
+ JSON.stringify({ type: "tool_use", tool_name: "list_pull_requests", tool_id: "tool_001", parameters: { owner: "github", repo: "gh-aw" } }),
+ JSON.stringify({ type: "tool_result", tool_id: "tool_001", status: "success", output: '{"items":[]}' }),
+ ].join("\n");
+
+ const result = parseGeminiLog(logContent);
+
+ expect(result.markdown).toContain("✅");
+ expect(result.markdown).toContain("list_pull_requests");
+ });
+
+ it("should render tool use with error status", () => {
+ const logContent = [
+ JSON.stringify({ type: "tool_use", tool_name: "create_issue", tool_id: "tool_002", parameters: { title: "Test" } }),
+ JSON.stringify({ type: "tool_result", tool_id: "tool_002", status: "error", output: "Permission denied" }),
+ ].join("\n");
+
+ const result = parseGeminiLog(logContent);
+
+ expect(result.markdown).toContain("❌");
+ expect(result.markdown).toContain("create_issue");
+ });
+
+ it("should extract token stats from result entry", () => {
+ const logContent = [
+ JSON.stringify({
+ type: "result",
+ status: "success",
+ stats: {
+ total_tokens: 1000,
+ input_tokens: 900,
+ output_tokens: 100,
+ cached: 200,
+ duration_ms: 5000,
+ tool_calls: 3,
+ },
+ }),
+ ].join("\n");
+
+ const result = parseGeminiLog(logContent);
+
+ expect(result.markdown).toContain("## 📊 Information");
+ expect(result.markdown).toContain("900");
+ expect(result.markdown).toContain("100");
+ });
+
+ it("should parse a complete conversation flow", () => {
+ const logContent = [
+ JSON.stringify({ type: "init", timestamp: "2026-01-01T00:00:00Z", session_id: "sess-abc", model: "auto-gemini-3" }),
+ JSON.stringify({ type: "message", role: "user", content: "Please list PRs." }),
+ JSON.stringify({ type: "message", role: "assistant", content: "I will list the PRs.", delta: true }),
+ JSON.stringify({ type: "tool_use", tool_name: "list_pull_requests", tool_id: "tool_003", parameters: { owner: "github", repo: "gh-aw" } }),
+ JSON.stringify({ type: "tool_result", tool_id: "tool_003", status: "success", output: '{"items":[{"number":1}]}' }),
+ JSON.stringify({ type: "message", role: "assistant", content: "Found 1 PR.", delta: true }),
+ JSON.stringify({ type: "result", status: "success", stats: { total_tokens: 500, input_tokens: 400, output_tokens: 100, cached: 50, duration_ms: 3000, tool_calls: 1 } }),
+ ].join("\n");
+
+ const result = parseGeminiLog(logContent);
+
+ expect(result.markdown).toContain("## 🚀 Initialization");
+ expect(result.markdown).toContain("auto-gemini-3");
+ expect(result.markdown).toContain("## 🤖 Reasoning");
+ expect(result.markdown).toContain("I will list the PRs.");
+ expect(result.markdown).toContain("## 🤖 Commands and Tools");
+ expect(result.markdown).toContain("list_pull_requests");
+ expect(result.markdown).toContain("## 📊 Information");
+ expect(result.logEntries.length).toBeGreaterThan(0);
+ expect(result.mcpFailures).toEqual([]);
+ expect(result.maxTurnsHit).toBe(false);
+ });
+
+ it("should skip non-JSON lines in the log", () => {
+ const logContent = ["[INFO] Starting agent", JSON.stringify({ type: "init", session_id: "sess-xyz", model: "gemini-pro" }), "[INFO] Agent complete"].join("\n");
+
+ const result = parseGeminiLog(logContent);
+
+ expect(result.markdown).toContain("gemini-pro");
+ expect(result.markdown).not.toContain("[INFO]");
+ });
+ });
+
+ describe("transformGeminiEntries function", () => {
+ it("should transform init entry to system init format", () => {
+ const raw = [{ type: "init", session_id: "sess-1", model: "gemini-flash" }];
+
+ const entries = transformGeminiEntries(raw);
+
+ expect(entries).toHaveLength(1);
+ expect(entries[0].type).toBe("system");
+ expect(entries[0].subtype).toBe("init");
+ expect(entries[0].model).toBe("gemini-flash");
+ expect(entries[0].session_id).toBe("sess-1");
+ });
+
+ it("should merge consecutive delta assistant messages", () => {
+ const raw = [
+ { type: "message", role: "assistant", content: "Hello", delta: true },
+ { type: "message", role: "assistant", content: " world", delta: true },
+ { type: "message", role: "assistant", content: "!", delta: true },
+ ];
+
+ const entries = transformGeminiEntries(raw);
+
+ expect(entries).toHaveLength(1);
+ expect(entries[0].type).toBe("assistant");
+ expect(entries[0].message.content[0].text).toBe("Hello world!");
+ });
+
+ it("should not merge non-consecutive delta messages", () => {
+ const raw = [
+ { type: "message", role: "assistant", content: "First message.", delta: true },
+ { type: "tool_use", tool_name: "bash", tool_id: "t1", parameters: {} },
+ { type: "message", role: "assistant", content: "Second message.", delta: true },
+ ];
+
+ const entries = transformGeminiEntries(raw);
+
+ const assistantEntries = entries.filter(e => e.type === "assistant" && e.message?.content?.[0]?.type === "text");
+ expect(assistantEntries).toHaveLength(2);
+ expect(assistantEntries[0].message.content[0].text).toBe("First message.");
+ expect(assistantEntries[1].message.content[0].text).toBe("Second message.");
+ });
+
+ it("should transform tool_use to assistant entry", () => {
+ const raw = [{ type: "tool_use", tool_name: "search_code", tool_id: "tool_abc", parameters: { query: "test" } }];
+
+ const entries = transformGeminiEntries(raw);
+
+ expect(entries).toHaveLength(1);
+ expect(entries[0].type).toBe("assistant");
+ expect(entries[0].message.content[0].type).toBe("tool_use");
+ expect(entries[0].message.content[0].id).toBe("tool_abc");
+ expect(entries[0].message.content[0].name).toBe("search_code");
+ expect(entries[0].message.content[0].input).toEqual({ query: "test" });
+ });
+
+ it("should transform tool_result to user entry with success status", () => {
+ const raw = [{ type: "tool_result", tool_id: "tool_abc", status: "success", output: "result data" }];
+
+ const entries = transformGeminiEntries(raw);
+
+ expect(entries).toHaveLength(1);
+ expect(entries[0].type).toBe("user");
+ expect(entries[0].message.content[0].type).toBe("tool_result");
+ expect(entries[0].message.content[0].tool_use_id).toBe("tool_abc");
+ expect(entries[0].message.content[0].content).toBe("result data");
+ expect(entries[0].message.content[0].is_error).toBe(false);
+ });
+
+ it("should transform tool_result to user entry with error status", () => {
+ const raw = [{ type: "tool_result", tool_id: "tool_xyz", status: "error", output: "Something went wrong" }];
+
+ const entries = transformGeminiEntries(raw);
+
+ expect(entries[0].message.content[0].is_error).toBe(true);
+ });
+
+ it("should skip user messages and result entries", () => {
+ const raw = [
+ { type: "message", role: "user", content: "User prompt" },
+ { type: "result", status: "success", stats: {} },
+ ];
+
+ const entries = transformGeminiEntries(raw);
+
+ expect(entries).toHaveLength(0);
+ });
+
+ it("should skip empty assistant delta messages", () => {
+ const raw = [
+ { type: "message", role: "assistant", content: "", delta: true },
+ { type: "message", role: "assistant", content: " ", delta: true },
+ { type: "message", role: "assistant", content: "Valid content", delta: true },
+ ];
+
+ const entries = transformGeminiEntries(raw);
+
+ expect(entries).toHaveLength(1);
+ expect(entries[0].message.content[0].text).toBe("Valid content");
+ });
+
+ it("should serialize non-string tool_result output as JSON", () => {
+ const raw = [{ type: "tool_result", tool_id: "t1", status: "success", output: { items: [1, 2] } }];
+
+ const entries = transformGeminiEntries(raw);
+
+ expect(entries[0].message.content[0].content).toBe('{"items":[1,2]}');
+ });
+ });
+});
diff --git a/actions/setup/sh/convert_gateway_config_gemini.sh b/actions/setup/sh/convert_gateway_config_gemini.sh
index 1fc86ce58a..ce2aa4cdd2 100644
--- a/actions/setup/sh/convert_gateway_config_gemini.sh
+++ b/actions/setup/sh/convert_gateway_config_gemini.sh
@@ -94,7 +94,9 @@ jq --arg urlPrefix "$URL_PREFIX" '
# Fix the URL to use the correct domain
.url |= (. | sub("^http://[^/]+/mcp/"; $urlPrefix + "/mcp/"))
)
- )
+ ) |
+ # Allow Gemini CLI to read files from /tmp/gh-aw/ (e.g. MCP payload files)
+ .includeDirectories = ["/tmp/gh-aw/"]
' "$MCP_GATEWAY_OUTPUT" > "$GEMINI_SETTINGS_FILE"
echo "Gemini configuration written to $GEMINI_SETTINGS_FILE"