From 79df3cdbbc7aa1d2fcd6afaddd6d98eaac749bd4 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 15 Sep 2025 23:21:52 +0000 Subject: [PATCH 1/7] Initial plan From 142685d47e7c73d98d203224cbd92d099c861606 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 15 Sep 2025 23:35:52 +0000 Subject: [PATCH 2/7] Refactor mcp-inspect to mcp inspect command structure with new functionality Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> --- .github/workflows/test-mcp.md | 24 +++ cmd/gh-aw/main.go | 3 +- pkg/cli/mcp.go | 24 +++ pkg/cli/mcp_inspect.go | 368 +++++++++++++++++++++++++++++++++- 4 files changed, 407 insertions(+), 12 deletions(-) create mode 100644 .github/workflows/test-mcp.md create mode 100644 pkg/cli/mcp.go diff --git a/.github/workflows/test-mcp.md b/.github/workflows/test-mcp.md new file mode 100644 index 0000000000..cca2b6ddf8 --- /dev/null +++ b/.github/workflows/test-mcp.md @@ -0,0 +1,24 @@ +--- +on: + workflow_dispatch: {} + +permissions: read-all + +tools: + github: + docker_image_version: "latest" + playwright: + docker_image_version: "v1.41.0" + allowed_domains: ["example.com", "*.github.com"] + +safe-outputs: + create-issue: + title-prefix: "[Test] " + add-issue-comment: + +engine: claude +--- + +# Test MCP Configuration + +This is a test workflow to demonstrate MCP configuration generation and server launching. 
\ No newline at end of file diff --git a/cmd/gh-aw/main.go b/cmd/gh-aw/main.go index 9f9f6e5710..90957914d1 100644 --- a/cmd/gh-aw/main.go +++ b/cmd/gh-aw/main.go @@ -373,7 +373,8 @@ func init() { rootCmd.AddCommand(enableCmd) rootCmd.AddCommand(disableCmd) rootCmd.AddCommand(cli.NewLogsCommand()) - rootCmd.AddCommand(cli.NewMCPInspectCommand()) + rootCmd.AddCommand(cli.NewMCPCommand()) + rootCmd.AddCommand(cli.NewMCPInspectCommand()) // Legacy backwards compatibility rootCmd.AddCommand(versionCmd) } diff --git a/pkg/cli/mcp.go b/pkg/cli/mcp.go new file mode 100644 index 0000000000..27fa690bd6 --- /dev/null +++ b/pkg/cli/mcp.go @@ -0,0 +1,24 @@ +package cli + +import ( + "github.com/spf13/cobra" +) + +// NewMCPCommand creates the mcp command with subcommands +func NewMCPCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "mcp", + Short: "Model Context Protocol (MCP) server management", + Long: `Manage Model Context Protocol (MCP) servers used by agentic workflows. + +This command provides subcommands for inspecting, configuring, and launching MCP servers.`, + Run: func(cmd *cobra.Command, args []string) { + _ = cmd.Help() + }, + } + + // Add subcommands + cmd.AddCommand(NewMCPInspectSubCommand()) + + return cmd +} \ No newline at end of file diff --git a/pkg/cli/mcp_inspect.go b/pkg/cli/mcp_inspect.go index 0471795e92..9d78deb8fc 100644 --- a/pkg/cli/mcp_inspect.go +++ b/pkg/cli/mcp_inspect.go @@ -189,16 +189,13 @@ func listWorkflowsWithMCP(workflowsDir string, verbose bool) error { return nil } -// NewMCPInspectCommand creates the mcp-inspect command +// NewMCPInspectCommand creates the mcp-inspect command (legacy, kept for backwards compatibility) func NewMCPInspectCommand() *cobra.Command { - var serverFilter string - var toolFilter string - var spawnInspector bool - - cmd := &cobra.Command{ - Use: "mcp-inspect [workflow-file]", - Short: "Inspect MCP servers and list available tools, resources, and roots", - Long: `Inspect MCP servers used by a workflow 
and display available tools, resources, and roots. + cmd := NewMCPInspectSubCommand() + cmd.Use = "mcp-inspect [workflow-file]" + + // Update examples to show legacy command syntax + cmd.Long = `Inspect MCP servers used by a workflow and display available tools, resources, and roots. This command starts each MCP server configured in the workflow, queries its capabilities, and displays the results in a formatted table. It supports stdio, Docker, and HTTP MCP servers. @@ -216,6 +213,45 @@ The command will: - Start each MCP server (stdio, docker, http) - Query available tools, resources, and roots - Validate required secrets are available +- Display results in formatted tables with error details + +NOTE: This command is deprecated. Use 'gh aw mcp inspect' instead.` + + return cmd +} + +// NewMCPInspectSubCommand creates the mcp inspect subcommand +func NewMCPInspectSubCommand() *cobra.Command { + var serverFilter string + var toolFilter string + var spawnInspector bool + var generateConfig bool + var launchServers bool + + cmd := &cobra.Command{ + Use: "inspect [workflow-file]", + Short: "Inspect MCP servers and list available tools, resources, and roots", + Long: `Inspect MCP servers used by a workflow and display available tools, resources, and roots. + +This command can generate MCP configurations using the Claude agentic engine, parse them, +and launch all configured servers including github, playwright, and safe-outputs. 
+ +Examples: + gh aw mcp inspect # List workflows with MCP servers + gh aw mcp inspect weekly-research # Inspect MCP servers in weekly-research.md + gh aw mcp inspect repomind --server repo-mind # Inspect only the repo-mind server + gh aw mcp inspect weekly-research --server github --tool create_issue # Show details for a specific tool + gh aw mcp inspect weekly-research -v # Verbose output with detailed connection info + gh aw mcp inspect weekly-research --inspector # Launch @modelcontextprotocol/inspector + gh aw mcp inspect weekly-research --generate-config # Generate MCP config using Claude engine + gh aw mcp inspect weekly-research --launch-servers # Launch all configured servers + +The command will: +- Parse the workflow file to extract MCP server configurations +- Optionally generate MCP configuration using the Claude agentic engine +- Start each MCP server (stdio, docker, http) +- Query available tools, resources, and roots +- Validate required secrets are available - Display results in formatted tables with error details`, Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -225,8 +261,8 @@ The command will: } verbose, _ := cmd.Flags().GetBool("verbose") - if cmd.Parent() != nil { - parentVerbose, _ := cmd.Parent().PersistentFlags().GetBool("verbose") + if cmd.Parent() != nil && cmd.Parent().Parent() != nil { + parentVerbose, _ := cmd.Parent().Parent().PersistentFlags().GetBool("verbose") verbose = verbose || parentVerbose } @@ -235,6 +271,16 @@ The command will: return fmt.Errorf("--tool flag requires --server flag to be specified") } + // Handle generate config flag + if generateConfig { + return generateMCPConfig(workflowFile, verbose) + } + + // Handle launch servers flag + if launchServers { + return launchMCPServers(workflowFile, serverFilter, verbose) + } + // Handle spawn inspector flag if spawnInspector { return spawnMCPInspector(workflowFile, serverFilter, verbose) @@ -248,6 +294,8 @@ The command will: 
cmd.Flags().StringVar(&toolFilter, "tool", "", "Show detailed information about a specific tool (requires --server)") cmd.Flags().BoolP("verbose", "v", false, "Enable verbose output with detailed connection information") cmd.Flags().BoolVar(&spawnInspector, "inspector", false, "Launch the official @modelcontextprotocol/inspector tool") + cmd.Flags().BoolVar(&generateConfig, "generate-config", false, "Generate MCP server configuration using Claude agentic engine") + cmd.Flags().BoolVar(&launchServers, "launch-servers", false, "Launch all configured MCP servers (github, playwright, safe-outputs)") return cmd } @@ -452,3 +500,301 @@ func spawnMCPInspector(workflowFile string, serverFilter string, verbose bool) e return cmd.Run() } + +// generateMCPConfig uses the Claude agentic engine to generate MCP server configuration +func generateMCPConfig(workflowFile string, verbose bool) error { + if workflowFile == "" { + return fmt.Errorf("workflow file is required for config generation") + } + + workflowsDir := workflow.GetWorkflowDir() + + // Normalize the workflow file path + if !strings.HasSuffix(workflowFile, ".md") { + workflowFile += ".md" + } + + workflowPath := filepath.Join(workflowsDir, workflowFile) + if !filepath.IsAbs(workflowPath) { + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current directory: %w", err) + } + workflowPath = filepath.Join(cwd, workflowPath) + } + + // Check if file exists + if _, err := os.Stat(workflowPath); os.IsNotExist(err) { + return fmt.Errorf("workflow file not found: %s", workflowPath) + } + + if verbose { + fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Generating MCP configuration for: %s", workflowPath))) + } + + // Parse the workflow file + content, err := os.ReadFile(workflowPath) + if err != nil { + return fmt.Errorf("failed to read workflow file: %w", err) + } + + workflowData, err := parser.ExtractFrontmatterFromContent(string(content)) + if err != nil { + return fmt.Errorf("failed to parse 
workflow file: %w", err) + } + + // Create Claude engine to generate MCP configuration + claudeEngine := workflow.NewClaudeEngine() + + // Extract tools from frontmatter + tools := make(map[string]any) + if toolsSection, hasTools := workflowData.Frontmatter["tools"]; hasTools { + if toolsMap, ok := toolsSection.(map[string]any); ok { + tools = toolsMap + } + } + + // Extract MCP tool names from existing configurations + mcpConfigs, err := parser.ExtractMCPConfigurations(workflowData.Frontmatter, "") + if err != nil { + return fmt.Errorf("failed to extract MCP configurations: %w", err) + } + + // Build list of MCP servers to include in config + mcpTools := []string{} + + // Add existing MCP server configurations + for _, config := range mcpConfigs { + mcpTools = append(mcpTools, config.Name) + } + + // Add standard servers if configured (avoid duplicates) + if _, hasGithub := tools["github"]; hasGithub { + found := false + for _, existing := range mcpTools { + if existing == "github" { + found = true + break + } + } + if !found { + mcpTools = append(mcpTools, "github") + } + } + + if _, hasPlaywright := tools["playwright"]; hasPlaywright { + found := false + for _, existing := range mcpTools { + if existing == "playwright" { + found = true + break + } + } + if !found { + mcpTools = append(mcpTools, "playwright") + } + } + + if _, hasSafeOutputs := workflowData.Frontmatter["safe-outputs"]; hasSafeOutputs { + found := false + for _, existing := range mcpTools { + if existing == "safe-outputs" { + found = true + break + } + } + if !found { + mcpTools = append(mcpTools, "safe-outputs") + } + } + + if len(mcpTools) == 0 { + fmt.Println(console.FormatWarningMessage("No MCP tools found in workflow")) + return nil + } + + // Create a minimal WorkflowData for MCP config generation + workflowDataForMCP := &workflow.WorkflowData{ + Tools: tools, + NetworkPermissions: nil, // Will be populated if needed + } + + // Generate the MCP configuration + var mcpConfigBuilder 
strings.Builder + claudeEngine.RenderMCPConfig(&mcpConfigBuilder, tools, mcpTools, workflowDataForMCP) + + fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Generated MCP configuration for %d server(s)", len(mcpTools)))) + fmt.Println(console.FormatInfoMessage("MCP Configuration:")) + fmt.Println() + fmt.Println(mcpConfigBuilder.String()) + + return nil +} + +// launchMCPServers launches all MCP servers configured in the workflow +func launchMCPServers(workflowFile string, serverFilter string, verbose bool) error { + if workflowFile == "" { + return fmt.Errorf("workflow file is required for launching servers") + } + + workflowsDir := workflow.GetWorkflowDir() + + // Normalize the workflow file path + if !strings.HasSuffix(workflowFile, ".md") { + workflowFile += ".md" + } + + workflowPath := filepath.Join(workflowsDir, workflowFile) + if !filepath.IsAbs(workflowPath) { + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current directory: %w", err) + } + workflowPath = filepath.Join(cwd, workflowPath) + } + + // Check if file exists + if _, err := os.Stat(workflowPath); os.IsNotExist(err) { + return fmt.Errorf("workflow file not found: %s", workflowPath) + } + + if verbose { + fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Launching MCP servers from: %s", workflowPath))) + } + + // Parse the workflow file to extract MCP configurations + content, err := os.ReadFile(workflowPath) + if err != nil { + return err + } + + workflowData, err := parser.ExtractFrontmatterFromContent(string(content)) + if err != nil { + return err + } + + // Extract MCP configurations + mcpConfigs, err := parser.ExtractMCPConfigurations(workflowData.Frontmatter, serverFilter) + if err != nil { + return err + } + + if len(mcpConfigs) == 0 { + if serverFilter != "" { + fmt.Println(console.FormatWarningMessage(fmt.Sprintf("No MCP servers matching filter '%s' found in workflow", serverFilter))) + } else { + fmt.Println(console.FormatWarningMessage("No MCP servers 
found in workflow")) + } + return nil + } + + fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Found %d MCP server(s) to launch:", len(mcpConfigs)))) + for _, config := range mcpConfigs { + fmt.Printf(" • %s (%s)\n", config.Name, config.Type) + } + fmt.Println() + + var serverProcesses []*exec.Cmd + var wg sync.WaitGroup + + // Launch each MCP server + for _, config := range mcpConfigs { + if verbose { + fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Starting server: %s", config.Name))) + } + + // Create the command for the MCP server + var cmd *exec.Cmd + if config.Container != "" { + // Docker container mode + args := append([]string{"run", "--rm", "-i"}, config.Args...) + cmd = exec.Command("docker", args...) + } else { + // Direct command mode + if config.Command == "" { + fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Skipping server %s: no command specified", config.Name))) + continue + } + cmd = exec.Command(config.Command, config.Args...) + } + + // Set environment variables + cmd.Env = os.Environ() + for key, value := range config.Env { + // Resolve environment variable references + resolvedValue := os.ExpandEnv(value) + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, resolvedValue)) + } + + // Start the server process + if err := cmd.Start(); err != nil { + fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to start server %s: %v", config.Name, err))) + continue + } + + serverProcesses = append(serverProcesses, cmd) + + // Monitor the process in the background + wg.Add(1) + go func(serverCmd *exec.Cmd, serverName string) { + defer wg.Done() + if err := serverCmd.Wait(); err != nil && verbose { + fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Server %s exited with error: %v", serverName, err))) + } + }(cmd, config.Name) + + if verbose { + fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Started server: %s (PID: %d)", config.Name, cmd.Process.Pid))) + } + } + + if len(serverProcesses) == 0 { + return fmt.Errorf("no MCP 
servers were successfully started") + } + + // Give servers a moment to start up + time.Sleep(2 * time.Second) + fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Successfully launched %d MCP server(s)", len(serverProcesses)))) + + // Set up cleanup function for servers + defer func() { + if len(serverProcesses) > 0 { + fmt.Println(console.FormatInfoMessage("Cleaning up MCP servers...")) + for i, cmd := range serverProcesses { + if cmd.Process != nil { + if err := cmd.Process.Kill(); err != nil && verbose { + fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to kill server process %d: %v", cmd.Process.Pid, err))) + } + } + // Give each process a chance to clean up + if i < len(serverProcesses)-1 { + time.Sleep(100 * time.Millisecond) + } + } + // Wait for all background goroutines to finish (with timeout) + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-done: + // All finished + case <-time.After(5 * time.Second): + // Timeout waiting for cleanup + if verbose { + fmt.Println(console.FormatWarningMessage("Timeout waiting for server cleanup")) + } + } + } + }() + + fmt.Println(console.FormatInfoMessage("MCP servers are running. 
Press Ctrl+C to stop all servers.")) + + // Wait for interrupt signal + fmt.Println(console.FormatInfoMessage("Use 'gh aw mcp inspect --inspector' to launch the MCP inspector tool")) + + // Keep the process alive until interrupted + select {} +} From 7be1c7b4f135fb58e93d01b61cd733ac06b48027 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 15 Sep 2025 23:40:42 +0000 Subject: [PATCH 3/7] Add comprehensive tests for new mcp command structure Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> --- .github/workflows/test-mcp.lock.yml | 2611 +++++++++++++++++++++++++++ .github/workflows/test-mcp.md | 24 - pkg/cli/mcp.go | 2 +- pkg/cli/mcp_inspect.go | 20 +- pkg/cli/mcp_test.go | 135 ++ 5 files changed, 2757 insertions(+), 35 deletions(-) create mode 100644 .github/workflows/test-mcp.lock.yml delete mode 100644 .github/workflows/test-mcp.md create mode 100644 pkg/cli/mcp_test.go diff --git a/.github/workflows/test-mcp.lock.yml b/.github/workflows/test-mcp.lock.yml new file mode 100644 index 0000000000..a2f15df5e5 --- /dev/null +++ b/.github/workflows/test-mcp.lock.yml @@ -0,0 +1,2611 @@ +# This file was automatically generated by gh-aw. DO NOT EDIT. 
+# To update this file, edit the corresponding .md file and run: +# gh aw compile + +name: "Test MCP Configuration" +on: + workflow_dispatch: {} + +permissions: {} + +concurrency: + group: "gh-aw-${{ github.workflow }}" + +run-name: "Test MCP Configuration" + +jobs: + test-mcp-configuration: + runs-on: ubuntu-latest + permissions: read-all + outputs: + output: ${{ steps.collect_output.outputs.output }} + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Generate Claude Settings + run: | + mkdir -p /tmp/.claude + cat > /tmp/.claude/settings.json << 'EOF' + { + "hooks": { + "PreToolUse": [ + { + "matcher": "WebFetch|WebSearch", + "hooks": [ + { + "type": "command", + "command": ".claude/hooks/network_permissions.py" + } + ] + } + ] + } + } + EOF + - name: Generate Network Permissions Hook + run: | + mkdir -p .claude/hooks + cat > .claude/hooks/network_permissions.py << 'EOF' + #!/usr/bin/env python3 + """ + Network permissions validator for Claude Code engine. + Generated by gh-aw from engine network permissions configuration. 
+ """ + + import json + import sys + import urllib.parse + import re + + # Domain allow-list (populated during generation) + ALLOWED_DOMAINS = ["crl3.digicert.com","crl4.digicert.com","ocsp.digicert.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","crl.geotrust.com","ocsp.geotrust.com","crl.thawte.com","ocsp.thawte.com","crl.verisign.com","ocsp.verisign.com","crl.globalsign.com","ocsp.globalsign.com","crls.ssl.com","ocsp.ssl.com","crl.identrust.com","ocsp.identrust.com","crl.sectigo.com","ocsp.sectigo.com","crl.usertrust.com","ocsp.usertrust.com","s.symcb.com","s.symcd.com","json-schema.org","json.schemastore.org","archive.ubuntu.com","security.ubuntu.com","ppa.launchpad.net","keyserver.ubuntu.com","azure.archive.ubuntu.com","api.snapcraft.io","packagecloud.io","packages.cloud.google.com","packages.microsoft.com"] + + def extract_domain(url_or_query): + """Extract domain from URL or search query.""" + if not url_or_query: + return None + + if url_or_query.startswith(('http://', 'https://')): + return urllib.parse.urlparse(url_or_query).netloc.lower() + + # Check for domain patterns in search queries + match = re.search(r'site:([a-zA-Z0-9.-]+\.[a-zA-Z]{2,})', url_or_query) + if match: + return match.group(1).lower() + + return None + + def is_domain_allowed(domain): + """Check if domain is allowed.""" + if not domain: + # If no domain detected, allow only if not under deny-all policy + return bool(ALLOWED_DOMAINS) # False if empty list (deny-all), True if has domains + + # Empty allowed domains means deny all + if not ALLOWED_DOMAINS: + return False + + for pattern in ALLOWED_DOMAINS: + regex = pattern.replace('.', r'\.').replace('*', '.*') + if re.match(f'^{regex}$', domain): + return True + return False + + # Main logic + try: + data = json.load(sys.stdin) + tool_name = data.get('tool_name', '') + tool_input = data.get('tool_input', {}) + + if tool_name not in ['WebFetch', 'WebSearch']: + sys.exit(0) # Allow other tools + + target = tool_input.get('url') or 
tool_input.get('query', '') + domain = extract_domain(target) + + # For WebSearch, apply domain restrictions consistently + # If no domain detected in search query, check if restrictions are in place + if tool_name == 'WebSearch' and not domain: + # Since this hook is only generated when network permissions are configured, + # empty ALLOWED_DOMAINS means deny-all policy + if not ALLOWED_DOMAINS: # Empty list means deny all + print(f"Network access blocked: deny-all policy in effect", file=sys.stderr) + print(f"No domains are allowed for WebSearch", file=sys.stderr) + sys.exit(2) # Block under deny-all policy + else: + print(f"Network access blocked for web-search: no specific domain detected", file=sys.stderr) + print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr) + sys.exit(2) # Block general searches when domain allowlist is configured + + if not is_domain_allowed(domain): + print(f"Network access blocked for domain: {domain}", file=sys.stderr) + print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr) + sys.exit(2) # Block with feedback to Claude + + sys.exit(0) # Allow + + except Exception as e: + print(f"Network validation error: {e}", file=sys.stderr) + sys.exit(2) # Block on errors + + EOF + chmod +x .claude/hooks/network_permissions.py + - name: Setup agent output + id: setup_agent_output + uses: actions/github-script@v7 + with: + script: | + function main() { + const fs = require("fs"); + const crypto = require("crypto"); + // Generate a random filename for the output file + const randomId = crypto.randomBytes(8).toString("hex"); + const outputFile = `/tmp/aw_output_${randomId}.txt`; + // Ensure the /tmp directory exists + fs.mkdirSync("/tmp", { recursive: true }); + // We don't create the file, as the name is sufficiently random + // and some engines (Claude) fails first Write to the file + // if it exists and has not been read. 
+ // Set the environment variable for subsequent steps + core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); + // Also set as step output for reference + core.setOutput("output_file", outputFile); + } + main(); + - name: Setup Safe Outputs Collector MCP + env: + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true},\"create-issue\":true}" + run: | + mkdir -p /tmp/safe-outputs + cat > /tmp/safe-outputs/mcp-server.cjs << 'EOF' + const fs = require("fs"); + const encoder = new TextEncoder(); + const configEnv = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; + if (!configEnv) throw new Error("GITHUB_AW_SAFE_OUTPUTS_CONFIG not set"); + const safeOutputsConfig = JSON.parse(configEnv); + const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; + if (!outputFile) + throw new Error("GITHUB_AW_SAFE_OUTPUTS not set, no output file"); + const SERVER_INFO = { name: "safe-outputs-mcp-server", version: "1.0.0" }; + const debug = msg => process.stderr.write(`[${SERVER_INFO.name}] ${msg}\n`); + function writeMessage(obj) { + const json = JSON.stringify(obj); + debug(`send: ${json}`); + const message = json + "\n"; + const bytes = encoder.encode(message); + fs.writeSync(1, bytes); + } + class ReadBuffer { + append(chunk) { + this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk; + } + readMessage() { + if (!this._buffer) { + return null; + } + const index = this._buffer.indexOf("\n"); + if (index === -1) { + return null; + } + const line = this._buffer.toString("utf8", 0, index).replace(/\r$/, ""); + this._buffer = this._buffer.subarray(index + 1); + if (line.trim() === "") { + return this.readMessage(); // Skip empty lines recursively + } + try { + return JSON.parse(line); + } catch (error) { + throw new Error( + `Parse error: ${error instanceof Error ? 
error.message : String(error)}` + ); + } + } + } + const readBuffer = new ReadBuffer(); + function onData(chunk) { + readBuffer.append(chunk); + processReadBuffer(); + } + function processReadBuffer() { + while (true) { + try { + const message = readBuffer.readMessage(); + if (!message) { + break; + } + debug(`recv: ${JSON.stringify(message)}`); + handleMessage(message); + } catch (error) { + // For parse errors, we can't know the request id, so we shouldn't send a response + // according to JSON-RPC spec. Just log the error. + debug( + `Parse error: ${error instanceof Error ? error.message : String(error)}` + ); + } + } + } + function replyResult(id, result) { + if (id === undefined || id === null) return; // notification + const res = { jsonrpc: "2.0", id, result }; + writeMessage(res); + } + function replyError(id, code, message, data) { + // Don't send error responses for notifications (id is null/undefined) + if (id === undefined || id === null) { + debug(`Error for notification: ${message}`); + return; + } + const error = { code, message }; + if (data !== undefined) { + error.data = data; + } + const res = { + jsonrpc: "2.0", + id, + error, + }; + writeMessage(res); + } + function isToolEnabled(name) { + return safeOutputsConfig[name] && safeOutputsConfig[name].enabled; + } + function appendSafeOutput(entry) { + if (!outputFile) throw new Error("No output file configured"); + const jsonLine = JSON.stringify(entry) + "\n"; + try { + fs.appendFileSync(outputFile, jsonLine); + } catch (error) { + throw new Error( + `Failed to write to output file: ${error instanceof Error ? 
error.message : String(error)}` + ); + } + } + const defaultHandler = type => args => { + const entry = { ...(args || {}), type }; + appendSafeOutput(entry); + return { + content: [ + { + type: "text", + text: `success`, + }, + ], + }; + }; + const TOOLS = Object.fromEntries( + [ + { + name: "create-issue", + description: "Create a new GitHub issue", + inputSchema: { + type: "object", + required: ["title", "body"], + properties: { + title: { type: "string", description: "Issue title" }, + body: { type: "string", description: "Issue body/description" }, + labels: { + type: "array", + items: { type: "string" }, + description: "Issue labels", + }, + }, + additionalProperties: false, + }, + }, + { + name: "create-discussion", + description: "Create a new GitHub discussion", + inputSchema: { + type: "object", + required: ["title", "body"], + properties: { + title: { type: "string", description: "Discussion title" }, + body: { type: "string", description: "Discussion body/content" }, + category: { type: "string", description: "Discussion category" }, + }, + additionalProperties: false, + }, + }, + { + name: "add-issue-comment", + description: "Add a comment to a GitHub issue or pull request", + inputSchema: { + type: "object", + required: ["body"], + properties: { + body: { type: "string", description: "Comment body/content" }, + issue_number: { + type: "number", + description: "Issue or PR number (optional for current context)", + }, + }, + additionalProperties: false, + }, + }, + { + name: "create-pull-request", + description: "Create a new GitHub pull request", + inputSchema: { + type: "object", + required: ["title", "body"], + properties: { + title: { type: "string", description: "Pull request title" }, + body: { + type: "string", + description: "Pull request body/description", + }, + branch: { + type: "string", + description: + "Optional branch name (will be auto-generated if not provided)", + }, + labels: { + type: "array", + items: { type: "string" }, + 
description: "Optional labels to add to the PR", + }, + }, + additionalProperties: false, + }, + }, + { + name: "create-pull-request-review-comment", + description: "Create a review comment on a GitHub pull request", + inputSchema: { + type: "object", + required: ["path", "line", "body"], + properties: { + path: { + type: "string", + description: "File path for the review comment", + }, + line: { + type: ["number", "string"], + description: "Line number for the comment", + }, + body: { type: "string", description: "Comment body content" }, + start_line: { + type: ["number", "string"], + description: "Optional start line for multi-line comments", + }, + side: { + type: "string", + enum: ["LEFT", "RIGHT"], + description: "Optional side of the diff: LEFT or RIGHT", + }, + }, + additionalProperties: false, + }, + }, + { + name: "create-code-scanning-alert", + description: "Create a code scanning alert", + inputSchema: { + type: "object", + required: ["file", "line", "severity", "message"], + properties: { + file: { + type: "string", + description: "File path where the issue was found", + }, + line: { + type: ["number", "string"], + description: "Line number where the issue was found", + }, + severity: { + type: "string", + enum: ["error", "warning", "info", "note"], + description: "Severity level", + }, + message: { + type: "string", + description: "Alert message describing the issue", + }, + column: { + type: ["number", "string"], + description: "Optional column number", + }, + ruleIdSuffix: { + type: "string", + description: "Optional rule ID suffix for uniqueness", + }, + }, + additionalProperties: false, + }, + }, + { + name: "add-issue-label", + description: "Add labels to a GitHub issue or pull request", + inputSchema: { + type: "object", + required: ["labels"], + properties: { + labels: { + type: "array", + items: { type: "string" }, + description: "Labels to add", + }, + issue_number: { + type: "number", + description: "Issue or PR number (optional for current 
context)", + }, + }, + additionalProperties: false, + }, + }, + { + name: "update-issue", + description: "Update a GitHub issue", + inputSchema: { + type: "object", + properties: { + status: { + type: "string", + enum: ["open", "closed"], + description: "Optional new issue status", + }, + title: { type: "string", description: "Optional new issue title" }, + body: { type: "string", description: "Optional new issue body" }, + issue_number: { + type: ["number", "string"], + description: "Optional issue number for target '*'", + }, + }, + additionalProperties: false, + }, + }, + { + name: "push-to-pr-branch", + description: "Push changes to a pull request branch", + inputSchema: { + type: "object", + properties: { + message: { type: "string", description: "Optional commit message" }, + pull_request_number: { + type: ["number", "string"], + description: "Optional pull request number for target '*'", + }, + }, + additionalProperties: false, + }, + }, + { + name: "missing-tool", + description: + "Report a missing tool or functionality needed to complete tasks", + inputSchema: { + type: "object", + required: ["tool", "reason"], + properties: { + tool: { type: "string", description: "Name of the missing tool" }, + reason: { type: "string", description: "Why this tool is needed" }, + alternatives: { + type: "string", + description: "Possible alternatives or workarounds", + }, + }, + additionalProperties: false, + }, + }, + ] + .filter(({ name }) => isToolEnabled(name)) + .map(tool => [tool.name, tool]) + ); + debug(`v${SERVER_INFO.version} ready on stdio`); + debug(` output file: ${outputFile}`); + debug(` config: ${JSON.stringify(safeOutputsConfig)}`); + debug(` tools: ${Object.keys(TOOLS).join(", ")}`); + if (!Object.keys(TOOLS).length) + throw new Error("No tools enabled in configuration"); + function handleMessage(req) { + // Validate basic JSON-RPC structure + if (!req || typeof req !== "object") { + debug(`Invalid message: not an object`); + return; + } + if 
(req.jsonrpc !== "2.0") { + debug(`Invalid message: missing or invalid jsonrpc field`); + return; + } + const { id, method, params } = req; + // Validate method field + if (!method || typeof method !== "string") { + replyError(id, -32600, "Invalid Request: method must be a string"); + return; + } + try { + if (method === "initialize") { + const clientInfo = params?.clientInfo ?? {}; + console.error(`client initialized:`, clientInfo); + const protocolVersion = params?.protocolVersion ?? undefined; + const result = { + serverInfo: SERVER_INFO, + ...(protocolVersion ? { protocolVersion } : {}), + capabilities: { + tools: {}, + }, + }; + replyResult(id, result); + } else if (method === "tools/list") { + const list = []; + Object.values(TOOLS).forEach(tool => { + list.push({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + }); + }); + replyResult(id, { tools: list }); + } else if (method === "tools/call") { + const name = params?.name; + const args = params?.arguments ?? {}; + if (!name || typeof name !== "string") { + replyError(id, -32602, "Invalid params: 'name' must be a string"); + return; + } + const tool = TOOLS[name]; + if (!tool) { + replyError(id, -32601, `Tool not found: ${name}`); + return; + } + const handler = tool.handler || defaultHandler(tool.name); + const requiredFields = + tool.inputSchema && Array.isArray(tool.inputSchema.required) + ? tool.inputSchema.required + : []; + if (requiredFields.length) { + const missing = requiredFields.filter(f => args[f] === undefined); + if (missing.length) { + replyError( + id, + -32602, + `Invalid arguments: missing ${missing.map(m => `'${m}'`).join(", ")}` + ); + return; + } + } + const result = handler(args); + const content = result && result.content ? 
result.content : []; + replyResult(id, { content }); + } else if (/^notifications\//.test(method)) { + debug(`ignore ${method}`); + } else { + replyError(id, -32601, `Method not found: ${method}`); + } + } catch (e) { + replyError(id, -32603, "Internal error", { + message: e instanceof Error ? e.message : String(e), + }); + } + } + process.stdin.on("data", onData); + process.stdin.on("error", err => debug(`stdin error: ${err}`)); + process.stdin.resume(); + debug(`listening...`); + EOF + chmod +x /tmp/safe-outputs/mcp-server.cjs + + - name: Setup MCPs + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true},\"create-issue\":true}" + run: | + mkdir -p /tmp/mcp-config + cat > /tmp/mcp-config/mcp-servers.json << 'EOF' + { + "mcpServers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server:latest" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GITHUB_TOKEN }}" + } + }, + "playwright": { + "command": "npx", + "args": [ + "@playwright/mcp@latest", + "--allowed-origins", + "example.com,*.github.com" + ] + }, + "safe_outputs": { + "command": "node", + "args": ["/tmp/safe-outputs/mcp-server.cjs"], + "env": { + "GITHUB_AW_SAFE_OUTPUTS": "${{ env.GITHUB_AW_SAFE_OUTPUTS }}", + "GITHUB_AW_SAFE_OUTPUTS_CONFIG": ${{ toJSON(env.GITHUB_AW_SAFE_OUTPUTS_CONFIG) }} + } + } + } + } + EOF + - name: Create prompt + env: + GITHUB_AW_PROMPT: /tmp/aw-prompts/prompt.txt + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + mkdir -p /tmp/aw-prompts + cat > $GITHUB_AW_PROMPT << 'EOF' + # Test MCP Configuration + + This is a test workflow to demonstrate MCP configuration generation and server launching. 
+ + + --- + + ## Adding a Comment to an Issue or Pull Request, Creating an Issue, Reporting Missing Tools or Functionality + + **IMPORTANT**: To do the actions mentioned in the header of this section, use the **safe-outputs** tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. + EOF + - name: Print prompt to step summary + run: | + echo "## Generated Prompt" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````markdown' >> $GITHUB_STEP_SUMMARY + cat $GITHUB_AW_PROMPT >> $GITHUB_STEP_SUMMARY + echo '``````' >> $GITHUB_STEP_SUMMARY + env: + GITHUB_AW_PROMPT: /tmp/aw-prompts/prompt.txt + - name: Generate agentic run info + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + + const awInfo = { + engine_id: "claude", + engine_name: "Claude Code", + model: "", + version: "", + workflow_name: "Test MCP Configuration", + experimental: false, + supports_tools_whitelist: true, + supports_http_transport: true, + run_id: context.runId, + run_number: context.runNumber, + run_attempt: process.env.GITHUB_RUN_ATTEMPT, + repository: context.repo.owner + '/' + context.repo.repo, + ref: context.ref, + sha: context.sha, + actor: context.actor, + event_name: context.eventName, + staged: false, + created_at: new Date().toISOString() + }; + + // Write to /tmp directory to avoid inclusion in PR + const tmpPath = '/tmp/aw_info.json'; + fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); + console.log('Generated aw_info.json at:', tmpPath); + console.log(JSON.stringify(awInfo, null, 2)); + - name: Upload agentic run info + if: always() + uses: actions/upload-artifact@v4 + with: + name: aw_info.json + path: /tmp/aw_info.json + if-no-files-found: warn + - name: Execute Claude Code CLI + id: agentic_execution + # Allowed tools (sorted): + # - ExitPlanMode + # - Glob + # - Grep + # - LS + # - NotebookRead + # - Read + # - Task + # - TodoWrite + # - Write + # - 
mcp__github__download_workflow_run_artifact + # - mcp__github__get_code_scanning_alert + # - mcp__github__get_commit + # - mcp__github__get_dependabot_alert + # - mcp__github__get_discussion + # - mcp__github__get_discussion_comments + # - mcp__github__get_file_contents + # - mcp__github__get_issue + # - mcp__github__get_issue_comments + # - mcp__github__get_job_logs + # - mcp__github__get_me + # - mcp__github__get_notification_details + # - mcp__github__get_pull_request + # - mcp__github__get_pull_request_comments + # - mcp__github__get_pull_request_diff + # - mcp__github__get_pull_request_files + # - mcp__github__get_pull_request_reviews + # - mcp__github__get_pull_request_status + # - mcp__github__get_secret_scanning_alert + # - mcp__github__get_tag + # - mcp__github__get_workflow_run + # - mcp__github__get_workflow_run_logs + # - mcp__github__get_workflow_run_usage + # - mcp__github__list_branches + # - mcp__github__list_code_scanning_alerts + # - mcp__github__list_commits + # - mcp__github__list_dependabot_alerts + # - mcp__github__list_discussion_categories + # - mcp__github__list_discussions + # - mcp__github__list_issues + # - mcp__github__list_notifications + # - mcp__github__list_pull_requests + # - mcp__github__list_secret_scanning_alerts + # - mcp__github__list_tags + # - mcp__github__list_workflow_jobs + # - mcp__github__list_workflow_run_artifacts + # - mcp__github__list_workflow_runs + # - mcp__github__list_workflows + # - mcp__github__search_code + # - mcp__github__search_issues + # - mcp__github__search_orgs + # - mcp__github__search_pull_requests + # - mcp__github__search_repositories + # - mcp__github__search_users + # - mcp__playwright__browser_click + # - mcp__playwright__browser_close + # - mcp__playwright__browser_console_messages + # - mcp__playwright__browser_drag + # - mcp__playwright__browser_evaluate + # - mcp__playwright__browser_file_upload + # - mcp__playwright__browser_fill_form + # - mcp__playwright__browser_handle_dialog + # - 
mcp__playwright__browser_hover + # - mcp__playwright__browser_install + # - mcp__playwright__browser_navigate + # - mcp__playwright__browser_navigate_back + # - mcp__playwright__browser_network_requests + # - mcp__playwright__browser_press_key + # - mcp__playwright__browser_resize + # - mcp__playwright__browser_select_option + # - mcp__playwright__browser_snapshot + # - mcp__playwright__browser_tabs + # - mcp__playwright__browser_take_screenshot + # - mcp__playwright__browser_type + # - mcp__playwright__browser_wait_for + timeout-minutes: 5 + run: | + set -o pipefail + # Execute Claude Code CLI with prompt from file + npx @anthropic-ai/claude-code@latest --print --mcp-config /tmp/mcp-config/mcp-servers.json --allowed-tools "ExitPlanMode,Glob,Grep,LS,NotebookRead,Read,Task,TodoWrite,Write,mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_issue,mcp__github__get_issue_comments,mcp__github__get_job_logs,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issues,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_secret_scanning_alerts,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_
runs,mcp__github__list_workflows,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users,mcp__playwright__browser_click,mcp__playwright__browser_close,mcp__playwright__browser_console_messages,mcp__playwright__browser_drag,mcp__playwright__browser_evaluate,mcp__playwright__browser_file_upload,mcp__playwright__browser_fill_form,mcp__playwright__browser_handle_dialog,mcp__playwright__browser_hover,mcp__playwright__browser_install,mcp__playwright__browser_navigate,mcp__playwright__browser_navigate_back,mcp__playwright__browser_network_requests,mcp__playwright__browser_press_key,mcp__playwright__browser_resize,mcp__playwright__browser_select_option,mcp__playwright__browser_snapshot,mcp__playwright__browser_tabs,mcp__playwright__browser_take_screenshot,mcp__playwright__browser_type,mcp__playwright__browser_wait_for" --debug --verbose --permission-mode bypassPermissions --output-format json --settings /tmp/.claude/settings.json "$(cat /tmp/aw-prompts/prompt.txt)" 2>&1 | tee /tmp/test-mcp-configuration.log + env: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + DISABLE_TELEMETRY: "1" + DISABLE_ERROR_REPORTING: "1" + DISABLE_BUG_COMMAND: "1" + GITHUB_AW_PROMPT: /tmp/aw-prompts/prompt.txt + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + - name: Ensure log file exists + if: always() + run: | + # Ensure log file exists + touch /tmp/test-mcp-configuration.log + # Show last few lines for debugging + echo "=== Last 10 lines of Claude execution log ===" + tail -10 /tmp/test-mcp-configuration.log || echo "No log content available" + - name: Print Agent output + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + run: | + echo "## Agent Output (JSONL)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````json' >> $GITHUB_STEP_SUMMARY + if [ -f ${{ env.GITHUB_AW_SAFE_OUTPUTS }} ]; then + cat ${{ 
env.GITHUB_AW_SAFE_OUTPUTS }} >> $GITHUB_STEP_SUMMARY + # Ensure there's a newline after the file content if it doesn't end with one + if [ -s ${{ env.GITHUB_AW_SAFE_OUTPUTS }} ] && [ "$(tail -c1 ${{ env.GITHUB_AW_SAFE_OUTPUTS }})" != "" ]; then + echo "" >> $GITHUB_STEP_SUMMARY + fi + else + echo "No agent output file found" >> $GITHUB_STEP_SUMMARY + fi + echo '``````' >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + - name: Upload agentic output file + if: always() + uses: actions/upload-artifact@v4 + with: + name: safe_output.jsonl + path: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + if-no-files-found: warn + - name: Ingest agent output + id: collect_output + uses: actions/github-script@v7 + env: + GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} + GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true},\"create-issue\":true}" + with: + script: | + async function main() { + const fs = require("fs"); + /** + * Sanitizes content for safe output in GitHub Actions + * @param {string} content - The content to sanitize + * @returns {string} The sanitized content + */ + function sanitizeContent(content) { + if (!content || typeof content !== "string") { + return ""; + } + // Read allowed domains from environment variable + const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; + const defaultAllowedDomains = [ + "github.com", + "github.io", + "githubusercontent.com", + "githubassets.com", + "github.dev", + "codespaces.new", + ]; + const allowedDomains = allowedDomainsEnv + ? 
allowedDomainsEnv
+              .split(",")
+              .map(d => d.trim())
+              .filter(d => d)
+          : defaultAllowedDomains;
+        let sanitized = content;
+        // Neutralize @mentions to prevent unintended notifications
+        sanitized = neutralizeMentions(sanitized);
+        // Remove control characters (except newlines and tabs)
+        sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, "");
+        // XML character escaping
+        sanitized = sanitized
+          .replace(/&/g, "&amp;") // Must be first to avoid double-escaping
+          .replace(/</g, "&lt;")
+          .replace(/>/g, "&gt;")
+          .replace(/"/g, "&quot;")
+          .replace(/'/g, "&#x27;");
+        // URI filtering - replace non-https protocols with "(redacted)"
+        sanitized = sanitizeUrlProtocols(sanitized);
+        // Domain filtering for HTTPS URIs
+        sanitized = sanitizeUrlDomains(sanitized);
+        // Limit total length to prevent DoS (0.5MB max)
+        const maxLength = 524288;
+        if (sanitized.length > maxLength) {
+          sanitized =
+            sanitized.substring(0, maxLength) +
+            "\n[Content truncated due to length]";
+        }
+        // Limit number of lines to prevent log flooding (65k max)
+        const lines = sanitized.split("\n");
+        const maxLines = 65000;
+        if (lines.length > maxLines) {
+          sanitized =
+            lines.slice(0, maxLines).join("\n") +
+            "\n[Content truncated due to line count]";
+        }
+        // Remove ANSI escape sequences
+        sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, "");
+        // Neutralize common bot trigger phrases
+        sanitized = neutralizeBotTriggers(sanitized);
+        // Trim excessive whitespace
+        return sanitized.trim();
+        /**
+         * Remove unknown domains
+         * @param {string} s - The string to process
+         * @returns {string} The string with unknown domains redacted
+         */
+        function sanitizeUrlDomains(s) {
+          return s.replace(
+            /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi,
+            (match, domain) => {
+              // Extract the hostname part (before first slash, colon, or other delimiter)
+              const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase();
+              // Check if this domain or any parent domain is in the allowlist
+              const isAllowed = allowedDomains.some(allowedDomain
=> { + const normalizedAllowed = allowedDomain.toLowerCase(); + return ( + hostname === normalizedAllowed || + hostname.endsWith("." + normalizedAllowed) + ); + }); + return isAllowed ? match : "(redacted)"; + } + ); + } + /** + * Remove unknown protocols except https + * @param {string} s - The string to process + * @returns {string} The string with non-https protocols redacted + */ + function sanitizeUrlProtocols(s) { + // Match both protocol:// and protocol: patterns + return s.replace( + /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, + (match, protocol) => { + // Allow https (case insensitive), redact everything else + return protocol.toLowerCase() === "https" ? match : "(redacted)"; + } + ); + } + /** + * Neutralizes @mentions by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized mentions + */ + function neutralizeMentions(s) { + // Replace @name or @org/team outside code with `@name` + return s.replace( + /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, + (_m, p1, p2) => `${p1}\`@${p2}\`` + ); + } + /** + * Neutralizes bot trigger phrases by wrapping them in backticks + * @param {string} s - The string to process + * @returns {string} The string with neutralized bot triggers + */ + function neutralizeBotTriggers(s) { + // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
+ return s.replace( + /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, + (match, action, ref) => `\`${action} #${ref}\`` + ); + } + } + /** + * Gets the maximum allowed count for a given output type + * @param {string} itemType - The output item type + * @param {any} config - The safe-outputs configuration + * @returns {number} The maximum allowed count + */ + function getMaxAllowedForType(itemType, config) { + // Check if max is explicitly specified in config + if ( + config && + config[itemType] && + typeof config[itemType] === "object" && + config[itemType].max + ) { + return config[itemType].max; + } + // Use default limits for plural-supported types + switch (itemType) { + case "create-issue": + return 1; // Only one issue allowed + case "add-issue-comment": + return 1; // Only one comment allowed + case "create-pull-request": + return 1; // Only one pull request allowed + case "create-pull-request-review-comment": + return 10; // Default to 10 review comments allowed + case "add-issue-label": + return 5; // Only one labels operation allowed + case "update-issue": + return 1; // Only one issue update allowed + case "push-to-pr-branch": + return 1; // Only one push to branch allowed + case "create-discussion": + return 1; // Only one discussion allowed + case "missing-tool": + return 1000; // Allow many missing tool reports (default: unlimited) + case "create-code-scanning-alert": + return 1000; // Allow many repository security advisories (default: unlimited) + default: + return 1; // Default to single item for unknown types + } + } + /** + * Attempts to repair common JSON syntax issues in LLM-generated content + * @param {string} jsonStr - The potentially malformed JSON string + * @returns {string} The repaired JSON string + */ + function repairJson(jsonStr) { + let repaired = jsonStr.trim(); + // remove invalid control characters like + // U+0014 (DC4) — represented here as "\u0014" + // Escape control characters not allowed in JSON strings 
(U+0000 through U+001F)
+          // Preserve common JSON escapes for \b, \f, \n, \r, \t and use \uXXXX for the rest.
+          /** @type {Record<number, string>} */
+          const _ctrl = { 8: "\\b", 9: "\\t", 10: "\\n", 12: "\\f", 13: "\\r" };
+          repaired = repaired.replace(/[\u0000-\u001F]/g, ch => {
+            const c = ch.charCodeAt(0);
+            return _ctrl[c] || "\\u" + c.toString(16).padStart(4, "0");
+          });
+          // Fix single quotes to double quotes (must be done first)
+          repaired = repaired.replace(/'/g, '"');
+          // Fix missing quotes around object keys
+          repaired = repaired.replace(
+            /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g,
+            '$1"$2":'
+          );
+          // Fix newlines and tabs inside strings by escaping them
+          repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => {
+            if (
+              content.includes("\n") ||
+              content.includes("\r") ||
+              content.includes("\t")
+            ) {
+              const escaped = content
+                .replace(/\\/g, "\\\\")
+                .replace(/\n/g, "\\n")
+                .replace(/\r/g, "\\r")
+                .replace(/\t/g, "\\t");
+              return `"${escaped}"`;
+            }
+            return match;
+          });
+          // Fix unescaped quotes inside string values
+          repaired = repaired.replace(
+            /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g,
+            (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}`
+          );
+          // Fix wrong bracket/brace types - arrays should end with ] not }
+          repaired = repaired.replace(
+            /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g,
+            "$1]"
+          );
+          // Fix missing closing braces/brackets
+          const openBraces = (repaired.match(/\{/g) || []).length;
+          const closeBraces = (repaired.match(/\}/g) || []).length;
+          if (openBraces > closeBraces) {
+            repaired += "}".repeat(openBraces - closeBraces);
+          } else if (closeBraces > openBraces) {
+            repaired = "{".repeat(closeBraces - openBraces) + repaired;
+          }
+          // Fix missing closing brackets for arrays
+          const openBrackets = (repaired.match(/\[/g) || []).length;
+          const closeBrackets = (repaired.match(/\]/g) || []).length;
+          if (openBrackets > closeBrackets) {
+            repaired += "]".repeat(openBrackets - closeBrackets);
+          } else if
(closeBrackets > openBrackets) { + repaired = "[".repeat(closeBrackets - openBrackets) + repaired; + } + // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) + repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); + return repaired; + } + /** + * Attempts to parse JSON with repair fallback + * @param {string} jsonStr - The JSON string to parse + * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails + */ + function parseJsonWithRepair(jsonStr) { + try { + // First, try normal JSON.parse + return JSON.parse(jsonStr); + } catch (originalError) { + try { + // If that fails, try repairing and parsing again + const repairedJson = repairJson(jsonStr); + return JSON.parse(repairedJson); + } catch (repairError) { + // If repair also fails, throw the error + core.info(`invalid input json: ${jsonStr}`); + const originalMsg = + originalError instanceof Error + ? originalError.message + : String(originalError); + const repairMsg = + repairError instanceof Error + ? repairError.message + : String(repairError); + throw new Error( + `JSON parsing failed. Original: ${originalMsg}. 
After attempted repair: ${repairMsg}` + ); + } + } + } + const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; + const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; + if (!outputFile) { + core.info("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); + core.setOutput("output", ""); + return; + } + if (!fs.existsSync(outputFile)) { + core.info(`Output file does not exist: ${outputFile}`); + core.setOutput("output", ""); + return; + } + const outputContent = fs.readFileSync(outputFile, "utf8"); + if (outputContent.trim() === "") { + core.info("Output file is empty"); + core.setOutput("output", ""); + return; + } + core.info(`Raw output content length: ${outputContent.length}`); + // Parse the safe-outputs configuration + /** @type {any} */ + let expectedOutputTypes = {}; + if (safeOutputsConfig) { + try { + expectedOutputTypes = JSON.parse(safeOutputsConfig); + core.info( + `Expected output types: ${JSON.stringify(Object.keys(expectedOutputTypes))}` + ); + } catch (error) { + const errorMsg = error instanceof Error ? 
error.message : String(error); + core.info(`Warning: Could not parse safe-outputs config: ${errorMsg}`); + } + } + // Parse JSONL content + const lines = outputContent.trim().split("\n"); + const parsedItems = []; + const errors = []; + for (let i = 0; i < lines.length; i++) { + const line = lines[i].trim(); + if (line === "") continue; // Skip empty lines + try { + /** @type {any} */ + const item = parseJsonWithRepair(line); + // If item is undefined (failed to parse), add error and process next line + if (item === undefined) { + errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); + continue; + } + // Validate that the item has a 'type' field + if (!item.type) { + errors.push(`Line ${i + 1}: Missing required 'type' field`); + continue; + } + // Validate against expected output types + const itemType = item.type; + if (!expectedOutputTypes[itemType]) { + errors.push( + `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` + ); + continue; + } + // Check for too many items of the same type + const typeCount = parsedItems.filter( + existing => existing.type === itemType + ).length; + const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); + if (typeCount >= maxAllowed) { + errors.push( + `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` + ); + continue; + } + // Basic validation based on type + switch (itemType) { + case "create-issue": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-issue requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map( + /** @param {any} label */ label => + typeof label === "string" ? sanitizeContent(label) : label + ); + } + break; + case "add-issue-comment": + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: add-issue-comment requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.body = sanitizeContent(item.body); + break; + case "create-pull-request": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request requires a 'body' string field` + ); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + // Sanitize branch name if present + if (item.branch && typeof item.branch === "string") { + item.branch = sanitizeContent(item.branch); + } + // Sanitize labels if present + if (item.labels && Array.isArray(item.labels)) { + item.labels = item.labels.map( + /** @param {any} label */ label => + typeof label === "string" ? 
sanitizeContent(label) : label + ); + } + break; + case "add-issue-label": + if (!item.labels || !Array.isArray(item.labels)) { + errors.push( + `Line ${i + 1}: add-issue-label requires a 'labels' array field` + ); + continue; + } + if ( + item.labels.some( + /** @param {any} label */ label => typeof label !== "string" + ) + ) { + errors.push( + `Line ${i + 1}: add-issue-label labels array must contain only strings` + ); + continue; + } + // Sanitize label strings + item.labels = item.labels.map( + /** @param {any} label */ label => sanitizeContent(label) + ); + break; + case "update-issue": + // Check that at least one updateable field is provided + const hasValidField = + item.status !== undefined || + item.title !== undefined || + item.body !== undefined; + if (!hasValidField) { + errors.push( + `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` + ); + continue; + } + // Validate status if provided + if (item.status !== undefined) { + if ( + typeof item.status !== "string" || + (item.status !== "open" && item.status !== "closed") + ) { + errors.push( + `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` + ); + continue; + } + } + // Validate title if provided + if (item.title !== undefined) { + if (typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'title' must be a string` + ); + continue; + } + item.title = sanitizeContent(item.title); + } + // Validate body if provided + if (item.body !== undefined) { + if (typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: update-issue 'body' must be a string` + ); + continue; + } + item.body = sanitizeContent(item.body); + } + // Validate issue_number if provided (for target "*") + if (item.issue_number !== undefined) { + if ( + typeof item.issue_number !== "number" && + typeof item.issue_number !== "string" + ) { + errors.push( + `Line ${i + 1}: update-issue 'issue_number' must be a number or string` + ); + continue; + } + } 
+ break; + case "push-to-pr-branch": + // Validate message if provided (optional) + if (item.message !== undefined) { + if (typeof item.message !== "string") { + errors.push( + `Line ${i + 1}: push-to-pr-branch 'message' must be a string` + ); + continue; + } + item.message = sanitizeContent(item.message); + } + // Validate pull_request_number if provided (for target "*") + if (item.pull_request_number !== undefined) { + if ( + typeof item.pull_request_number !== "number" && + typeof item.pull_request_number !== "string" + ) { + errors.push( + `Line ${i + 1}: push-to-pr-branch 'pull_request_number' must be a number or string` + ); + continue; + } + } + break; + case "create-pull-request-review-comment": + // Validate required path field + if (!item.path || typeof item.path !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` + ); + continue; + } + // Validate required line field + if ( + item.line === undefined || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` + ); + continue; + } + // Validate line is a positive integer + const lineNumber = + typeof item.line === "string" ? 
parseInt(item.line, 10) : item.line; + if ( + isNaN(lineNumber) || + lineNumber <= 0 || + !Number.isInteger(lineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` + ); + continue; + } + // Validate required body field + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` + ); + continue; + } + // Sanitize required text content + item.body = sanitizeContent(item.body); + // Validate optional start_line field + if (item.start_line !== undefined) { + if ( + typeof item.start_line !== "number" && + typeof item.start_line !== "string" + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` + ); + continue; + } + const startLineNumber = + typeof item.start_line === "string" + ? parseInt(item.start_line, 10) + : item.start_line; + if ( + isNaN(startLineNumber) || + startLineNumber <= 0 || + !Number.isInteger(startLineNumber) + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` + ); + continue; + } + if (startLineNumber > lineNumber) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` + ); + continue; + } + } + // Validate optional side field + if (item.side !== undefined) { + if ( + typeof item.side !== "string" || + (item.side !== "LEFT" && item.side !== "RIGHT") + ) { + errors.push( + `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` + ); + continue; + } + } + break; + case "create-discussion": + if (!item.title || typeof item.title !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'title' string field` + ); + continue; + } + if (!item.body || typeof item.body !== "string") { + errors.push( + `Line ${i + 1}: create-discussion requires a 'body' string field` + 
); + continue; + } + // Sanitize text content + item.title = sanitizeContent(item.title); + item.body = sanitizeContent(item.body); + break; + case "missing-tool": + // Validate required tool field + if (!item.tool || typeof item.tool !== "string") { + errors.push( + `Line ${i + 1}: missing-tool requires a 'tool' string field` + ); + continue; + } + // Validate required reason field + if (!item.reason || typeof item.reason !== "string") { + errors.push( + `Line ${i + 1}: missing-tool requires a 'reason' string field` + ); + continue; + } + // Sanitize text content + item.tool = sanitizeContent(item.tool); + item.reason = sanitizeContent(item.reason); + // Validate optional alternatives field + if (item.alternatives !== undefined) { + if (typeof item.alternatives !== "string") { + errors.push( + `Line ${i + 1}: missing-tool 'alternatives' must be a string` + ); + continue; + } + item.alternatives = sanitizeContent(item.alternatives); + } + break; + case "create-code-scanning-alert": + // Validate required fields + if (!item.file || typeof item.file !== "string") { + errors.push( + `Line ${i + 1}: create-code-scanning-alert requires a 'file' field (string)` + ); + continue; + } + if ( + item.line === undefined || + item.line === null || + (typeof item.line !== "number" && typeof item.line !== "string") + ) { + errors.push( + `Line ${i + 1}: create-code-scanning-alert requires a 'line' field (number or string)` + ); + continue; + } + // Additional validation: line must be parseable as a positive integer + const parsedLine = parseInt(item.line, 10); + if (isNaN(parsedLine) || parsedLine <= 0) { + errors.push( + `Line ${i + 1}: create-code-scanning-alert 'line' must be a valid positive integer (got: ${item.line})` + ); + continue; + } + if (!item.severity || typeof item.severity !== "string") { + errors.push( + `Line ${i + 1}: create-code-scanning-alert requires a 'severity' field (string)` + ); + continue; + } + if (!item.message || typeof item.message !== "string") { 
+ errors.push( + `Line ${i + 1}: create-code-scanning-alert requires a 'message' field (string)` + ); + continue; + } + // Validate severity level + const allowedSeverities = ["error", "warning", "info", "note"]; + if (!allowedSeverities.includes(item.severity.toLowerCase())) { + errors.push( + `Line ${i + 1}: create-code-scanning-alert 'severity' must be one of: ${allowedSeverities.join(", ")}` + ); + continue; + } + // Validate optional column field + if (item.column !== undefined) { + if ( + typeof item.column !== "number" && + typeof item.column !== "string" + ) { + errors.push( + `Line ${i + 1}: create-code-scanning-alert 'column' must be a number or string` + ); + continue; + } + // Additional validation: must be parseable as a positive integer + const parsedColumn = parseInt(item.column, 10); + if (isNaN(parsedColumn) || parsedColumn <= 0) { + errors.push( + `Line ${i + 1}: create-code-scanning-alert 'column' must be a valid positive integer (got: ${item.column})` + ); + continue; + } + } + // Validate optional ruleIdSuffix field + if (item.ruleIdSuffix !== undefined) { + if (typeof item.ruleIdSuffix !== "string") { + errors.push( + `Line ${i + 1}: create-code-scanning-alert 'ruleIdSuffix' must be a string` + ); + continue; + } + if (!/^[a-zA-Z0-9_-]+$/.test(item.ruleIdSuffix.trim())) { + errors.push( + `Line ${i + 1}: create-code-scanning-alert 'ruleIdSuffix' must contain only alphanumeric characters, hyphens, and underscores` + ); + continue; + } + } + // Normalize severity to lowercase and sanitize string fields + item.severity = item.severity.toLowerCase(); + item.file = sanitizeContent(item.file); + item.severity = sanitizeContent(item.severity); + item.message = sanitizeContent(item.message); + if (item.ruleIdSuffix) { + item.ruleIdSuffix = sanitizeContent(item.ruleIdSuffix); + } + break; + default: + errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); + continue; + } + core.info(`Line ${i + 1}: Valid ${itemType} item`); + 
parsedItems.push(item); + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error); + errors.push(`Line ${i + 1}: Invalid JSON - ${errorMsg}`); + } + } + // Report validation results + if (errors.length > 0) { + core.warning("Validation errors found:"); + errors.forEach(error => core.warning(` - ${error}`)); + if (parsedItems.length === 0) { + core.setFailed(errors.map(e => ` - ${e}`).join("\n")); + return; + } + // For now, we'll continue with valid items but log the errors + // In the future, we might want to fail the workflow for invalid items + } + core.info(`Successfully parsed ${parsedItems.length} valid output items`); + // Set the parsed and validated items as output + const validatedOutput = { + items: parsedItems, + errors: errors, + }; + // Store validatedOutput JSON in "agent_output.json" file + const agentOutputFile = "/tmp/agent_output.json"; + const validatedOutputJson = JSON.stringify(validatedOutput); + try { + // Ensure the /tmp directory exists + fs.mkdirSync("/tmp", { recursive: true }); + fs.writeFileSync(agentOutputFile, validatedOutputJson, "utf8"); + core.info(`Stored validated output to: ${agentOutputFile}`); + // Set the environment variable GITHUB_AW_AGENT_OUTPUT to the file path + core.exportVariable("GITHUB_AW_AGENT_OUTPUT", agentOutputFile); + } catch (error) { + const errorMsg = error instanceof Error ? 
error.message : String(error); + core.error(`Failed to write agent output file: ${errorMsg}`); + } + core.setOutput("output", JSON.stringify(validatedOutput)); + core.setOutput("raw_output", outputContent); + } + // Call the main function + await main(); + - name: Print sanitized agent output + run: | + echo "## Processed Output" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '``````json' >> $GITHUB_STEP_SUMMARY + echo '${{ steps.collect_output.outputs.output }}' >> $GITHUB_STEP_SUMMARY + echo '``````' >> $GITHUB_STEP_SUMMARY + - name: Upload sanitized agent output + if: always() && env.GITHUB_AW_AGENT_OUTPUT + uses: actions/upload-artifact@v4 + with: + name: agent_output.json + path: ${{ env.GITHUB_AW_AGENT_OUTPUT }} + if-no-files-found: warn + - name: Upload engine output files + uses: actions/upload-artifact@v4 + with: + name: agent_outputs + path: | + output.txt + if-no-files-found: ignore + - name: Clean up engine output files + run: | + rm -f output.txt + - name: Parse agent logs for step summary + if: always() + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: /tmp/test-mcp-configuration.log + with: + script: | + function main() { + const fs = require("fs"); + try { + // Get the log file path from environment + const logFile = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!logFile) { + core.info("No agent log file specified"); + return; + } + if (!fs.existsSync(logFile)) { + core.info(`Log file not found: ${logFile}`); + return; + } + const logContent = fs.readFileSync(logFile, "utf8"); + const result = parseClaudeLog(logContent); + // Append to GitHub step summary + core.summary.addRaw(result.markdown).write(); + // Check for MCP server failures and fail the job if any occurred + if (result.mcpFailures && result.mcpFailures.length > 0) { + const failedServers = result.mcpFailures.join(", "); + core.setFailed(`MCP server(s) failed to launch: ${failedServers}`); + } + } catch (error) { + const errorMessage = error instanceof Error 
? error.message : String(error); + core.setFailed(errorMessage); + } + } + /** + * Parses Claude log content and converts it to markdown format + * @param {string} logContent - The raw log content as a string + * @returns {{markdown: string, mcpFailures: string[]}} Result with formatted markdown content and MCP failure list + */ + function parseClaudeLog(logContent) { + try { + const logEntries = JSON.parse(logContent); + if (!Array.isArray(logEntries)) { + return { + markdown: + "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n", + mcpFailures: [], + }; + } + let markdown = ""; + const mcpFailures = []; + // Check for initialization data first + const initEntry = logEntries.find( + entry => entry.type === "system" && entry.subtype === "init" + ); + if (initEntry) { + markdown += "## 🚀 Initialization\n\n"; + const initResult = formatInitializationSummary(initEntry); + markdown += initResult.markdown; + mcpFailures.push(...initResult.mcpFailures); + markdown += "\n"; + } + markdown += "## 🤖 Commands and Tools\n\n"; + const toolUsePairs = new Map(); // Map tool_use_id to tool_result + const commandSummary = []; // For the succinct summary + // First pass: collect tool results by tool_use_id + for (const entry of logEntries) { + if (entry.type === "user" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_result" && content.tool_use_id) { + toolUsePairs.set(content.tool_use_id, content); + } + } + } + } + // Collect all tool uses for summary + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "tool_use") { + const toolName = content.name; + const input = content.input || {}; + // Skip internal tools - only show external commands and API calls + if ( + [ + "Read", + "Write", + "Edit", + "MultiEdit", + "LS", + "Grep", + "Glob", + "TodoWrite", + ].includes(toolName) + ) { + 
continue; // Skip internal file operations and searches + } + // Find the corresponding tool result to get status + const toolResult = toolUsePairs.get(content.id); + let statusIcon = "❓"; + if (toolResult) { + statusIcon = toolResult.is_error === true ? "❌" : "✅"; + } + // Add to command summary (only external tools) + if (toolName === "Bash") { + const formattedCommand = formatBashCommand(input.command || ""); + commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); + } else if (toolName.startsWith("mcp__")) { + const mcpName = formatMcpName(toolName); + commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); + } else { + // Handle other external tools (if any) + commandSummary.push(`* ${statusIcon} ${toolName}`); + } + } + } + } + } + // Add command summary + if (commandSummary.length > 0) { + for (const cmd of commandSummary) { + markdown += `${cmd}\n`; + } + } else { + markdown += "No commands or tools used.\n"; + } + // Add Information section from the last entry with result metadata + markdown += "\n## 📊 Information\n\n"; + // Find the last entry with metadata + const lastEntry = logEntries[logEntries.length - 1]; + if ( + lastEntry && + (lastEntry.num_turns || + lastEntry.duration_ms || + lastEntry.total_cost_usd || + lastEntry.usage) + ) { + if (lastEntry.num_turns) { + markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; + } + if (lastEntry.duration_ms) { + const durationSec = Math.round(lastEntry.duration_ms / 1000); + const minutes = Math.floor(durationSec / 60); + const seconds = durationSec % 60; + markdown += `**Duration:** ${minutes}m ${seconds}s\n\n`; + } + if (lastEntry.total_cost_usd) { + markdown += `**Total Cost:** $${lastEntry.total_cost_usd.toFixed(4)}\n\n`; + } + if (lastEntry.usage) { + const usage = lastEntry.usage; + if (usage.input_tokens || usage.output_tokens) { + markdown += `**Token Usage:**\n`; + if (usage.input_tokens) + markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; + if 
(usage.cache_creation_input_tokens) + markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; + if (usage.cache_read_input_tokens) + markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; + if (usage.output_tokens) + markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; + markdown += "\n"; + } + } + if ( + lastEntry.permission_denials && + lastEntry.permission_denials.length > 0 + ) { + markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; + } + } + markdown += "\n## 🤖 Reasoning\n\n"; + // Second pass: process assistant messages in sequence + for (const entry of logEntries) { + if (entry.type === "assistant" && entry.message?.content) { + for (const content of entry.message.content) { + if (content.type === "text" && content.text) { + // Add reasoning text directly (no header) + const text = content.text.trim(); + if (text && text.length > 0) { + markdown += text + "\n\n"; + } + } else if (content.type === "tool_use") { + // Process tool use with its result + const toolResult = toolUsePairs.get(content.id); + const toolMarkdown = formatToolUse(content, toolResult); + if (toolMarkdown) { + markdown += toolMarkdown; + } + } + } + } + } + return { markdown, mcpFailures }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return { + markdown: `## Agent Log Summary\n\nError parsing Claude log: ${errorMessage}\n`, + mcpFailures: [], + }; + } + } + /** + * Formats initialization information from system init entry + * @param {any} initEntry - The system init entry containing tools, mcp_servers, etc. 
+ * @returns {{markdown: string, mcpFailures: string[]}} Result with formatted markdown string and MCP failure list + */ + function formatInitializationSummary(initEntry) { + let markdown = ""; + const mcpFailures = []; + // Display model and session info + if (initEntry.model) { + markdown += `**Model:** ${initEntry.model}\n\n`; + } + if (initEntry.session_id) { + markdown += `**Session ID:** ${initEntry.session_id}\n\n`; + } + if (initEntry.cwd) { + // Show a cleaner path by removing common prefixes + const cleanCwd = initEntry.cwd.replace( + /^\/home\/runner\/work\/[^\/]+\/[^\/]+/, + "." + ); + markdown += `**Working Directory:** ${cleanCwd}\n\n`; + } + // Display MCP servers status + if (initEntry.mcp_servers && Array.isArray(initEntry.mcp_servers)) { + markdown += "**MCP Servers:**\n"; + for (const server of initEntry.mcp_servers) { + const statusIcon = + server.status === "connected" + ? "✅" + : server.status === "failed" + ? "❌" + : "❓"; + markdown += `- ${statusIcon} ${server.name} (${server.status})\n`; + // Track failed MCP servers + if (server.status === "failed") { + mcpFailures.push(server.name); + } + } + markdown += "\n"; + } + // Display tools by category + if (initEntry.tools && Array.isArray(initEntry.tools)) { + markdown += "**Available Tools:**\n"; + // Categorize tools + /** @type {{ [key: string]: string[] }} */ + const categories = { + Core: [], + "File Operations": [], + "Git/GitHub": [], + MCP: [], + Other: [], + }; + for (const tool of initEntry.tools) { + if ( + ["Task", "Bash", "BashOutput", "KillBash", "ExitPlanMode"].includes( + tool + ) + ) { + categories["Core"].push(tool); + } else if ( + [ + "Read", + "Edit", + "MultiEdit", + "Write", + "LS", + "Grep", + "Glob", + "NotebookEdit", + ].includes(tool) + ) { + categories["File Operations"].push(tool); + } else if (tool.startsWith("mcp__github__")) { + categories["Git/GitHub"].push(formatMcpName(tool)); + } else if ( + tool.startsWith("mcp__") || + ["ListMcpResourcesTool", 
"ReadMcpResourceTool"].includes(tool) + ) { + categories["MCP"].push( + tool.startsWith("mcp__") ? formatMcpName(tool) : tool + ); + } else { + categories["Other"].push(tool); + } + } + // Display categories with tools + for (const [category, tools] of Object.entries(categories)) { + if (tools.length > 0) { + markdown += `- **${category}:** ${tools.length} tools\n`; + if (tools.length <= 5) { + // Show all tools if 5 or fewer + markdown += ` - ${tools.join(", ")}\n`; + } else { + // Show first few and count + markdown += ` - ${tools.slice(0, 3).join(", ")}, and ${tools.length - 3} more\n`; + } + } + } + markdown += "\n"; + } + // Display slash commands if available + if (initEntry.slash_commands && Array.isArray(initEntry.slash_commands)) { + const commandCount = initEntry.slash_commands.length; + markdown += `**Slash Commands:** ${commandCount} available\n`; + if (commandCount <= 10) { + markdown += `- ${initEntry.slash_commands.join(", ")}\n`; + } else { + markdown += `- ${initEntry.slash_commands.slice(0, 5).join(", ")}, and ${commandCount - 5} more\n`; + } + markdown += "\n"; + } + return { markdown, mcpFailures }; + } + /** + * Formats a tool use entry with its result into markdown + * @param {any} toolUse - The tool use object containing name, input, etc. + * @param {any} toolResult - The corresponding tool result object + * @returns {string} Formatted markdown string + */ + function formatToolUse(toolUse, toolResult) { + const toolName = toolUse.name; + const input = toolUse.input || {}; + // Skip TodoWrite except the very last one (we'll handle this separately) + if (toolName === "TodoWrite") { + return ""; // Skip for now, would need global context to find the last one + } + // Helper function to determine status icon + function getStatusIcon() { + if (toolResult) { + return toolResult.is_error === true ? 
"❌" : "✅"; + } + return "❓"; // Unknown by default + } + let markdown = ""; + const statusIcon = getStatusIcon(); + switch (toolName) { + case "Bash": + const command = input.command || ""; + const description = input.description || ""; + // Format the command to be single line + const formattedCommand = formatBashCommand(command); + if (description) { + markdown += `${description}:\n\n`; + } + markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; + break; + case "Read": + const filePath = input.file_path || input.path || ""; + const relativePath = filePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); // Remove /home/runner/work/repo/repo/ prefix + markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; + break; + case "Write": + case "Edit": + case "MultiEdit": + const writeFilePath = input.file_path || input.path || ""; + const writeRelativePath = writeFilePath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); + markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; + break; + case "Grep": + case "Glob": + const query = input.query || input.pattern || ""; + markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; + break; + case "LS": + const lsPath = input.path || ""; + const lsRelativePath = lsPath.replace( + /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, + "" + ); + markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; + break; + default: + // Handle MCP calls and other tools + if (toolName.startsWith("mcp__")) { + const mcpName = formatMcpName(toolName); + const params = formatMcpParameters(input); + markdown += `${statusIcon} ${mcpName}(${params})\n\n`; + } else { + // Generic tool formatting - show the tool name and main parameters + const keys = Object.keys(input); + if (keys.length > 0) { + // Try to find the most important parameter + const mainParam = + keys.find(k => + ["query", "command", "path", "file_path", "content"].includes(k) + ) || keys[0]; + const value = String(input[mainParam] || 
""); + if (value) { + markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; + } else { + markdown += `${statusIcon} ${toolName}\n\n`; + } + } else { + markdown += `${statusIcon} ${toolName}\n\n`; + } + } + } + return markdown; + } + /** + * Formats MCP tool name from internal format to display format + * @param {string} toolName - The raw tool name (e.g., mcp__github__search_issues) + * @returns {string} Formatted tool name (e.g., github::search_issues) + */ + function formatMcpName(toolName) { + // Convert mcp__github__search_issues to github::search_issues + if (toolName.startsWith("mcp__")) { + const parts = toolName.split("__"); + if (parts.length >= 3) { + const provider = parts[1]; // github, etc. + const method = parts.slice(2).join("_"); // search_issues, etc. + return `${provider}::${method}`; + } + } + return toolName; + } + /** + * Formats MCP parameters into a human-readable string + * @param {Record} input - The input object containing parameters + * @returns {string} Formatted parameters string + */ + function formatMcpParameters(input) { + const keys = Object.keys(input); + if (keys.length === 0) return ""; + const paramStrs = []; + for (const key of keys.slice(0, 4)) { + // Show up to 4 parameters + const value = String(input[key] || ""); + paramStrs.push(`${key}: ${truncateString(value, 40)}`); + } + if (keys.length > 4) { + paramStrs.push("..."); + } + return paramStrs.join(", "); + } + /** + * Formats a bash command by normalizing whitespace and escaping + * @param {string} command - The raw bash command string + * @returns {string} Formatted and escaped command string + */ + function formatBashCommand(command) { + if (!command) return ""; + // Convert multi-line commands to single line by replacing newlines with spaces + // and collapsing multiple spaces + let formatted = command + .replace(/\n/g, " ") // Replace newlines with spaces + .replace(/\r/g, " ") // Replace carriage returns with spaces + .replace(/\t/g, " ") // 
Replace tabs with spaces + .replace(/\s+/g, " ") // Collapse multiple spaces into one + .trim(); // Remove leading/trailing whitespace + // Escape backticks to prevent markdown issues + formatted = formatted.replace(/`/g, "\\`"); + // Truncate if too long (keep reasonable length for summary) + const maxLength = 80; + if (formatted.length > maxLength) { + formatted = formatted.substring(0, maxLength) + "..."; + } + return formatted; + } + /** + * Truncates a string to a maximum length with ellipsis + * @param {string} str - The string to truncate + * @param {number} maxLength - Maximum allowed length + * @returns {string} Truncated string with ellipsis if needed + */ + function truncateString(str, maxLength) { + if (!str) return ""; + if (str.length <= maxLength) return str; + return str.substring(0, maxLength) + "..."; + } + // Export for testing + if (typeof module !== "undefined" && module.exports) { + module.exports = { + parseClaudeLog, + formatToolUse, + formatInitializationSummary, + formatBashCommand, + truncateString, + }; + } + main(); + - name: Upload agent logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-mcp-configuration.log + path: /tmp/test-mcp-configuration.log + if-no-files-found: warn + + create_issue: + needs: test-mcp-configuration + runs-on: ubuntu-latest + permissions: + contents: read + issues: write + timeout-minutes: 10 + outputs: + issue_number: ${{ steps.create_issue.outputs.issue_number }} + issue_url: ${{ steps.create_issue.outputs.issue_url }} + steps: + - name: Check team membership for workflow + id: check-team-member + uses: actions/github-script@v7 + env: + GITHUB_AW_REQUIRED_ROLES: admin,maintainer + with: + script: | + async function setCancelled(message) { + try { + await github.rest.actions.cancelWorkflowRun({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.runId, + }); + core.info(`Cancellation requested for this workflow run: ${message}`); + } catch (error) { + const 
errorMessage = error instanceof Error ? error.message : String(error); + core.warning(`Failed to cancel workflow run: ${errorMessage}`); + core.setFailed(message); // Fallback if API call fails + } + } + async function main() { + const { eventName } = context; + // skip check for safe events + const safeEvents = ["workflow_dispatch", "workflow_run", "schedule"]; + if (safeEvents.includes(eventName)) { + core.info(`✅ Event ${eventName} does not require validation`); + return; + } + const actor = context.actor; + const { owner, repo } = context.repo; + const requiredPermissionsEnv = process.env.GITHUB_AW_REQUIRED_ROLES; + const requiredPermissions = requiredPermissionsEnv + ? requiredPermissionsEnv.split(",").filter(p => p.trim() !== "") + : []; + if (!requiredPermissions || requiredPermissions.length === 0) { + core.error( + "❌ Configuration error: Required permissions not specified. Contact repository administrator." + ); + await setCancelled( + "Configuration error: Required permissions not specified" + ); + return; + } + // Check if the actor has the required repository permissions + try { + core.debug( + `Checking if user '${actor}' has required permissions for ${owner}/${repo}` + ); + core.debug(`Required permissions: ${requiredPermissions.join(", ")}`); + const repoPermission = + await github.rest.repos.getCollaboratorPermissionLevel({ + owner: owner, + repo: repo, + username: actor, + }); + const permission = repoPermission.data.permission; + core.debug(`Repository permission level: ${permission}`); + // Check if user has one of the required permission levels + for (const requiredPerm of requiredPermissions) { + if ( + permission === requiredPerm || + (requiredPerm === "maintainer" && permission === "maintain") + ) { + core.info(`✅ User has ${permission} access to repository`); + return; + } + } + core.warning( + `User permission '${permission}' does not meet requirements: ${requiredPermissions.join(", ")}` + ); + } catch (repoError) { + const errorMessage = 
+ repoError instanceof Error ? repoError.message : String(repoError); + core.error(`Repository permission check failed: ${errorMessage}`); + await setCancelled(`Repository permission check failed: ${errorMessage}`); + return; + } + // Cancel the workflow when permission check fails + core.warning( + `❌ Access denied: Only authorized users can trigger this workflow. User '${actor}' is not authorized. Required permissions: ${requiredPermissions.join(", ")}` + ); + await setCancelled( + `Access denied: User '${actor}' is not authorized. Required permissions: ${requiredPermissions.join(", ")}` + ); + } + await main(); + - name: Create Output Issue + id: create_issue + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: ${{ needs.test-mcp-configuration.outputs.output }} + GITHUB_AW_ISSUE_TITLE_PREFIX: "[Test] " + with: + script: | + async function main() { + // Check if we're in staged mode + const isStaged = process.env.GITHUB_AW_SAFE_OUTPUTS_STAGED === "true"; + // Read the validated output content from environment variable + const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!outputContent) { + core.info("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + if (outputContent.trim() === "") { + core.info("Agent output content is empty"); + return; + } + core.info(`Agent output content length: ${outputContent.length}`); + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + core.setFailed( + `Error parsing agent output JSON: ${error instanceof Error ? 
error.message : String(error)}` + ); + return; + } + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.info("No valid items found in agent output"); + return; + } + // Find all create-issue items + const createIssueItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "create-issue" + ); + if (createIssueItems.length === 0) { + core.info("No create-issue items found in agent output"); + return; + } + core.info(`Found ${createIssueItems.length} create-issue item(s)`); + // If in staged mode, emit step summary instead of creating issues + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Create Issues Preview\n\n"; + summaryContent += + "The following issues would be created if staged mode was disabled:\n\n"; + for (let i = 0; i < createIssueItems.length; i++) { + const item = createIssueItems[i]; + summaryContent += `### Issue ${i + 1}\n`; + summaryContent += `**Title:** ${item.title || "No title provided"}\n\n`; + if (item.body) { + summaryContent += `**Body:**\n${item.body}\n\n`; + } + if (item.labels && item.labels.length > 0) { + summaryContent += `**Labels:** ${item.labels.join(", ")}\n\n`; + } + summaryContent += "---\n\n"; + } + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Issue creation preview written to step summary"); + return; + } + // Check if we're in an issue context (triggered by an issue event) + const parentIssueNumber = context.payload?.issue?.number; + // Parse labels from environment variable (comma-separated string) + const labelsEnv = process.env.GITHUB_AW_ISSUE_LABELS; + let envLabels = labelsEnv + ? 
labelsEnv + .split(",") + .map(/** @param {string} label */ label => label.trim()) + .filter(/** @param {string} label */ label => label) + : []; + const createdIssues = []; + // Process each create-issue item + for (let i = 0; i < createIssueItems.length; i++) { + const createIssueItem = createIssueItems[i]; + core.info( + `Processing create-issue item ${i + 1}/${createIssueItems.length}: title=${createIssueItem.title}, bodyLength=${createIssueItem.body.length}` + ); + // Merge environment labels with item-specific labels + let labels = [...envLabels]; + if (createIssueItem.labels && Array.isArray(createIssueItem.labels)) { + labels = [...labels, ...createIssueItem.labels].filter(Boolean); + } + // Extract title and body from the JSON item + let title = createIssueItem.title ? createIssueItem.title.trim() : ""; + let bodyLines = createIssueItem.body.split("\n"); + // If no title was found, use the body content as title (or a default) + if (!title) { + title = createIssueItem.body || "Agent Output"; + } + // Apply title prefix if provided via environment variable + const titlePrefix = process.env.GITHUB_AW_ISSUE_TITLE_PREFIX; + if (titlePrefix && !title.startsWith(titlePrefix)) { + title = titlePrefix + title; + } + if (parentIssueNumber) { + core.info("Detected issue context, parent issue #" + parentIssueNumber); + // Add reference to parent issue in the child issue body + bodyLines.push(`Related to #${parentIssueNumber}`); + } + // Add AI disclaimer with run id, run htmlurl + // Add AI disclaimer with workflow run information + const runId = context.runId; + const runUrl = context.payload.repository + ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/actions/runs/${runId}`; + bodyLines.push( + ``, + ``, + `> Generated by Agentic Workflow [Run](${runUrl})`, + "" + ); + // Prepare the body content + const body = bodyLines.join("\n").trim(); + core.info(`Creating issue with title: ${title}`); + core.info(`Labels: ${labels}`); + core.info(`Body length: ${body.length}`); + try { + // Create the issue using GitHub API + const { data: issue } = await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: title, + body: body, + labels: labels, + }); + core.info("Created issue #" + issue.number + ": " + issue.html_url); + createdIssues.push(issue); + // If we have a parent issue, add a comment to it referencing the new child issue + if (parentIssueNumber) { + try { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: parentIssueNumber, + body: `Created related issue: #${issue.number}`, + }); + core.info("Added comment to parent issue #" + parentIssueNumber); + } catch (error) { + core.info( + `Warning: Could not add comment to parent issue: ${error instanceof Error ? error.message : String(error)}` + ); + } + } + // Set output for the last created issue (for backward compatibility) + if (i === createIssueItems.length - 1) { + core.setOutput("issue_number", issue.number); + core.setOutput("issue_url", issue.html_url); + } + } catch (error) { + const errorMessage = + error instanceof Error ? 
error.message : String(error); + // Special handling for disabled issues repository + if ( + errorMessage.includes("Issues has been disabled in this repository") + ) { + core.info( + `⚠ Cannot create issue "${title}": Issues are disabled for this repository` + ); + core.info( + "Consider enabling issues in repository settings if you want to create issues automatically" + ); + continue; // Skip this issue but continue processing others + } + core.error(`✗ Failed to create issue "${title}": ${errorMessage}`); + throw error; + } + } + // Write summary for all created issues + if (createdIssues.length > 0) { + let summaryContent = "\n\n## GitHub Issues\n"; + for (const issue of createdIssues) { + summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + core.info(`Successfully created ${createdIssues.length} issue(s)`); + } + await main(); + + create_issue_comment: + needs: test-mcp-configuration + if: github.event.issue.number || github.event.pull_request.number + runs-on: ubuntu-latest + permissions: + contents: read + issues: write + pull-requests: write + timeout-minutes: 10 + outputs: + comment_id: ${{ steps.create_comment.outputs.comment_id }} + comment_url: ${{ steps.create_comment.outputs.comment_url }} + steps: + - name: Add Issue Comment + id: create_comment + uses: actions/github-script@v7 + env: + GITHUB_AW_AGENT_OUTPUT: ${{ needs.test-mcp-configuration.outputs.output }} + with: + script: | + async function main() { + // Check if we're in staged mode + const isStaged = process.env.GITHUB_AW_SAFE_OUTPUTS_STAGED === "true"; + // Read the validated output content from environment variable + const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; + if (!outputContent) { + core.info("No GITHUB_AW_AGENT_OUTPUT environment variable found"); + return; + } + if (outputContent.trim() === "") { + core.info("Agent output content is empty"); + return; + } + core.info(`Agent 
output content length: ${outputContent.length}`); + // Parse the validated output JSON + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + core.setFailed( + `Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}` + ); + return; + } + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.info("No valid items found in agent output"); + return; + } + // Find all add-issue-comment items + const commentItems = validatedOutput.items.filter( + /** @param {any} item */ item => item.type === "add-issue-comment" + ); + if (commentItems.length === 0) { + core.info("No add-issue-comment items found in agent output"); + return; + } + core.info(`Found ${commentItems.length} add-issue-comment item(s)`); + // If in staged mode, emit step summary instead of creating comments + if (isStaged) { + let summaryContent = "## 🎭 Staged Mode: Add Comments Preview\n\n"; + summaryContent += + "The following comments would be added if staged mode was disabled:\n\n"; + for (let i = 0; i < commentItems.length; i++) { + const item = commentItems[i]; + summaryContent += `### Comment ${i + 1}\n`; + if (item.issue_number) { + summaryContent += `**Target Issue:** #${item.issue_number}\n\n`; + } else { + summaryContent += `**Target:** Current issue/PR\n\n`; + } + summaryContent += `**Body:**\n${item.body || "No content provided"}\n\n`; + summaryContent += "---\n\n"; + } + // Write to step summary + await core.summary.addRaw(summaryContent).write(); + core.info("📝 Comment creation preview written to step summary"); + return; + } + // Get the target configuration from environment variable + const commentTarget = process.env.GITHUB_AW_COMMENT_TARGET || "triggering"; + core.info(`Comment target configuration: ${commentTarget}`); + // Check if we're in an issue or pull request context + const isIssueContext = + context.eventName === "issues" || context.eventName === "issue_comment"; + const 
isPRContext = + context.eventName === "pull_request" || + context.eventName === "pull_request_review" || + context.eventName === "pull_request_review_comment"; + // Validate context based on target configuration + if (commentTarget === "triggering" && !isIssueContext && !isPRContext) { + core.info( + 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' + ); + return; + } + const createdComments = []; + // Process each comment item + for (let i = 0; i < commentItems.length; i++) { + const commentItem = commentItems[i]; + core.info( + `Processing add-issue-comment item ${i + 1}/${commentItems.length}: bodyLength=${commentItem.body.length}` + ); + // Determine the issue/PR number and comment endpoint for this comment + let issueNumber; + let commentEndpoint; + if (commentTarget === "*") { + // For target "*", we need an explicit issue number from the comment item + if (commentItem.issue_number) { + issueNumber = parseInt(commentItem.issue_number, 10); + if (isNaN(issueNumber) || issueNumber <= 0) { + core.info( + `Invalid issue number specified: ${commentItem.issue_number}` + ); + continue; + } + commentEndpoint = "issues"; + } else { + core.info( + 'Target is "*" but no issue_number specified in comment item' + ); + continue; + } + } else if (commentTarget && commentTarget !== "triggering") { + // Explicit issue number specified in target + issueNumber = parseInt(commentTarget, 10); + if (isNaN(issueNumber) || issueNumber <= 0) { + core.info( + `Invalid issue number in target configuration: ${commentTarget}` + ); + continue; + } + commentEndpoint = "issues"; + } else { + // Default behavior: use triggering issue/PR + if (isIssueContext) { + if (context.payload.issue) { + issueNumber = context.payload.issue.number; + commentEndpoint = "issues"; + } else { + core.info("Issue context detected but no issue found in payload"); + continue; + } + } else if (isPRContext) { + if (context.payload.pull_request) { + issueNumber = 
context.payload.pull_request.number; + commentEndpoint = "issues"; // PR comments use the issues API endpoint + } else { + core.info( + "Pull request context detected but no pull request found in payload" + ); + continue; + } + } + } + if (!issueNumber) { + core.info("Could not determine issue or pull request number"); + continue; + } + // Extract body from the JSON item + let body = commentItem.body.trim(); + // Add AI disclaimer with run id, run htmlurl + const runId = context.runId; + const runUrl = context.payload.repository + ? `${context.payload.repository.html_url}/actions/runs/${runId}` + : `https://github.com/actions/runs/${runId}`; + body += `\n\n> Generated by Agentic Workflow [Run](${runUrl})\n`; + core.info(`Creating comment on ${commentEndpoint} #${issueNumber}`); + core.info(`Comment content length: ${body.length}`); + try { + // Create the comment using GitHub API + const { data: comment } = await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + body: body, + }); + core.info("Created comment #" + comment.id + ": " + comment.html_url); + createdComments.push(comment); + // Set output for the last created comment (for backward compatibility) + if (i === commentItems.length - 1) { + core.setOutput("comment_id", comment.id); + core.setOutput("comment_url", comment.html_url); + } + } catch (error) { + core.error( + `✗ Failed to create comment: ${error instanceof Error ? 
error.message : String(error)}` + ); + throw error; + } + } + // Write summary for all created comments + if (createdComments.length > 0) { + let summaryContent = "\n\n## GitHub Comments\n"; + for (const comment of createdComments) { + summaryContent += `- Comment #${comment.id}: [View Comment](${comment.html_url})\n`; + } + await core.summary.addRaw(summaryContent).write(); + } + core.info(`Successfully created ${createdComments.length} comment(s)`); + return createdComments; + } + await main(); + diff --git a/.github/workflows/test-mcp.md b/.github/workflows/test-mcp.md deleted file mode 100644 index cca2b6ddf8..0000000000 --- a/.github/workflows/test-mcp.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -on: - workflow_dispatch: {} - -permissions: read-all - -tools: - github: - docker_image_version: "latest" - playwright: - docker_image_version: "v1.41.0" - allowed_domains: ["example.com", "*.github.com"] - -safe-outputs: - create-issue: - title-prefix: "[Test] " - add-issue-comment: - -engine: claude ---- - -# Test MCP Configuration - -This is a test workflow to demonstrate MCP configuration generation and server launching. \ No newline at end of file diff --git a/pkg/cli/mcp.go b/pkg/cli/mcp.go index 27fa690bd6..7c3ffe4fe7 100644 --- a/pkg/cli/mcp.go +++ b/pkg/cli/mcp.go @@ -21,4 +21,4 @@ This command provides subcommands for inspecting, configuring, and launching MCP cmd.AddCommand(NewMCPInspectSubCommand()) return cmd -} \ No newline at end of file +} diff --git a/pkg/cli/mcp_inspect.go b/pkg/cli/mcp_inspect.go index 9d78deb8fc..5706986180 100644 --- a/pkg/cli/mcp_inspect.go +++ b/pkg/cli/mcp_inspect.go @@ -193,7 +193,7 @@ func listWorkflowsWithMCP(workflowsDir string, verbose bool) error { func NewMCPInspectCommand() *cobra.Command { cmd := NewMCPInspectSubCommand() cmd.Use = "mcp-inspect [workflow-file]" - + // Update examples to show legacy command syntax cmd.Long = `Inspect MCP servers used by a workflow and display available tools, resources, and roots. 
@@ -216,7 +216,7 @@ The command will: - Display results in formatted tables with error details NOTE: This command is deprecated. Use 'gh aw mcp inspect' instead.` - + return cmd } @@ -276,7 +276,7 @@ The command will: return generateMCPConfig(workflowFile, verbose) } - // Handle launch servers flag + // Handle launch servers flag if launchServers { return launchMCPServers(workflowFile, serverFilter, verbose) } @@ -545,7 +545,7 @@ func generateMCPConfig(workflowFile string, verbose bool) error { // Create Claude engine to generate MCP configuration claudeEngine := workflow.NewClaudeEngine() - + // Extract tools from frontmatter tools := make(map[string]any) if toolsSection, hasTools := workflowData.Frontmatter["tools"]; hasTools { @@ -562,7 +562,7 @@ func generateMCPConfig(workflowFile string, verbose bool) error { // Build list of MCP servers to include in config mcpTools := []string{} - + // Add existing MCP server configurations for _, config := range mcpConfigs { mcpTools = append(mcpTools, config.Name) @@ -581,7 +581,7 @@ func generateMCPConfig(workflowFile string, verbose bool) error { mcpTools = append(mcpTools, "github") } } - + if _, hasPlaywright := tools["playwright"]; hasPlaywright { found := false for _, existing := range mcpTools { @@ -594,7 +594,7 @@ func generateMCPConfig(workflowFile string, verbose bool) error { mcpTools = append(mcpTools, "playwright") } } - + if _, hasSafeOutputs := workflowData.Frontmatter["safe-outputs"]; hasSafeOutputs { found := false for _, existing := range mcpTools { @@ -622,7 +622,7 @@ func generateMCPConfig(workflowFile string, verbose bool) error { // Generate the MCP configuration var mcpConfigBuilder strings.Builder claudeEngine.RenderMCPConfig(&mcpConfigBuilder, tools, mcpTools, workflowDataForMCP) - + fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Generated MCP configuration for %d server(s)", len(mcpTools)))) fmt.Println(console.FormatInfoMessage("MCP Configuration:")) fmt.Println() @@ -791,10 +791,10 @@ 
func launchMCPServers(workflowFile string, serverFilter string, verbose bool) er }() fmt.Println(console.FormatInfoMessage("MCP servers are running. Press Ctrl+C to stop all servers.")) - + // Wait for interrupt signal fmt.Println(console.FormatInfoMessage("Use 'gh aw mcp inspect --inspector' to launch the MCP inspector tool")) - + // Keep the process alive until interrupted select {} } diff --git a/pkg/cli/mcp_test.go b/pkg/cli/mcp_test.go new file mode 100644 index 0000000000..ae5e5d0c90 --- /dev/null +++ b/pkg/cli/mcp_test.go @@ -0,0 +1,135 @@ +package cli + +import ( + "strings" + "testing" + + "github.com/spf13/cobra" +) + +func TestNewMCPCommand(t *testing.T) { + tests := []struct { + name string + test func(t *testing.T) + }{ + { + name: "mcp command structure", + test: func(t *testing.T) { + cmd := NewMCPCommand() + + if cmd.Use != "mcp" { + t.Errorf("Expected Use to be 'mcp', got '%s'", cmd.Use) + } + + if cmd.Short == "" { + t.Error("Expected Short description to be set") + } + + if !strings.Contains(cmd.Long, "Model Context Protocol") { + t.Error("Expected Long description to mention Model Context Protocol") + } + }, + }, + { + name: "mcp command has inspect subcommand", + test: func(t *testing.T) { + cmd := NewMCPCommand() + + var inspectCmd *cobra.Command + for _, subCmd := range cmd.Commands() { + if subCmd.Use == "inspect [workflow-file]" { + inspectCmd = subCmd + break + } + } + + if inspectCmd == nil { + t.Error("Expected 'inspect' subcommand to be present") + } + + if inspectCmd != nil && inspectCmd.Short == "" { + t.Error("Expected inspect subcommand to have Short description") + } + }, + }, + { + name: "mcp inspect has required flags", + test: func(t *testing.T) { + cmd := NewMCPInspectSubCommand() + + expectedFlags := []string{"server", "tool", "verbose", "inspector", "generate-config", "launch-servers"} + + for _, flagName := range expectedFlags { + flag := cmd.Flags().Lookup(flagName) + if flag == nil { + t.Errorf("Expected flag '%s' to be 
present", flagName) + } + } + }, + }, + { + name: "legacy mcp-inspect command", + test: func(t *testing.T) { + cmd := NewMCPInspectCommand() + + if cmd.Use != "mcp-inspect [workflow-file]" { + t.Errorf("Expected legacy command Use to be 'mcp-inspect [workflow-file]', got '%s'", cmd.Use) + } + + if !strings.Contains(cmd.Long, "deprecated") { + t.Error("Expected legacy command Long description to mention it's deprecated") + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, tt.test) + } +} + +func TestMCPInspectSubCommand(t *testing.T) { + tests := []struct { + name string + test func(t *testing.T) + }{ + { + name: "inspect subcommand structure", + test: func(t *testing.T) { + cmd := NewMCPInspectSubCommand() + + if cmd.Use != "inspect [workflow-file]" { + t.Errorf("Expected Use to be 'inspect [workflow-file]', got '%s'", cmd.Use) + } + + if cmd.Short == "" { + t.Error("Expected Short description to be set") + } + + expectedFeatures := []string{"generate MCP configurations", "Claude agentic engine", "github, playwright, and safe-outputs"} + for _, feature := range expectedFeatures { + if !strings.Contains(cmd.Long, feature) { + t.Errorf("Expected Long description to mention '%s'", feature) + } + } + }, + }, + { + name: "inspect subcommand examples", + test: func(t *testing.T) { + cmd := NewMCPInspectSubCommand() + + expectedExamples := []string{"--generate-config", "--launch-servers", "--inspector"} + for _, example := range expectedExamples { + if !strings.Contains(cmd.Long, example) { + t.Errorf("Expected Long description to include example with '%s'", example) + } + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, tt.test) + } +} From c31f8c1d413518d06d7c22ce08ef2dd5dc4c984b Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 15 Sep 2025 23:58:15 +0000 Subject: [PATCH 4/7] Remove legacy mcp-inspect command and update docs to use new mcp inspect syntax Co-authored-by: pelikhan 
<4175913+pelikhan@users.noreply.github.com> --- cmd/gh-aw/main.go | 1 - docs/src/content/docs/guides/mcps.md | 26 +++++++++++-------- docs/src/content/docs/reference/tools.md | 2 +- docs/src/content/docs/tools/cli.md | 20 +++++++++------ pkg/cli/mcp_inspect.go | 32 +----------------------- pkg/cli/mcp_test.go | 14 ----------- 6 files changed, 31 insertions(+), 64 deletions(-) diff --git a/cmd/gh-aw/main.go b/cmd/gh-aw/main.go index 90957914d1..f46a5323d6 100644 --- a/cmd/gh-aw/main.go +++ b/cmd/gh-aw/main.go @@ -374,7 +374,6 @@ func init() { rootCmd.AddCommand(disableCmd) rootCmd.AddCommand(cli.NewLogsCommand()) rootCmd.AddCommand(cli.NewMCPCommand()) - rootCmd.AddCommand(cli.NewMCPInspectCommand()) // Legacy backwards compatibility rootCmd.AddCommand(versionCmd) } diff --git a/docs/src/content/docs/guides/mcps.md b/docs/src/content/docs/guides/mcps.md index c9904e2be5..c9fedc64ff 100644 --- a/docs/src/content/docs/guides/mcps.md +++ b/docs/src/content/docs/guides/mcps.md @@ -41,7 +41,7 @@ tools: > [!TIP] > You can inspect test your MCP configuration by running
-> `gh aw mcp-inspect ` +> `gh aw mcp inspect ` ### Engine Compatibility @@ -168,7 +168,7 @@ When using an agentic engine that allows tool whitelisting (e.g. Claude), this g > [!TIP] > You can inspect the tools available for an Agentic Workflow by running
-> `gh aw mcp-inspect ` +> `gh aw mcp inspect ` ### Wildcard Access @@ -236,23 +236,29 @@ The compiler enforces these network permission rules: ### MCP Server Inspection -Use the `mcp-inspect` command to analyze and troubleshoot MCP configurations: +Use the `mcp inspect` command to analyze and troubleshoot MCP configurations: ```bash # List all workflows with MCP servers configured -gh aw mcp-inspect +gh aw mcp inspect # Inspect all MCP servers in a specific workflow -gh aw mcp-inspect my-workflow +gh aw mcp inspect my-workflow # Inspect a specific MCP server in a workflow -gh aw mcp-inspect my-workflow --server trello-server +gh aw mcp inspect my-workflow --server trello-server # Enable verbose output for debugging connection issues -gh aw mcp-inspect my-workflow --verbose +gh aw mcp inspect my-workflow --verbose # Launch official MCP inspector web interface -gh aw mcp-inspect my-workflow --inspector +gh aw mcp inspect my-workflow --inspector + +# Generate MCP server configuration using Claude engine +gh aw mcp inspect my-workflow --generate-config + +# Launch all configured MCP servers +gh aw mcp inspect my-workflow --launch-servers ### Common Issues and Solutions @@ -278,13 +284,13 @@ Error: Tool 'my_tool' not found **Solutions**: 1. Add tool to `allowed` list -2. Check tool name spelling (use `gh aw mcp-inspect` to see available tools) +2. Check tool name spelling (use `gh aw mcp inspect` to see available tools) 3. 
Verify MCP server is running correctly ## Related Documentation - [Tools Configuration](../reference/tools/) - Complete tools reference -- [CLI Commands](../tools/cli/) - CLI commands including `mcp-inspect` +- [CLI Commands](../tools/cli/) - CLI commands including `mcp inspect` - [Include Directives](../reference/include-directives/) - Modularizing workflows with includes - [Frontmatter Options](../reference/frontmatter/) - All configuration options - [Workflow Structure](../reference/workflow-structure/) - Directory organization diff --git a/docs/src/content/docs/reference/tools.md b/docs/src/content/docs/reference/tools.md index 7f1383ad34..185435ed81 100644 --- a/docs/src/content/docs/reference/tools.md +++ b/docs/src/content/docs/reference/tools.md @@ -25,7 +25,7 @@ All tools declared in included components are merged into the final workflow. > [!TIP] > You can inspect the tools available for an Agentic Workflow by running
-> `gh aw mcp-inspect ` +> `gh aw mcp inspect ` ## GitHub Tools (`github:`) diff --git a/docs/src/content/docs/tools/cli.md b/docs/src/content/docs/tools/cli.md index 67f6403c10..72b4419a0f 100644 --- a/docs/src/content/docs/tools/cli.md +++ b/docs/src/content/docs/tools/cli.md @@ -189,28 +189,34 @@ gh aw logs --format json -o ./exports/ ## 🔍 MCP Server Inspection -The `mcp-inspect` command allows you to analyze and troubleshoot Model Context Protocol (MCP) servers configured in your workflows. +The `mcp inspect` command allows you to analyze and troubleshoot Model Context Protocol (MCP) servers configured in your workflows. > **📘 Complete MCP Guide**: For comprehensive MCP setup, configuration examples, and troubleshooting, see the [MCPs](../guides/mcps/). ```bash # List all workflows that contain MCP server configurations -gh aw mcp-inspect +gh aw mcp inspect # Inspect all MCP servers in a specific workflow -gh aw mcp-inspect workflow-name +gh aw mcp inspect workflow-name # Filter inspection to specific servers by name -gh aw mcp-inspect workflow-name --server server-name +gh aw mcp inspect workflow-name --server server-name # Show detailed information about a specific tool (requires --server) -gh aw mcp-inspect workflow-name --server server-name --tool tool-name +gh aw mcp inspect workflow-name --server server-name --tool tool-name # Enable verbose output with connection details -gh aw mcp-inspect workflow-name --verbose +gh aw mcp inspect workflow-name --verbose # Launch the official @modelcontextprotocol/inspector web interface -gh aw mcp-inspect workflow-name --inspector +gh aw mcp inspect workflow-name --inspector + +# Generate MCP server configuration using Claude engine +gh aw mcp inspect workflow-name --generate-config + +# Launch all configured MCP servers +gh aw mcp inspect workflow-name --launch-servers ``` **Key Features:** diff --git a/pkg/cli/mcp_inspect.go b/pkg/cli/mcp_inspect.go index 5706986180..d09729cde6 100644 --- a/pkg/cli/mcp_inspect.go +++ 
b/pkg/cli/mcp_inspect.go @@ -184,41 +184,11 @@ func listWorkflowsWithMCP(workflowsDir string, verbose bool) error { for _, workflow := range workflowsWithMCP { fmt.Printf(" • %s\n", workflow) } - fmt.Printf("\nRun 'gh aw mcp-inspect ' to inspect MCP servers in a specific workflow.\n") + fmt.Printf("\nRun 'gh aw mcp inspect ' to inspect MCP servers in a specific workflow.\n") return nil } -// NewMCPInspectCommand creates the mcp-inspect command (legacy, kept for backwards compatibility) -func NewMCPInspectCommand() *cobra.Command { - cmd := NewMCPInspectSubCommand() - cmd.Use = "mcp-inspect [workflow-file]" - - // Update examples to show legacy command syntax - cmd.Long = `Inspect MCP servers used by a workflow and display available tools, resources, and roots. - -This command starts each MCP server configured in the workflow, queries its capabilities, -and displays the results in a formatted table. It supports stdio, Docker, and HTTP MCP servers. - -Examples: - gh aw mcp-inspect # List workflows with MCP servers - gh aw mcp-inspect weekly-research # Inspect MCP servers in weekly-research.md - gh aw mcp-inspect repomind --server repo-mind # Inspect only the repo-mind server - gh aw mcp-inspect weekly-research --server github --tool create_issue # Show details for a specific tool - gh aw mcp-inspect weekly-research -v # Verbose output with detailed connection info - gh aw mcp-inspect weekly-research --inspector # Launch @modelcontextprotocol/inspector - -The command will: -- Parse the workflow file to extract MCP server configurations -- Start each MCP server (stdio, docker, http) -- Query available tools, resources, and roots -- Validate required secrets are available -- Display results in formatted tables with error details - -NOTE: This command is deprecated. 
Use 'gh aw mcp inspect' instead.` - - return cmd -} // NewMCPInspectSubCommand creates the mcp inspect subcommand func NewMCPInspectSubCommand() *cobra.Command { diff --git a/pkg/cli/mcp_test.go b/pkg/cli/mcp_test.go index ae5e5d0c90..a23f331bbe 100644 --- a/pkg/cli/mcp_test.go +++ b/pkg/cli/mcp_test.go @@ -67,20 +67,6 @@ func TestNewMCPCommand(t *testing.T) { } }, }, - { - name: "legacy mcp-inspect command", - test: func(t *testing.T) { - cmd := NewMCPInspectCommand() - - if cmd.Use != "mcp-inspect [workflow-file]" { - t.Errorf("Expected legacy command Use to be 'mcp-inspect [workflow-file]', got '%s'", cmd.Use) - } - - if !strings.Contains(cmd.Long, "deprecated") { - t.Error("Expected legacy command Long description to mention it's deprecated") - } - }, - }, } for _, tt := range tests { From e872f849e25a607509ba81ac4ef9cb7518e5f784 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 16 Sep 2025 00:21:57 +0000 Subject: [PATCH 5/7] Always generate MCP config in memory, remove legacy server launching code and flags Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com> --- docs/src/content/docs/guides/mcps.md | 10 +- docs/src/content/docs/tools/cli.md | 9 - pkg/cli/mcp_inspect.go | 633 +++++---------------------- pkg/cli/mcp_test.go | 6 +- 4 files changed, 112 insertions(+), 546 deletions(-) diff --git a/docs/src/content/docs/guides/mcps.md b/docs/src/content/docs/guides/mcps.md index c9fedc64ff..ef9e547d0f 100644 --- a/docs/src/content/docs/guides/mcps.md +++ b/docs/src/content/docs/guides/mcps.md @@ -250,15 +250,9 @@ gh aw mcp inspect my-workflow --server trello-server # Enable verbose output for debugging connection issues gh aw mcp inspect my-workflow --verbose +``` -# Launch official MCP inspector web interface -gh aw mcp inspect my-workflow --inspector - -# Generate MCP server configuration using Claude engine -gh aw mcp inspect my-workflow --generate-config - -# Launch all configured 
MCP servers -gh aw mcp inspect my-workflow --launch-servers +The `mcp inspect` command automatically generates MCP server configurations using the Claude agentic engine and displays them along with the inspection results. ### Common Issues and Solutions diff --git a/docs/src/content/docs/tools/cli.md b/docs/src/content/docs/tools/cli.md index 72b4419a0f..6ec1364db4 100644 --- a/docs/src/content/docs/tools/cli.md +++ b/docs/src/content/docs/tools/cli.md @@ -208,15 +208,6 @@ gh aw mcp inspect workflow-name --server server-name --tool tool-name # Enable verbose output with connection details gh aw mcp inspect workflow-name --verbose - -# Launch the official @modelcontextprotocol/inspector web interface -gh aw mcp inspect workflow-name --inspector - -# Generate MCP server configuration using Claude engine -gh aw mcp inspect workflow-name --generate-config - -# Launch all configured MCP servers -gh aw mcp inspect workflow-name --launch-servers ``` **Key Features:** diff --git a/pkg/cli/mcp_inspect.go b/pkg/cli/mcp_inspect.go index d09729cde6..fdac173033 100644 --- a/pkg/cli/mcp_inspect.go +++ b/pkg/cli/mcp_inspect.go @@ -3,11 +3,8 @@ package cli import ( "fmt" "os" - "os/exec" "path/filepath" "strings" - "sync" - "time" "github.com/githubnext/gh-aw/pkg/console" "github.com/githubnext/gh-aw/pkg/parser" @@ -121,6 +118,110 @@ func InspectWorkflowMCP(workflowFile string, serverFilter string, toolFilter str } } + // Always generate MCP configuration in memory using Claude engine + if verbose { + fmt.Println(console.FormatInfoMessage("Generating MCP configuration using Claude agentic engine...")) + } + + if err := generateAndDisplayMCPConfig(workflowData, verbose); err != nil { + if verbose { + fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to generate MCP configuration: %v", err))) + } + } + + return nil +} + +// generateAndDisplayMCPConfig generates MCP configuration using Claude engine and displays it +func generateAndDisplayMCPConfig(workflowData 
*parser.FrontmatterResult, verbose bool) error { + // Create Claude engine to generate MCP configuration + claudeEngine := workflow.NewClaudeEngine() + + // Extract tools from frontmatter + tools := make(map[string]any) + if toolsSection, hasTools := workflowData.Frontmatter["tools"]; hasTools { + if toolsMap, ok := toolsSection.(map[string]any); ok { + tools = toolsMap + } + } + + // Extract MCP tool names from existing configurations + mcpConfigs, err := parser.ExtractMCPConfigurations(workflowData.Frontmatter, "") + if err != nil { + return fmt.Errorf("failed to extract MCP configurations: %w", err) + } + + // Build list of MCP servers to include in config + mcpTools := []string{} + + // Add existing MCP server configurations + for _, config := range mcpConfigs { + mcpTools = append(mcpTools, config.Name) + } + + // Add standard servers if configured (avoid duplicates) + if _, hasGithub := tools["github"]; hasGithub { + found := false + for _, existing := range mcpTools { + if existing == "github" { + found = true + break + } + } + if !found { + mcpTools = append(mcpTools, "github") + } + } + + if _, hasPlaywright := tools["playwright"]; hasPlaywright { + found := false + for _, existing := range mcpTools { + if existing == "playwright" { + found = true + break + } + } + if !found { + mcpTools = append(mcpTools, "playwright") + } + } + + if _, hasSafeOutputs := workflowData.Frontmatter["safe-outputs"]; hasSafeOutputs { + found := false + for _, existing := range mcpTools { + if existing == "safe-outputs" { + found = true + break + } + } + if !found { + mcpTools = append(mcpTools, "safe-outputs") + } + } + + if len(mcpTools) == 0 { + if verbose { + fmt.Println(console.FormatInfoMessage("No MCP tools found for configuration generation")) + } + return nil + } + + // Create a minimal WorkflowData for MCP config generation + workflowDataForMCP := &workflow.WorkflowData{ + Tools: tools, + NetworkPermissions: nil, // Will be populated if needed + } + + // Generate the 
MCP configuration + var mcpConfigBuilder strings.Builder + claudeEngine.RenderMCPConfig(&mcpConfigBuilder, tools, mcpTools, workflowDataForMCP) + + fmt.Println() + fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Generated MCP configuration for %d server(s)", len(mcpTools)))) + fmt.Println(console.FormatInfoMessage("Claude Engine MCP Configuration:")) + fmt.Println() + fmt.Println(mcpConfigBuilder.String()) + return nil } @@ -194,17 +295,14 @@ func listWorkflowsWithMCP(workflowsDir string, verbose bool) error { func NewMCPInspectSubCommand() *cobra.Command { var serverFilter string var toolFilter string - var spawnInspector bool - var generateConfig bool - var launchServers bool cmd := &cobra.Command{ Use: "inspect [workflow-file]", Short: "Inspect MCP servers and list available tools, resources, and roots", Long: `Inspect MCP servers used by a workflow and display available tools, resources, and roots. -This command can generate MCP configurations using the Claude agentic engine, parse them, -and launch all configured servers including github, playwright, and safe-outputs. +This command generates MCP configurations using the Claude agentic engine and launches +configured servers including github, playwright, and safe-outputs. 
Examples: gh aw mcp inspect # List workflows with MCP servers @@ -212,13 +310,10 @@ Examples: gh aw mcp inspect repomind --server repo-mind # Inspect only the repo-mind server gh aw mcp inspect weekly-research --server github --tool create_issue # Show details for a specific tool gh aw mcp inspect weekly-research -v # Verbose output with detailed connection info - gh aw mcp inspect weekly-research --inspector # Launch @modelcontextprotocol/inspector - gh aw mcp inspect weekly-research --generate-config # Generate MCP config using Claude engine - gh aw mcp inspect weekly-research --launch-servers # Launch all configured servers The command will: - Parse the workflow file to extract MCP server configurations -- Optionally generate MCP configuration using the Claude agentic engine +- Generate MCP configuration using the Claude agentic engine - Start each MCP server (stdio, docker, http) - Query available tools, resources, and roots - Validate required secrets are available @@ -241,20 +336,7 @@ The command will: return fmt.Errorf("--tool flag requires --server flag to be specified") } - // Handle generate config flag - if generateConfig { - return generateMCPConfig(workflowFile, verbose) - } - - // Handle launch servers flag - if launchServers { - return launchMCPServers(workflowFile, serverFilter, verbose) - } - // Handle spawn inspector flag - if spawnInspector { - return spawnMCPInspector(workflowFile, serverFilter, verbose) - } return InspectWorkflowMCP(workflowFile, serverFilter, toolFilter, verbose) }, @@ -263,508 +345,7 @@ The command will: cmd.Flags().StringVar(&serverFilter, "server", "", "Filter to inspect only the specified MCP server") cmd.Flags().StringVar(&toolFilter, "tool", "", "Show detailed information about a specific tool (requires --server)") cmd.Flags().BoolP("verbose", "v", false, "Enable verbose output with detailed connection information") - cmd.Flags().BoolVar(&spawnInspector, "inspector", false, "Launch the official 
@modelcontextprotocol/inspector tool") - cmd.Flags().BoolVar(&generateConfig, "generate-config", false, "Generate MCP server configuration using Claude agentic engine") - cmd.Flags().BoolVar(&launchServers, "launch-servers", false, "Launch all configured MCP servers (github, playwright, safe-outputs)") return cmd } -// spawnMCPInspector launches the official @modelcontextprotocol/inspector tool -// and spawns any stdio MCP servers beforehand -func spawnMCPInspector(workflowFile string, serverFilter string, verbose bool) error { - // Check if npx is available - if _, err := exec.LookPath("npx"); err != nil { - return fmt.Errorf("npx not found. Please install Node.js and npm to use the MCP inspector: %w", err) - } - - var mcpConfigs []parser.MCPServerConfig - var serverProcesses []*exec.Cmd - var wg sync.WaitGroup - - // If workflow file is specified, extract MCP configurations and start servers - if workflowFile != "" { - workflowsDir := workflow.GetWorkflowDir() - - // Normalize the workflow file path - if !strings.HasSuffix(workflowFile, ".md") { - workflowFile += ".md" - } - - workflowPath := filepath.Join(workflowsDir, workflowFile) - if !filepath.IsAbs(workflowPath) { - cwd, err := os.Getwd() - if err != nil { - return fmt.Errorf("failed to get current directory: %w", err) - } - workflowPath = filepath.Join(cwd, workflowPath) - } - - // Check if file exists - if _, err := os.Stat(workflowPath); os.IsNotExist(err) { - return fmt.Errorf("workflow file not found: %s", workflowPath) - } - - // Parse the workflow file to extract MCP configurations - content, err := os.ReadFile(workflowPath) - if err != nil { - return err - } - - workflowData, err := parser.ExtractFrontmatterFromContent(string(content)) - if err != nil { - return err - } - - // Extract MCP configurations - mcpConfigs, err = parser.ExtractMCPConfigurations(workflowData.Frontmatter, serverFilter) - if err != nil { - return err - } - - if len(mcpConfigs) > 0 { - 
fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Found %d MCP server(s) in workflow:", len(mcpConfigs)))) - for _, config := range mcpConfigs { - fmt.Printf(" • %s (%s)\n", config.Name, config.Type) - } - fmt.Println() - - // Start stdio MCP servers in the background - stdioServers := []parser.MCPServerConfig{} - for _, config := range mcpConfigs { - if config.Type == "stdio" { - stdioServers = append(stdioServers, config) - } - } - - if len(stdioServers) > 0 { - fmt.Println(console.FormatInfoMessage("Starting stdio MCP servers...")) - - for _, config := range stdioServers { - if verbose { - fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Starting server: %s", config.Name))) - } - - // Create the command for the MCP server - var cmd *exec.Cmd - if config.Container != "" { - // Docker container mode - args := append([]string{"run", "--rm", "-i"}, config.Args...) - cmd = exec.Command("docker", args...) - } else { - // Direct command mode - if config.Command == "" { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Skipping server %s: no command specified", config.Name))) - continue - } - cmd = exec.Command(config.Command, config.Args...) 
- } - - // Set environment variables - cmd.Env = os.Environ() - for key, value := range config.Env { - // Resolve environment variable references - resolvedValue := os.ExpandEnv(value) - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, resolvedValue)) - } - - // Start the server process - if err := cmd.Start(); err != nil { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to start server %s: %v", config.Name, err))) - continue - } - - serverProcesses = append(serverProcesses, cmd) - - // Monitor the process in the background - wg.Add(1) - go func(serverCmd *exec.Cmd, serverName string) { - defer wg.Done() - if err := serverCmd.Wait(); err != nil && verbose { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Server %s exited with error: %v", serverName, err))) - } - }(cmd, config.Name) - - if verbose { - fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Started server: %s (PID: %d)", config.Name, cmd.Process.Pid))) - } - } - - // Give servers a moment to start up - time.Sleep(2 * time.Second) - fmt.Println(console.FormatSuccessMessage("All stdio servers started successfully")) - } - - fmt.Println(console.FormatInfoMessage("Configuration details for MCP inspector:")) - for _, config := range mcpConfigs { - fmt.Printf("\n📡 %s (%s):\n", config.Name, config.Type) - switch config.Type { - case "stdio": - if config.Container != "" { - fmt.Printf(" Container: %s\n", config.Container) - } else { - fmt.Printf(" Command: %s\n", config.Command) - if len(config.Args) > 0 { - fmt.Printf(" Args: %s\n", strings.Join(config.Args, " ")) - } - } - case "http": - fmt.Printf(" URL: %s\n", config.URL) - } - if len(config.Env) > 0 { - fmt.Printf(" Environment Variables: %v\n", config.Env) - } - } - fmt.Println() - } else { - fmt.Println(console.FormatWarningMessage("No MCP servers found in workflow")) - return nil - } - } - - // Set up cleanup function for stdio servers - defer func() { - if len(serverProcesses) > 0 { - 
fmt.Println(console.FormatInfoMessage("Cleaning up MCP servers...")) - for i, cmd := range serverProcesses { - if cmd.Process != nil { - if err := cmd.Process.Kill(); err != nil && verbose { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to kill server process %d: %v", cmd.Process.Pid, err))) - } - } - // Give each process a chance to clean up - if i < len(serverProcesses)-1 { - time.Sleep(100 * time.Millisecond) - } - } - // Wait for all background goroutines to finish (with timeout) - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - select { - case <-done: - // All finished - case <-time.After(5 * time.Second): - // Timeout waiting for cleanup - if verbose { - fmt.Println(console.FormatWarningMessage("Timeout waiting for server cleanup")) - } - } - } - }() - - fmt.Println(console.FormatInfoMessage("Launching @modelcontextprotocol/inspector...")) - fmt.Println(console.FormatInfoMessage("Visit http://localhost:5173 after the inspector starts")) - if len(serverProcesses) > 0 { - fmt.Println(console.FormatInfoMessage(fmt.Sprintf("%d stdio MCP server(s) are running in the background", len(serverProcesses)))) - fmt.Println(console.FormatInfoMessage("Configure them in the inspector using the details shown above")) - } - - cmd := exec.Command("npx", "@modelcontextprotocol/inspector") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Stdin = os.Stdin - - return cmd.Run() -} - -// generateMCPConfig uses the Claude agentic engine to generate MCP server configuration -func generateMCPConfig(workflowFile string, verbose bool) error { - if workflowFile == "" { - return fmt.Errorf("workflow file is required for config generation") - } - - workflowsDir := workflow.GetWorkflowDir() - - // Normalize the workflow file path - if !strings.HasSuffix(workflowFile, ".md") { - workflowFile += ".md" - } - - workflowPath := filepath.Join(workflowsDir, workflowFile) - if !filepath.IsAbs(workflowPath) { - cwd, err := os.Getwd() - if err != 
nil { - return fmt.Errorf("failed to get current directory: %w", err) - } - workflowPath = filepath.Join(cwd, workflowPath) - } - - // Check if file exists - if _, err := os.Stat(workflowPath); os.IsNotExist(err) { - return fmt.Errorf("workflow file not found: %s", workflowPath) - } - - if verbose { - fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Generating MCP configuration for: %s", workflowPath))) - } - - // Parse the workflow file - content, err := os.ReadFile(workflowPath) - if err != nil { - return fmt.Errorf("failed to read workflow file: %w", err) - } - - workflowData, err := parser.ExtractFrontmatterFromContent(string(content)) - if err != nil { - return fmt.Errorf("failed to parse workflow file: %w", err) - } - - // Create Claude engine to generate MCP configuration - claudeEngine := workflow.NewClaudeEngine() - - // Extract tools from frontmatter - tools := make(map[string]any) - if toolsSection, hasTools := workflowData.Frontmatter["tools"]; hasTools { - if toolsMap, ok := toolsSection.(map[string]any); ok { - tools = toolsMap - } - } - - // Extract MCP tool names from existing configurations - mcpConfigs, err := parser.ExtractMCPConfigurations(workflowData.Frontmatter, "") - if err != nil { - return fmt.Errorf("failed to extract MCP configurations: %w", err) - } - - // Build list of MCP servers to include in config - mcpTools := []string{} - - // Add existing MCP server configurations - for _, config := range mcpConfigs { - mcpTools = append(mcpTools, config.Name) - } - - // Add standard servers if configured (avoid duplicates) - if _, hasGithub := tools["github"]; hasGithub { - found := false - for _, existing := range mcpTools { - if existing == "github" { - found = true - break - } - } - if !found { - mcpTools = append(mcpTools, "github") - } - } - - if _, hasPlaywright := tools["playwright"]; hasPlaywright { - found := false - for _, existing := range mcpTools { - if existing == "playwright" { - found = true - break - } - } - if !found { - 
mcpTools = append(mcpTools, "playwright") - } - } - - if _, hasSafeOutputs := workflowData.Frontmatter["safe-outputs"]; hasSafeOutputs { - found := false - for _, existing := range mcpTools { - if existing == "safe-outputs" { - found = true - break - } - } - if !found { - mcpTools = append(mcpTools, "safe-outputs") - } - } - - if len(mcpTools) == 0 { - fmt.Println(console.FormatWarningMessage("No MCP tools found in workflow")) - return nil - } - - // Create a minimal WorkflowData for MCP config generation - workflowDataForMCP := &workflow.WorkflowData{ - Tools: tools, - NetworkPermissions: nil, // Will be populated if needed - } - - // Generate the MCP configuration - var mcpConfigBuilder strings.Builder - claudeEngine.RenderMCPConfig(&mcpConfigBuilder, tools, mcpTools, workflowDataForMCP) - - fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Generated MCP configuration for %d server(s)", len(mcpTools)))) - fmt.Println(console.FormatInfoMessage("MCP Configuration:")) - fmt.Println() - fmt.Println(mcpConfigBuilder.String()) - - return nil -} - -// launchMCPServers launches all MCP servers configured in the workflow -func launchMCPServers(workflowFile string, serverFilter string, verbose bool) error { - if workflowFile == "" { - return fmt.Errorf("workflow file is required for launching servers") - } - - workflowsDir := workflow.GetWorkflowDir() - - // Normalize the workflow file path - if !strings.HasSuffix(workflowFile, ".md") { - workflowFile += ".md" - } - - workflowPath := filepath.Join(workflowsDir, workflowFile) - if !filepath.IsAbs(workflowPath) { - cwd, err := os.Getwd() - if err != nil { - return fmt.Errorf("failed to get current directory: %w", err) - } - workflowPath = filepath.Join(cwd, workflowPath) - } - - // Check if file exists - if _, err := os.Stat(workflowPath); os.IsNotExist(err) { - return fmt.Errorf("workflow file not found: %s", workflowPath) - } - - if verbose { - fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Launching MCP servers 
from: %s", workflowPath))) - } - - // Parse the workflow file to extract MCP configurations - content, err := os.ReadFile(workflowPath) - if err != nil { - return err - } - - workflowData, err := parser.ExtractFrontmatterFromContent(string(content)) - if err != nil { - return err - } - - // Extract MCP configurations - mcpConfigs, err := parser.ExtractMCPConfigurations(workflowData.Frontmatter, serverFilter) - if err != nil { - return err - } - - if len(mcpConfigs) == 0 { - if serverFilter != "" { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("No MCP servers matching filter '%s' found in workflow", serverFilter))) - } else { - fmt.Println(console.FormatWarningMessage("No MCP servers found in workflow")) - } - return nil - } - - fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Found %d MCP server(s) to launch:", len(mcpConfigs)))) - for _, config := range mcpConfigs { - fmt.Printf(" • %s (%s)\n", config.Name, config.Type) - } - fmt.Println() - - var serverProcesses []*exec.Cmd - var wg sync.WaitGroup - - // Launch each MCP server - for _, config := range mcpConfigs { - if verbose { - fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Starting server: %s", config.Name))) - } - - // Create the command for the MCP server - var cmd *exec.Cmd - if config.Container != "" { - // Docker container mode - args := append([]string{"run", "--rm", "-i"}, config.Args...) - cmd = exec.Command("docker", args...) - } else { - // Direct command mode - if config.Command == "" { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Skipping server %s: no command specified", config.Name))) - continue - } - cmd = exec.Command(config.Command, config.Args...) 
- } - - // Set environment variables - cmd.Env = os.Environ() - for key, value := range config.Env { - // Resolve environment variable references - resolvedValue := os.ExpandEnv(value) - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, resolvedValue)) - } - - // Start the server process - if err := cmd.Start(); err != nil { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to start server %s: %v", config.Name, err))) - continue - } - - serverProcesses = append(serverProcesses, cmd) - - // Monitor the process in the background - wg.Add(1) - go func(serverCmd *exec.Cmd, serverName string) { - defer wg.Done() - if err := serverCmd.Wait(); err != nil && verbose { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Server %s exited with error: %v", serverName, err))) - } - }(cmd, config.Name) - - if verbose { - fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Started server: %s (PID: %d)", config.Name, cmd.Process.Pid))) - } - } - - if len(serverProcesses) == 0 { - return fmt.Errorf("no MCP servers were successfully started") - } - - // Give servers a moment to start up - time.Sleep(2 * time.Second) - fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Successfully launched %d MCP server(s)", len(serverProcesses)))) - - // Set up cleanup function for servers - defer func() { - if len(serverProcesses) > 0 { - fmt.Println(console.FormatInfoMessage("Cleaning up MCP servers...")) - for i, cmd := range serverProcesses { - if cmd.Process != nil { - if err := cmd.Process.Kill(); err != nil && verbose { - fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to kill server process %d: %v", cmd.Process.Pid, err))) - } - } - // Give each process a chance to clean up - if i < len(serverProcesses)-1 { - time.Sleep(100 * time.Millisecond) - } - } - // Wait for all background goroutines to finish (with timeout) - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - select { - case <-done: - // All finished - case 
<-time.After(5 * time.Second): - // Timeout waiting for cleanup - if verbose { - fmt.Println(console.FormatWarningMessage("Timeout waiting for server cleanup")) - } - } - } - }() - - fmt.Println(console.FormatInfoMessage("MCP servers are running. Press Ctrl+C to stop all servers.")) - - // Wait for interrupt signal - fmt.Println(console.FormatInfoMessage("Use 'gh aw mcp inspect --inspector' to launch the MCP inspector tool")) - - // Keep the process alive until interrupted - select {} -} diff --git a/pkg/cli/mcp_test.go b/pkg/cli/mcp_test.go index a23f331bbe..92ea22de56 100644 --- a/pkg/cli/mcp_test.go +++ b/pkg/cli/mcp_test.go @@ -57,7 +57,7 @@ func TestNewMCPCommand(t *testing.T) { test: func(t *testing.T) { cmd := NewMCPInspectSubCommand() - expectedFlags := []string{"server", "tool", "verbose", "inspector", "generate-config", "launch-servers"} + expectedFlags := []string{"server", "tool", "verbose"} for _, flagName := range expectedFlags { flag := cmd.Flags().Lookup(flagName) @@ -92,7 +92,7 @@ func TestMCPInspectSubCommand(t *testing.T) { t.Error("Expected Short description to be set") } - expectedFeatures := []string{"generate MCP configurations", "Claude agentic engine", "github, playwright, and safe-outputs"} + expectedFeatures := []string{"generates MCP configurations", "Claude agentic engine", "github, playwright, and safe-outputs"} for _, feature := range expectedFeatures { if !strings.Contains(cmd.Long, feature) { t.Errorf("Expected Long description to mention '%s'", feature) @@ -105,7 +105,7 @@ func TestMCPInspectSubCommand(t *testing.T) { test: func(t *testing.T) { cmd := NewMCPInspectSubCommand() - expectedExamples := []string{"--generate-config", "--launch-servers", "--inspector"} + expectedExamples := []string{"--server", "--tool", "-v"} for _, example := range expectedExamples { if !strings.Contains(cmd.Long, example) { t.Errorf("Expected Long description to include example with '%s'", example) From 46562df70bc7fc56a9310a446c01c830ffd2a153 Mon Sep 
// MCPServerConfig describes how to launch a single MCP server: the executable,
// its arguments, and extra environment variables. Field names mirror the
// "mcpServers" entries in the generated JSON configuration.
type MCPServerConfig struct {
	Command string            `json:"command"`
	Args    []string          `json:"args"`
	Env     map[string]string `json:"env"`
}

// MCPConfig is the top-level shape of the generated MCP configuration:
// a map from server name to its launch configuration.
type MCPConfig struct {
	MCPServers map[string]MCPServerConfig `json:"mcpServers"`
}

// spawnMCPServersFromConfig extracts the MCP JSON configuration embedded in the
// generated shell script, resolves GitHub Actions template variables against the
// local environment, and starts one child process per configured server.
//
// Note on control flow: when at least one server starts successfully this
// function does NOT return — it blocks in `select {}` to keep the servers
// alive, and the interrupt handler goroutine terminates the whole process via
// os.Exit(0) after cleanup. It returns nil only when no servers were configured
// or none could be started, and an error only for extract/parse failures.
func spawnMCPServersFromConfig(configScript string, verbose bool) error {
	// Extract JSON from the generated shell script
	jsonConfig, err := extractJSONFromScript(configScript)
	if err != nil {
		return fmt.Errorf("failed to extract JSON from configuration script: %w", err)
	}

	if verbose {
		fmt.Println(console.FormatInfoMessage("Extracted MCP JSON configuration:"))
		fmt.Println(jsonConfig)
		fmt.Println()
	}

	// Replace GitHub Actions template variables with actual environment values
	resolvedConfig := resolveTemplateVariables(jsonConfig, verbose)

	if verbose {
		fmt.Println(console.FormatInfoMessage("Resolved MCP JSON configuration:"))
		fmt.Println(resolvedConfig)
		fmt.Println()
	}

	// Parse the JSON configuration
	var config MCPConfig
	if err := json.Unmarshal([]byte(resolvedConfig), &config); err != nil {
		return fmt.Errorf("failed to parse MCP configuration JSON: %w", err)
	}

	if len(config.MCPServers) == 0 {
		if verbose {
			fmt.Println(console.FormatInfoMessage("No MCP servers found in configuration to spawn"))
		}
		return nil
	}

	fmt.Println()
	fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Spawning %d MCP server(s) from generated configuration...", len(config.MCPServers))))

	var wg sync.WaitGroup
	var serverProcesses []*exec.Cmd

	// Start each server. A failure to start one server is reported as a
	// warning and does not abort the remaining servers.
	for serverName, serverConfig := range config.MCPServers {
		if verbose {
			fmt.Println(console.FormatInfoMessage(fmt.Sprintf("Starting MCP server: %s", serverName)))
		}

		// Create the command
		cmd := exec.Command(serverConfig.Command, serverConfig.Args...)

		// Set environment variables: inherit the parent environment, then
		// append the server-specific entries (later entries win for exec).
		cmd.Env = os.Environ()
		for key, value := range serverConfig.Env {
			// Resolve environment variable references (simple implementation
			// via os.ExpandEnv; only $VAR / ${VAR} forms are expanded)
			resolvedValue := os.ExpandEnv(value)
			cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, resolvedValue))
		}

		// Start the server process
		if err := cmd.Start(); err != nil {
			fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to start server %s: %v", serverName, err)))
			continue
		}

		serverProcesses = append(serverProcesses, cmd)

		// Monitor the process in the background so exits are reaped (no
		// zombies) and, in verbose mode, reported.
		wg.Add(1)
		go func(serverCmd *exec.Cmd, name string) {
			defer wg.Done()
			if err := serverCmd.Wait(); err != nil && verbose {
				fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Server %s exited with error: %v", name, err)))
			}
		}(cmd, serverName)

		if verbose {
			fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Started server: %s (PID: %d)", serverName, cmd.Process.Pid)))
		}
	}

	if len(serverProcesses) > 0 {
		fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Successfully started %d MCP server(s)", len(serverProcesses))))
		fmt.Println(console.FormatInfoMessage("Servers are running in the background"))
		fmt.Println(console.FormatInfoMessage("Press Ctrl+C to stop the inspection and cleanup servers"))

		// Set up cleanup on interrupt.
		// NOTE(review): only os.Interrupt (SIGINT) is handled here; a SIGTERM
		// would bypass this cleanup path — confirm that is acceptable.
		c := make(chan os.Signal, 1)
		signal.Notify(c, os.Interrupt)

		go func() {
			<-c
			fmt.Println()
			fmt.Println(console.FormatInfoMessage("Cleaning up MCP servers..."))
			for i, cmd := range serverProcesses {
				if cmd.Process != nil {
					// Kill sends SIGKILL immediately; servers get no chance to
					// shut down gracefully.
					if err := cmd.Process.Kill(); err != nil && verbose {
						fmt.Println(console.FormatWarningMessage(fmt.Sprintf("Failed to kill server process %d: %v", cmd.Process.Pid, err)))
					}
				}
				// Give each process a chance to clean up
				if i < len(serverProcesses)-1 {
					time.Sleep(100 * time.Millisecond)
				}
			}

			// Wait for all background monitor goroutines to finish (with timeout)
			done := make(chan struct{})
			go func() {
				wg.Wait()
				close(done)
			}()

			select {
			case <-done:
				// All finished
			case <-time.After(5 * time.Second):
				// Timeout waiting for cleanup
				if verbose {
					fmt.Println(console.FormatWarningMessage("Timeout waiting for server cleanup"))
				}
			}

			// Terminates the whole CLI process; control never returns to the
			// caller of spawnMCPServersFromConfig on this path.
			os.Exit(0)
		}()

		// Keep the main process alive to maintain servers; the interrupt
		// handler above is the only exit path from here (via os.Exit).
		select {}
	}

	return nil
}

// resolveTemplateVariables replaces GitHub Actions template variables
// (${{ env.* }}, ${{ secrets.* }}, ${{ toJSON(...) }}) in the generated JSON
// with values from the local environment, falling back to local-testing
// defaults when the corresponding variable is unset.
//
// NOTE(review): substitution is plain string replacement — values containing
// double quotes or backslashes are not JSON-escaped and would produce invalid
// JSON; acceptable for local inspection, but worth confirming.
func resolveTemplateVariables(jsonConfig string, verbose bool) string {
	// Replace common GitHub Actions template variables with environment values or defaults
	resolved := jsonConfig

	// Replace ${{ env.GITHUB_AW_SAFE_OUTPUTS }} with environment value or default
	if safeOutputs := os.Getenv("GITHUB_AW_SAFE_OUTPUTS"); safeOutputs != "" {
		resolved = strings.ReplaceAll(resolved, `"${{ env.GITHUB_AW_SAFE_OUTPUTS }}"`, fmt.Sprintf(`"%s"`, safeOutputs))
	} else {
		// Default to a temporary file for local testing
		resolved = strings.ReplaceAll(resolved, `"${{ env.GITHUB_AW_SAFE_OUTPUTS }}"`, `"/tmp/safe-outputs.jsonl"`)
	}

	// Replace ${{ toJSON(env.GITHUB_AW_SAFE_OUTPUTS_CONFIG) }} with environment value or default
	if safeOutputsConfig := os.Getenv("GITHUB_AW_SAFE_OUTPUTS_CONFIG"); safeOutputsConfig != "" {
		resolved = strings.ReplaceAll(resolved, `${{ toJSON(env.GITHUB_AW_SAFE_OUTPUTS_CONFIG) }}`, safeOutputsConfig)
	} else {
		// Default to empty config for local testing
		resolved = strings.ReplaceAll(resolved, `${{ toJSON(env.GITHUB_AW_SAFE_OUTPUTS_CONFIG) }}`, `"{}"`)
	}

	// Replace ${{ secrets.GITHUB_TOKEN }} with environment value or default
	// (GITHUB_TOKEN takes precedence over GH_TOKEN)
	if ghToken := os.Getenv("GITHUB_TOKEN"); ghToken != "" {
		resolved = strings.ReplaceAll(resolved, `"${{ secrets.GITHUB_TOKEN }}"`, fmt.Sprintf(`"%s"`, ghToken))
	} else if ghToken := os.Getenv("GH_TOKEN"); ghToken != "" {
		resolved = strings.ReplaceAll(resolved, `"${{ secrets.GITHUB_TOKEN }}"`, fmt.Sprintf(`"%s"`, ghToken))
	} else {
		if verbose {
			fmt.Println(console.FormatWarningMessage("GitHub token not found in environment (set GITHUB_TOKEN or GH_TOKEN)"))
		}
		// Placeholder keeps the JSON valid so the inspector can still load it
		resolved = strings.ReplaceAll(resolved, `"${{ secrets.GITHUB_TOKEN }}"`, `"your-github-token"`)
	}

	return resolved
}

// extractJSONFromScript extracts the JSON configuration from the generated
// shell script. The generator writes the config via a quoted heredoc
// (cat > file << 'EOF' ... EOF); capture group 1 of the pattern below is the
// heredoc body. Returns the trimmed JSON, or an error when no heredoc-wrapped
// configuration is found.
func extractJSONFromScript(script string) (string, error) {
	// Look for the JSON content between << 'EOF' and EOF (multiline with DOTALL flag);
	// (?s) lets `.` match newlines, and the lazy `.*?` stops at the first EOF terminator.
	re := regexp.MustCompile(`(?s)cat > [^<]+ << 'EOF'\s*\n(.*?)\n\s*EOF`)
	matches := re.FindStringSubmatch(script)

	if len(matches) < 2 {
		return "", fmt.Errorf("could not find JSON configuration in script")
	}

	return strings.TrimSpace(matches[1]), nil
}
Examples: gh aw mcp inspect # List workflows with MCP servers @@ -314,10 +521,11 @@ Examples: The command will: - Parse the workflow file to extract MCP server configurations - Generate MCP configuration using the Claude agentic engine -- Start each MCP server (stdio, docker, http) +- Spawn MCP servers from the generated configuration - Query available tools, resources, and roots - Validate required secrets are available -- Display results in formatted tables with error details`, +- Display results in formatted tables with error details +- Keep servers running until interrupted (Ctrl+C)`, Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { var workflowFile string From 65df90803b2cffe83553cf40b2138c964bbf3e71 Mon Sep 17 00:00:00 2001 From: Peli de Halleux Date: Tue, 16 Sep 2025 01:25:39 +0000 Subject: [PATCH 7/7] Implement feature X to enhance user experience and fix bug Y in module Z --- .github/workflows/test-mcp.lock.yml | 2611 --------------------------- 1 file changed, 2611 deletions(-) delete mode 100644 .github/workflows/test-mcp.lock.yml diff --git a/.github/workflows/test-mcp.lock.yml b/.github/workflows/test-mcp.lock.yml deleted file mode 100644 index a2f15df5e5..0000000000 --- a/.github/workflows/test-mcp.lock.yml +++ /dev/null @@ -1,2611 +0,0 @@ -# This file was automatically generated by gh-aw. DO NOT EDIT. 
-# To update this file, edit the corresponding .md file and run: -# gh aw compile - -name: "Test MCP Configuration" -on: - workflow_dispatch: {} - -permissions: {} - -concurrency: - group: "gh-aw-${{ github.workflow }}" - -run-name: "Test MCP Configuration" - -jobs: - test-mcp-configuration: - runs-on: ubuntu-latest - permissions: read-all - outputs: - output: ${{ steps.collect_output.outputs.output }} - steps: - - name: Checkout repository - uses: actions/checkout@v5 - - name: Generate Claude Settings - run: | - mkdir -p /tmp/.claude - cat > /tmp/.claude/settings.json << 'EOF' - { - "hooks": { - "PreToolUse": [ - { - "matcher": "WebFetch|WebSearch", - "hooks": [ - { - "type": "command", - "command": ".claude/hooks/network_permissions.py" - } - ] - } - ] - } - } - EOF - - name: Generate Network Permissions Hook - run: | - mkdir -p .claude/hooks - cat > .claude/hooks/network_permissions.py << 'EOF' - #!/usr/bin/env python3 - """ - Network permissions validator for Claude Code engine. - Generated by gh-aw from engine network permissions configuration. 
- """ - - import json - import sys - import urllib.parse - import re - - # Domain allow-list (populated during generation) - ALLOWED_DOMAINS = ["crl3.digicert.com","crl4.digicert.com","ocsp.digicert.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","crl.geotrust.com","ocsp.geotrust.com","crl.thawte.com","ocsp.thawte.com","crl.verisign.com","ocsp.verisign.com","crl.globalsign.com","ocsp.globalsign.com","crls.ssl.com","ocsp.ssl.com","crl.identrust.com","ocsp.identrust.com","crl.sectigo.com","ocsp.sectigo.com","crl.usertrust.com","ocsp.usertrust.com","s.symcb.com","s.symcd.com","json-schema.org","json.schemastore.org","archive.ubuntu.com","security.ubuntu.com","ppa.launchpad.net","keyserver.ubuntu.com","azure.archive.ubuntu.com","api.snapcraft.io","packagecloud.io","packages.cloud.google.com","packages.microsoft.com"] - - def extract_domain(url_or_query): - """Extract domain from URL or search query.""" - if not url_or_query: - return None - - if url_or_query.startswith(('http://', 'https://')): - return urllib.parse.urlparse(url_or_query).netloc.lower() - - # Check for domain patterns in search queries - match = re.search(r'site:([a-zA-Z0-9.-]+\.[a-zA-Z]{2,})', url_or_query) - if match: - return match.group(1).lower() - - return None - - def is_domain_allowed(domain): - """Check if domain is allowed.""" - if not domain: - # If no domain detected, allow only if not under deny-all policy - return bool(ALLOWED_DOMAINS) # False if empty list (deny-all), True if has domains - - # Empty allowed domains means deny all - if not ALLOWED_DOMAINS: - return False - - for pattern in ALLOWED_DOMAINS: - regex = pattern.replace('.', r'\.').replace('*', '.*') - if re.match(f'^{regex}$', domain): - return True - return False - - # Main logic - try: - data = json.load(sys.stdin) - tool_name = data.get('tool_name', '') - tool_input = data.get('tool_input', {}) - - if tool_name not in ['WebFetch', 'WebSearch']: - sys.exit(0) # Allow other tools - - target = tool_input.get('url') or 
tool_input.get('query', '') - domain = extract_domain(target) - - # For WebSearch, apply domain restrictions consistently - # If no domain detected in search query, check if restrictions are in place - if tool_name == 'WebSearch' and not domain: - # Since this hook is only generated when network permissions are configured, - # empty ALLOWED_DOMAINS means deny-all policy - if not ALLOWED_DOMAINS: # Empty list means deny all - print(f"Network access blocked: deny-all policy in effect", file=sys.stderr) - print(f"No domains are allowed for WebSearch", file=sys.stderr) - sys.exit(2) # Block under deny-all policy - else: - print(f"Network access blocked for web-search: no specific domain detected", file=sys.stderr) - print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr) - sys.exit(2) # Block general searches when domain allowlist is configured - - if not is_domain_allowed(domain): - print(f"Network access blocked for domain: {domain}", file=sys.stderr) - print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr) - sys.exit(2) # Block with feedback to Claude - - sys.exit(0) # Allow - - except Exception as e: - print(f"Network validation error: {e}", file=sys.stderr) - sys.exit(2) # Block on errors - - EOF - chmod +x .claude/hooks/network_permissions.py - - name: Setup agent output - id: setup_agent_output - uses: actions/github-script@v7 - with: - script: | - function main() { - const fs = require("fs"); - const crypto = require("crypto"); - // Generate a random filename for the output file - const randomId = crypto.randomBytes(8).toString("hex"); - const outputFile = `/tmp/aw_output_${randomId}.txt`; - // Ensure the /tmp directory exists - fs.mkdirSync("/tmp", { recursive: true }); - // We don't create the file, as the name is sufficiently random - // and some engines (Claude) fails first Write to the file - // if it exists and has not been read. 
- // Set the environment variable for subsequent steps - core.exportVariable("GITHUB_AW_SAFE_OUTPUTS", outputFile); - // Also set as step output for reference - core.setOutput("output_file", outputFile); - } - main(); - - name: Setup Safe Outputs Collector MCP - env: - GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true},\"create-issue\":true}" - run: | - mkdir -p /tmp/safe-outputs - cat > /tmp/safe-outputs/mcp-server.cjs << 'EOF' - const fs = require("fs"); - const encoder = new TextEncoder(); - const configEnv = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; - if (!configEnv) throw new Error("GITHUB_AW_SAFE_OUTPUTS_CONFIG not set"); - const safeOutputsConfig = JSON.parse(configEnv); - const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; - if (!outputFile) - throw new Error("GITHUB_AW_SAFE_OUTPUTS not set, no output file"); - const SERVER_INFO = { name: "safe-outputs-mcp-server", version: "1.0.0" }; - const debug = msg => process.stderr.write(`[${SERVER_INFO.name}] ${msg}\n`); - function writeMessage(obj) { - const json = JSON.stringify(obj); - debug(`send: ${json}`); - const message = json + "\n"; - const bytes = encoder.encode(message); - fs.writeSync(1, bytes); - } - class ReadBuffer { - append(chunk) { - this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk; - } - readMessage() { - if (!this._buffer) { - return null; - } - const index = this._buffer.indexOf("\n"); - if (index === -1) { - return null; - } - const line = this._buffer.toString("utf8", 0, index).replace(/\r$/, ""); - this._buffer = this._buffer.subarray(index + 1); - if (line.trim() === "") { - return this.readMessage(); // Skip empty lines recursively - } - try { - return JSON.parse(line); - } catch (error) { - throw new Error( - `Parse error: ${error instanceof Error ? 
error.message : String(error)}` - ); - } - } - } - const readBuffer = new ReadBuffer(); - function onData(chunk) { - readBuffer.append(chunk); - processReadBuffer(); - } - function processReadBuffer() { - while (true) { - try { - const message = readBuffer.readMessage(); - if (!message) { - break; - } - debug(`recv: ${JSON.stringify(message)}`); - handleMessage(message); - } catch (error) { - // For parse errors, we can't know the request id, so we shouldn't send a response - // according to JSON-RPC spec. Just log the error. - debug( - `Parse error: ${error instanceof Error ? error.message : String(error)}` - ); - } - } - } - function replyResult(id, result) { - if (id === undefined || id === null) return; // notification - const res = { jsonrpc: "2.0", id, result }; - writeMessage(res); - } - function replyError(id, code, message, data) { - // Don't send error responses for notifications (id is null/undefined) - if (id === undefined || id === null) { - debug(`Error for notification: ${message}`); - return; - } - const error = { code, message }; - if (data !== undefined) { - error.data = data; - } - const res = { - jsonrpc: "2.0", - id, - error, - }; - writeMessage(res); - } - function isToolEnabled(name) { - return safeOutputsConfig[name] && safeOutputsConfig[name].enabled; - } - function appendSafeOutput(entry) { - if (!outputFile) throw new Error("No output file configured"); - const jsonLine = JSON.stringify(entry) + "\n"; - try { - fs.appendFileSync(outputFile, jsonLine); - } catch (error) { - throw new Error( - `Failed to write to output file: ${error instanceof Error ? 
error.message : String(error)}` - ); - } - } - const defaultHandler = type => args => { - const entry = { ...(args || {}), type }; - appendSafeOutput(entry); - return { - content: [ - { - type: "text", - text: `success`, - }, - ], - }; - }; - const TOOLS = Object.fromEntries( - [ - { - name: "create-issue", - description: "Create a new GitHub issue", - inputSchema: { - type: "object", - required: ["title", "body"], - properties: { - title: { type: "string", description: "Issue title" }, - body: { type: "string", description: "Issue body/description" }, - labels: { - type: "array", - items: { type: "string" }, - description: "Issue labels", - }, - }, - additionalProperties: false, - }, - }, - { - name: "create-discussion", - description: "Create a new GitHub discussion", - inputSchema: { - type: "object", - required: ["title", "body"], - properties: { - title: { type: "string", description: "Discussion title" }, - body: { type: "string", description: "Discussion body/content" }, - category: { type: "string", description: "Discussion category" }, - }, - additionalProperties: false, - }, - }, - { - name: "add-issue-comment", - description: "Add a comment to a GitHub issue or pull request", - inputSchema: { - type: "object", - required: ["body"], - properties: { - body: { type: "string", description: "Comment body/content" }, - issue_number: { - type: "number", - description: "Issue or PR number (optional for current context)", - }, - }, - additionalProperties: false, - }, - }, - { - name: "create-pull-request", - description: "Create a new GitHub pull request", - inputSchema: { - type: "object", - required: ["title", "body"], - properties: { - title: { type: "string", description: "Pull request title" }, - body: { - type: "string", - description: "Pull request body/description", - }, - branch: { - type: "string", - description: - "Optional branch name (will be auto-generated if not provided)", - }, - labels: { - type: "array", - items: { type: "string" }, - 
description: "Optional labels to add to the PR", - }, - }, - additionalProperties: false, - }, - }, - { - name: "create-pull-request-review-comment", - description: "Create a review comment on a GitHub pull request", - inputSchema: { - type: "object", - required: ["path", "line", "body"], - properties: { - path: { - type: "string", - description: "File path for the review comment", - }, - line: { - type: ["number", "string"], - description: "Line number for the comment", - }, - body: { type: "string", description: "Comment body content" }, - start_line: { - type: ["number", "string"], - description: "Optional start line for multi-line comments", - }, - side: { - type: "string", - enum: ["LEFT", "RIGHT"], - description: "Optional side of the diff: LEFT or RIGHT", - }, - }, - additionalProperties: false, - }, - }, - { - name: "create-code-scanning-alert", - description: "Create a code scanning alert", - inputSchema: { - type: "object", - required: ["file", "line", "severity", "message"], - properties: { - file: { - type: "string", - description: "File path where the issue was found", - }, - line: { - type: ["number", "string"], - description: "Line number where the issue was found", - }, - severity: { - type: "string", - enum: ["error", "warning", "info", "note"], - description: "Severity level", - }, - message: { - type: "string", - description: "Alert message describing the issue", - }, - column: { - type: ["number", "string"], - description: "Optional column number", - }, - ruleIdSuffix: { - type: "string", - description: "Optional rule ID suffix for uniqueness", - }, - }, - additionalProperties: false, - }, - }, - { - name: "add-issue-label", - description: "Add labels to a GitHub issue or pull request", - inputSchema: { - type: "object", - required: ["labels"], - properties: { - labels: { - type: "array", - items: { type: "string" }, - description: "Labels to add", - }, - issue_number: { - type: "number", - description: "Issue or PR number (optional for current 
context)", - }, - }, - additionalProperties: false, - }, - }, - { - name: "update-issue", - description: "Update a GitHub issue", - inputSchema: { - type: "object", - properties: { - status: { - type: "string", - enum: ["open", "closed"], - description: "Optional new issue status", - }, - title: { type: "string", description: "Optional new issue title" }, - body: { type: "string", description: "Optional new issue body" }, - issue_number: { - type: ["number", "string"], - description: "Optional issue number for target '*'", - }, - }, - additionalProperties: false, - }, - }, - { - name: "push-to-pr-branch", - description: "Push changes to a pull request branch", - inputSchema: { - type: "object", - properties: { - message: { type: "string", description: "Optional commit message" }, - pull_request_number: { - type: ["number", "string"], - description: "Optional pull request number for target '*'", - }, - }, - additionalProperties: false, - }, - }, - { - name: "missing-tool", - description: - "Report a missing tool or functionality needed to complete tasks", - inputSchema: { - type: "object", - required: ["tool", "reason"], - properties: { - tool: { type: "string", description: "Name of the missing tool" }, - reason: { type: "string", description: "Why this tool is needed" }, - alternatives: { - type: "string", - description: "Possible alternatives or workarounds", - }, - }, - additionalProperties: false, - }, - }, - ] - .filter(({ name }) => isToolEnabled(name)) - .map(tool => [tool.name, tool]) - ); - debug(`v${SERVER_INFO.version} ready on stdio`); - debug(` output file: ${outputFile}`); - debug(` config: ${JSON.stringify(safeOutputsConfig)}`); - debug(` tools: ${Object.keys(TOOLS).join(", ")}`); - if (!Object.keys(TOOLS).length) - throw new Error("No tools enabled in configuration"); - function handleMessage(req) { - // Validate basic JSON-RPC structure - if (!req || typeof req !== "object") { - debug(`Invalid message: not an object`); - return; - } - if 
(req.jsonrpc !== "2.0") { - debug(`Invalid message: missing or invalid jsonrpc field`); - return; - } - const { id, method, params } = req; - // Validate method field - if (!method || typeof method !== "string") { - replyError(id, -32600, "Invalid Request: method must be a string"); - return; - } - try { - if (method === "initialize") { - const clientInfo = params?.clientInfo ?? {}; - console.error(`client initialized:`, clientInfo); - const protocolVersion = params?.protocolVersion ?? undefined; - const result = { - serverInfo: SERVER_INFO, - ...(protocolVersion ? { protocolVersion } : {}), - capabilities: { - tools: {}, - }, - }; - replyResult(id, result); - } else if (method === "tools/list") { - const list = []; - Object.values(TOOLS).forEach(tool => { - list.push({ - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - }); - }); - replyResult(id, { tools: list }); - } else if (method === "tools/call") { - const name = params?.name; - const args = params?.arguments ?? {}; - if (!name || typeof name !== "string") { - replyError(id, -32602, "Invalid params: 'name' must be a string"); - return; - } - const tool = TOOLS[name]; - if (!tool) { - replyError(id, -32601, `Tool not found: ${name}`); - return; - } - const handler = tool.handler || defaultHandler(tool.name); - const requiredFields = - tool.inputSchema && Array.isArray(tool.inputSchema.required) - ? tool.inputSchema.required - : []; - if (requiredFields.length) { - const missing = requiredFields.filter(f => args[f] === undefined); - if (missing.length) { - replyError( - id, - -32602, - `Invalid arguments: missing ${missing.map(m => `'${m}'`).join(", ")}` - ); - return; - } - } - const result = handler(args); - const content = result && result.content ? 
result.content : []; - replyResult(id, { content }); - } else if (/^notifications\//.test(method)) { - debug(`ignore ${method}`); - } else { - replyError(id, -32601, `Method not found: ${method}`); - } - } catch (e) { - replyError(id, -32603, "Internal error", { - message: e instanceof Error ? e.message : String(e), - }); - } - } - process.stdin.on("data", onData); - process.stdin.on("error", err => debug(`stdin error: ${err}`)); - process.stdin.resume(); - debug(`listening...`); - EOF - chmod +x /tmp/safe-outputs/mcp-server.cjs - - - name: Setup MCPs - env: - GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} - GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true},\"create-issue\":true}" - run: | - mkdir -p /tmp/mcp-config - cat > /tmp/mcp-config/mcp-servers.json << 'EOF' - { - "mcpServers": { - "github": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GITHUB_PERSONAL_ACCESS_TOKEN", - "ghcr.io/github/github-mcp-server:latest" - ], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GITHUB_TOKEN }}" - } - }, - "playwright": { - "command": "npx", - "args": [ - "@playwright/mcp@latest", - "--allowed-origins", - "example.com,*.github.com" - ] - }, - "safe_outputs": { - "command": "node", - "args": ["/tmp/safe-outputs/mcp-server.cjs"], - "env": { - "GITHUB_AW_SAFE_OUTPUTS": "${{ env.GITHUB_AW_SAFE_OUTPUTS }}", - "GITHUB_AW_SAFE_OUTPUTS_CONFIG": ${{ toJSON(env.GITHUB_AW_SAFE_OUTPUTS_CONFIG) }} - } - } - } - } - EOF - - name: Create prompt - env: - GITHUB_AW_PROMPT: /tmp/aw-prompts/prompt.txt - GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} - run: | - mkdir -p /tmp/aw-prompts - cat > $GITHUB_AW_PROMPT << 'EOF' - # Test MCP Configuration - - This is a test workflow to demonstrate MCP configuration generation and server launching. 
- - - --- - - ## Adding a Comment to an Issue or Pull Request, Creating an Issue, Reporting Missing Tools or Functionality - - **IMPORTANT**: To do the actions mentioned in the header of this section, use the **safe-outputs** tools, do NOT attempt to use `gh`, do NOT attempt to use the GitHub API. You don't have write access to the GitHub repo. - EOF - - name: Print prompt to step summary - run: | - echo "## Generated Prompt" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo '``````markdown' >> $GITHUB_STEP_SUMMARY - cat $GITHUB_AW_PROMPT >> $GITHUB_STEP_SUMMARY - echo '``````' >> $GITHUB_STEP_SUMMARY - env: - GITHUB_AW_PROMPT: /tmp/aw-prompts/prompt.txt - - name: Generate agentic run info - uses: actions/github-script@v7 - with: - script: | - const fs = require('fs'); - - const awInfo = { - engine_id: "claude", - engine_name: "Claude Code", - model: "", - version: "", - workflow_name: "Test MCP Configuration", - experimental: false, - supports_tools_whitelist: true, - supports_http_transport: true, - run_id: context.runId, - run_number: context.runNumber, - run_attempt: process.env.GITHUB_RUN_ATTEMPT, - repository: context.repo.owner + '/' + context.repo.repo, - ref: context.ref, - sha: context.sha, - actor: context.actor, - event_name: context.eventName, - staged: false, - created_at: new Date().toISOString() - }; - - // Write to /tmp directory to avoid inclusion in PR - const tmpPath = '/tmp/aw_info.json'; - fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); - console.log('Generated aw_info.json at:', tmpPath); - console.log(JSON.stringify(awInfo, null, 2)); - - name: Upload agentic run info - if: always() - uses: actions/upload-artifact@v4 - with: - name: aw_info.json - path: /tmp/aw_info.json - if-no-files-found: warn - - name: Execute Claude Code CLI - id: agentic_execution - # Allowed tools (sorted): - # - ExitPlanMode - # - Glob - # - Grep - # - LS - # - NotebookRead - # - Read - # - Task - # - TodoWrite - # - Write - # - 
mcp__github__download_workflow_run_artifact - # - mcp__github__get_code_scanning_alert - # - mcp__github__get_commit - # - mcp__github__get_dependabot_alert - # - mcp__github__get_discussion - # - mcp__github__get_discussion_comments - # - mcp__github__get_file_contents - # - mcp__github__get_issue - # - mcp__github__get_issue_comments - # - mcp__github__get_job_logs - # - mcp__github__get_me - # - mcp__github__get_notification_details - # - mcp__github__get_pull_request - # - mcp__github__get_pull_request_comments - # - mcp__github__get_pull_request_diff - # - mcp__github__get_pull_request_files - # - mcp__github__get_pull_request_reviews - # - mcp__github__get_pull_request_status - # - mcp__github__get_secret_scanning_alert - # - mcp__github__get_tag - # - mcp__github__get_workflow_run - # - mcp__github__get_workflow_run_logs - # - mcp__github__get_workflow_run_usage - # - mcp__github__list_branches - # - mcp__github__list_code_scanning_alerts - # - mcp__github__list_commits - # - mcp__github__list_dependabot_alerts - # - mcp__github__list_discussion_categories - # - mcp__github__list_discussions - # - mcp__github__list_issues - # - mcp__github__list_notifications - # - mcp__github__list_pull_requests - # - mcp__github__list_secret_scanning_alerts - # - mcp__github__list_tags - # - mcp__github__list_workflow_jobs - # - mcp__github__list_workflow_run_artifacts - # - mcp__github__list_workflow_runs - # - mcp__github__list_workflows - # - mcp__github__search_code - # - mcp__github__search_issues - # - mcp__github__search_orgs - # - mcp__github__search_pull_requests - # - mcp__github__search_repositories - # - mcp__github__search_users - # - mcp__playwright__browser_click - # - mcp__playwright__browser_close - # - mcp__playwright__browser_console_messages - # - mcp__playwright__browser_drag - # - mcp__playwright__browser_evaluate - # - mcp__playwright__browser_file_upload - # - mcp__playwright__browser_fill_form - # - mcp__playwright__browser_handle_dialog - # - 
mcp__playwright__browser_hover - # - mcp__playwright__browser_install - # - mcp__playwright__browser_navigate - # - mcp__playwright__browser_navigate_back - # - mcp__playwright__browser_network_requests - # - mcp__playwright__browser_press_key - # - mcp__playwright__browser_resize - # - mcp__playwright__browser_select_option - # - mcp__playwright__browser_snapshot - # - mcp__playwright__browser_tabs - # - mcp__playwright__browser_take_screenshot - # - mcp__playwright__browser_type - # - mcp__playwright__browser_wait_for - timeout-minutes: 5 - run: | - set -o pipefail - # Execute Claude Code CLI with prompt from file - npx @anthropic-ai/claude-code@latest --print --mcp-config /tmp/mcp-config/mcp-servers.json --allowed-tools "ExitPlanMode,Glob,Grep,LS,NotebookRead,Read,Task,TodoWrite,Write,mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_issue,mcp__github__get_issue_comments,mcp__github__get_job_logs,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issues,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_secret_scanning_alerts,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_
runs,mcp__github__list_workflows,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users,mcp__playwright__browser_click,mcp__playwright__browser_close,mcp__playwright__browser_console_messages,mcp__playwright__browser_drag,mcp__playwright__browser_evaluate,mcp__playwright__browser_file_upload,mcp__playwright__browser_fill_form,mcp__playwright__browser_handle_dialog,mcp__playwright__browser_hover,mcp__playwright__browser_install,mcp__playwright__browser_navigate,mcp__playwright__browser_navigate_back,mcp__playwright__browser_network_requests,mcp__playwright__browser_press_key,mcp__playwright__browser_resize,mcp__playwright__browser_select_option,mcp__playwright__browser_snapshot,mcp__playwright__browser_tabs,mcp__playwright__browser_take_screenshot,mcp__playwright__browser_type,mcp__playwright__browser_wait_for" --debug --verbose --permission-mode bypassPermissions --output-format json --settings /tmp/.claude/settings.json "$(cat /tmp/aw-prompts/prompt.txt)" 2>&1 | tee /tmp/test-mcp-configuration.log - env: - ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} - DISABLE_TELEMETRY: "1" - DISABLE_ERROR_REPORTING: "1" - DISABLE_BUG_COMMAND: "1" - GITHUB_AW_PROMPT: /tmp/aw-prompts/prompt.txt - GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} - - name: Ensure log file exists - if: always() - run: | - # Ensure log file exists - touch /tmp/test-mcp-configuration.log - # Show last few lines for debugging - echo "=== Last 10 lines of Claude execution log ===" - tail -10 /tmp/test-mcp-configuration.log || echo "No log content available" - - name: Print Agent output - env: - GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} - run: | - echo "## Agent Output (JSONL)" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo '``````json' >> $GITHUB_STEP_SUMMARY - if [ -f ${{ env.GITHUB_AW_SAFE_OUTPUTS }} ]; then - cat ${{ 
env.GITHUB_AW_SAFE_OUTPUTS }} >> $GITHUB_STEP_SUMMARY - # Ensure there's a newline after the file content if it doesn't end with one - if [ -s ${{ env.GITHUB_AW_SAFE_OUTPUTS }} ] && [ "$(tail -c1 ${{ env.GITHUB_AW_SAFE_OUTPUTS }})" != "" ]; then - echo "" >> $GITHUB_STEP_SUMMARY - fi - else - echo "No agent output file found" >> $GITHUB_STEP_SUMMARY - fi - echo '``````' >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - name: Upload agentic output file - if: always() - uses: actions/upload-artifact@v4 - with: - name: safe_output.jsonl - path: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} - if-no-files-found: warn - - name: Ingest agent output - id: collect_output - uses: actions/github-script@v7 - env: - GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }} - GITHUB_AW_SAFE_OUTPUTS_CONFIG: "{\"add-issue-comment\":{\"enabled\":true},\"create-issue\":true}" - with: - script: | - async function main() { - const fs = require("fs"); - /** - * Sanitizes content for safe output in GitHub Actions - * @param {string} content - The content to sanitize - * @returns {string} The sanitized content - */ - function sanitizeContent(content) { - if (!content || typeof content !== "string") { - return ""; - } - // Read allowed domains from environment variable - const allowedDomainsEnv = process.env.GITHUB_AW_ALLOWED_DOMAINS; - const defaultAllowedDomains = [ - "github.com", - "github.io", - "githubusercontent.com", - "githubassets.com", - "github.dev", - "codespaces.new", - ]; - const allowedDomains = allowedDomainsEnv - ? 
allowedDomainsEnv - .split(",") - .map(d => d.trim()) - .filter(d => d) : defaultAllowedDomains; - let sanitized = content; - // Neutralize @mentions to prevent unintended notifications - sanitized = neutralizeMentions(sanitized); - // Remove control characters (except newlines and tabs) - sanitized = sanitized.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, ""); - // XML character escaping - sanitized = sanitized - .replace(/&/g, "&amp;") // Must be first to avoid double-escaping - .replace(/</g, "&lt;") - .replace(/>/g, "&gt;") - .replace(/"/g, "&quot;") - .replace(/'/g, "&#x27;"); - // URI filtering - replace non-https protocols with "(redacted)" - sanitized = sanitizeUrlProtocols(sanitized); - // Domain filtering for HTTPS URIs - sanitized = sanitizeUrlDomains(sanitized); - // Limit total length to prevent DoS (0.5MB max) - const maxLength = 524288; - if (sanitized.length > maxLength) { - sanitized = - sanitized.substring(0, maxLength) + - "\n[Content truncated due to length]"; - } - // Limit number of lines to prevent log flooding (65k max) - const lines = sanitized.split("\n"); - const maxLines = 65000; - if (lines.length > maxLines) { - sanitized = - lines.slice(0, maxLines).join("\n") + - "\n[Content truncated due to line count]"; - } - // Remove ANSI escape sequences - sanitized = sanitized.replace(/\x1b\[[0-9;]*[mGKH]/g, ""); - // Neutralize common bot trigger phrases - sanitized = neutralizeBotTriggers(sanitized); - // Trim excessive whitespace - return sanitized.trim(); - /** - * Remove unknown domains - * @param {string} s - The string to process - * @returns {string} The string with unknown domains redacted - */ - function sanitizeUrlDomains(s) { - return s.replace( - /\bhttps:\/\/([^\/\s\])}'"<>&\x00-\x1f]+)/gi, - (match, domain) => { - // Extract the hostname part (before first slash, colon, or other delimiter) - const hostname = domain.split(/[\/:\?#]/)[0].toLowerCase(); - // Check if this domain or any parent domain is in the allowlist - const isAllowed = allowedDomains.some(allowedDomain
=> { - const normalizedAllowed = allowedDomain.toLowerCase(); - return ( - hostname === normalizedAllowed || - hostname.endsWith("." + normalizedAllowed) - ); - }); - return isAllowed ? match : "(redacted)"; - } - ); - } - /** - * Remove unknown protocols except https - * @param {string} s - The string to process - * @returns {string} The string with non-https protocols redacted - */ - function sanitizeUrlProtocols(s) { - // Match both protocol:// and protocol: patterns - return s.replace( - /\b(\w+):(?:\/\/)?[^\s\])}'"<>&\x00-\x1f]+/gi, - (match, protocol) => { - // Allow https (case insensitive), redact everything else - return protocol.toLowerCase() === "https" ? match : "(redacted)"; - } - ); - } - /** - * Neutralizes @mentions by wrapping them in backticks - * @param {string} s - The string to process - * @returns {string} The string with neutralized mentions - */ - function neutralizeMentions(s) { - // Replace @name or @org/team outside code with `@name` - return s.replace( - /(^|[^\w`])@([A-Za-z0-9](?:[A-Za-z0-9-]{0,37}[A-Za-z0-9])?(?:\/[A-Za-z0-9._-]+)?)/g, - (_m, p1, p2) => `${p1}\`@${p2}\`` - ); - } - /** - * Neutralizes bot trigger phrases by wrapping them in backticks - * @param {string} s - The string to process - * @returns {string} The string with neutralized bot triggers - */ - function neutralizeBotTriggers(s) { - // Neutralize common bot trigger phrases like "fixes #123", "closes #asdfs", etc. 
- return s.replace( - /\b(fixes?|closes?|resolves?|fix|close|resolve)\s+#(\w+)/gi, - (match, action, ref) => `\`${action} #${ref}\`` - ); - } - } - /** - * Gets the maximum allowed count for a given output type - * @param {string} itemType - The output item type - * @param {any} config - The safe-outputs configuration - * @returns {number} The maximum allowed count - */ - function getMaxAllowedForType(itemType, config) { - // Check if max is explicitly specified in config - if ( - config && - config[itemType] && - typeof config[itemType] === "object" && - config[itemType].max - ) { - return config[itemType].max; - } - // Use default limits for plural-supported types - switch (itemType) { - case "create-issue": - return 1; // Only one issue allowed - case "add-issue-comment": - return 1; // Only one comment allowed - case "create-pull-request": - return 1; // Only one pull request allowed - case "create-pull-request-review-comment": - return 10; // Default to 10 review comments allowed - case "add-issue-label": - return 5; // Only one labels operation allowed - case "update-issue": - return 1; // Only one issue update allowed - case "push-to-pr-branch": - return 1; // Only one push to branch allowed - case "create-discussion": - return 1; // Only one discussion allowed - case "missing-tool": - return 1000; // Allow many missing tool reports (default: unlimited) - case "create-code-scanning-alert": - return 1000; // Allow many repository security advisories (default: unlimited) - default: - return 1; // Default to single item for unknown types - } - } - /** - * Attempts to repair common JSON syntax issues in LLM-generated content - * @param {string} jsonStr - The potentially malformed JSON string - * @returns {string} The repaired JSON string - */ - function repairJson(jsonStr) { - let repaired = jsonStr.trim(); - // remove invalid control characters like - // U+0014 (DC4) — represented here as "\u0014" - // Escape control characters not allowed in JSON strings 
(U+0000 through U+001F) - // Preserve common JSON escapes for \b, \f, \n, \r, \t and use \uXXXX for the rest. - /** @type {Record<number, string>} */ - const _ctrl = { 8: "\\b", 9: "\\t", 10: "\\n", 12: "\\f", 13: "\\r" }; - repaired = repaired.replace(/[\u0000-\u001F]/g, ch => { - const c = ch.charCodeAt(0); - return _ctrl[c] || "\\u" + c.toString(16).padStart(4, "0"); - }); - // Fix single quotes to double quotes (must be done first) - repaired = repaired.replace(/'/g, '"'); - // Fix missing quotes around object keys - repaired = repaired.replace( - /([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, - '$1"$2":' - ); - // Fix newlines and tabs inside strings by escaping them - repaired = repaired.replace(/"([^"\\]*)"/g, (match, content) => { - if ( - content.includes("\n") || - content.includes("\r") || - content.includes("\t") - ) { - const escaped = content - .replace(/\\/g, "\\\\") - .replace(/\n/g, "\\n") - .replace(/\r/g, "\\r") - .replace(/\t/g, "\\t"); - return `"${escaped}"`; - } - return match; - }); - // Fix unescaped quotes inside string values - repaired = repaired.replace( - /"([^"]*)"([^":,}\]]*)"([^"]*)"(\s*[,:}\]])/g, - (match, p1, p2, p3, p4) => `"${p1}\\"${p2}\\"${p3}"${p4}` - ); - // Fix wrong bracket/brace types - arrays should end with ] not } - repaired = repaired.replace( - /(\[\s*(?:"[^"]*"(?:\s*,\s*"[^"]*")*\s*),?)\s*}/g, - "$1]" - ); - // Fix missing closing braces/brackets - const openBraces = (repaired.match(/\{/g) || []).length; - const closeBraces = (repaired.match(/\}/g) || []).length; - if (openBraces > closeBraces) { - repaired += "}".repeat(openBraces - closeBraces); - } else if (closeBraces > openBraces) { - repaired = "{".repeat(closeBraces - openBraces) + repaired; - } - // Fix missing closing brackets for arrays - const openBrackets = (repaired.match(/\[/g) || []).length; - const closeBrackets = (repaired.match(/\]/g) || []).length; - if (openBrackets > closeBrackets) { - repaired += "]".repeat(openBrackets - closeBrackets); - } else if
(closeBrackets > openBrackets) { - repaired = "[".repeat(closeBrackets - openBrackets) + repaired; - } - // Fix trailing commas in objects and arrays (AFTER fixing brackets/braces) - repaired = repaired.replace(/,(\s*[}\]])/g, "$1"); - return repaired; - } - /** - * Attempts to parse JSON with repair fallback - * @param {string} jsonStr - The JSON string to parse - * @returns {Object|undefined} The parsed JSON object, or undefined if parsing fails - */ - function parseJsonWithRepair(jsonStr) { - try { - // First, try normal JSON.parse - return JSON.parse(jsonStr); - } catch (originalError) { - try { - // If that fails, try repairing and parsing again - const repairedJson = repairJson(jsonStr); - return JSON.parse(repairedJson); - } catch (repairError) { - // If repair also fails, throw the error - core.info(`invalid input json: ${jsonStr}`); - const originalMsg = - originalError instanceof Error - ? originalError.message - : String(originalError); - const repairMsg = - repairError instanceof Error - ? repairError.message - : String(repairError); - throw new Error( - `JSON parsing failed. Original: ${originalMsg}. 
After attempted repair: ${repairMsg}` - ); - } - } - } - const outputFile = process.env.GITHUB_AW_SAFE_OUTPUTS; - const safeOutputsConfig = process.env.GITHUB_AW_SAFE_OUTPUTS_CONFIG; - if (!outputFile) { - core.info("GITHUB_AW_SAFE_OUTPUTS not set, no output to collect"); - core.setOutput("output", ""); - return; - } - if (!fs.existsSync(outputFile)) { - core.info(`Output file does not exist: ${outputFile}`); - core.setOutput("output", ""); - return; - } - const outputContent = fs.readFileSync(outputFile, "utf8"); - if (outputContent.trim() === "") { - core.info("Output file is empty"); - core.setOutput("output", ""); - return; - } - core.info(`Raw output content length: ${outputContent.length}`); - // Parse the safe-outputs configuration - /** @type {any} */ - let expectedOutputTypes = {}; - if (safeOutputsConfig) { - try { - expectedOutputTypes = JSON.parse(safeOutputsConfig); - core.info( - `Expected output types: ${JSON.stringify(Object.keys(expectedOutputTypes))}` - ); - } catch (error) { - const errorMsg = error instanceof Error ? 
error.message : String(error); - core.info(`Warning: Could not parse safe-outputs config: ${errorMsg}`); - } - } - // Parse JSONL content - const lines = outputContent.trim().split("\n"); - const parsedItems = []; - const errors = []; - for (let i = 0; i < lines.length; i++) { - const line = lines[i].trim(); - if (line === "") continue; // Skip empty lines - try { - /** @type {any} */ - const item = parseJsonWithRepair(line); - // If item is undefined (failed to parse), add error and process next line - if (item === undefined) { - errors.push(`Line ${i + 1}: Invalid JSON - JSON parsing failed`); - continue; - } - // Validate that the item has a 'type' field - if (!item.type) { - errors.push(`Line ${i + 1}: Missing required 'type' field`); - continue; - } - // Validate against expected output types - const itemType = item.type; - if (!expectedOutputTypes[itemType]) { - errors.push( - `Line ${i + 1}: Unexpected output type '${itemType}'. Expected one of: ${Object.keys(expectedOutputTypes).join(", ")}` - ); - continue; - } - // Check for too many items of the same type - const typeCount = parsedItems.filter( - existing => existing.type === itemType - ).length; - const maxAllowed = getMaxAllowedForType(itemType, expectedOutputTypes); - if (typeCount >= maxAllowed) { - errors.push( - `Line ${i + 1}: Too many items of type '${itemType}'. 
Maximum allowed: ${maxAllowed}.` - ); - continue; - } - // Basic validation based on type - switch (itemType) { - case "create-issue": - if (!item.title || typeof item.title !== "string") { - errors.push( - `Line ${i + 1}: create-issue requires a 'title' string field` - ); - continue; - } - if (!item.body || typeof item.body !== "string") { - errors.push( - `Line ${i + 1}: create-issue requires a 'body' string field` - ); - continue; - } - // Sanitize text content - item.title = sanitizeContent(item.title); - item.body = sanitizeContent(item.body); - // Sanitize labels if present - if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map( - /** @param {any} label */ label => - typeof label === "string" ? sanitizeContent(label) : label - ); - } - break; - case "add-issue-comment": - if (!item.body || typeof item.body !== "string") { - errors.push( - `Line ${i + 1}: add-issue-comment requires a 'body' string field` - ); - continue; - } - // Sanitize text content - item.body = sanitizeContent(item.body); - break; - case "create-pull-request": - if (!item.title || typeof item.title !== "string") { - errors.push( - `Line ${i + 1}: create-pull-request requires a 'title' string field` - ); - continue; - } - if (!item.body || typeof item.body !== "string") { - errors.push( - `Line ${i + 1}: create-pull-request requires a 'body' string field` - ); - continue; - } - // Sanitize text content - item.title = sanitizeContent(item.title); - item.body = sanitizeContent(item.body); - // Sanitize branch name if present - if (item.branch && typeof item.branch === "string") { - item.branch = sanitizeContent(item.branch); - } - // Sanitize labels if present - if (item.labels && Array.isArray(item.labels)) { - item.labels = item.labels.map( - /** @param {any} label */ label => - typeof label === "string" ? 
sanitizeContent(label) : label - ); - } - break; - case "add-issue-label": - if (!item.labels || !Array.isArray(item.labels)) { - errors.push( - `Line ${i + 1}: add-issue-label requires a 'labels' array field` - ); - continue; - } - if ( - item.labels.some( - /** @param {any} label */ label => typeof label !== "string" - ) - ) { - errors.push( - `Line ${i + 1}: add-issue-label labels array must contain only strings` - ); - continue; - } - // Sanitize label strings - item.labels = item.labels.map( - /** @param {any} label */ label => sanitizeContent(label) - ); - break; - case "update-issue": - // Check that at least one updateable field is provided - const hasValidField = - item.status !== undefined || - item.title !== undefined || - item.body !== undefined; - if (!hasValidField) { - errors.push( - `Line ${i + 1}: update-issue requires at least one of: 'status', 'title', or 'body' fields` - ); - continue; - } - // Validate status if provided - if (item.status !== undefined) { - if ( - typeof item.status !== "string" || - (item.status !== "open" && item.status !== "closed") - ) { - errors.push( - `Line ${i + 1}: update-issue 'status' must be 'open' or 'closed'` - ); - continue; - } - } - // Validate title if provided - if (item.title !== undefined) { - if (typeof item.title !== "string") { - errors.push( - `Line ${i + 1}: update-issue 'title' must be a string` - ); - continue; - } - item.title = sanitizeContent(item.title); - } - // Validate body if provided - if (item.body !== undefined) { - if (typeof item.body !== "string") { - errors.push( - `Line ${i + 1}: update-issue 'body' must be a string` - ); - continue; - } - item.body = sanitizeContent(item.body); - } - // Validate issue_number if provided (for target "*") - if (item.issue_number !== undefined) { - if ( - typeof item.issue_number !== "number" && - typeof item.issue_number !== "string" - ) { - errors.push( - `Line ${i + 1}: update-issue 'issue_number' must be a number or string` - ); - continue; - } - } 
- break; - case "push-to-pr-branch": - // Validate message if provided (optional) - if (item.message !== undefined) { - if (typeof item.message !== "string") { - errors.push( - `Line ${i + 1}: push-to-pr-branch 'message' must be a string` - ); - continue; - } - item.message = sanitizeContent(item.message); - } - // Validate pull_request_number if provided (for target "*") - if (item.pull_request_number !== undefined) { - if ( - typeof item.pull_request_number !== "number" && - typeof item.pull_request_number !== "string" - ) { - errors.push( - `Line ${i + 1}: push-to-pr-branch 'pull_request_number' must be a number or string` - ); - continue; - } - } - break; - case "create-pull-request-review-comment": - // Validate required path field - if (!item.path || typeof item.path !== "string") { - errors.push( - `Line ${i + 1}: create-pull-request-review-comment requires a 'path' string field` - ); - continue; - } - // Validate required line field - if ( - item.line === undefined || - (typeof item.line !== "number" && typeof item.line !== "string") - ) { - errors.push( - `Line ${i + 1}: create-pull-request-review-comment requires a 'line' number or string field` - ); - continue; - } - // Validate line is a positive integer - const lineNumber = - typeof item.line === "string" ? 
parseInt(item.line, 10) : item.line; - if ( - isNaN(lineNumber) || - lineNumber <= 0 || - !Number.isInteger(lineNumber) - ) { - errors.push( - `Line ${i + 1}: create-pull-request-review-comment 'line' must be a positive integer` - ); - continue; - } - // Validate required body field - if (!item.body || typeof item.body !== "string") { - errors.push( - `Line ${i + 1}: create-pull-request-review-comment requires a 'body' string field` - ); - continue; - } - // Sanitize required text content - item.body = sanitizeContent(item.body); - // Validate optional start_line field - if (item.start_line !== undefined) { - if ( - typeof item.start_line !== "number" && - typeof item.start_line !== "string" - ) { - errors.push( - `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a number or string` - ); - continue; - } - const startLineNumber = - typeof item.start_line === "string" - ? parseInt(item.start_line, 10) - : item.start_line; - if ( - isNaN(startLineNumber) || - startLineNumber <= 0 || - !Number.isInteger(startLineNumber) - ) { - errors.push( - `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be a positive integer` - ); - continue; - } - if (startLineNumber > lineNumber) { - errors.push( - `Line ${i + 1}: create-pull-request-review-comment 'start_line' must be less than or equal to 'line'` - ); - continue; - } - } - // Validate optional side field - if (item.side !== undefined) { - if ( - typeof item.side !== "string" || - (item.side !== "LEFT" && item.side !== "RIGHT") - ) { - errors.push( - `Line ${i + 1}: create-pull-request-review-comment 'side' must be 'LEFT' or 'RIGHT'` - ); - continue; - } - } - break; - case "create-discussion": - if (!item.title || typeof item.title !== "string") { - errors.push( - `Line ${i + 1}: create-discussion requires a 'title' string field` - ); - continue; - } - if (!item.body || typeof item.body !== "string") { - errors.push( - `Line ${i + 1}: create-discussion requires a 'body' string field` - 
); - continue; - } - // Sanitize text content - item.title = sanitizeContent(item.title); - item.body = sanitizeContent(item.body); - break; - case "missing-tool": - // Validate required tool field - if (!item.tool || typeof item.tool !== "string") { - errors.push( - `Line ${i + 1}: missing-tool requires a 'tool' string field` - ); - continue; - } - // Validate required reason field - if (!item.reason || typeof item.reason !== "string") { - errors.push( - `Line ${i + 1}: missing-tool requires a 'reason' string field` - ); - continue; - } - // Sanitize text content - item.tool = sanitizeContent(item.tool); - item.reason = sanitizeContent(item.reason); - // Validate optional alternatives field - if (item.alternatives !== undefined) { - if (typeof item.alternatives !== "string") { - errors.push( - `Line ${i + 1}: missing-tool 'alternatives' must be a string` - ); - continue; - } - item.alternatives = sanitizeContent(item.alternatives); - } - break; - case "create-code-scanning-alert": - // Validate required fields - if (!item.file || typeof item.file !== "string") { - errors.push( - `Line ${i + 1}: create-code-scanning-alert requires a 'file' field (string)` - ); - continue; - } - if ( - item.line === undefined || - item.line === null || - (typeof item.line !== "number" && typeof item.line !== "string") - ) { - errors.push( - `Line ${i + 1}: create-code-scanning-alert requires a 'line' field (number or string)` - ); - continue; - } - // Additional validation: line must be parseable as a positive integer - const parsedLine = parseInt(item.line, 10); - if (isNaN(parsedLine) || parsedLine <= 0) { - errors.push( - `Line ${i + 1}: create-code-scanning-alert 'line' must be a valid positive integer (got: ${item.line})` - ); - continue; - } - if (!item.severity || typeof item.severity !== "string") { - errors.push( - `Line ${i + 1}: create-code-scanning-alert requires a 'severity' field (string)` - ); - continue; - } - if (!item.message || typeof item.message !== "string") { 
- errors.push( - `Line ${i + 1}: create-code-scanning-alert requires a 'message' field (string)` - ); - continue; - } - // Validate severity level - const allowedSeverities = ["error", "warning", "info", "note"]; - if (!allowedSeverities.includes(item.severity.toLowerCase())) { - errors.push( - `Line ${i + 1}: create-code-scanning-alert 'severity' must be one of: ${allowedSeverities.join(", ")}` - ); - continue; - } - // Validate optional column field - if (item.column !== undefined) { - if ( - typeof item.column !== "number" && - typeof item.column !== "string" - ) { - errors.push( - `Line ${i + 1}: create-code-scanning-alert 'column' must be a number or string` - ); - continue; - } - // Additional validation: must be parseable as a positive integer - const parsedColumn = parseInt(item.column, 10); - if (isNaN(parsedColumn) || parsedColumn <= 0) { - errors.push( - `Line ${i + 1}: create-code-scanning-alert 'column' must be a valid positive integer (got: ${item.column})` - ); - continue; - } - } - // Validate optional ruleIdSuffix field - if (item.ruleIdSuffix !== undefined) { - if (typeof item.ruleIdSuffix !== "string") { - errors.push( - `Line ${i + 1}: create-code-scanning-alert 'ruleIdSuffix' must be a string` - ); - continue; - } - if (!/^[a-zA-Z0-9_-]+$/.test(item.ruleIdSuffix.trim())) { - errors.push( - `Line ${i + 1}: create-code-scanning-alert 'ruleIdSuffix' must contain only alphanumeric characters, hyphens, and underscores` - ); - continue; - } - } - // Normalize severity to lowercase and sanitize string fields - item.severity = item.severity.toLowerCase(); - item.file = sanitizeContent(item.file); - item.severity = sanitizeContent(item.severity); - item.message = sanitizeContent(item.message); - if (item.ruleIdSuffix) { - item.ruleIdSuffix = sanitizeContent(item.ruleIdSuffix); - } - break; - default: - errors.push(`Line ${i + 1}: Unknown output type '${itemType}'`); - continue; - } - core.info(`Line ${i + 1}: Valid ${itemType} item`); - 
parsedItems.push(item); - } catch (error) { - const errorMsg = error instanceof Error ? error.message : String(error); - errors.push(`Line ${i + 1}: Invalid JSON - ${errorMsg}`); - } - } - // Report validation results - if (errors.length > 0) { - core.warning("Validation errors found:"); - errors.forEach(error => core.warning(` - ${error}`)); - if (parsedItems.length === 0) { - core.setFailed(errors.map(e => ` - ${e}`).join("\n")); - return; - } - // For now, we'll continue with valid items but log the errors - // In the future, we might want to fail the workflow for invalid items - } - core.info(`Successfully parsed ${parsedItems.length} valid output items`); - // Set the parsed and validated items as output - const validatedOutput = { - items: parsedItems, - errors: errors, - }; - // Store validatedOutput JSON in "agent_output.json" file - const agentOutputFile = "/tmp/agent_output.json"; - const validatedOutputJson = JSON.stringify(validatedOutput); - try { - // Ensure the /tmp directory exists - fs.mkdirSync("/tmp", { recursive: true }); - fs.writeFileSync(agentOutputFile, validatedOutputJson, "utf8"); - core.info(`Stored validated output to: ${agentOutputFile}`); - // Set the environment variable GITHUB_AW_AGENT_OUTPUT to the file path - core.exportVariable("GITHUB_AW_AGENT_OUTPUT", agentOutputFile); - } catch (error) { - const errorMsg = error instanceof Error ? 
error.message : String(error); - core.error(`Failed to write agent output file: ${errorMsg}`); - } - core.setOutput("output", JSON.stringify(validatedOutput)); - core.setOutput("raw_output", outputContent); - } - // Call the main function - await main(); - - name: Print sanitized agent output - run: | - echo "## Processed Output" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo '``````json' >> $GITHUB_STEP_SUMMARY - echo '${{ steps.collect_output.outputs.output }}' >> $GITHUB_STEP_SUMMARY - echo '``````' >> $GITHUB_STEP_SUMMARY - - name: Upload sanitized agent output - if: always() && env.GITHUB_AW_AGENT_OUTPUT - uses: actions/upload-artifact@v4 - with: - name: agent_output.json - path: ${{ env.GITHUB_AW_AGENT_OUTPUT }} - if-no-files-found: warn - - name: Upload engine output files - uses: actions/upload-artifact@v4 - with: - name: agent_outputs - path: | - output.txt - if-no-files-found: ignore - - name: Clean up engine output files - run: | - rm -f output.txt - - name: Parse agent logs for step summary - if: always() - uses: actions/github-script@v7 - env: - GITHUB_AW_AGENT_OUTPUT: /tmp/test-mcp-configuration.log - with: - script: | - function main() { - const fs = require("fs"); - try { - // Get the log file path from environment - const logFile = process.env.GITHUB_AW_AGENT_OUTPUT; - if (!logFile) { - core.info("No agent log file specified"); - return; - } - if (!fs.existsSync(logFile)) { - core.info(`Log file not found: ${logFile}`); - return; - } - const logContent = fs.readFileSync(logFile, "utf8"); - const result = parseClaudeLog(logContent); - // Append to GitHub step summary - core.summary.addRaw(result.markdown).write(); - // Check for MCP server failures and fail the job if any occurred - if (result.mcpFailures && result.mcpFailures.length > 0) { - const failedServers = result.mcpFailures.join(", "); - core.setFailed(`MCP server(s) failed to launch: ${failedServers}`); - } - } catch (error) { - const errorMessage = error instanceof Error 
? error.message : String(error); - core.setFailed(errorMessage); - } - } - /** - * Parses Claude log content and converts it to markdown format - * @param {string} logContent - The raw log content as a string - * @returns {{markdown: string, mcpFailures: string[]}} Result with formatted markdown content and MCP failure list - */ - function parseClaudeLog(logContent) { - try { - const logEntries = JSON.parse(logContent); - if (!Array.isArray(logEntries)) { - return { - markdown: - "## Agent Log Summary\n\nLog format not recognized as Claude JSON array.\n", - mcpFailures: [], - }; - } - let markdown = ""; - const mcpFailures = []; - // Check for initialization data first - const initEntry = logEntries.find( - entry => entry.type === "system" && entry.subtype === "init" - ); - if (initEntry) { - markdown += "## 🚀 Initialization\n\n"; - const initResult = formatInitializationSummary(initEntry); - markdown += initResult.markdown; - mcpFailures.push(...initResult.mcpFailures); - markdown += "\n"; - } - markdown += "## 🤖 Commands and Tools\n\n"; - const toolUsePairs = new Map(); // Map tool_use_id to tool_result - const commandSummary = []; // For the succinct summary - // First pass: collect tool results by tool_use_id - for (const entry of logEntries) { - if (entry.type === "user" && entry.message?.content) { - for (const content of entry.message.content) { - if (content.type === "tool_result" && content.tool_use_id) { - toolUsePairs.set(content.tool_use_id, content); - } - } - } - } - // Collect all tool uses for summary - for (const entry of logEntries) { - if (entry.type === "assistant" && entry.message?.content) { - for (const content of entry.message.content) { - if (content.type === "tool_use") { - const toolName = content.name; - const input = content.input || {}; - // Skip internal tools - only show external commands and API calls - if ( - [ - "Read", - "Write", - "Edit", - "MultiEdit", - "LS", - "Grep", - "Glob", - "TodoWrite", - ].includes(toolName) - ) { - 
continue; // Skip internal file operations and searches - } - // Find the corresponding tool result to get status - const toolResult = toolUsePairs.get(content.id); - let statusIcon = "❓"; - if (toolResult) { - statusIcon = toolResult.is_error === true ? "❌" : "✅"; - } - // Add to command summary (only external tools) - if (toolName === "Bash") { - const formattedCommand = formatBashCommand(input.command || ""); - commandSummary.push(`* ${statusIcon} \`${formattedCommand}\``); - } else if (toolName.startsWith("mcp__")) { - const mcpName = formatMcpName(toolName); - commandSummary.push(`* ${statusIcon} \`${mcpName}(...)\``); - } else { - // Handle other external tools (if any) - commandSummary.push(`* ${statusIcon} ${toolName}`); - } - } - } - } - } - // Add command summary - if (commandSummary.length > 0) { - for (const cmd of commandSummary) { - markdown += `${cmd}\n`; - } - } else { - markdown += "No commands or tools used.\n"; - } - // Add Information section from the last entry with result metadata - markdown += "\n## 📊 Information\n\n"; - // Find the last entry with metadata - const lastEntry = logEntries[logEntries.length - 1]; - if ( - lastEntry && - (lastEntry.num_turns || - lastEntry.duration_ms || - lastEntry.total_cost_usd || - lastEntry.usage) - ) { - if (lastEntry.num_turns) { - markdown += `**Turns:** ${lastEntry.num_turns}\n\n`; - } - if (lastEntry.duration_ms) { - const durationSec = Math.round(lastEntry.duration_ms / 1000); - const minutes = Math.floor(durationSec / 60); - const seconds = durationSec % 60; - markdown += `**Duration:** ${minutes}m ${seconds}s\n\n`; - } - if (lastEntry.total_cost_usd) { - markdown += `**Total Cost:** $${lastEntry.total_cost_usd.toFixed(4)}\n\n`; - } - if (lastEntry.usage) { - const usage = lastEntry.usage; - if (usage.input_tokens || usage.output_tokens) { - markdown += `**Token Usage:**\n`; - if (usage.input_tokens) - markdown += `- Input: ${usage.input_tokens.toLocaleString()}\n`; - if 
(usage.cache_creation_input_tokens) - markdown += `- Cache Creation: ${usage.cache_creation_input_tokens.toLocaleString()}\n`; - if (usage.cache_read_input_tokens) - markdown += `- Cache Read: ${usage.cache_read_input_tokens.toLocaleString()}\n`; - if (usage.output_tokens) - markdown += `- Output: ${usage.output_tokens.toLocaleString()}\n`; - markdown += "\n"; - } - } - if ( - lastEntry.permission_denials && - lastEntry.permission_denials.length > 0 - ) { - markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`; - } - } - markdown += "\n## 🤖 Reasoning\n\n"; - // Second pass: process assistant messages in sequence - for (const entry of logEntries) { - if (entry.type === "assistant" && entry.message?.content) { - for (const content of entry.message.content) { - if (content.type === "text" && content.text) { - // Add reasoning text directly (no header) - const text = content.text.trim(); - if (text && text.length > 0) { - markdown += text + "\n\n"; - } - } else if (content.type === "tool_use") { - // Process tool use with its result - const toolResult = toolUsePairs.get(content.id); - const toolMarkdown = formatToolUse(content, toolResult); - if (toolMarkdown) { - markdown += toolMarkdown; - } - } - } - } - } - return { markdown, mcpFailures }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); - return { - markdown: `## Agent Log Summary\n\nError parsing Claude log: ${errorMessage}\n`, - mcpFailures: [], - }; - } - } - /** - * Formats initialization information from system init entry - * @param {any} initEntry - The system init entry containing tools, mcp_servers, etc. 
- * @returns {{markdown: string, mcpFailures: string[]}} Result with formatted markdown string and MCP failure list - */ - function formatInitializationSummary(initEntry) { - let markdown = ""; - const mcpFailures = []; - // Display model and session info - if (initEntry.model) { - markdown += `**Model:** ${initEntry.model}\n\n`; - } - if (initEntry.session_id) { - markdown += `**Session ID:** ${initEntry.session_id}\n\n`; - } - if (initEntry.cwd) { - // Show a cleaner path by removing common prefixes - const cleanCwd = initEntry.cwd.replace( - /^\/home\/runner\/work\/[^\/]+\/[^\/]+/, - "." - ); - markdown += `**Working Directory:** ${cleanCwd}\n\n`; - } - // Display MCP servers status - if (initEntry.mcp_servers && Array.isArray(initEntry.mcp_servers)) { - markdown += "**MCP Servers:**\n"; - for (const server of initEntry.mcp_servers) { - const statusIcon = - server.status === "connected" - ? "✅" - : server.status === "failed" - ? "❌" - : "❓"; - markdown += `- ${statusIcon} ${server.name} (${server.status})\n`; - // Track failed MCP servers - if (server.status === "failed") { - mcpFailures.push(server.name); - } - } - markdown += "\n"; - } - // Display tools by category - if (initEntry.tools && Array.isArray(initEntry.tools)) { - markdown += "**Available Tools:**\n"; - // Categorize tools - /** @type {{ [key: string]: string[] }} */ - const categories = { - Core: [], - "File Operations": [], - "Git/GitHub": [], - MCP: [], - Other: [], - }; - for (const tool of initEntry.tools) { - if ( - ["Task", "Bash", "BashOutput", "KillBash", "ExitPlanMode"].includes( - tool - ) - ) { - categories["Core"].push(tool); - } else if ( - [ - "Read", - "Edit", - "MultiEdit", - "Write", - "LS", - "Grep", - "Glob", - "NotebookEdit", - ].includes(tool) - ) { - categories["File Operations"].push(tool); - } else if (tool.startsWith("mcp__github__")) { - categories["Git/GitHub"].push(formatMcpName(tool)); - } else if ( - tool.startsWith("mcp__") || - ["ListMcpResourcesTool", 
"ReadMcpResourceTool"].includes(tool) - ) { - categories["MCP"].push( - tool.startsWith("mcp__") ? formatMcpName(tool) : tool - ); - } else { - categories["Other"].push(tool); - } - } - // Display categories with tools - for (const [category, tools] of Object.entries(categories)) { - if (tools.length > 0) { - markdown += `- **${category}:** ${tools.length} tools\n`; - if (tools.length <= 5) { - // Show all tools if 5 or fewer - markdown += ` - ${tools.join(", ")}\n`; - } else { - // Show first few and count - markdown += ` - ${tools.slice(0, 3).join(", ")}, and ${tools.length - 3} more\n`; - } - } - } - markdown += "\n"; - } - // Display slash commands if available - if (initEntry.slash_commands && Array.isArray(initEntry.slash_commands)) { - const commandCount = initEntry.slash_commands.length; - markdown += `**Slash Commands:** ${commandCount} available\n`; - if (commandCount <= 10) { - markdown += `- ${initEntry.slash_commands.join(", ")}\n`; - } else { - markdown += `- ${initEntry.slash_commands.slice(0, 5).join(", ")}, and ${commandCount - 5} more\n`; - } - markdown += "\n"; - } - return { markdown, mcpFailures }; - } - /** - * Formats a tool use entry with its result into markdown - * @param {any} toolUse - The tool use object containing name, input, etc. - * @param {any} toolResult - The corresponding tool result object - * @returns {string} Formatted markdown string - */ - function formatToolUse(toolUse, toolResult) { - const toolName = toolUse.name; - const input = toolUse.input || {}; - // Skip TodoWrite except the very last one (we'll handle this separately) - if (toolName === "TodoWrite") { - return ""; // Skip for now, would need global context to find the last one - } - // Helper function to determine status icon - function getStatusIcon() { - if (toolResult) { - return toolResult.is_error === true ? 
"❌" : "✅"; - } - return "❓"; // Unknown by default - } - let markdown = ""; - const statusIcon = getStatusIcon(); - switch (toolName) { - case "Bash": - const command = input.command || ""; - const description = input.description || ""; - // Format the command to be single line - const formattedCommand = formatBashCommand(command); - if (description) { - markdown += `${description}:\n\n`; - } - markdown += `${statusIcon} \`${formattedCommand}\`\n\n`; - break; - case "Read": - const filePath = input.file_path || input.path || ""; - const relativePath = filePath.replace( - /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, - "" - ); // Remove /home/runner/work/repo/repo/ prefix - markdown += `${statusIcon} Read \`${relativePath}\`\n\n`; - break; - case "Write": - case "Edit": - case "MultiEdit": - const writeFilePath = input.file_path || input.path || ""; - const writeRelativePath = writeFilePath.replace( - /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, - "" - ); - markdown += `${statusIcon} Write \`${writeRelativePath}\`\n\n`; - break; - case "Grep": - case "Glob": - const query = input.query || input.pattern || ""; - markdown += `${statusIcon} Search for \`${truncateString(query, 80)}\`\n\n`; - break; - case "LS": - const lsPath = input.path || ""; - const lsRelativePath = lsPath.replace( - /^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, - "" - ); - markdown += `${statusIcon} LS: ${lsRelativePath || lsPath}\n\n`; - break; - default: - // Handle MCP calls and other tools - if (toolName.startsWith("mcp__")) { - const mcpName = formatMcpName(toolName); - const params = formatMcpParameters(input); - markdown += `${statusIcon} ${mcpName}(${params})\n\n`; - } else { - // Generic tool formatting - show the tool name and main parameters - const keys = Object.keys(input); - if (keys.length > 0) { - // Try to find the most important parameter - const mainParam = - keys.find(k => - ["query", "command", "path", "file_path", "content"].includes(k) - ) || keys[0]; - const value = String(input[mainParam] || 
""); - if (value) { - markdown += `${statusIcon} ${toolName}: ${truncateString(value, 100)}\n\n`; - } else { - markdown += `${statusIcon} ${toolName}\n\n`; - } - } else { - markdown += `${statusIcon} ${toolName}\n\n`; - } - } - } - return markdown; - } - /** - * Formats MCP tool name from internal format to display format - * @param {string} toolName - The raw tool name (e.g., mcp__github__search_issues) - * @returns {string} Formatted tool name (e.g., github::search_issues) - */ - function formatMcpName(toolName) { - // Convert mcp__github__search_issues to github::search_issues - if (toolName.startsWith("mcp__")) { - const parts = toolName.split("__"); - if (parts.length >= 3) { - const provider = parts[1]; // github, etc. - const method = parts.slice(2).join("_"); // search_issues, etc. - return `${provider}::${method}`; - } - } - return toolName; - } - /** - * Formats MCP parameters into a human-readable string - * @param {Record} input - The input object containing parameters - * @returns {string} Formatted parameters string - */ - function formatMcpParameters(input) { - const keys = Object.keys(input); - if (keys.length === 0) return ""; - const paramStrs = []; - for (const key of keys.slice(0, 4)) { - // Show up to 4 parameters - const value = String(input[key] || ""); - paramStrs.push(`${key}: ${truncateString(value, 40)}`); - } - if (keys.length > 4) { - paramStrs.push("..."); - } - return paramStrs.join(", "); - } - /** - * Formats a bash command by normalizing whitespace and escaping - * @param {string} command - The raw bash command string - * @returns {string} Formatted and escaped command string - */ - function formatBashCommand(command) { - if (!command) return ""; - // Convert multi-line commands to single line by replacing newlines with spaces - // and collapsing multiple spaces - let formatted = command - .replace(/\n/g, " ") // Replace newlines with spaces - .replace(/\r/g, " ") // Replace carriage returns with spaces - .replace(/\t/g, " ") // 
Replace tabs with spaces - .replace(/\s+/g, " ") // Collapse multiple spaces into one - .trim(); // Remove leading/trailing whitespace - // Escape backticks to prevent markdown issues - formatted = formatted.replace(/`/g, "\\`"); - // Truncate if too long (keep reasonable length for summary) - const maxLength = 80; - if (formatted.length > maxLength) { - formatted = formatted.substring(0, maxLength) + "..."; - } - return formatted; - } - /** - * Truncates a string to a maximum length with ellipsis - * @param {string} str - The string to truncate - * @param {number} maxLength - Maximum allowed length - * @returns {string} Truncated string with ellipsis if needed - */ - function truncateString(str, maxLength) { - if (!str) return ""; - if (str.length <= maxLength) return str; - return str.substring(0, maxLength) + "..."; - } - // Export for testing - if (typeof module !== "undefined" && module.exports) { - module.exports = { - parseClaudeLog, - formatToolUse, - formatInitializationSummary, - formatBashCommand, - truncateString, - }; - } - main(); - - name: Upload agent logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: test-mcp-configuration.log - path: /tmp/test-mcp-configuration.log - if-no-files-found: warn - - create_issue: - needs: test-mcp-configuration - runs-on: ubuntu-latest - permissions: - contents: read - issues: write - timeout-minutes: 10 - outputs: - issue_number: ${{ steps.create_issue.outputs.issue_number }} - issue_url: ${{ steps.create_issue.outputs.issue_url }} - steps: - - name: Check team membership for workflow - id: check-team-member - uses: actions/github-script@v7 - env: - GITHUB_AW_REQUIRED_ROLES: admin,maintainer - with: - script: | - async function setCancelled(message) { - try { - await github.rest.actions.cancelWorkflowRun({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: context.runId, - }); - core.info(`Cancellation requested for this workflow run: ${message}`); - } catch (error) { - const 
errorMessage = error instanceof Error ? error.message : String(error); - core.warning(`Failed to cancel workflow run: ${errorMessage}`); - core.setFailed(message); // Fallback if API call fails - } - } - async function main() { - const { eventName } = context; - // skip check for safe events - const safeEvents = ["workflow_dispatch", "workflow_run", "schedule"]; - if (safeEvents.includes(eventName)) { - core.info(`✅ Event ${eventName} does not require validation`); - return; - } - const actor = context.actor; - const { owner, repo } = context.repo; - const requiredPermissionsEnv = process.env.GITHUB_AW_REQUIRED_ROLES; - const requiredPermissions = requiredPermissionsEnv - ? requiredPermissionsEnv.split(",").filter(p => p.trim() !== "") - : []; - if (!requiredPermissions || requiredPermissions.length === 0) { - core.error( - "❌ Configuration error: Required permissions not specified. Contact repository administrator." - ); - await setCancelled( - "Configuration error: Required permissions not specified" - ); - return; - } - // Check if the actor has the required repository permissions - try { - core.debug( - `Checking if user '${actor}' has required permissions for ${owner}/${repo}` - ); - core.debug(`Required permissions: ${requiredPermissions.join(", ")}`); - const repoPermission = - await github.rest.repos.getCollaboratorPermissionLevel({ - owner: owner, - repo: repo, - username: actor, - }); - const permission = repoPermission.data.permission; - core.debug(`Repository permission level: ${permission}`); - // Check if user has one of the required permission levels - for (const requiredPerm of requiredPermissions) { - if ( - permission === requiredPerm || - (requiredPerm === "maintainer" && permission === "maintain") - ) { - core.info(`✅ User has ${permission} access to repository`); - return; - } - } - core.warning( - `User permission '${permission}' does not meet requirements: ${requiredPermissions.join(", ")}` - ); - } catch (repoError) { - const errorMessage = 
- repoError instanceof Error ? repoError.message : String(repoError); - core.error(`Repository permission check failed: ${errorMessage}`); - await setCancelled(`Repository permission check failed: ${errorMessage}`); - return; - } - // Cancel the workflow when permission check fails - core.warning( - `❌ Access denied: Only authorized users can trigger this workflow. User '${actor}' is not authorized. Required permissions: ${requiredPermissions.join(", ")}` - ); - await setCancelled( - `Access denied: User '${actor}' is not authorized. Required permissions: ${requiredPermissions.join(", ")}` - ); - } - await main(); - - name: Create Output Issue - id: create_issue - uses: actions/github-script@v7 - env: - GITHUB_AW_AGENT_OUTPUT: ${{ needs.test-mcp-configuration.outputs.output }} - GITHUB_AW_ISSUE_TITLE_PREFIX: "[Test] " - with: - script: | - async function main() { - // Check if we're in staged mode - const isStaged = process.env.GITHUB_AW_SAFE_OUTPUTS_STAGED === "true"; - // Read the validated output content from environment variable - const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; - if (!outputContent) { - core.info("No GITHUB_AW_AGENT_OUTPUT environment variable found"); - return; - } - if (outputContent.trim() === "") { - core.info("Agent output content is empty"); - return; - } - core.info(`Agent output content length: ${outputContent.length}`); - // Parse the validated output JSON - let validatedOutput; - try { - validatedOutput = JSON.parse(outputContent); - } catch (error) { - core.setFailed( - `Error parsing agent output JSON: ${error instanceof Error ? 
error.message : String(error)}` - ); - return; - } - if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - core.info("No valid items found in agent output"); - return; - } - // Find all create-issue items - const createIssueItems = validatedOutput.items.filter( - /** @param {any} item */ item => item.type === "create-issue" - ); - if (createIssueItems.length === 0) { - core.info("No create-issue items found in agent output"); - return; - } - core.info(`Found ${createIssueItems.length} create-issue item(s)`); - // If in staged mode, emit step summary instead of creating issues - if (isStaged) { - let summaryContent = "## 🎭 Staged Mode: Create Issues Preview\n\n"; - summaryContent += - "The following issues would be created if staged mode was disabled:\n\n"; - for (let i = 0; i < createIssueItems.length; i++) { - const item = createIssueItems[i]; - summaryContent += `### Issue ${i + 1}\n`; - summaryContent += `**Title:** ${item.title || "No title provided"}\n\n`; - if (item.body) { - summaryContent += `**Body:**\n${item.body}\n\n`; - } - if (item.labels && item.labels.length > 0) { - summaryContent += `**Labels:** ${item.labels.join(", ")}\n\n`; - } - summaryContent += "---\n\n"; - } - // Write to step summary - await core.summary.addRaw(summaryContent).write(); - core.info("📝 Issue creation preview written to step summary"); - return; - } - // Check if we're in an issue context (triggered by an issue event) - const parentIssueNumber = context.payload?.issue?.number; - // Parse labels from environment variable (comma-separated string) - const labelsEnv = process.env.GITHUB_AW_ISSUE_LABELS; - let envLabels = labelsEnv - ? 
labelsEnv - .split(",") - .map(/** @param {string} label */ label => label.trim()) - .filter(/** @param {string} label */ label => label) - : []; - const createdIssues = []; - // Process each create-issue item - for (let i = 0; i < createIssueItems.length; i++) { - const createIssueItem = createIssueItems[i]; - core.info( - `Processing create-issue item ${i + 1}/${createIssueItems.length}: title=${createIssueItem.title}, bodyLength=${createIssueItem.body.length}` - ); - // Merge environment labels with item-specific labels - let labels = [...envLabels]; - if (createIssueItem.labels && Array.isArray(createIssueItem.labels)) { - labels = [...labels, ...createIssueItem.labels].filter(Boolean); - } - // Extract title and body from the JSON item - let title = createIssueItem.title ? createIssueItem.title.trim() : ""; - let bodyLines = createIssueItem.body.split("\n"); - // If no title was found, use the body content as title (or a default) - if (!title) { - title = createIssueItem.body || "Agent Output"; - } - // Apply title prefix if provided via environment variable - const titlePrefix = process.env.GITHUB_AW_ISSUE_TITLE_PREFIX; - if (titlePrefix && !title.startsWith(titlePrefix)) { - title = titlePrefix + title; - } - if (parentIssueNumber) { - core.info("Detected issue context, parent issue #" + parentIssueNumber); - // Add reference to parent issue in the child issue body - bodyLines.push(`Related to #${parentIssueNumber}`); - } - // Add AI disclaimer with run id, run htmlurl - // Add AI disclaimer with workflow run information - const runId = context.runId; - const runUrl = context.payload.repository - ? 
`${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; - bodyLines.push( - ``, - ``, - `> Generated by Agentic Workflow [Run](${runUrl})`, - "" - ); - // Prepare the body content - const body = bodyLines.join("\n").trim(); - core.info(`Creating issue with title: ${title}`); - core.info(`Labels: ${labels}`); - core.info(`Body length: ${body.length}`); - try { - // Create the issue using GitHub API - const { data: issue } = await github.rest.issues.create({ - owner: context.repo.owner, - repo: context.repo.repo, - title: title, - body: body, - labels: labels, - }); - core.info("Created issue #" + issue.number + ": " + issue.html_url); - createdIssues.push(issue); - // If we have a parent issue, add a comment to it referencing the new child issue - if (parentIssueNumber) { - try { - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: parentIssueNumber, - body: `Created related issue: #${issue.number}`, - }); - core.info("Added comment to parent issue #" + parentIssueNumber); - } catch (error) { - core.info( - `Warning: Could not add comment to parent issue: ${error instanceof Error ? error.message : String(error)}` - ); - } - } - // Set output for the last created issue (for backward compatibility) - if (i === createIssueItems.length - 1) { - core.setOutput("issue_number", issue.number); - core.setOutput("issue_url", issue.html_url); - } - } catch (error) { - const errorMessage = - error instanceof Error ? 
error.message : String(error); - // Special handling for disabled issues repository - if ( - errorMessage.includes("Issues has been disabled in this repository") - ) { - core.info( - `⚠ Cannot create issue "${title}": Issues are disabled for this repository` - ); - core.info( - "Consider enabling issues in repository settings if you want to create issues automatically" - ); - continue; // Skip this issue but continue processing others - } - core.error(`✗ Failed to create issue "${title}": ${errorMessage}`); - throw error; - } - } - // Write summary for all created issues - if (createdIssues.length > 0) { - let summaryContent = "\n\n## GitHub Issues\n"; - for (const issue of createdIssues) { - summaryContent += `- Issue #${issue.number}: [${issue.title}](${issue.html_url})\n`; - } - await core.summary.addRaw(summaryContent).write(); - } - core.info(`Successfully created ${createdIssues.length} issue(s)`); - } - await main(); - - create_issue_comment: - needs: test-mcp-configuration - if: github.event.issue.number || github.event.pull_request.number - runs-on: ubuntu-latest - permissions: - contents: read - issues: write - pull-requests: write - timeout-minutes: 10 - outputs: - comment_id: ${{ steps.create_comment.outputs.comment_id }} - comment_url: ${{ steps.create_comment.outputs.comment_url }} - steps: - - name: Add Issue Comment - id: create_comment - uses: actions/github-script@v7 - env: - GITHUB_AW_AGENT_OUTPUT: ${{ needs.test-mcp-configuration.outputs.output }} - with: - script: | - async function main() { - // Check if we're in staged mode - const isStaged = process.env.GITHUB_AW_SAFE_OUTPUTS_STAGED === "true"; - // Read the validated output content from environment variable - const outputContent = process.env.GITHUB_AW_AGENT_OUTPUT; - if (!outputContent) { - core.info("No GITHUB_AW_AGENT_OUTPUT environment variable found"); - return; - } - if (outputContent.trim() === "") { - core.info("Agent output content is empty"); - return; - } - core.info(`Agent 
output content length: ${outputContent.length}`); - // Parse the validated output JSON - let validatedOutput; - try { - validatedOutput = JSON.parse(outputContent); - } catch (error) { - core.setFailed( - `Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}` - ); - return; - } - if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { - core.info("No valid items found in agent output"); - return; - } - // Find all add-issue-comment items - const commentItems = validatedOutput.items.filter( - /** @param {any} item */ item => item.type === "add-issue-comment" - ); - if (commentItems.length === 0) { - core.info("No add-issue-comment items found in agent output"); - return; - } - core.info(`Found ${commentItems.length} add-issue-comment item(s)`); - // If in staged mode, emit step summary instead of creating comments - if (isStaged) { - let summaryContent = "## 🎭 Staged Mode: Add Comments Preview\n\n"; - summaryContent += - "The following comments would be added if staged mode was disabled:\n\n"; - for (let i = 0; i < commentItems.length; i++) { - const item = commentItems[i]; - summaryContent += `### Comment ${i + 1}\n`; - if (item.issue_number) { - summaryContent += `**Target Issue:** #${item.issue_number}\n\n`; - } else { - summaryContent += `**Target:** Current issue/PR\n\n`; - } - summaryContent += `**Body:**\n${item.body || "No content provided"}\n\n`; - summaryContent += "---\n\n"; - } - // Write to step summary - await core.summary.addRaw(summaryContent).write(); - core.info("📝 Comment creation preview written to step summary"); - return; - } - // Get the target configuration from environment variable - const commentTarget = process.env.GITHUB_AW_COMMENT_TARGET || "triggering"; - core.info(`Comment target configuration: ${commentTarget}`); - // Check if we're in an issue or pull request context - const isIssueContext = - context.eventName === "issues" || context.eventName === "issue_comment"; - const 
isPRContext = - context.eventName === "pull_request" || - context.eventName === "pull_request_review" || - context.eventName === "pull_request_review_comment"; - // Validate context based on target configuration - if (commentTarget === "triggering" && !isIssueContext && !isPRContext) { - core.info( - 'Target is "triggering" but not running in issue or pull request context, skipping comment creation' - ); - return; - } - const createdComments = []; - // Process each comment item - for (let i = 0; i < commentItems.length; i++) { - const commentItem = commentItems[i]; - core.info( - `Processing add-issue-comment item ${i + 1}/${commentItems.length}: bodyLength=${commentItem.body.length}` - ); - // Determine the issue/PR number and comment endpoint for this comment - let issueNumber; - let commentEndpoint; - if (commentTarget === "*") { - // For target "*", we need an explicit issue number from the comment item - if (commentItem.issue_number) { - issueNumber = parseInt(commentItem.issue_number, 10); - if (isNaN(issueNumber) || issueNumber <= 0) { - core.info( - `Invalid issue number specified: ${commentItem.issue_number}` - ); - continue; - } - commentEndpoint = "issues"; - } else { - core.info( - 'Target is "*" but no issue_number specified in comment item' - ); - continue; - } - } else if (commentTarget && commentTarget !== "triggering") { - // Explicit issue number specified in target - issueNumber = parseInt(commentTarget, 10); - if (isNaN(issueNumber) || issueNumber <= 0) { - core.info( - `Invalid issue number in target configuration: ${commentTarget}` - ); - continue; - } - commentEndpoint = "issues"; - } else { - // Default behavior: use triggering issue/PR - if (isIssueContext) { - if (context.payload.issue) { - issueNumber = context.payload.issue.number; - commentEndpoint = "issues"; - } else { - core.info("Issue context detected but no issue found in payload"); - continue; - } - } else if (isPRContext) { - if (context.payload.pull_request) { - issueNumber = 
context.payload.pull_request.number; - commentEndpoint = "issues"; // PR comments use the issues API endpoint - } else { - core.info( - "Pull request context detected but no pull request found in payload" - ); - continue; - } - } - } - if (!issueNumber) { - core.info("Could not determine issue or pull request number"); - continue; - } - // Extract body from the JSON item - let body = commentItem.body.trim(); - // Add AI disclaimer with run id, run htmlurl - const runId = context.runId; - const runUrl = context.payload.repository - ? `${context.payload.repository.html_url}/actions/runs/${runId}` - : `https://github.com/actions/runs/${runId}`; - body += `\n\n> Generated by Agentic Workflow [Run](${runUrl})\n`; - core.info(`Creating comment on ${commentEndpoint} #${issueNumber}`); - core.info(`Comment content length: ${body.length}`); - try { - // Create the comment using GitHub API - const { data: comment } = await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: issueNumber, - body: body, - }); - core.info("Created comment #" + comment.id + ": " + comment.html_url); - createdComments.push(comment); - // Set output for the last created comment (for backward compatibility) - if (i === commentItems.length - 1) { - core.setOutput("comment_id", comment.id); - core.setOutput("comment_url", comment.html_url); - } - } catch (error) { - core.error( - `✗ Failed to create comment: ${error instanceof Error ? error.message : String(error)}` - ); - throw error; - } - } - // Write summary for all created comments - if (createdComments.length > 0) { - let summaryContent = "\n\n## GitHub Comments\n"; - for (const comment of createdComments) { - summaryContent += `- Comment #${comment.id}: [View Comment](${comment.html_url})\n`; - } - await core.summary.addRaw(summaryContent).write(); - } - core.info(`Successfully created ${createdComments.length} comment(s)`); - return createdComments; - } - await main(); -