diff --git a/internal/agents/chat.go b/internal/agents/chat.go index 2e36053..139a00c 100644 --- a/internal/agents/chat.go +++ b/internal/agents/chat.go @@ -42,200 +42,10 @@ type ToolCall struct { // isThought=true for reasoning chunks, false for text chunks type ReasoningCallback func(chunk string, isThought bool) -// getMainAgentTools returns the tools for the main agent +// getMainAgentTools returns the tools for the main agent. +// Delegates to the centralized tool registry. func getMainAgentTools() []common.Tool { - return []common.Tool{ - { - Name: "get_endpoints_details", - Description: "Get detailed information about specified endpoints including description, parameters, security, request body, and responses.", - InputSchema: map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "endpoints": map[string]any{ - "type": "array", - "items": map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "path": map[string]any{ - "type": "string", - "description": "Endpoint path (e.g., /users)", - }, - "method": map[string]any{ - "type": "string", - "description": "HTTP method (GET, POST, PUT, DELETE, PATCH)", - }, - }, - "required": []string{"path", "method"}, - }, - }, - }, - "required": []string{"endpoints"}, - }, - }, - { - Name: "GenerateTestPlan", - Description: "Generate test cases for API endpoints. Describe endpoints with all relevant details from get_endpoints_details.", - InputSchema: map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "what": map[string]any{ - "type": "string", - "description": "Detailed endpoint description including: method, path, what it does, authentication requirements (Security field), request body schema, expected responses, parameters. 
Be thorough!", - }, - "focus": map[string]any{ - "type": "string", - "description": "Testing focus: 'happy path' (basic success), 'authentication' (with/without auth), 'error handling' (validation, 404, etc), 'all aspects' (comprehensive)", - }, - }, - "required": []string{"what", "focus"}, - }, - }, - { - Name: "ExecuteTestGroup", - Description: "Execute a group of tests against the API. Tests are run locally by the CLI and results are returned. Call this AFTER GenerateTestPlan to actually run the tests.", - InputSchema: map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "tests": map[string]any{ - "type": "array", - "items": map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "method": map[string]any{ - "type": "string", - "description": "HTTP method (GET, POST, PUT, DELETE, etc)", - }, - "endpoint": map[string]any{ - "type": "string", - "description": "API endpoint path (e.g., /api/health)", - }, - "headers": map[string]any{ - "type": []any{"object", "null"}, - "additionalProperties": false, - "description": "Optional HTTP headers", - }, - "body": map[string]any{ - "type": []any{"string", "null"}, - "description": "Optional request body (JSON string)", - }, - "requires_auth": map[string]any{ - "type": "boolean", - "description": "Whether authentication is required for this test", - }, - "expected_status": map[string]any{ - "type": "integer", - "description": "Expected HTTP status code. Set correctly: 201 for POST creating resources, 204 for DELETE, 400 for bad input, 401 for unauthorized, 404 for not found.", - }, - "extract": map[string]any{ - "type": []any{"array", "null"}, - "description": "Extract values from response body for use in later tests. Each item: {\"field\": \"id\", \"as\": \"user_id\"}. 
Use dot notation for nested fields: \"data.token\", \"items.0.id\".", - "items": map[string]any{ - "type": "object", - "properties": map[string]any{ - "field": map[string]any{"type": "string", "description": "Dot-path to field in response JSON"}, - "as": map[string]any{"type": "string", "description": "Variable name to store value as"}, - }, - "required": []string{"field", "as"}, - }, - }, - "assertions": map[string]any{ - "type": []any{"array", "null"}, - "description": "Assert specific values in the response body. Each item: {\"field\": \"name\", \"op\": \"eq\", \"value\": \"Alice\"}. Operators: eq, neq, exists, not_exists, contains, gt, gte, lt, lte.", - "items": map[string]any{ - "type": "object", - "properties": map[string]any{ - "field": map[string]any{"type": "string", "description": "Dot-path to field in response JSON"}, - "op": map[string]any{"type": "string", "description": "Operator: eq, neq, exists, not_exists, contains, gt, gte, lt, lte"}, - "value": map[string]any{"description": "Expected value (omit for exists/not_exists)"}, - }, - "required": []string{"field", "op"}, - }, - }, - }, - "required": []string{"method", "endpoint", "headers", "body", "requires_auth", "expected_status"}, - }, - }, - }, - "required": []string{"tests"}, - }, - }, - { - Name: "GenerateReport", - Description: "Generate a PDF report from test results. Call this AFTER tests have been executed to create a professional report. Write the report content in Markdown format — it will be converted to a styled PDF. Include: title, summary, test results table (method, endpoint, status, duration), and analysis.", - InputSchema: map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "report_content": map[string]any{ - "type": "string", - "description": "Full report content in Markdown format. Use headers, tables, lists, and code blocks for a professional layout. 
Include: report title, test summary (total/passed/failed), detailed results table, and analysis/recommendations.", - }, - "file_name": map[string]any{ - "type": "string", - "description": "Optional output file name for the PDF (e.g., 'api-test-report.pdf'). If not provided, a timestamped name will be used.", - }, - }, - "required": []string{"report_content"}, - }, - }, - { - Name: "ExportTests", - Description: "Export API tests to files in the specified formats. Can export either executed tests or generated test plans (even if they haven't been executed yet). Call this ONLY when the user explicitly requests to save/export. Can export to multiple formats at once.", - InputSchema: map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "exports": map[string]any{ - "type": "array", - "items": map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "format": map[string]any{ - "type": "string", - "enum": []string{"postman", "pytest", "sh"}, - "description": "Export format: 'postman' for Postman Collection v2.1 (JSON), 'pytest' for Python tests (.py), 'sh' for bash script with curl commands", - }, - "filepath": map[string]any{ - "type": "string", - "description": "Output file path (e.g., 'tests.json', 'test_api.py', 'api-tests.sh'). Can be relative or absolute.", - }, - }, - "required": []string{"format", "filepath"}, - }, - "description": "List of export configurations. Each entry specifies a format and output filepath.", - }, - }, - "required": []string{"exports"}, - }, - }, - { - Name: "wait", - Description: "Wait for a specified number of seconds before proceeding. 
Use this when you receive a 429 rate limit response or a Retry-After header to pause before retrying.", - InputSchema: map[string]any{ - "type": "object", - "additionalProperties": false, - "properties": map[string]any{ - "seconds": map[string]any{ - "type": "integer", - "description": "Number of seconds to wait (1-60)", - "minimum": 1, - "maximum": 60, - }, - "reason": map[string]any{ - "type": "string", - "description": "Why you are waiting (e.g., 'Rate limit hit, Retry-After: 10')", - }, - }, - "required": []string{"seconds"}, - }, - }, - } + return GetToolDefinitions() } func (a *Agent) Chat(messages []ChatMessage, thinkingEnabled bool, endpointsList ...string) (*ChatResponse, error) { diff --git a/internal/agents/tools.go b/internal/agents/tools.go new file mode 100644 index 0000000..7085499 --- /dev/null +++ b/internal/agents/tools.go @@ -0,0 +1,258 @@ +package agent + +import "github.com/Octrafic/octrafic-cli/internal/llm/common" + +// Tool name constants — the single source of truth for all tool names. +// Use these throughout the codebase instead of raw strings to prevent +// typos and make "find all references" work reliably. +const ( + ToolGetEndpointsDetails = "get_endpoints_details" + ToolGenerateTestPlan = "GenerateTestPlan" + ToolExecuteTestGroup = "ExecuteTestGroup" + ToolExecuteTest = "ExecuteTest" // internal, dispatched inside ExecuteTestGroup + ToolExportTests = "ExportTests" + ToolGenerateReport = "GenerateReport" + ToolWait = "wait" +) + +// ToolMeta holds the LLM-facing definition together with UI display hints. +type ToolMeta struct { + // Definition is sent to the LLM provider as a tool schema. + Definition common.Tool + + // WidgetTitle is the short label shown in the TUI spinner widget + // while the tool is executing (e.g. "Generating PDF report"). + WidgetTitle string +} + +// registry is the ordered list of tools presented to the LLM. +// Order matters — the LLM sees them in this sequence. 
+var registry = []ToolMeta{ + { + WidgetTitle: "Getting endpoint details", + Definition: common.Tool{ + Name: ToolGetEndpointsDetails, + Description: "Get detailed information about specified endpoints including description, parameters, security, request body, and responses.", + InputSchema: map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "endpoints": map[string]any{ + "type": "array", + "items": map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "path": map[string]any{ + "type": "string", + "description": "Endpoint path (e.g., /users)", + }, + "method": map[string]any{ + "type": "string", + "description": "HTTP method (GET, POST, PUT, DELETE, PATCH)", + }, + }, + "required": []string{"path", "method"}, + }, + }, + }, + "required": []string{"endpoints"}, + }, + }, + }, + { + WidgetTitle: "Generating test plan", + Definition: common.Tool{ + Name: ToolGenerateTestPlan, + Description: "Generate test cases for API endpoints. Describe endpoints with all relevant details from get_endpoints_details.", + InputSchema: map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "what": map[string]any{ + "type": "string", + "description": "Detailed endpoint description including: method, path, what it does, authentication requirements (Security field), request body schema, expected responses, parameters. Be thorough!", + }, + "focus": map[string]any{ + "type": "string", + "description": "Testing focus: 'happy path' (basic success), 'authentication' (with/without auth), 'error handling' (validation, 404, etc), 'all aspects' (comprehensive)", + }, + }, + "required": []string{"what", "focus"}, + }, + }, + }, + { + WidgetTitle: "Executing tests", + Definition: common.Tool{ + Name: ToolExecuteTestGroup, + Description: "Execute a group of tests against the API. Tests are run locally by the CLI and results are returned. 
Call this AFTER GenerateTestPlan to actually run the tests.", + InputSchema: map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "tests": map[string]any{ + "type": "array", + "items": map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "method": map[string]any{ + "type": "string", + "description": "HTTP method (GET, POST, PUT, DELETE, etc)", + }, + "endpoint": map[string]any{ + "type": "string", + "description": "API endpoint path (e.g., /api/health)", + }, + "headers": map[string]any{ + "type": []any{"object", "null"}, + "additionalProperties": false, + "description": "Optional HTTP headers", + }, + "body": map[string]any{ + "type": []any{"string", "null"}, + "description": "Optional request body (JSON string)", + }, + "requires_auth": map[string]any{ + "type": "boolean", + "description": "Whether authentication is required for this test", + }, + "expected_status": map[string]any{ + "type": "integer", + "description": "Expected HTTP status code. Set correctly: 201 for POST creating resources, 204 for DELETE, 400 for bad input, 401 for unauthorized, 404 for not found.", + }, + "extract": map[string]any{ + "type": []any{"array", "null"}, + "description": "Extract values from response body for use in later tests. Each item: {\"field\": \"id\", \"as\": \"user_id\"}. Use dot notation for nested fields: \"data.token\", \"items.0.id\".", + "items": map[string]any{ + "type": "object", + "properties": map[string]any{ + "field": map[string]any{"type": "string", "description": "Dot-path to field in response JSON"}, + "as": map[string]any{"type": "string", "description": "Variable name to store value as"}, + }, + "required": []string{"field", "as"}, + }, + }, + "assertions": map[string]any{ + "type": []any{"array", "null"}, + "description": "Assert specific values in the response body. Each item: {\"field\": \"name\", \"op\": \"eq\", \"value\": \"Alice\"}. 
Operators: eq, neq, exists, not_exists, contains, gt, gte, lt, lte.", + "items": map[string]any{ + "type": "object", + "properties": map[string]any{ + "field": map[string]any{"type": "string", "description": "Dot-path to field in response JSON"}, + "op": map[string]any{"type": "string", "description": "Operator: eq, neq, exists, not_exists, contains, gt, gte, lt, lte"}, + "value": map[string]any{"description": "Expected value (omit for exists/not_exists)"}, + }, + "required": []string{"field", "op"}, + }, + }, + }, + "required": []string{"method", "endpoint", "headers", "body", "requires_auth", "expected_status"}, + }, + }, + }, + "required": []string{"tests"}, + }, + }, + }, + { + WidgetTitle: "Generating PDF report", + Definition: common.Tool{ + Name: ToolGenerateReport, + Description: "Generate a PDF report from test results. Call this AFTER tests have been executed to create a professional report. Write the report content in Markdown format — it will be converted to a styled PDF. Include: title, summary, test results table (method, endpoint, status, duration), and analysis.", + InputSchema: map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "report_content": map[string]any{ + "type": "string", + "description": "Full report content in Markdown format. Use headers, tables, lists, and code blocks for a professional layout. Include: report title, test summary (total/passed/failed), detailed results table, and analysis/recommendations.", + }, + "file_name": map[string]any{ + "type": "string", + "description": "Optional output file name for the PDF (e.g., 'api-test-report.pdf'). If not provided, a timestamped name will be used.", + }, + }, + "required": []string{"report_content"}, + }, + }, + }, + { + WidgetTitle: "Exporting tests", + Definition: common.Tool{ + Name: ToolExportTests, + Description: "Export API tests to files in the specified formats. 
Can export either executed tests or generated test plans (even if they haven't been executed yet). Call this ONLY when the user explicitly requests to save/export. Can export to multiple formats at once.", + InputSchema: map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "exports": map[string]any{ + "type": "array", + "items": map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "format": map[string]any{ + "type": "string", + "enum": []string{"postman", "pytest", "sh"}, + "description": "Export format: 'postman' for Postman Collection v2.1 (JSON), 'pytest' for Python tests (.py), 'sh' for bash script with curl commands", + }, + "filepath": map[string]any{ + "type": "string", + "description": "Output file path (e.g., 'tests.json', 'test_api.py', 'api-tests.sh'). Can be relative or absolute.", + }, + }, + "required": []string{"format", "filepath"}, + }, + "description": "List of export configurations. Each entry specifies a format and output filepath.", + }, + }, + "required": []string{"exports"}, + }, + }, + }, + { + WidgetTitle: "Waiting", + Definition: common.Tool{ + Name: ToolWait, + Description: "Wait for a specified number of seconds before proceeding. Use this when you receive a 429 rate limit response or a Retry-After header to pause before retrying.", + InputSchema: map[string]any{ + "type": "object", + "additionalProperties": false, + "properties": map[string]any{ + "seconds": map[string]any{ + "type": "integer", + "description": "Number of seconds to wait (1-60)", + "minimum": 1, + "maximum": 60, + }, + "reason": map[string]any{ + "type": "string", + "description": "Why you are waiting (e.g., 'Rate limit hit, Retry-After: 10')", + }, + }, + "required": []string{"seconds"}, + }, + }, + }, +} + +// GetToolDefinitions returns the LLM-facing tool definitions. 
+func GetToolDefinitions() []common.Tool { + defs := make([]common.Tool, len(registry)) + for i, tm := range registry { + defs[i] = tm.Definition + } + return defs +} + +// GetToolMeta returns the ToolMeta for a given tool name, or nil if not found. +func GetToolMeta(name string) *ToolMeta { + for i := range registry { + if registry[i].Definition.Name == name { + return &registry[i] + } + } + return nil +} diff --git a/internal/cli/handlers.go b/internal/cli/handlers.go index 6126697..a636a32 100644 --- a/internal/cli/handlers.go +++ b/internal/cli/handlers.go @@ -203,7 +203,7 @@ func (m *TestUIModel) executeTool(toolCall agent.ToolCall) tea.Cmd { return func() tea.Msg { time.Sleep(300 * time.Millisecond) - if toolCall.Name == "get_endpoints_details" { + if toolCall.Name == agent.ToolGetEndpointsDetails { endpointsArg, ok := toolCall.Arguments["endpoints"] if !ok { return toolResultMsg{ @@ -273,7 +273,7 @@ func (m *TestUIModel) executeTool(toolCall agent.ToolCall) tea.Cmd { } } - if toolCall.Name == "ExecuteTest" { + if toolCall.Name == agent.ToolExecuteTest { method, _ := toolCall.Arguments["method"].(string) endpoint, _ := toolCall.Arguments["endpoint"].(string) @@ -352,7 +352,7 @@ func (m *TestUIModel) executeTool(toolCall agent.ToolCall) tea.Cmd { } } - if toolCall.Name == "GenerateReport" { + if toolCall.Name == agent.ToolGenerateReport { reportContent, _ := toolCall.Arguments["report_content"].(string) if reportContent == "" { return toolResultMsg{ @@ -385,7 +385,7 @@ func (m *TestUIModel) executeTool(toolCall agent.ToolCall) tea.Cmd { } } - if toolCall.Name == "ExecuteTestGroup" { + if toolCall.Name == agent.ToolExecuteTestGroup { testsArg, ok := toolCall.Arguments["tests"] if !ok { return toolResultMsg{ @@ -437,11 +437,11 @@ func (m *TestUIModel) executeTool(toolCall agent.ToolCall) tea.Cmd { } } - if toolCall.Name == "ExportTests" { + if toolCall.Name == agent.ToolExportTests { return m.handleExportTests(toolCall) } - if toolCall.Name == "wait" { + if 
toolCall.Name == agent.ToolWait { seconds := 5 if s, ok := toolCall.Arguments["seconds"].(float64); ok { seconds = int(s) @@ -471,7 +471,7 @@ func (m *TestUIModel) executeTool(toolCall agent.ToolCall) tea.Cmd { } func (m *TestUIModel) handleToolResult(toolName string, toolID string, result any) tea.Cmd { - if toolName == "ExecuteTest" { + if toolName == agent.ToolExecuteTest { if resultMap, ok := result.(map[string]any); ok { method, _ := resultMap["method"].(string) endpoint, _ := resultMap["endpoint"].(string) @@ -526,7 +526,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an Role: "user", FunctionResponse: &agent.FunctionResponseData{ ID: toolID, - Name: "ExecuteTest", + Name: agent.ToolExecuteTest, Response: resultMap, }, } @@ -542,7 +542,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an } } - if toolName == "ExecuteTestGroup" { + if toolName == agent.ToolExecuteTestGroup { // Display results from test group if resultMap, ok := result.(map[string]any); ok { count, _ := resultMap["count"].(int) @@ -620,7 +620,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an Role: "user", FunctionResponse: &agent.FunctionResponseData{ ID: toolID, - Name: "ExecuteTestGroup", + Name: agent.ToolExecuteTestGroup, Response: resultMap, }, } @@ -636,7 +636,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an } } - if toolName == "get_endpoints_details" { + if toolName == agent.ToolGetEndpointsDetails { // Add tool result to conversation history as function response if toolID != "" { var resultMap map[string]any @@ -649,7 +649,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an Role: "user", FunctionResponse: &agent.FunctionResponseData{ ID: toolID, - Name: "get_endpoints_details", + Name: agent.ToolGetEndpointsDetails, Response: resultMap, }, } @@ -664,7 +664,7 @@ func (m *TestUIModel) handleToolResult(toolName 
string, toolID string, result an return nil // No tool_use, so don't send response back } - if toolName == "GenerateReport" { + if toolName == agent.ToolGenerateReport { if resultMap, ok := result.(map[string]any); ok { filePath, _ := resultMap["file_path"].(string) @@ -677,7 +677,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an Role: "user", FunctionResponse: &agent.FunctionResponseData{ ID: toolID, - Name: "GenerateReport", + Name: agent.ToolGenerateReport, Response: resultMap, }, } @@ -692,7 +692,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an } } - if toolName == "ExportTests" { + if toolName == agent.ToolExportTests { if resultMap, ok := result.(map[string]any); ok { testCount, _ := resultMap["test_count"].(int) exports, _ := resultMap["exports"].([]map[string]any) @@ -725,7 +725,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an Role: "user", FunctionResponse: &agent.FunctionResponseData{ ID: toolID, - Name: "ExportTests", + Name: agent.ToolExportTests, Response: resultMap, }, } @@ -737,7 +737,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an } } - if toolName == "GenerateTestPlan" { + if toolName == agent.ToolGenerateTestPlan { // Add tool result to conversation history as function response if toolID != "" { var resultMap map[string]any @@ -750,7 +750,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an Role: "user", FunctionResponse: &agent.FunctionResponseData{ ID: toolID, - Name: "GenerateTestPlan", + Name: agent.ToolGenerateTestPlan, Response: resultMap, }, } @@ -765,7 +765,7 @@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an return nil // No tool_use, so don't send response back } - if toolName == "wait" { + if toolName == agent.ToolWait { if toolID != "" { var resultMap map[string]any if r, ok := result.(map[string]any); ok { @@ -775,7 +775,7 
@@ func (m *TestUIModel) handleToolResult(toolName string, toolID string, result an Role: "user", FunctionResponse: &agent.FunctionResponseData{ ID: toolID, - Name: "wait", + Name: agent.ToolWait, Response: resultMap, }, } diff --git a/internal/cli/tui.go b/internal/cli/tui.go index a4ef4fd..fe2ecd0 100644 --- a/internal/cli/tui.go +++ b/internal/cli/tui.go @@ -2,15 +2,16 @@ package cli import ( "fmt" - "github.com/Octrafic/octrafic-cli/internal/agents" + "strings" + "time" + + agent "github.com/Octrafic/octrafic-cli/internal/agents" "github.com/Octrafic/octrafic-cli/internal/config" "github.com/Octrafic/octrafic-cli/internal/core/analyzer" "github.com/Octrafic/octrafic-cli/internal/core/auth" "github.com/Octrafic/octrafic-cli/internal/core/tester" "github.com/Octrafic/octrafic-cli/internal/infra/storage" "github.com/Octrafic/octrafic-cli/internal/updater" - "strings" - "time" "github.com/Octrafic/octrafic-cli/internal/ui/textarea" "github.com/charmbracelet/bubbles/spinner" @@ -446,7 +447,7 @@ func (m *TestUIModel) View() string { s.WriteString(lipgloss.NewStyle().Foreground(Theme.Warning).Bold(true).Render("Execute tool: "+toolName) + "\n") // Show test details if executing a test - if strings.HasPrefix(toolName, "ExecuteTest") { + if strings.HasPrefix(toolName, agent.ToolExecuteTest) { // Find next pending test to show details for _, test := range m.tests { if test.Status == "pending" { diff --git a/internal/cli/update.go b/internal/cli/update.go index 8d3c652..b333dff 100644 --- a/internal/cli/update.go +++ b/internal/cli/update.go @@ -1277,7 +1277,7 @@ func handleTestPlanState(m *TestUIModel, msg tea.KeyMsg) (tea.Model, tea.Cmd) { } toolID := "" - toolName := "ExecuteTestGroup" + toolName := agent.ToolExecuteTestGroup if m.pendingTestGroupToolCall != nil { toolID = m.pendingTestGroupToolCall.ID toolName = m.pendingTestGroupToolCall.Name @@ -1337,7 +1337,7 @@ func handleConfirmationState(m *TestUIModel, msg tea.KeyMsg) (tea.Model, tea.Cmd m.lastMessageRole = 
"assistant" return m, nil default: - isExecuteTest := m.pendingToolCall != nil && strings.HasPrefix(m.pendingToolCall.Name, "ExecuteTest") + isExecuteTest := m.pendingToolCall != nil && strings.HasPrefix(m.pendingToolCall.Name, agent.ToolExecuteTest) m.pendingToolCall = nil if isExecuteTest { @@ -1359,7 +1359,7 @@ func handleConfirmationState(m *TestUIModel, msg tea.KeyMsg) (tea.Model, tea.Cmd } } if hasPendingTests { - toolCall := agent.ToolCall{Name: "ExecuteTest"} + toolCall := agent.ToolCall{Name: agent.ToolExecuteTest} m.pendingToolCall = &toolCall m.confirmationChoice = 0 m.agentState = StateAskingConfirmation @@ -1456,9 +1456,9 @@ func handleProcessToolCalls(m *TestUIModel, _ processToolCallsMsg) (tea.Model, t m.streamedToolCalls = m.streamedToolCalls[1:] switch toolCall.Name { - case "get_endpoints_details": + case agent.ToolGetEndpointsDetails: m.currentTestToolID = toolCall.ID - m.currentTestToolName = "get_endpoints_details" + m.currentTestToolName = agent.ToolGetEndpointsDetails m.agentState = StateThinking if endpointsArg, ok := toolCall.Arguments["endpoints"].([]any); ok { @@ -1476,7 +1476,7 @@ func handleProcessToolCalls(m *TestUIModel, _ processToolCallsMsg) (tea.Model, t return m, m.executeTool(toolCall) - case "GenerateTestPlan": + case agent.ToolGenerateTestPlan: what, ok := toolCall.Arguments["what"].(string) if !ok || what == "" { m.addMessage(m.subtleStyle.Render("⚠️ GenerateTestPlan missing 'what' parameter")) @@ -1489,7 +1489,7 @@ func handleProcessToolCalls(m *TestUIModel, _ processToolCallsMsg) (tea.Model, t } m.currentTestToolID = toolCall.ID - m.currentTestToolName = "GenerateTestPlan" + m.currentTestToolName = agent.ToolGenerateTestPlan m.agentState = StateUsingTool m.animationFrame = 0 @@ -1517,16 +1517,16 @@ func handleProcessToolCalls(m *TestUIModel, _ processToolCallsMsg) (tea.Model, t }, ) - case "ExecuteTestGroup": + case agent.ToolExecuteTestGroup: m.currentTestToolID = toolCall.ID - m.currentTestToolName = "ExecuteTestGroup" + 
m.currentTestToolName = agent.ToolExecuteTestGroup m.agentState = StateProcessing return m, m.executeTool(toolCall) - case "ExportTests": + case agent.ToolExportTests: m.currentTestToolID = toolCall.ID - m.currentTestToolName = "ExportTests" + m.currentTestToolName = agent.ToolExportTests exportsArg, _ := toolCall.Arguments["exports"].([]any) formatCount := len(exportsArg) @@ -1538,9 +1538,9 @@ func handleProcessToolCalls(m *TestUIModel, _ processToolCallsMsg) (tea.Model, t m.spinner.Style = lipgloss.NewStyle().Foreground(Theme.Primary) return m, tea.Batch(animationTick(), m.executeTool(toolCall)) - case "GenerateReport": + case agent.ToolGenerateReport: m.currentTestToolID = toolCall.ID - m.currentTestToolName = "GenerateReport" + m.currentTestToolName = agent.ToolGenerateReport showToolWidget(m, "Generating PDF report", "") m.agentState = StateUsingTool @@ -1548,9 +1548,9 @@ func handleProcessToolCalls(m *TestUIModel, _ processToolCallsMsg) (tea.Model, t m.spinner.Style = lipgloss.NewStyle().Foreground(Theme.Primary) return m, tea.Batch(animationTick(), m.executeTool(toolCall)) - case "wait": + case agent.ToolWait: m.currentTestToolID = toolCall.ID - m.currentTestToolName = "wait" + m.currentTestToolName = agent.ToolWait seconds := 5 if s, ok := toolCall.Arguments["seconds"].(float64); ok { @@ -1689,7 +1689,7 @@ func handleShowTestSelection(m *TestUIModel, msg showTestSelectionMsg) (tea.Mode } toolID := "" - toolName := "ExecuteTestGroup" + toolName := agent.ToolExecuteTestGroup if m.pendingTestGroupToolCall != nil { toolID = m.pendingTestGroupToolCall.ID toolName = m.pendingTestGroupToolCall.Name diff --git a/internal/cli/update_tests.go b/internal/cli/update_tests.go index 7520952..17e415b 100644 --- a/internal/cli/update_tests.go +++ b/internal/cli/update_tests.go @@ -64,7 +64,7 @@ func handleGenerateTestPlanResult(m *TestUIModel, msg generateTestPlanResultMsg) toolID := m.currentTestToolID funcResp := &agent.FunctionResponseData{ ID: toolID, - Name: 
"GenerateTestPlan", + Name: agent.ToolGenerateTestPlan, Response: map[string]any{ "status": "tests_generated", "test_count": len(testCases), diff --git a/internal/cli/utils.go b/internal/cli/utils.go index a807aa1..4567fd7 100644 --- a/internal/cli/utils.go +++ b/internal/cli/utils.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/Octrafic/octrafic-cli/internal/agents" + agent "github.com/Octrafic/octrafic-cli/internal/agents" "github.com/Octrafic/octrafic-cli/internal/infra/logger" "github.com/Octrafic/octrafic-cli/internal/infra/storage" tea "github.com/charmbracelet/bubbletea" @@ -91,9 +91,9 @@ func (m *TestUIModel) shouldAskForConfirmation(toolName string) bool { // Tools that are safe and don't need confirmation // ExecuteTestGroup is safe - user already approved the plan via checkboxes safeTools := map[string]bool{ - "GenerateTestPlan": true, // Planning is safe, doesn't execute anything - "ExecuteTestGroup": true, // Plan was already approved via checkboxes - "GenerateReport": true, // Generating a report is safe + agent.ToolGenerateTestPlan: true, // Planning is safe, doesn't execute anything + agent.ToolExecuteTestGroup: true, // Plan was already approved via checkboxes + agent.ToolGenerateReport: true, // Generating a report is safe } return !safeTools[toolName] @@ -328,15 +328,15 @@ func (m *TestUIModel) loadConversationHistory() error { if toolName != "" { displayName := "" switch toolName { - case "get_endpoints_details": + case agent.ToolGetEndpointsDetails: displayName = "Getting endpoint details" - case "GenerateTestPlan": + case agent.ToolGenerateTestPlan: displayName = "Generated test cases" - case "ExecuteTestGroup": + case agent.ToolExecuteTestGroup: displayName = "Executing tests" - case "GenerateReport": + case agent.ToolGenerateReport: displayName = "Generating PDF report" - case "ExecuteTest": + case agent.ToolExecuteTest: displayName = "Executing test" default: displayName = fmt.Sprintf("Tool: %s", toolName)