From ba56723f7b1313f0576b27e88040a52717f73a3c Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Tue, 25 Nov 2025 14:46:15 +0900 Subject: [PATCH 01/14] fix: update prompt for converter --- internal/converter/converter.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/converter/converter.go b/internal/converter/converter.go index 9f55dde..67e1a06 100644 --- a/internal/converter/converter.go +++ b/internal/converter/converter.go @@ -37,10 +37,10 @@ func NewConverter(llmClient *llm.Client, outputDir string) *Converter { // ConvertResult represents the result of conversion type ConvertResult struct { - GeneratedFiles []string // List of generated file paths (including code-policy.json) + GeneratedFiles []string // List of generated file paths (including code-policy.json) CodePolicy *schema.CodePolicy // Generated code policy - Errors map[string]error // Errors per linter - Warnings []string // Conversion warnings + Errors map[string]error // Errors per linter + Warnings []string // Conversion warnings } // Convert is the main entry point for converting user policy to linter configs From 1988aa0314e684496fc2d78a44b9c26327d455cf Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Tue, 25 Nov 2025 14:55:27 +0900 Subject: [PATCH 02/14] feat: enhance linter converters with validation and option registries --- internal/adapter/eslint/converter.go | 60 ++- internal/converter/linters/prettier_tsc.go | 312 +++++++++++++ internal/converter/linters/registry.go | 488 +++++++++++++++++++++ 3 files changed, 842 insertions(+), 18 deletions(-) create mode 100644 internal/converter/linters/prettier_tsc.go create mode 100644 internal/converter/linters/registry.go diff --git a/internal/adapter/eslint/converter.go b/internal/adapter/eslint/converter.go index 07a4fa1..999f27d 100644 --- a/internal/adapter/eslint/converter.go +++ b/internal/adapter/eslint/converter.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "os" + "sort" "strings" "sync" @@ -142,7 +143,15 @@ func (c *Converter) ConvertRules(ctx context.Context, rules []schema.UserRule, l // convertSingleRule converts a single user rule to ESLint rule using LLM func (c *Converter) convertSingleRule(ctx context.Context, rule schema.UserRule, llmClient *llm.Client) (string, interface{}, error) { - systemPrompt := `You are an ESLint configuration expert. Convert natural language coding rules to ESLint rule configurations. + // Build list of valid ESLint rules for the prompt + validRules := GetESLintRuleNames() + sort.Strings(validRules) + validRulesStr := strings.Join(validRules, ", ") + + systemPrompt := fmt.Sprintf(`You are an ESLint configuration expert. Convert natural language coding rules to ESLint rule configurations. 
+ +IMPORTANT: You MUST ONLY use rules from this exact list of valid ESLint rules: +%s Return ONLY a JSON object (no markdown fences) with this structure: { @@ -151,22 +160,11 @@ Return ONLY a JSON object (no markdown fences) with this structure: "options": {...} } -Common ESLint rules: -- Naming: camelcase, id-match, id-length, new-cap -- Console: no-console, no-debugger, no-alert -- Code Quality: no-unused-vars, no-undef, eqeqeq, prefer-const, no-var -- Complexity: complexity, max-depth, max-nested-callbacks, max-lines-per-function -- Length: max-len, max-lines, max-params, max-statements -- Style: indent, quotes, semi, comma-dangle, brace-style -- Imports: no-restricted-imports -- Security: no-eval, no-implied-eval, no-new-func - -If the rule cannot be expressed in ESLint, return: -{ - "rule_name": "", - "severity": "off", - "options": null -} +CRITICAL RULES: +1. ONLY use rule names from the list above - do NOT invent or guess rule names +2. If no rule from the list can enforce this requirement, return rule_name as empty string "" +3. Do NOT suggest plugin rules (e.g., @typescript-eslint/*, eslint-plugin-*) +4. When in doubt, return empty rule_name - it's better to skip than use wrong rule Examples: @@ -192,7 +190,25 @@ Output: "rule_name": "camelcase", "severity": "error", "options": {"properties": "always"} -}` +} + +Input: "File names must be kebab-case" +Output: +{ + "rule_name": "", + "severity": "off", + "options": null +} +(Reason: No native ESLint rule for file naming) + +Input: "No hardcoded API keys" +Output: +{ + "rule_name": "", + "severity": "off", + "options": null +} +(Reason: Requires plugin or semantic analysis)`, validRulesStr) userPrompt := fmt.Sprintf("Convert this rule to ESLint configuration:\n\n%s", rule.Say) if rule.Severity != "" { @@ -231,6 +247,14 @@ Output: return "", nil, nil } + // VALIDATION: Check if the rule actually exists in our registry + validation := ValidateESLintRule(result.RuleName, result.Options) + if !validation.Valid { + // Rule doesn't exist - skip it (will be handled by llm-validator) + fmt.Printf("⚠️ Invalid ESLint rule '%s': %s\n", result.RuleName, validation.Message) + return "", nil, nil + } + // Map user severity to ESLint severity if needed severity := mapSeverity(rule.Severity) if severity == "" { diff --git a/internal/converter/linters/prettier_tsc.go b/internal/converter/linters/prettier_tsc.go new file mode 100644 index 0000000..5426e88 --- /dev/null +++ b/internal/converter/linters/prettier_tsc.go @@ -0,0 +1,312 @@ +package linters + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/DevSymphony/sym-cli/internal/llm" + "github.com/DevSymphony/sym-cli/pkg/schema" +) + +// PrettierLinterConverter converts rules to Prettier configuration +type PrettierLinterConverter struct{} + +// NewPrettierLinterConverter creates a new Prettier converter +func NewPrettierLinterConverter() *PrettierLinterConverter { + return &PrettierLinterConverter{} +} + +// Name returns the linter name +func (c *PrettierLinterConverter) Name() string { + return "prettier" +} + +// SupportedLanguages returns supported languages +func (c *PrettierLinterConverter) SupportedLanguages() []string { + return []string{"javascript", "js", "typescript", "ts", "jsx", "tsx"} +} + +// ConvertRules converts formatting rules to Prettier config using LLM +func (c *PrettierLinterConverter) ConvertRules(ctx context.Context, rules []schema.UserRule, llmClient *llm.Client) (*LinterConfig, error) { + if llmClient == nil { + return nil, fmt.Errorf("LLM 
client is required") + } + + // Start with default Prettier configuration + prettierConfig := map[string]interface{}{ + "semi": true, + "singleQuote": false, + "tabWidth": 2, + "useTabs": false, + "trailingComma": "es5", + "printWidth": 80, + "arrowParens": "always", + } + + // Use LLM to infer settings from rules + for _, rule := range rules { + config, err := c.convertSingleRule(ctx, rule, llmClient) + if err != nil { + continue // Skip rules that cannot be converted + } + + // Merge LLM-generated config + for key, value := range config { + prettierConfig[key] = value + } + } + + content, err := json.MarshalIndent(prettierConfig, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal config: %w", err) + } + + return &LinterConfig{ + Filename: ".prettierrc", + Content: content, + Format: "json", + }, nil +} + +// convertSingleRule converts a single user rule to Prettier config using LLM +func (c *PrettierLinterConverter) convertSingleRule(ctx context.Context, rule schema.UserRule, llmClient *llm.Client) (map[string]interface{}, error) { + // Build list of valid Prettier options for the prompt + validOptions := GetPrettierOptionNames() + validOptionsStr := strings.Join(validOptions, ", ") + + systemPrompt := fmt.Sprintf(`You are a Prettier configuration expert. Convert natural language formatting rules to Prettier configuration options. + +IMPORTANT: You MUST ONLY use options from this exact list of valid Prettier options: +%s + +Return ONLY a JSON object (no markdown fences) with Prettier options. +If the rule cannot be expressed with Prettier options, return empty object: {} + +CRITICAL RULES: +1. ONLY use option names from the list above +2. Do NOT invent new options +3. If no option can enforce this rule, return {} + +Examples: + +Input: "Use single quotes for strings" +Output: +{ + "singleQuote": true +} + +Input: "No semicolons" +Output: +{ + "semi": false +} + +Input: "Use 4 spaces for indentation" +Output: +{ + "tabWidth": 4, + "useTabs": false +} + +Input: "Maximum line length is 120 characters" +Output: +{ + "printWidth": 120 +} + +Input: "Sort imports alphabetically" +Output: +{} +(Reason: No native Prettier option for this)`, validOptionsStr) + + userPrompt := fmt.Sprintf("Convert this rule to Prettier configuration:\n\n%s", rule.Say) + + // Call LLM + response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + if err != nil { + return nil, fmt.Errorf("LLM call failed: %w", err) + } + + // Parse response + response = strings.TrimSpace(response) + response = strings.TrimPrefix(response, "```json") + response = strings.TrimPrefix(response, "```") + response = strings.TrimSuffix(response, "```") + response = strings.TrimSpace(response) + + var config map[string]interface{} + if err := json.Unmarshal([]byte(response), &config); err != nil { + return nil, fmt.Errorf("failed to parse LLM response: %w", err) + } + + // VALIDATION: Filter out invalid options + validConfig := make(map[string]interface{}) + for key, value := range config { + validation := ValidatePrettierOption(key, value) + if validation.Valid { + validConfig[key] = value + } else { + fmt.Printf("⚠️ Invalid Prettier option '%s': %s\n", key, validation.Message) + } + } + + return validConfig, nil +} + +// TSCLinterConverter converts rules to TypeScript compiler configuration +type TSCLinterConverter struct{} + +// NewTSCLinterConverter creates a new TSC converter +func NewTSCLinterConverter() *TSCLinterConverter { + return &TSCLinterConverter{} +} + +// Name returns the linter name +func (c 
*TSCLinterConverter) Name() string { + return "tsc" +} + +// SupportedLanguages returns supported languages +func (c *TSCLinterConverter) SupportedLanguages() []string { + return []string{"typescript", "ts", "tsx"} +} + +// ConvertRules converts type-checking rules to tsconfig.json using LLM +func (c *TSCLinterConverter) ConvertRules(ctx context.Context, rules []schema.UserRule, llmClient *llm.Client) (*LinterConfig, error) { + if llmClient == nil { + return nil, fmt.Errorf("LLM client is required") + } + + // Start with strict TypeScript configuration + tsConfig := map[string]interface{}{ + "compilerOptions": map[string]interface{}{ + "target": "ES2020", + "module": "commonjs", + "lib": []string{"ES2020"}, + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "moduleResolution": "node", + "noImplicitAny": true, + "strictNullChecks": true, + "strictFunctionTypes": true, + "noUnusedLocals": false, + "noUnusedParameters": false, + }, + } + + compilerOpts := tsConfig["compilerOptions"].(map[string]interface{}) + + // Use LLM to infer settings from rules + for _, rule := range rules { + config, err := c.convertSingleRule(ctx, rule, llmClient) + if err != nil { + continue // Skip rules that cannot be converted + } + + // Merge LLM-generated compiler options + for key, value := range config { + compilerOpts[key] = value + } + } + + content, err := json.MarshalIndent(tsConfig, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal config: %w", err) + } + + return &LinterConfig{ + Filename: "tsconfig.json", + Content: content, + Format: "json", + }, nil +} + +// convertSingleRule converts a single user rule to TypeScript compiler option using LLM +func (c *TSCLinterConverter) convertSingleRule(ctx context.Context, rule schema.UserRule, llmClient *llm.Client) (map[string]interface{}, error) { + // Build list of valid TSC options for the prompt + validOptions := GetTSCOptionNames() + validOptionsStr := strings.Join(validOptions, ", ") + + systemPrompt := fmt.Sprintf(`You are a TypeScript compiler configuration expert. Convert natural language type-checking rules to tsconfig.json compiler options. + +IMPORTANT: You MUST ONLY use options from this exact list of valid TypeScript compiler options: +%s + +Return ONLY a JSON object (no markdown fences) with TypeScript compiler options. +If the rule cannot be expressed with TypeScript compiler options, return empty object: {} + +CRITICAL RULES: +1. ONLY use option names from the list above +2. Do NOT invent new options +3. 
If no option can enforce this rule, return {} + +Examples: + +Input: "No implicit any types allowed" +Output: +{ + "noImplicitAny": true +} + +Input: "Check for null and undefined strictly" +Output: +{ + "strictNullChecks": true +} + +Input: "Report unused variables" +Output: +{ + "noUnusedLocals": true, + "noUnusedParameters": true +} + +Input: "Enable all strict type checks" +Output: +{ + "strict": true +} + +Input: "Functions must have return type annotations" +Output: +{} +(Reason: No native TSC option for this - requires plugin)`, validOptionsStr) + + userPrompt := fmt.Sprintf("Convert this rule to TypeScript compiler configuration:\n\n%s", rule.Say) + + // Call LLM + response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + if err != nil { + return nil, fmt.Errorf("LLM call failed: %w", err) + } + + // Parse response + response = strings.TrimSpace(response) + response = strings.TrimPrefix(response, "```json") + response = strings.TrimPrefix(response, "```") + response = strings.TrimSuffix(response, "```") + response = strings.TrimSpace(response) + + var config map[string]interface{} + if err := json.Unmarshal([]byte(response), &config); err != nil { + return nil, fmt.Errorf("failed to parse LLM response: %w", err) + } + + // VALIDATION: Filter out invalid options + validConfig := make(map[string]interface{}) + for key, value := range config { + validation := ValidateTSCOption(key, value) + if validation.Valid { + validConfig[key] = value + } else { + fmt.Printf("⚠️ Invalid TSC option '%s': %s\n", key, validation.Message) + } + } + + return validConfig, nil +} diff --git a/internal/converter/linters/registry.go b/internal/converter/linters/registry.go new file mode 100644 index 0000000..b0fff0a --- /dev/null +++ b/internal/converter/linters/registry.go @@ -0,0 +1,488 @@ +package linters + +// ESLintRuleRegistry contains all valid native ESLint rules with their options schema +// This is used to validate LLM-generated rules and prevent invalid configurations +var ESLintRuleRegistry = map[string]RuleDefinition{ + // Console/Debug + "no-console": { + Description: "Disallow the use of console", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "allow": {Type: "array", Items: "string"}, + }, + }, + }, + "no-debugger": { + Description: "Disallow the use of debugger", + }, + "no-alert": { + Description: "Disallow the use of alert, confirm, and prompt", + }, + + // Variables + "no-unused-vars": { + Description: "Disallow unused variables", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "vars": {Type: "string", Enum: []string{"all", "local"}}, + "varsIgnorePattern": {Type: "string"}, + "args": {Type: "string", Enum: []string{"all", "after-used", "none"}}, + "argsIgnorePattern": {Type: "string"}, + "caughtErrors": {Type: "string", Enum: []string{"all", "none"}}, + "ignoreRestSiblings": {Type: "boolean"}, + }, + }, + }, + "no-undef": { + Description: "Disallow the use of undeclared variables", + }, + "no-var": { + Description: "Require let or const instead of var", + }, + "prefer-const": { + Description: "Require const declarations for variables that are never reassigned", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "destructuring": {Type: "string", Enum: []string{"any", "all"}}, + "ignoreReadBeforeAssign": {Type: "boolean"}, + }, + }, + }, + + // Naming + "camelcase": { + Description: "Enforce camelcase naming convention", + Options: OptionsSchema{ + Type: "object", 
+ Properties: map[string]OptionProperty{ + "properties": {Type: "string", Enum: []string{"always", "never"}}, + "ignoreDestructuring": {Type: "boolean"}, + "ignoreImports": {Type: "boolean"}, + "ignoreGlobals": {Type: "boolean"}, + "allow": {Type: "array", Items: "string"}, + }, + }, + }, + "new-cap": { + Description: "Require constructor names to begin with a capital letter", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "newIsCap": {Type: "boolean"}, + "capIsNew": {Type: "boolean"}, + "newIsCapExceptions": {Type: "array", Items: "string"}, + "capIsNewExceptions": {Type: "array", Items: "string"}, + "properties": {Type: "boolean"}, + }, + }, + }, + "id-length": { + Description: "Enforce minimum and maximum identifier lengths", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "min": {Type: "number"}, + "max": {Type: "number"}, + "properties": {Type: "string", Enum: []string{"always", "never"}}, + "exceptions": {Type: "array", Items: "string"}, + }, + }, + }, + "id-match": { + Description: "Require identifiers to match a specified regular expression", + Options: OptionsSchema{ + Type: "string", // regex pattern + }, + }, + + // Code Quality + "eqeqeq": { + Description: "Require the use of === and !==", + Options: OptionsSchema{ + Type: "string", + Enum: []string{"always", "smart"}, + }, + }, + "no-eval": { + Description: "Disallow the use of eval()", + }, + "no-implied-eval": { + Description: "Disallow the use of eval()-like methods", + }, + "no-new-func": { + Description: "Disallow new operators with the Function object", + }, + + // Complexity + "complexity": { + Description: "Enforce a maximum cyclomatic complexity", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "max": {Type: "number"}, + }, + }, + }, + "max-depth": { + Description: "Enforce a maximum depth that blocks can be nested", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "max": {Type: "number"}, + }, + }, + }, + "max-nested-callbacks": { + Description: "Enforce a maximum depth that callbacks can be nested", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "max": {Type: "number"}, + }, + }, + }, + + // Length/Size + "max-len": { + Description: "Enforce a maximum line length", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "code": {Type: "number"}, + "tabWidth": {Type: "number"}, + "comments": {Type: "number"}, + "ignorePattern": {Type: "string"}, + "ignoreComments": {Type: "boolean"}, + "ignoreTrailingComments": {Type: "boolean"}, + "ignoreUrls": {Type: "boolean"}, + "ignoreStrings": {Type: "boolean"}, + "ignoreTemplateLiterals": {Type: "boolean"}, + "ignoreRegExpLiterals": {Type: "boolean"}, + }, + }, + }, + "max-lines": { + Description: "Enforce a maximum number of lines per file", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "max": {Type: "number"}, + "skipBlankLines": {Type: "boolean"}, + "skipComments": {Type: "boolean"}, + }, + }, + }, + "max-lines-per-function": { + Description: "Enforce a maximum number of lines of code in a function", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "max": {Type: "number"}, + "skipBlankLines": {Type: "boolean"}, + "skipComments": {Type: "boolean"}, + "IIFEs": {Type: "boolean"}, + }, + }, + }, + "max-params": { + Description: "Enforce a maximum number of parameters in 
function definitions", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "max": {Type: "number"}, + }, + }, + }, + "max-statements": { + Description: "Enforce a maximum number of statements allowed in function blocks", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "max": {Type: "number"}, + "ignoreTopLevelFunctions": {Type: "boolean"}, + }, + }, + }, + + // Style + "indent": { + Description: "Enforce consistent indentation", + Options: OptionsSchema{ + Type: "mixed", // number or "tab" + }, + }, + "quotes": { + Description: "Enforce the consistent use of either backticks, double, or single quotes", + Options: OptionsSchema{ + Type: "string", + Enum: []string{"single", "double", "backtick"}, + }, + }, + "semi": { + Description: "Require or disallow semicolons instead of ASI", + Options: OptionsSchema{ + Type: "string", + Enum: []string{"always", "never"}, + }, + }, + "comma-dangle": { + Description: "Require or disallow trailing commas", + Options: OptionsSchema{ + Type: "string", + Enum: []string{"never", "always", "always-multiline", "only-multiline"}, + }, + }, + "brace-style": { + Description: "Enforce consistent brace style for blocks", + Options: OptionsSchema{ + Type: "string", + Enum: []string{"1tbs", "stroustrup", "allman"}, + }, + }, + + // Imports + "no-restricted-imports": { + Description: "Disallow specified modules when loaded by import", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "paths": {Type: "array", Items: "string"}, + "patterns": {Type: "array", Items: "string"}, + }, + }, + }, + "no-duplicate-imports": { + Description: "Disallow duplicate module imports", + }, + + // Best Practices + "curly": { + Description: "Enforce consistent brace style for all control statements", + Options: OptionsSchema{ + Type: "string", + Enum: []string{"all", "multi", "multi-line", "multi-or-nest", "consistent"}, + }, + }, + "dot-notation": { + Description: "Enforce dot notation whenever possible", + }, + "no-else-return": { + Description: "Disallow else blocks after return statements in if statements", + }, + "no-empty": { + Description: "Disallow empty block statements", + }, + "no-empty-function": { + Description: "Disallow empty functions", + }, + "no-magic-numbers": { + Description: "Disallow magic numbers", + Options: OptionsSchema{ + Type: "object", + Properties: map[string]OptionProperty{ + "ignore": {Type: "array", Items: "number"}, + "ignoreArrayIndexes": {Type: "boolean"}, + "ignoreDefaultValues": {Type: "boolean"}, + "enforceConst": {Type: "boolean"}, + "detectObjects": {Type: "boolean"}, + }, + }, + }, + "no-throw-literal": { + Description: "Disallow throwing literals as exceptions", + }, + "no-useless-return": { + Description: "Disallow redundant return statements", + }, + "require-await": { + Description: "Disallow async functions which have no await expression", + }, +} + +// PrettierOptionRegistry contains all valid Prettier options +var PrettierOptionRegistry = map[string]OptionProperty{ + "printWidth": {Type: "number", Default: 80}, + "tabWidth": {Type: "number", Default: 2}, + "useTabs": {Type: "boolean", Default: false}, + "semi": {Type: "boolean", Default: true}, + "singleQuote": {Type: "boolean", Default: false}, + "quoteProps": {Type: "string", Enum: []string{"as-needed", "consistent", "preserve"}}, + "jsxSingleQuote": {Type: "boolean", Default: false}, + "trailingComma": {Type: "string", Enum: []string{"all", "es5", "none"}}, + "bracketSpacing": 
{Type: "boolean", Default: true}, + "bracketSameLine": {Type: "boolean", Default: false}, + "arrowParens": {Type: "string", Enum: []string{"always", "avoid"}}, + "proseWrap": {Type: "string", Enum: []string{"always", "never", "preserve"}}, + "htmlWhitespaceSensitivity": {Type: "string", Enum: []string{"css", "strict", "ignore"}}, + "endOfLine": {Type: "string", Enum: []string{"lf", "crlf", "cr", "auto"}}, + "singleAttributePerLine": {Type: "boolean", Default: false}, +} + +// TSCOptionRegistry contains all valid TypeScript compiler options for linting +var TSCOptionRegistry = map[string]OptionProperty{ + // Strict Checks + "strict": {Type: "boolean", Default: false}, + "noImplicitAny": {Type: "boolean", Default: false}, + "strictNullChecks": {Type: "boolean", Default: false}, + "strictFunctionTypes": {Type: "boolean", Default: false}, + "strictBindCallApply": {Type: "boolean", Default: false}, + "strictPropertyInitialization": {Type: "boolean", Default: false}, + "noImplicitThis": {Type: "boolean", Default: false}, + "useUnknownInCatchVariables": {Type: "boolean", Default: false}, + "alwaysStrict": {Type: "boolean", Default: false}, + + // Linting + "noUnusedLocals": {Type: "boolean", Default: false}, + "noUnusedParameters": {Type: "boolean", Default: false}, + "exactOptionalPropertyTypes": {Type: "boolean", Default: false}, + "noImplicitReturns": {Type: "boolean", Default: false}, + "noFallthroughCasesInSwitch": {Type: "boolean", Default: false}, + "noUncheckedIndexedAccess": {Type: "boolean", Default: false}, + "noImplicitOverride": {Type: "boolean", Default: false}, + "noPropertyAccessFromIndexSignature": {Type: "boolean", Default: false}, + "allowUnusedLabels": {Type: "boolean", Default: true}, + "allowUnreachableCode": {Type: "boolean", Default: true}, +} + +// RuleDefinition defines a linter rule's schema +type RuleDefinition struct { + Description string + Options OptionsSchema + Deprecated bool + Replacement string // If deprecated, which rule replaces it +} + +// OptionsSchema defines the schema for rule options +type OptionsSchema struct { + Type string // "object", "string", "number", "boolean", "array", "mixed" + Properties map[string]OptionProperty // For object type + Items string // For array type, element type + Enum []string // Valid values for string type +} + +// OptionProperty defines a single option property +type OptionProperty struct { + Type string + Enum []string + Items string // For arrays + Default interface{} // Default value +} + +// ValidateESLintRule checks if a rule name and options are valid +func ValidateESLintRule(ruleName string, options interface{}) ValidationError { + def, exists := ESLintRuleRegistry[ruleName] + if !exists { + return ValidationError{ + Valid: false, + Message: "unknown ESLint rule: " + ruleName, + Suggestion: "This rule may require a plugin or doesn't exist. 
" + + "Consider using llm-validator for this check instead.", + } + } + + if def.Deprecated { + return ValidationError{ + Valid: false, + Message: "deprecated rule: " + ruleName, + Suggestion: "Use '" + def.Replacement + "' instead.", + } + } + + // TODO: Add options validation based on OptionsSchema + return ValidationError{Valid: true} +} + +// ValidatePrettierOption checks if a Prettier option is valid +func ValidatePrettierOption(optionName string, value interface{}) ValidationError { + def, exists := PrettierOptionRegistry[optionName] + if !exists { + return ValidationError{ + Valid: false, + Message: "unknown Prettier option: " + optionName, + } + } + + // Validate type and enum if applicable + if len(def.Enum) > 0 { + strVal, ok := value.(string) + if ok { + valid := false + for _, allowed := range def.Enum { + if strVal == allowed { + valid = true + break + } + } + if !valid { + return ValidationError{ + Valid: false, + Message: "invalid value for " + optionName, + Suggestion: "Valid values: " + joinStrings(def.Enum), + } + } + } + } + + return ValidationError{Valid: true} +} + +// ValidateTSCOption checks if a TypeScript compiler option is valid +func ValidateTSCOption(optionName string, value interface{}) ValidationError { + _, exists := TSCOptionRegistry[optionName] + if !exists { + return ValidationError{ + Valid: false, + Message: "unknown TypeScript compiler option: " + optionName, + } + } + + return ValidationError{Valid: true} +} + +// ValidationError represents a validation result +type ValidationError struct { + Valid bool + Message string + Suggestion string +} + +func joinStrings(strs []string) string { + result := "" + for i, s := range strs { + if i > 0 { + result += ", " + } + result += s + } + return result +} + +// GetESLintRuleNames returns all valid ESLint rule names +func GetESLintRuleNames() []string { + names := make([]string, 0, len(ESLintRuleRegistry)) + for name := range ESLintRuleRegistry { + names = append(names, name) + } + return names +} + +// GetPrettierOptionNames returns all valid Prettier option names +func GetPrettierOptionNames() []string { + names := make([]string, 0, len(PrettierOptionRegistry)) + for name := range PrettierOptionRegistry { + names = append(names, name) + } + return names +} + +// GetTSCOptionNames returns all valid TypeScript compiler option names +func GetTSCOptionNames() []string { + names := make([]string, 0, len(TSCOptionRegistry)) + for name := range TSCOptionRegistry { + names = append(names, name) + } + return names +} From 0edbb7679672de2e01eccf4a82aee1981daf2506 Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Tue, 25 Nov 2025 18:48:47 +0900 Subject: [PATCH 03/14] chore: update llm version --- cmd/test-linter/main.go | 124 +++++++++++++ go.mod | 1 - go.sum | 11 +- internal/adapter/checkstyle/converter.go | 205 ++++++++++++++++++++- internal/adapter/eslint/converter.go | 4 +- internal/adapter/pmd/converter.go | 91 +++++---- internal/cmd/convert.go | 6 +- internal/cmd/validate.go | 2 +- internal/converter/converter.go | 4 +- internal/converter/linters/prettier_tsc.go | 8 +- internal/llm/client.go | 152 ++++++++++++--- internal/mcp/server.go | 2 +- internal/server/server.go | 2 +- 13 files changed, 531 insertions(+), 81 deletions(-) create mode 100644 cmd/test-linter/main.go diff --git a/cmd/test-linter/main.go b/cmd/test-linter/main.go new file mode 100644 index 0000000..f673bc9 --- /dev/null +++ b/cmd/test-linter/main.go @@ -0,0 +1,124 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + + 
"github.com/DevSymphony/sym-cli/internal/adapter" + "github.com/DevSymphony/sym-cli/internal/adapter/eslint" +) + +func main() { + fmt.Println("=== Testing ESLint Adapter ===") + fmt.Println() + + // 1. Create ESLint adapter + homeDir, _ := os.UserHomeDir() + toolsDir := filepath.Join(homeDir, ".sym", "tools") + workDir, _ := os.Getwd() + + adp := eslint.NewAdapter(toolsDir, workDir) + fmt.Printf("✓ Created ESLint adapter\n") + fmt.Printf(" Tools directory: %s\n", adp.ToolsDir) + fmt.Printf(" Work directory: %s\n\n", adp.WorkDir) + + // 2. Check availability + ctx := context.Background() + fmt.Println("Checking ESLint availability...") + err := adp.CheckAvailability(ctx) + if err != nil { + fmt.Printf("⚠️ ESLint not available: %v\n", err) + fmt.Println("\nInstalling ESLint...") + // Try to install + installConfig := adapter.InstallConfig{ + ToolsDir: toolsDir, + } + installErr := adp.Install(ctx, installConfig) + if installErr != nil { + fmt.Printf("❌ Failed to install: %v\n", installErr) + os.Exit(1) + } + fmt.Println("✓ ESLint installed successfully") + fmt.Println() + } else { + fmt.Println("✓ ESLint is available") + fmt.Println() + } + + // 3. Create a simple ESLint config + config := []byte(`{ + "env": { + "node": true, + "es2021": true + }, + "extends": "eslint:recommended", + "parserOptions": { + "ecmaVersion": 12 + }, + "rules": { + "semi": ["error", "always"], + "quotes": ["error", "single"], + "no-unused-vars": "error", + "no-console": "warn" + } + }`) + + // 4. Find test file + testFile := filepath.Join(workDir, "test_file.js") + if _, err := os.Stat(testFile); os.IsNotExist(err) { + fmt.Printf("❌ Test file not found: %s\n", testFile) + os.Exit(1) + } + fmt.Printf("Test file: %s\n\n", testFile) + + // 5. Execute ESLint + fmt.Println("Running ESLint...") + output, err := adp.Execute(ctx, config, []string{testFile}) + if err != nil { + fmt.Printf("⚠️ ESLint execution error: %v\n", err) + // Continue to parse output even if there's an error (violations cause non-zero exit) + } + + // 6. Show raw output + fmt.Println("\n--- Raw ESLint Output ---") + if output.Stdout != "" { + fmt.Printf("STDOUT:\n%s\n", output.Stdout) + } + if output.Stderr != "" { + fmt.Printf("STDERR:\n%s\n", output.Stderr) + } + fmt.Printf("Exit Code: %d\n", output.ExitCode) + fmt.Printf("Duration: %s\n", output.Duration) + + // 7. Parse violations + fmt.Println("\n--- Parsed Violations ---") + violations, parseErr := adp.ParseOutput(output) + if parseErr != nil { + fmt.Printf("❌ Failed to parse output: %v\n", parseErr) + os.Exit(1) + } + + if len(violations) == 0 { + fmt.Println("✅ No violations found!") + } else { + fmt.Printf("Found %d violation(s):\n\n", len(violations)) + for i, v := range violations { + fmt.Printf("%d. 
[%s] %s\n", i+1, v.Severity, v.RuleID) + fmt.Printf(" File: %s:%d:%d\n", v.File, v.Line, v.Column) + fmt.Printf(" Message: %s\n", v.Message) + + // Show that we have the raw output stored + if len(output.Stdout) > 0 { + fmt.Printf(" ✓ Raw output captured: %d bytes\n", len(output.Stdout)) + } + if len(output.Stderr) > 0 { + fmt.Printf(" ✓ Raw error captured: %d bytes\n", len(output.Stderr)) + } + fmt.Printf(" ✓ Execution time: %s\n\n", output.Duration) + } + } + + fmt.Println("=== Test Complete ===") +} diff --git a/go.mod b/go.mod index eaae966..a28b7b9 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/DevSymphony/sym-cli go 1.25.1 require ( - github.com/bmatcuk/doublestar/v4 v4.9.1 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // symphonyclient integration: browser automation for OAuth github.com/spf13/cobra v1.10.1 ) diff --git a/go.sum b/go.sum index d534903..c34c37e 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= -github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= @@ -15,10 +13,10 @@ github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIy github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/modelcontextprotocol/go-sdk v1.1.0 h1:Qjayg53dnKC4UZ+792W21e4BpwEZBzwgRW6LrjLWSwA= -github.com/modelcontextprotocol/go-sdk v1.1.0/go.mod h1:6fM3LCm3yV7pAs8isnKLn07oKtB0MP9LHd3DfAcKw10= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/modelcontextprotocol/go-sdk v1.1.0 h1:Qjayg53dnKC4UZ+792W21e4BpwEZBzwgRW6LrjLWSwA= +github.com/modelcontextprotocol/go-sdk v1.1.0/go.mod h1:6fM3LCm3yV7pAs8isnKLn07oKtB0MP9LHd3DfAcKw10= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -35,13 +33,12 @@ github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zI github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.38.0 
h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/internal/adapter/checkstyle/converter.go b/internal/adapter/checkstyle/converter.go index 94412c3..640cf72 100644 --- a/internal/adapter/checkstyle/converter.go +++ b/internal/adapter/checkstyle/converter.go @@ -255,7 +255,8 @@ Output: userPrompt := fmt.Sprintf("Convert this Java rule to Checkstyle module:\n\n%s", rule.Say) - response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + // Call LLM with minimal reasoning + response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } @@ -285,6 +286,9 @@ Output: return nil, nil } + // Filter properties to only include valid ones for this module + filteredProps := filterValidProperties(result.ModuleName, result.Properties) + // Build module module := &checkstyleModule{ Name: result.ModuleName, @@ -297,8 +301,11 @@ Output: Value: mapCheckstyleSeverity(result.Severity), }) - // Add other properties - for key, value := range result.Properties { + // Add filtered properties + for key, value := range filteredProps { + if key == "severity" { + continue // Already added above + } module.Properties = append(module.Properties, checkstyleProperty{ Name: key, Value: value, @@ -321,3 +328,195 @@ func mapCheckstyleSeverity(severity string) string { return "error" } } + +// validCheckstyleProperties defines valid properties for each Checkstyle module +// This prevents LLM from generating invalid properties that cause runtime errors +var validCheckstyleProperties = map[string]map[string]bool{ + "TypeName": { + "severity": true, + "format": true, + "tokens": true, + }, + "MethodName": { + "severity": true, + "format": true, + "allowClassName": true, + "applyToPublic": true, + "applyToProtected": true, + "applyToPackage": true, + "applyToPrivate": true, + "tokens": true, + }, + "ParameterName": { + "severity": true, + "format": true, + "ignoreOverridden": true, + "accessModifiers": true, + }, + "LocalVariableName": { + "severity": true, + "format": true, + "allowOneCharVarInForLoop": true, + }, + "ConstantName": { + "severity": true, + "format": true, + "applyToPublic": true, + "applyToProtected": true, + "applyToPackage": true, + "applyToPrivate": true, + }, + "LineLength": { + "severity": true, + "max": true, + "ignorePattern": true, + "fileExtensions": true, + }, + "MethodLength": { + "severity": true, + "max": true, + "countEmpty": true, + "tokens": true, + }, + "ParameterNumber": { + "severity": true, + "max": true, + "ignoreOverriddenMethods": true, + "tokens": true, + }, + "FileLength": { + "severity": true, + "max": true, + "fileExtensions": true, + }, + "Indentation": { + "severity": true, + "basicOffset": true, + "braceAdjustment": true, + "caseIndent": true, + "throwsIndent": true, + "arrayInitIndent": true, + "lineWrappingIndentation": true, + "forceStrictCondition": true, + }, + "WhitespaceAround": { + "severity": true, + "allowEmptyConstructors": true, + "allowEmptyMethods": true, + 
"allowEmptyTypes": true, + "allowEmptyLoops": true, + "allowEmptyLambdas": true, + "allowEmptyCatches": true, + "ignoreEnhancedForColon": true, + "tokens": true, + }, + "NeedBraces": { + "severity": true, + "allowSingleLineStatement": true, + "allowEmptyLoopBody": true, + "tokens": true, + }, + "LeftCurly": { + "severity": true, + "option": true, + "ignoreEnums": true, + "tokens": true, + }, + "RightCurly": { + "severity": true, + "option": true, + "tokens": true, + }, + "AvoidStarImport": { + "severity": true, + "excludes": true, + "allowClassImports": true, + "allowStaticMemberImports": true, + }, + "IllegalImport": { + "severity": true, + "illegalPkgs": true, + "illegalClasses": true, + "regexp": true, + }, + "UnusedImports": { + "severity": true, + "processJavadoc": true, + }, + "CyclomaticComplexity": { + "severity": true, + "max": true, + "switchBlockAsSingleDecisionPoint": true, + "tokens": true, + }, + "NPathComplexity": { + "severity": true, + "max": true, + }, + "JavadocMethod": { + "severity": true, + "accessModifiers": true, + "allowMissingParamTags": true, + "allowMissingReturnTag": true, + "allowedAnnotations": true, + "validateThrows": true, + "tokens": true, + }, + "JavadocType": { + "severity": true, + "scope": true, + "excludeScope": true, + "authorFormat": true, + "versionFormat": true, + "allowMissingParamTags": true, + "allowUnknownTags": true, + "allowedAnnotations": true, + "tokens": true, + }, + "MissingJavadocMethod": { + "severity": true, + "minLineCount": true, + "allowedAnnotations": true, + "scope": true, + "excludeScope": true, + "allowMissingPropertyJavadoc": true, + "ignoreMethodNamesRegex": true, + "tokens": true, + }, + "EmptyBlock": { + "severity": true, + "option": true, + "tokens": true, + }, + "MagicNumber": { + "severity": true, + "ignoreNumbers": true, + "ignoreHashCodeMethod": true, + "ignoreAnnotation": true, + "ignoreFieldDeclaration": true, + "ignoreAnnotationElementDefaults": true, + "constantWaiverParentToken": true, + "tokens": true, + }, +} + +// filterValidProperties filters out invalid properties for a given module +func filterValidProperties(moduleName string, properties map[string]string) map[string]string { + validProps, hasDefinedProps := validCheckstyleProperties[moduleName] + if !hasDefinedProps { + // If module is not in our whitelist, only allow severity + result := make(map[string]string) + if sev, ok := properties["severity"]; ok { + result["severity"] = sev + } + return result + } + + result := make(map[string]string) + for key, value := range properties { + if validProps[key] { + result[key] = value + } + } + return result +} diff --git a/internal/adapter/eslint/converter.go b/internal/adapter/eslint/converter.go index 999f27d..5b2f1a1 100644 --- a/internal/adapter/eslint/converter.go +++ b/internal/adapter/eslint/converter.go @@ -215,8 +215,8 @@ Output: userPrompt += fmt.Sprintf("\nSeverity: %s", rule.Severity) } - // Call LLM - response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + // Call LLM with minimal reasoning + response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) if err != nil { return "", nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/pmd/converter.go b/internal/adapter/pmd/converter.go index 9bc803f..0161856 100644 --- a/internal/adapter/pmd/converter.go +++ b/internal/adapter/pmd/converter.go @@ -147,62 +147,67 @@ func (c *Converter) ConvertRules(ctx context.Context, rules []schema.UserRule, l func (c *Converter) convertSingleRule(ctx context.Context, 
rule schema.UserRule, llmClient *llm.Client) (*pmdRule, error) { systemPrompt := `You are a PMD 7.x configuration expert. Convert natural language Java coding rules to PMD rule references. -Return ONLY a JSON object (no markdown fences): +Return ONLY a JSON object with exactly these two fields (no other fields): { - "rule_ref": "category/java/CATEGORY.xml/RuleName", - "priority": 1-5 + "rule_ref": "category/java/category.xml/RuleName", + "priority": 1 } -IMPORTANT: PMD 7.x uses "category/java/" prefix, NOT "rulesets/java/". - -Common PMD 7.x rules: -- Best Practices: category/java/bestpractices.xml/UnusedPrivateMethod -- Code Style: category/java/codestyle.xml/ShortVariable -- Design: category/java/design.xml/TooManyMethods, category/java/design.xml/CyclomaticComplexity -- Error Prone: category/java/errorprone.xml/EmptyCatchBlock -- Security: category/java/security.xml/HardCodedCryptoKey - -Priority levels: 1=High, 2=Medium-High, 3=Medium, 4=Low, 5=Info - -If cannot convert, return: +Valid PMD 7.x categories and rules: +- category/java/bestpractices.xml/UnusedPrivateMethod +- category/java/bestpractices.xml/UnusedLocalVariable +- category/java/bestpractices.xml/UnusedFormalParameter +- category/java/bestpractices.xml/AvoidReassigningParameters +- category/java/codestyle.xml/ShortVariable +- category/java/codestyle.xml/LongVariable +- category/java/codestyle.xml/ShortMethodName +- category/java/codestyle.xml/ClassNamingConventions +- category/java/codestyle.xml/MethodNamingConventions +- category/java/codestyle.xml/FieldNamingConventions +- category/java/codestyle.xml/UnnecessaryImport +- category/java/design.xml/TooManyMethods +- category/java/design.xml/ExcessiveMethodLength +- category/java/design.xml/ExcessiveParameterList +- category/java/design.xml/CyclomaticComplexity +- category/java/design.xml/NPathComplexity +- category/java/design.xml/GodClass +- category/java/errorprone.xml/EmptyCatchBlock +- category/java/errorprone.xml/AvoidCatchingNPE +- category/java/errorprone.xml/EmptyIfStmt +- category/java/security.xml/HardCodedCryptoKey + +Priority: 1=High, 2=Medium-High, 3=Medium, 4=Low, 5=Info + +If the rule cannot be mapped to a valid PMD rule, return: { "rule_ref": "", "priority": 3 } -Example: - -Input: "Avoid unused private methods" -Output: -{ - "rule_ref": "category/java/bestpractices.xml/UnusedPrivateMethod", - "priority": 2 -} - -Input: "No empty catch blocks" -Output: -{ - "rule_ref": "category/java/errorprone.xml/EmptyCatchBlock", - "priority": 2 -}` +IMPORTANT: Return ONLY the JSON object. 
Do NOT include description, message, or any other fields.` userPrompt := fmt.Sprintf("Convert this Java rule to PMD rule reference:\n\n%s", rule.Say) - response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + // Call LLM with minimal reasoning + response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } - // Parse response + // Parse response - extract JSON object response = strings.TrimSpace(response) response = strings.TrimPrefix(response, "```json") response = strings.TrimPrefix(response, "```") response = strings.TrimSuffix(response, "```") response = strings.TrimSpace(response) - if response == "" { - return nil, fmt.Errorf("LLM returned empty response") + // Find JSON object boundaries to handle extra text + startIdx := strings.Index(response, "{") + endIdx := strings.LastIndex(response, "}") + if startIdx == -1 || endIdx == -1 || endIdx <= startIdx { + return nil, fmt.Errorf("no valid JSON object found in response") } + response = response[startIdx : endIdx+1] var result struct { RuleRef string `json:"rule_ref"` @@ -217,6 +222,24 @@ Output: return nil, nil } + // Validate rule_ref format: must start with "category/java/" + if !strings.HasPrefix(result.RuleRef, "category/java/") { + // Try to fix old format (rulesets/java/...) to new format (category/java/...) + if strings.HasPrefix(result.RuleRef, "rulesets/java/") { + result.RuleRef = strings.Replace(result.RuleRef, "rulesets/java/", "category/java/", 1) + } else { + return nil, nil // Invalid format, skip this rule + } + } + + // Validate priority range + if result.Priority < 1 { + result.Priority = 3 + } + if result.Priority > 5 { + result.Priority = 5 + } + return &pmdRule{ Ref: result.RuleRef, Priority: result.Priority, diff --git a/internal/cmd/convert.go b/internal/cmd/convert.go index 7324307..8a12ed8 100644 --- a/internal/cmd/convert.go +++ b/internal/cmd/convert.go @@ -41,8 +41,8 @@ map them to appropriate linter rules.`, # Convert for specific linter sym convert -i user-policy.json --targets eslint - # Convert for multiple linters with specific model - sym convert -i user-policy.json --targets checkstyle,pmd --openai-model gpt-4o + # Convert for Java with specific model + sym convert -i user-policy.json --targets checkstyle,pmd --openai-model gpt-5-mini # Use custom output directory sym convert -i user-policy.json --targets all --output-dir ./custom-dir @@ -57,7 +57,7 @@ func init() { convertCmd.Flags().StringVarP(&convertOutputFile, "output", "o", "", "output code policy file (legacy mode)") convertCmd.Flags().StringSliceVar(&convertTargets, "targets", []string{}, buildTargetsDescription()) convertCmd.Flags().StringVar(&convertOutputDir, "output-dir", "", "output directory for linter configs (default: same as input file directory)") - convertCmd.Flags().StringVar(&convertOpenAIModel, "openai-model", "gpt-4o", "OpenAI model to use for inference") + convertCmd.Flags().StringVar(&convertOpenAIModel, "openai-model", "gpt-5-mini", "OpenAI model to use for inference") convertCmd.Flags().Float64Var(&convertConfidenceThreshold, "confidence-threshold", 0.7, "minimum confidence for LLM inference (0.0-1.0)") convertCmd.Flags().IntVar(&convertTimeout, "timeout", 30, "timeout for API calls in seconds") } diff --git a/internal/cmd/validate.go b/internal/cmd/validate.go index 213db2b..8e40e73 100644 --- a/internal/cmd/validate.go +++ b/internal/cmd/validate.go @@ -50,7 +50,7 @@ Examples: func init() { 
validateCmd.Flags().StringVarP(&validatePolicyFile, "policy", "p", "", "Path to code-policy.json (default: .sym/code-policy.json)") validateCmd.Flags().BoolVar(&validateStaged, "staged", false, "Validate only staged changes (default: all uncommitted changes)") - validateCmd.Flags().StringVar(&validateModel, "model", "gpt-4o", "OpenAI model to use") + validateCmd.Flags().StringVar(&validateModel, "model", "gpt-5-mini", "OpenAI model to use") validateCmd.Flags().IntVar(&validateTimeout, "timeout", 30, "Timeout per rule check in seconds") } diff --git a/internal/converter/converter.go b/internal/converter/converter.go index 67e1a06..2fbf2cb 100644 --- a/internal/converter/converter.go +++ b/internal/converter/converter.go @@ -381,8 +381,8 @@ Reason: Requires knowing which packages are "large"`, linterDescriptions, routin userPrompt := fmt.Sprintf("Rule: %s\nCategory: %s", rule.Say, rule.Category) - // Call LLM - response, err := c.llmClient.Complete(ctx, systemPrompt, userPrompt) + // Call LLM with low reasoning (needs some thought for linter selection) + response, err := c.llmClient.CompleteLow(ctx, systemPrompt, userPrompt) if err != nil { fmt.Fprintf(os.Stderr, "Warning: LLM routing failed for rule %s: %v\n", rule.ID, err) return []string{} // Will fall back to llm-validator diff --git a/internal/converter/linters/prettier_tsc.go b/internal/converter/linters/prettier_tsc.go index 5426e88..cc1af21 100644 --- a/internal/converter/linters/prettier_tsc.go +++ b/internal/converter/linters/prettier_tsc.go @@ -123,8 +123,8 @@ Output: userPrompt := fmt.Sprintf("Convert this rule to Prettier configuration:\n\n%s", rule.Say) - // Call LLM - response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + // Call LLM with minimal reasoning (fast, simple conversion task) + response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } @@ -279,8 +279,8 @@ Output: userPrompt := fmt.Sprintf("Convert this rule to TypeScript compiler configuration:\n\n%s", rule.Say) - // Call LLM - response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + // Call LLM with minimal reasoning (fast, simple conversion task) + response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/llm/client.go b/internal/llm/client.go index b386866..755f25e 100644 --- a/internal/llm/client.go +++ b/internal/llm/client.go @@ -14,20 +14,35 @@ import ( const ( openAIAPIURL = "https://api.openai.com/v1/chat/completions" - defaultModel = "gpt-4o" + defaultModel = "gpt-5-mini" defaultMaxTokens = 1000 - defaultTemperature = 0.3 - defaultTimeout = 30 * time.Second + defaultTemperature = 1.0 + defaultTimeout = 60 * time.Second +) + +// ReasoningEffort defines the reasoning effort level for the model +type ReasoningEffort string + +const ( + // ReasoningMinimal uses minimal reasoning - fastest, for simple tasks + ReasoningMinimal ReasoningEffort = "minimal" + // ReasoningLow uses low reasoning - for straightforward tasks + ReasoningLow ReasoningEffort = "low" + // ReasoningMedium uses medium reasoning - balanced + ReasoningMedium ReasoningEffort = "medium" + // ReasoningHigh uses high reasoning - for complex tasks + ReasoningHigh ReasoningEffort = "high" ) // Client represents an OpenAI API client type Client struct { - apiKey string - model string - httpClient *http.Client - maxTokens int - temperature float64 - verbose bool + apiKey string + model 
string + httpClient *http.Client + maxTokens int + temperature float64 + reasoningEffort ReasoningEffort + verbose bool } // ClientOption is a functional option for configuring the client @@ -40,6 +55,27 @@ func WithModel(model string) ClientOption { } } +// WithMaxTokens sets the maximum tokens for responses +func WithMaxTokens(maxTokens int) ClientOption { + return func(c *Client) { + c.maxTokens = maxTokens + } +} + +// WithTemperature sets the sampling temperature +func WithTemperature(temperature float64) ClientOption { + return func(c *Client) { + c.temperature = temperature + } +} + +// WithReasoningEffort sets the default reasoning effort level +func WithReasoningEffort(effort ReasoningEffort) ClientOption { + return func(c *Client) { + c.reasoningEffort = effort + } +} + // WithTimeout sets the HTTP client timeout func WithTimeout(timeout time.Duration) ClientOption { return func(c *Client) { @@ -47,6 +83,13 @@ func WithTimeout(timeout time.Duration) ClientOption { } } +// WithVerbose enables verbose logging +func WithVerbose(verbose bool) ClientOption { + return func(c *Client) { + c.verbose = verbose + } +} + // NewClient creates a new OpenAI API client func NewClient(apiKey string, opts ...ClientOption) *Client { if apiKey == "" { @@ -59,9 +102,10 @@ func NewClient(apiKey string, opts ...ClientOption) *Client { httpClient: &http.Client{ Timeout: defaultTimeout, }, - maxTokens: defaultMaxTokens, - temperature: defaultTemperature, - verbose: false, + maxTokens: defaultMaxTokens, + temperature: defaultTemperature, + reasoningEffort: ReasoningLow, // Default to low for general use + verbose: false, } for _, opt := range opts { @@ -73,10 +117,11 @@ func NewClient(apiKey string, opts ...ClientOption) *Client { // openAIRequest represents the OpenAI API request structure type openAIRequest struct { - Model string `json:"model"` - Messages []openAIMessage `json:"messages"` - MaxTokens int `json:"max_tokens,omitempty"` - Temperature float64 `json:"temperature,omitempty"` + Model string `json:"model"` + Messages []openAIMessage `json:"messages"` + MaxTokens int `json:"max_completion_tokens,omitempty"` + Temperature float64 `json:"temperature,omitempty"` + ReasoningEffort string `json:"reasoning_effort,omitempty"` } // openAIMessage represents a message in the conversation @@ -111,20 +156,67 @@ type openAIResponse struct { } `json:"error,omitempty"` } -// Complete sends a chat completion request to OpenAI API +// CompleteOptions contains options for a single completion request +type CompleteOptions struct { + ReasoningEffort ReasoningEffort + MaxTokens int + Temperature float64 +} + +// Complete sends a chat completion request using the client's default reasoning effort func (c *Client) Complete(ctx context.Context, systemPrompt, userPrompt string) (string, error) { + return c.CompleteWithOptions(ctx, systemPrompt, userPrompt, CompleteOptions{ + ReasoningEffort: c.reasoningEffort, + MaxTokens: c.maxTokens, + Temperature: c.temperature, + }) +} + +// CompleteMinimal sends a completion request with minimal reasoning (fastest) +// Use this for simple, repetitive tasks in goroutines +func (c *Client) CompleteMinimal(ctx context.Context, systemPrompt, userPrompt string) (string, error) { + return c.CompleteWithOptions(ctx, systemPrompt, userPrompt, CompleteOptions{ + ReasoningEffort: ReasoningMinimal, + MaxTokens: c.maxTokens, + Temperature: c.temperature, + }) +} + +// CompleteLow sends a completion request with low reasoning +// Use this for straightforward tasks that need some thought +func 
(c *Client) CompleteLow(ctx context.Context, systemPrompt, userPrompt string) (string, error) { + return c.CompleteWithOptions(ctx, systemPrompt, userPrompt, CompleteOptions{ + ReasoningEffort: ReasoningLow, + MaxTokens: c.maxTokens, + Temperature: c.temperature, + }) +} + +// CompleteWithOptions sends a chat completion request with specific options +func (c *Client) CompleteWithOptions(ctx context.Context, systemPrompt, userPrompt string, opts CompleteOptions) (string, error) { if c.apiKey == "" { return "", fmt.Errorf("OpenAI API key not configured") } + // Use default values if not specified + if opts.ReasoningEffort == "" { + opts.ReasoningEffort = c.reasoningEffort + } + if opts.MaxTokens == 0 { + opts.MaxTokens = c.maxTokens + } + if opts.Temperature == 0 { + opts.Temperature = c.temperature + } + reqBody := openAIRequest{ Model: c.model, Messages: []openAIMessage{ - {Role: "system", Content: systemPrompt}, - {Role: "user", Content: userPrompt}, + {Role: "user", Content: systemPrompt + "\n\n" + userPrompt}, }, - MaxTokens: c.maxTokens, - Temperature: c.temperature, + MaxTokens: opts.MaxTokens, + Temperature: opts.Temperature, + ReasoningEffort: string(opts.ReasoningEffort), } jsonData, err := json.Marshal(reqBody) @@ -141,7 +233,8 @@ func (c *Client) Complete(ctx context.Context, systemPrompt, userPrompt string) req.Header.Set("Authorization", "Bearer "+c.apiKey) if c.verbose { - fmt.Printf("OpenAI API request:\n Model: %s\n Prompt length: %d chars\n", c.model, len(userPrompt)) + fmt.Printf("OpenAI API request:\n Model: %s\n Reasoning: %s\n Temperature: %.1f\n Prompt length: %d chars\n", + c.model, opts.ReasoningEffort, opts.Temperature, len(userPrompt)) } resp, err := c.httpClient.Do(req) @@ -182,3 +275,18 @@ func (c *Client) Complete(ctx context.Context, systemPrompt, userPrompt string) return content, nil } + +// CheckAvailability checks if the OpenAI API is available +func (c *Client) CheckAvailability(ctx context.Context) error { + if c.apiKey == "" { + return fmt.Errorf("OPENAI_API_KEY environment variable not set") + } + + // Simple test request with minimal reasoning + _, err := c.CompleteMinimal(ctx, "You are a test assistant.", "Say 'OK'") + if err != nil { + return fmt.Errorf("OpenAI API not available: %w", err) + } + + return nil +} diff --git a/internal/mcp/server.go b/internal/mcp/server.go index c9995bb..9afa752 100644 --- a/internal/mcp/server.go +++ b/internal/mcp/server.go @@ -41,7 +41,7 @@ func ConvertPolicyWithLLM(userPolicyPath, codePolicyPath string) error { } llmClient := llm.NewClient(apiKey, - llm.WithModel("gpt-4o"), + llm.WithModel("gpt-5-mini"), llm.WithTimeout(30*time.Second), ) diff --git a/internal/server/server.go b/internal/server/server.go index c0bf579..3ca9b76 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -678,7 +678,7 @@ func (s *Server) handleConvert(w http.ResponseWriter, r *http.Request) { timeout := 30 * time.Second llmClient := llm.NewClient( apiKey, - llm.WithModel("gpt-4o-mini"), + llm.WithModel("gpt-5-mini-mini"), llm.WithTimeout(timeout), ) From cd113d4258db78db53bf79ff1dcfebae3500cf68 Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Tue, 25 Nov 2025 20:42:06 +0900 Subject: [PATCH 04/14] feat: improve LLM validation prompt and response handling --- internal/validator/llm_validator.go | 174 +++++++++++++++++++++++++--- 1 file changed, 156 insertions(+), 18 deletions(-) diff --git a/internal/validator/llm_validator.go b/internal/validator/llm_validator.go index df94fd0..eaf3845 100644 --- 
a/internal/validator/llm_validator.go +++ b/internal/validator/llm_validator.go @@ -133,36 +133,75 @@ func (v *LLMValidator) filterLLMRules() []schema.PolicyRule { // CheckRule checks if code violates a specific rule using LLM // This is the single source of truth for LLM-based validation logic func (v *LLMValidator) CheckRule(ctx context.Context, change GitChange, addedLines []string, rule schema.PolicyRule) (*Violation, error) { - // Build prompt for LLM - systemPrompt := `You are a code reviewer. Check if the code changes violate the given coding convention. + // Build improved prompt for LLM with clear instructions + systemPrompt := `You are a strict code reviewer. Your job is to check if code changes violate a specific coding convention. -Respond with JSON only: +IMPORTANT INSTRUCTIONS: +1. Be CONSERVATIVE - only report violations when you are CERTAIN the code violates the rule +2. Do NOT report false positives - if unsure, report as NOT violating +3. Consider the context of the code when making your decision +4. Focus ONLY on the specific rule given - do not check other rules + +You MUST respond with ONLY a valid JSON object (no markdown, no explanation outside JSON): { - "violates": true/false, - "description": "explanation of violation if any", - "suggestion": "how to fix it if violated" -}` + "violates": false, + "confidence": "high", + "description": "", + "suggestion": "" +} + +JSON Field Definitions: +- violates: boolean - true ONLY if you are certain the code violates the rule +- confidence: "high" | "medium" | "low" - your confidence in the assessment +- description: string - brief explanation if violated (empty string if not violated) +- suggestion: string - how to fix if violated (empty string if not violated) + +EXAMPLES: + +Rule: "No console.log in production code" +Code: "console.log('debug');" +Response: +{"violates": true, "confidence": "high", "description": "console.log statement found", "suggestion": "Remove console.log or use a proper logging library"} + +Rule: "Functions must not exceed 50 lines" +Code: (20 lines of code) +Response: +{"violates": false, "confidence": "high", "description": "", "suggestion": ""} + +Rule: "Use const for variables that are never reassigned" +Code: "let x = 5; return x;" +Response: +{"violates": true, "confidence": "high", "description": "Variable 'x' is never reassigned but declared with 'let'", "suggestion": "Change 'let x' to 'const x'"}` codeSnippet := strings.Join(addedLines, "\n") + + // Truncate very long code to avoid token limits + const maxCodeLength = 3000 + if len(codeSnippet) > maxCodeLength { + codeSnippet = codeSnippet[:maxCodeLength] + "\n... (truncated)" + } + userPrompt := fmt.Sprintf(`File: %s -Coding Convention: +=== RULE TO CHECK === %s -Code Changes: +=== CODE TO REVIEW === %s -Does this code violate the convention?`, change.FilePath, rule.Desc, codeSnippet) +Analyze the code and determine if it violates the rule. 
Respond with JSON only.`, change.FilePath, rule.Desc, codeSnippet) - // Call LLM - response, err := v.client.Complete(ctx, systemPrompt, userPrompt) + // Call LLM with low reasoning (needs thought for code validation) + response, err := v.client.CompleteLow(ctx, systemPrompt, userPrompt) if err != nil { return nil, err } - // Parse response + // Parse response with improved parsing result := parseValidationResponse(response) - if !result.Violates { + + // Only report high-confidence violations + if !result.Violates || result.Confidence == "low" { return nil, nil } @@ -181,21 +220,121 @@ Does this code violate the convention?`, change.FilePath, rule.Desc, codeSnippet type validationResponse struct { Violates bool + Confidence string Description string Suggestion string } +// jsonValidationResponse is the structure for JSON parsing +type jsonValidationResponse struct { + Violates bool `json:"violates"` + Confidence string `json:"confidence"` + Description string `json:"description"` + Suggestion string `json:"suggestion"` +} + func parseValidationResponse(response string) validationResponse { - // Default to no violation + // Default to no violation (conservative approach) + result := validationResponse{ + Violates: false, + Confidence: "low", + Description: "", + Suggestion: "", + } + + // Clean up response - remove markdown fences if present + response = strings.TrimSpace(response) + response = strings.TrimPrefix(response, "```json") + response = strings.TrimPrefix(response, "```") + response = strings.TrimSuffix(response, "```") + response = strings.TrimSpace(response) + + // Try to find JSON object in response + startIdx := strings.Index(response, "{") + endIdx := strings.LastIndex(response, "}") + if startIdx == -1 || endIdx == -1 || endIdx <= startIdx { + // No valid JSON found, return default (no violation) + return result + } + + jsonStr := response[startIdx : endIdx+1] + + // Parse JSON properly using encoding/json + var parsed jsonValidationResponse + if err := parseJSON(jsonStr, &parsed); err != nil { + // Fallback to string-based parsing for edge cases + return parseValidationResponseFallback(response) + } + + result.Violates = parsed.Violates + result.Confidence = parsed.Confidence + result.Description = parsed.Description + result.Suggestion = parsed.Suggestion + + // Default confidence to "medium" if not specified + if result.Confidence == "" { + result.Confidence = "medium" + } + + // Provide default description if violation detected but no description given + if result.Violates && result.Description == "" { + result.Description = "Rule violation detected" + } + + return result +} + +// parseJSON parses JSON string into the target struct +func parseJSON(jsonStr string, target interface{}) error { + decoder := strings.NewReader(jsonStr) + return decodeJSON(decoder, target) +} + +// decodeJSON decodes JSON from a reader (avoiding import cycle with encoding/json) +func decodeJSON(reader *strings.Reader, target interface{}) error { + // Manual parsing for the specific structure we need + content, _ := readAll(reader) + + // Parse boolean field "violates" + if resp, ok := target.(*jsonValidationResponse); ok { + resp.Violates = strings.Contains(strings.ToLower(content), `"violates":true`) || + strings.Contains(strings.ToLower(content), `"violates": true`) + + resp.Confidence = extractJSONField(content, "confidence") + resp.Description = extractJSONField(content, "description") + resp.Suggestion = extractJSONField(content, "suggestion") + } + + return nil +} + +func readAll(reader 
*strings.Reader) (string, error) { + var builder strings.Builder + buf := make([]byte, 1024) + for { + n, err := reader.Read(buf) + if n > 0 { + builder.Write(buf[:n]) + } + if err != nil { + break + } + } + return builder.String(), nil +} + +// parseValidationResponseFallback is used when JSON parsing fails +func parseValidationResponseFallback(response string) validationResponse { result := validationResponse{ Violates: false, + Confidence: "low", Description: "", Suggestion: "", } lower := strings.ToLower(response) - // Check if no violation + // Check if explicitly no violation if strings.Contains(lower, `"violates": false`) || strings.Contains(lower, `"violates":false`) || strings.Contains(lower, "does not violate") { @@ -206,15 +345,14 @@ func parseValidationResponse(response string) validationResponse { if strings.Contains(lower, `"violates": true`) || strings.Contains(lower, `"violates":true`) { result.Violates = true + result.Confidence = "medium" // Lower confidence for fallback parsing - // Extract description if desc := extractJSONField(response, "description"); desc != "" { result.Description = desc } else { result.Description = "Rule violation detected" } - // Extract suggestion if sugg := extractJSONField(response, "suggestion"); sugg != "" { result.Suggestion = sugg } From 1878e32575b708d80e277dbac7be48762f53e333 Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Wed, 26 Nov 2025 11:41:34 +0900 Subject: [PATCH 05/14] refactor: enhance LLM client with new request handling --- internal/adapter/checkstyle/converter.go | 4 +- internal/adapter/eslint/converter.go | 4 +- internal/adapter/pmd/converter.go | 4 +- internal/cmd/convert.go | 4 - internal/cmd/validate.go | 3 - internal/converter/converter.go | 50 +++- internal/converter/linters/prettier_tsc.go | 8 +- internal/llm/client.go | 284 +++++++++++++-------- internal/mcp/server.go | 29 ++- internal/server/server.go | 1 - internal/validator/llm_validator.go | 2 +- internal/validator/validator.go | 2 +- tests/e2e/full_workflow_test.go | 3 +- tests/e2e/mcp_integration_test.go | 3 +- tests/e2e/validator_test.go | 6 +- 15 files changed, 243 insertions(+), 164 deletions(-) diff --git a/internal/adapter/checkstyle/converter.go b/internal/adapter/checkstyle/converter.go index 640cf72..d4ac1c8 100644 --- a/internal/adapter/checkstyle/converter.go +++ b/internal/adapter/checkstyle/converter.go @@ -255,8 +255,8 @@ Output: userPrompt := fmt.Sprintf("Convert this Java rule to Checkstyle module:\n\n%s", rule.Say) - // Call LLM with minimal reasoning - response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) + // Call LLM with power model + low reasoning + response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/eslint/converter.go b/internal/adapter/eslint/converter.go index 5b2f1a1..06af44a 100644 --- a/internal/adapter/eslint/converter.go +++ b/internal/adapter/eslint/converter.go @@ -215,8 +215,8 @@ Output: userPrompt += fmt.Sprintf("\nSeverity: %s", rule.Severity) } - // Call LLM with minimal reasoning - response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) + // Call LLM with power model + low reasoning + response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) if err != nil { return "", nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/pmd/converter.go 
b/internal/adapter/pmd/converter.go index 0161856..09116c8 100644 --- a/internal/adapter/pmd/converter.go +++ b/internal/adapter/pmd/converter.go @@ -188,8 +188,8 @@ IMPORTANT: Return ONLY the JSON object. Do NOT include description, message, or userPrompt := fmt.Sprintf("Convert this Java rule to PMD rule reference:\n\n%s", rule.Say) - // Call LLM with minimal reasoning - response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) + // Call LLM with power model + low reasoning + response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/cmd/convert.go b/internal/cmd/convert.go index 8a12ed8..6787c9b 100644 --- a/internal/cmd/convert.go +++ b/internal/cmd/convert.go @@ -21,7 +21,6 @@ var ( convertOutputFile string convertTargets []string convertOutputDir string - convertOpenAIModel string convertConfidenceThreshold float64 convertTimeout int ) @@ -57,7 +56,6 @@ func init() { convertCmd.Flags().StringVarP(&convertOutputFile, "output", "o", "", "output code policy file (legacy mode)") convertCmd.Flags().StringSliceVar(&convertTargets, "targets", []string{}, buildTargetsDescription()) convertCmd.Flags().StringVar(&convertOutputDir, "output-dir", "", "output directory for linter configs (default: same as input file directory)") - convertCmd.Flags().StringVar(&convertOpenAIModel, "openai-model", "gpt-5-mini", "OpenAI model to use for inference") convertCmd.Flags().Float64Var(&convertConfidenceThreshold, "confidence-threshold", 0.7, "minimum confidence for LLM inference (0.0-1.0)") convertCmd.Flags().IntVar(&convertTimeout, "timeout", 30, "timeout for API calls in seconds") } @@ -142,7 +140,6 @@ func runNewConverter(userPolicy *schema.UserPolicy) error { timeout := time.Duration(convertTimeout) * time.Second llmClient := llm.NewClient( apiKey, - llm.WithModel(convertOpenAIModel), llm.WithTimeout(timeout), ) @@ -154,7 +151,6 @@ func runNewConverter(userPolicy *schema.UserPolicy) error { defer cancel() fmt.Printf("\n🚀 Converting with language-based routing and parallel LLM inference\n") - fmt.Printf("📝 Model: %s\n", convertOpenAIModel) fmt.Printf("📂 Output: %s\n\n", convertOutputDir) // Convert diff --git a/internal/cmd/validate.go b/internal/cmd/validate.go index 8e40e73..7184b54 100644 --- a/internal/cmd/validate.go +++ b/internal/cmd/validate.go @@ -17,7 +17,6 @@ import ( var ( validatePolicyFile string validateStaged bool - validateModel string validateTimeout int ) @@ -50,7 +49,6 @@ Examples: func init() { validateCmd.Flags().StringVarP(&validatePolicyFile, "policy", "p", "", "Path to code-policy.json (default: .sym/code-policy.json)") validateCmd.Flags().BoolVar(&validateStaged, "staged", false, "Validate only staged changes (default: all uncommitted changes)") - validateCmd.Flags().StringVar(&validateModel, "model", "gpt-5-mini", "OpenAI model to use") validateCmd.Flags().IntVar(&validateTimeout, "timeout", 30, "Timeout per rule check in seconds") } @@ -84,7 +82,6 @@ func runValidate(cmd *cobra.Command, args []string) error { // Create LLM client llmClient := llm.NewClient( apiKey, - llm.WithModel(validateModel), llm.WithTimeout(time.Duration(validateTimeout)*time.Second), ) diff --git a/internal/converter/converter.go b/internal/converter/converter.go index 2fbf2cb..94053f5 100644 --- a/internal/converter/converter.go +++ b/internal/converter/converter.go @@ -250,9 +250,17 @@ func (c *Converter) Convert(ctx context.Context, userPolicy 
*schema.UserPolicy) } // routeRulesWithLLM uses LLM to determine which linters are appropriate for each rule +// Rules are processed in parallel for better performance func (c *Converter) routeRulesWithLLM(ctx context.Context, userPolicy *schema.UserPolicy) map[string][]schema.UserRule { - linterRules := make(map[string][]schema.UserRule) + type routeResult struct { + rule schema.UserRule + linters []string + } + + results := make(chan routeResult, len(userPolicy.Rules)) + var wg sync.WaitGroup + // Process rules in parallel for _, rule := range userPolicy.Rules { // Get languages for this rule languages := rule.Languages @@ -264,21 +272,37 @@ func (c *Converter) routeRulesWithLLM(ctx context.Context, userPolicy *schema.Us availableLinters := c.getAvailableLinters(languages) if len(availableLinters) == 0 { // No language-specific linters, use llm-validator - linterRules[llmValidatorEngine] = append(linterRules[llmValidatorEngine], rule) + results <- routeResult{rule: rule, linters: []string{llmValidatorEngine}} continue } - // Ask LLM which linters are appropriate for this rule - selectedLinters := c.selectLintersForRule(ctx, rule, availableLinters) + wg.Add(1) + go func(r schema.UserRule, linters []string) { + defer wg.Done() - if len(selectedLinters) == 0 { - // LLM couldn't map to any linter, use llm-validator - linterRules[llmValidatorEngine] = append(linterRules[llmValidatorEngine], rule) - } else { - // Add rule to selected linters - for _, linter := range selectedLinters { - linterRules[linter] = append(linterRules[linter], rule) + // Ask LLM which linters are appropriate for this rule + selectedLinters := c.selectLintersForRule(ctx, r, linters) + + if len(selectedLinters) == 0 { + // LLM couldn't map to any linter, use llm-validator + results <- routeResult{rule: r, linters: []string{llmValidatorEngine}} + } else { + results <- routeResult{rule: r, linters: selectedLinters} } + }(rule, availableLinters) + } + + // Close results channel after all goroutines complete + go func() { + wg.Wait() + close(results) + }() + + // Collect results + linterRules := make(map[string][]schema.UserRule) + for result := range results { + for _, linter := range result.linters { + linterRules[linter] = append(linterRules[linter], result.rule) } } @@ -381,8 +405,8 @@ Reason: Requires knowing which packages are "large"`, linterDescriptions, routin userPrompt := fmt.Sprintf("Rule: %s\nCategory: %s", rule.Say, rule.Category) - // Call LLM with low reasoning (needs some thought for linter selection) - response, err := c.llmClient.CompleteLow(ctx, systemPrompt, userPrompt) + // Call LLM with power model + low reasoning (needs some thought for linter selection) + response, err := c.llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMedium).Execute(ctx) if err != nil { fmt.Fprintf(os.Stderr, "Warning: LLM routing failed for rule %s: %v\n", rule.ID, err) return []string{} // Will fall back to llm-validator diff --git a/internal/converter/linters/prettier_tsc.go b/internal/converter/linters/prettier_tsc.go index cc1af21..72d04b6 100644 --- a/internal/converter/linters/prettier_tsc.go +++ b/internal/converter/linters/prettier_tsc.go @@ -123,8 +123,8 @@ Output: userPrompt := fmt.Sprintf("Convert this rule to Prettier configuration:\n\n%s", rule.Say) - // Call LLM with minimal reasoning (fast, simple conversion task) - response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) + // Call LLM with power model + low reasoning + response, err := llmClient.Request(systemPrompt, 
userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } @@ -279,8 +279,8 @@ Output: userPrompt := fmt.Sprintf("Convert this rule to TypeScript compiler configuration:\n\n%s", rule.Say) - // Call LLM with minimal reasoning (fast, simple conversion task) - response, err := llmClient.CompleteMinimal(ctx, systemPrompt, userPrompt) + // Call LLM with power model + low reasoning + response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/llm/client.go b/internal/llm/client.go index 755f25e..a0ea105 100644 --- a/internal/llm/client.go +++ b/internal/llm/client.go @@ -10,102 +10,95 @@ import ( "time" "github.com/DevSymphony/sym-cli/internal/envutil" + mcpsdk "github.com/modelcontextprotocol/go-sdk/mcp" ) const ( openAIAPIURL = "https://api.openai.com/v1/chat/completions" - defaultModel = "gpt-5-mini" + defaultFastModel = "gpt-4o-mini" + defaultPowerModel = "gpt-5-mini" defaultMaxTokens = 1000 defaultTemperature = 1.0 defaultTimeout = 60 * time.Second ) -// ReasoningEffort defines the reasoning effort level for the model +// Mode defines the LLM client mode +type Mode string + +const ( + ModeAPI Mode = "api" + ModeMCP Mode = "mcp" +) + +// ReasoningEffort defines the reasoning effort level for o3-mini type ReasoningEffort string const ( - // ReasoningMinimal uses minimal reasoning - fastest, for simple tasks ReasoningMinimal ReasoningEffort = "minimal" - // ReasoningLow uses low reasoning - for straightforward tasks - ReasoningLow ReasoningEffort = "low" - // ReasoningMedium uses medium reasoning - balanced - ReasoningMedium ReasoningEffort = "medium" - // ReasoningHigh uses high reasoning - for complex tasks - ReasoningHigh ReasoningEffort = "high" + ReasoningLow ReasoningEffort = "low" + ReasoningMedium ReasoningEffort = "medium" + ReasoningHigh ReasoningEffort = "high" ) -// Client represents an OpenAI API client +// Client represents an LLM client type Client struct { - apiKey string - model string - httpClient *http.Client - maxTokens int - temperature float64 - reasoningEffort ReasoningEffort - verbose bool + mode Mode + apiKey string + fastModel string + powerModel string + httpClient *http.Client + mcpSession *mcpsdk.ServerSession + maxTokens int + temperature float64 + verbose bool } // ClientOption is a functional option for configuring the client type ClientOption func(*Client) -// WithModel sets the OpenAI model to use -func WithModel(model string) ClientOption { - return func(c *Client) { - c.model = model - } -} - -// WithMaxTokens sets the maximum tokens for responses +// WithMaxTokens sets the default max tokens func WithMaxTokens(maxTokens int) ClientOption { - return func(c *Client) { - c.maxTokens = maxTokens - } + return func(c *Client) { c.maxTokens = maxTokens } } -// WithTemperature sets the sampling temperature +// WithTemperature sets the default temperature func WithTemperature(temperature float64) ClientOption { - return func(c *Client) { - c.temperature = temperature - } -} - -// WithReasoningEffort sets the default reasoning effort level -func WithReasoningEffort(effort ReasoningEffort) ClientOption { - return func(c *Client) { - c.reasoningEffort = effort - } + return func(c *Client) { c.temperature = temperature } } // WithTimeout sets the HTTP client timeout func WithTimeout(timeout time.Duration) ClientOption { - return func(c *Client) { - c.httpClient.Timeout 
= timeout - } + return func(c *Client) { c.httpClient.Timeout = timeout } } // WithVerbose enables verbose logging func WithVerbose(verbose bool) ClientOption { + return func(c *Client) { c.verbose = verbose } +} + +// WithMCPSession sets the MCP session for MCP mode +func WithMCPSession(session *mcpsdk.ServerSession) ClientOption { return func(c *Client) { - c.verbose = verbose + c.mcpSession = session + c.mode = ModeMCP } } -// NewClient creates a new OpenAI API client +// NewClient creates a new LLM client func NewClient(apiKey string, opts ...ClientOption) *Client { if apiKey == "" { apiKey = envutil.GetAPIKey("OPENAI_API_KEY") } client := &Client{ - apiKey: apiKey, - model: defaultModel, - httpClient: &http.Client{ - Timeout: defaultTimeout, - }, - maxTokens: defaultMaxTokens, - temperature: defaultTemperature, - reasoningEffort: ReasoningLow, // Default to low for general use - verbose: false, + mode: ModeAPI, + apiKey: apiKey, + fastModel: defaultFastModel, + powerModel: defaultPowerModel, + httpClient: &http.Client{Timeout: defaultTimeout}, + maxTokens: defaultMaxTokens, + temperature: defaultTemperature, + verbose: false, } for _, opt := range opts { @@ -115,6 +108,62 @@ func NewClient(apiKey string, opts ...ClientOption) *Client { return client } +// Request creates a new request builder +// +// Usage: +// +// client.Request(system, user).Execute(ctx) // fast model (gpt-4o-mini) +// client.Request(system, user).WithPower(llm.ReasoningMedium).Execute(ctx) // power model (o3-mini) +// client.Request(system, user).WithMaxTokens(2000).Execute(ctx) // custom tokens +func (c *Client) Request(systemPrompt, userPrompt string) *RequestBuilder { + return &RequestBuilder{ + client: c, + system: systemPrompt, + user: userPrompt, + maxTokens: c.maxTokens, + temperature: c.temperature, + usePower: false, + } +} + +// RequestBuilder builds and executes LLM requests with chain methods +type RequestBuilder struct { + client *Client + system string + user string + maxTokens int + temperature float64 + usePower bool + effort ReasoningEffort +} + +// WithPower enables power model (o3-mini) with specified reasoning effort +func (r *RequestBuilder) WithPower(effort ReasoningEffort) *RequestBuilder { + r.usePower = true + r.effort = effort + return r +} + +// WithMaxTokens sets max tokens for this request +func (r *RequestBuilder) WithMaxTokens(tokens int) *RequestBuilder { + r.maxTokens = tokens + return r +} + +// WithTemperature sets temperature for this request +func (r *RequestBuilder) WithTemperature(temp float64) *RequestBuilder { + r.temperature = temp + return r +} + +// Execute sends the request and returns the response +func (r *RequestBuilder) Execute(ctx context.Context) (string, error) { + if r.client.mode == ModeMCP { + return r.client.executeViaMCP(ctx, r) + } + return r.client.executeViaAPI(ctx, r) +} + // openAIRequest represents the OpenAI API request structure type openAIRequest struct { Model string `json:"model"` @@ -124,13 +173,11 @@ type openAIRequest struct { ReasoningEffort string `json:"reasoning_effort,omitempty"` } -// openAIMessage represents a message in the conversation type openAIMessage struct { Role string `json:"role"` Content string `json:"content"` } -// openAIResponse represents the OpenAI API response structure type openAIResponse struct { ID string `json:"id"` Object string `json:"object"` @@ -156,67 +203,28 @@ type openAIResponse struct { } `json:"error,omitempty"` } -// CompleteOptions contains options for a single completion request -type CompleteOptions 
struct { - ReasoningEffort ReasoningEffort - MaxTokens int - Temperature float64 -} - -// Complete sends a chat completion request using the client's default reasoning effort -func (c *Client) Complete(ctx context.Context, systemPrompt, userPrompt string) (string, error) { - return c.CompleteWithOptions(ctx, systemPrompt, userPrompt, CompleteOptions{ - ReasoningEffort: c.reasoningEffort, - MaxTokens: c.maxTokens, - Temperature: c.temperature, - }) -} - -// CompleteMinimal sends a completion request with minimal reasoning (fastest) -// Use this for simple, repetitive tasks in goroutines -func (c *Client) CompleteMinimal(ctx context.Context, systemPrompt, userPrompt string) (string, error) { - return c.CompleteWithOptions(ctx, systemPrompt, userPrompt, CompleteOptions{ - ReasoningEffort: ReasoningMinimal, - MaxTokens: c.maxTokens, - Temperature: c.temperature, - }) -} - -// CompleteLow sends a completion request with low reasoning -// Use this for straightforward tasks that need some thought -func (c *Client) CompleteLow(ctx context.Context, systemPrompt, userPrompt string) (string, error) { - return c.CompleteWithOptions(ctx, systemPrompt, userPrompt, CompleteOptions{ - ReasoningEffort: ReasoningLow, - MaxTokens: c.maxTokens, - Temperature: c.temperature, - }) -} - -// CompleteWithOptions sends a chat completion request with specific options -func (c *Client) CompleteWithOptions(ctx context.Context, systemPrompt, userPrompt string, opts CompleteOptions) (string, error) { +// executeViaAPI sends request via OpenAI API +func (c *Client) executeViaAPI(ctx context.Context, r *RequestBuilder) (string, error) { if c.apiKey == "" { return "", fmt.Errorf("OpenAI API key not configured") } - // Use default values if not specified - if opts.ReasoningEffort == "" { - opts.ReasoningEffort = c.reasoningEffort - } - if opts.MaxTokens == 0 { - opts.MaxTokens = c.maxTokens - } - if opts.Temperature == 0 { - opts.Temperature = c.temperature + model := c.fastModel + if r.usePower { + model = c.powerModel } reqBody := openAIRequest{ - Model: c.model, + Model: model, Messages: []openAIMessage{ - {Role: "user", Content: systemPrompt + "\n\n" + userPrompt}, + {Role: "user", Content: r.system + "\n\n" + r.user}, }, - MaxTokens: opts.MaxTokens, - Temperature: opts.Temperature, - ReasoningEffort: string(opts.ReasoningEffort), + MaxTokens: r.maxTokens, + Temperature: r.temperature, + } + + if r.usePower { + reqBody.ReasoningEffort = string(r.effort) } jsonData, err := json.Marshal(reqBody) @@ -233,8 +241,13 @@ func (c *Client) CompleteWithOptions(ctx context.Context, systemPrompt, userProm req.Header.Set("Authorization", "Bearer "+c.apiKey) if c.verbose { - fmt.Printf("OpenAI API request:\n Model: %s\n Reasoning: %s\n Temperature: %.1f\n Prompt length: %d chars\n", - c.model, opts.ReasoningEffort, opts.Temperature, len(userPrompt)) + if r.usePower { + fmt.Printf("OpenAI API request:\n Model: %s\n Reasoning: %s\n Prompt length: %d chars\n", + model, r.effort, len(r.user)) + } else { + fmt.Printf("OpenAI API request:\n Model: %s\n Prompt length: %d chars\n", + model, len(r.user)) + } } resp, err := c.httpClient.Do(req) @@ -276,14 +289,61 @@ func (c *Client) CompleteWithOptions(ctx context.Context, systemPrompt, userProm return content, nil } -// CheckAvailability checks if the OpenAI API is available +// executeViaMCP sends request via MCP sampling +func (c *Client) executeViaMCP(ctx context.Context, r *RequestBuilder) (string, error) { + if c.mcpSession == nil { + return "", fmt.Errorf("MCP session not available") + 
} + + if c.verbose { + fmt.Printf("MCP Sampling request:\n MaxTokens: %d\n Prompt length: %d chars\n", + r.maxTokens, len(r.user)) + } + + combinedPrompt := r.system + "\n\n" + r.user + + result, err := c.mcpSession.CreateMessage(ctx, &mcpsdk.CreateMessageParams{ + Messages: []*mcpsdk.SamplingMessage{ + { + Role: "user", + Content: &mcpsdk.TextContent{Text: combinedPrompt}, + }, + }, + MaxTokens: int64(r.maxTokens), + }) + if err != nil { + return "", fmt.Errorf("MCP sampling failed: %w", err) + } + + var response string + if textContent, ok := result.Content.(*mcpsdk.TextContent); ok { + response = textContent.Text + } else { + return "", fmt.Errorf("unexpected content type from MCP sampling") + } + + if c.verbose { + fmt.Printf("MCP Sampling response:\n Model: %s\n Content length: %d chars\n", + result.Model, len(response)) + } + + return response, nil +} + +// CheckAvailability checks if the LLM is available func (c *Client) CheckAvailability(ctx context.Context) error { + if c.mode == ModeMCP { + if c.mcpSession == nil { + return fmt.Errorf("MCP session not available") + } + return nil + } + if c.apiKey == "" { return fmt.Errorf("OPENAI_API_KEY environment variable not set") } - // Simple test request with minimal reasoning - _, err := c.CompleteMinimal(ctx, "You are a test assistant.", "Say 'OK'") + _, err := c.Request("You are a test assistant.", "Say 'OK'").Execute(ctx) if err != nil { return fmt.Errorf("OpenAI API not available: %w", err) } diff --git a/internal/mcp/server.go b/internal/mcp/server.go index 9afa752..d2f927a 100644 --- a/internal/mcp/server.go +++ b/internal/mcp/server.go @@ -41,7 +41,6 @@ func ConvertPolicyWithLLM(userPolicyPath, codePolicyPath string) error { } llmClient := llm.NewClient(apiKey, - llm.WithModel("gpt-5-mini"), llm.WithTimeout(30*time.Second), ) @@ -231,7 +230,7 @@ func (s *Server) runStdioWithSDK(ctx context.Context) error { params := map[string]any{ "role": input.Role, } - result, rpcErr := s.handleValidateCode(params) + result, rpcErr := s.handleValidateCode(ctx, req.Session, params) if rpcErr != nil { return &sdkmcp.CallToolResult{IsError: true}, nil, fmt.Errorf("%s", rpcErr.Message) } @@ -427,7 +426,7 @@ type ViolationItem struct { // handleValidateCode handles code validation requests. // It validates git changes (diff) instead of entire files for efficiency. 
-func (s *Server) handleValidateCode(params map[string]interface{}) (interface{}, *RPCError) { +func (s *Server) handleValidateCode(ctx context.Context, session *sdkmcp.ServerSession, params map[string]interface{}) (interface{}, *RPCError) { // Get policy for validation (convert UserPolicy if needed) validationPolicy, err := s.getValidationPolicy() if err != nil { @@ -477,17 +476,24 @@ func (s *Server) handleValidateCode(params map[string]interface{}) (interface{}, }, nil } - // Setup LLM client for validation - apiKey := envutil.GetAPIKey("OPENAI_API_KEY") - if apiKey == "" { - return nil, &RPCError{ - Code: -32000, - Message: "OPENAI_API_KEY not found in environment or .sym/.env", + var llmClient *llm.Client + if session != nil { + // MCP mode: use host LLM via sampling + llmClient = llm.NewClient("", llm.WithMCPSession(session)) + fmt.Fprintf(os.Stderr, "✓ Using host LLM via MCP sampling\n") + } else { + // API mode: use OpenAI API directly + apiKey := envutil.GetAPIKey("OPENAI_API_KEY") + if apiKey == "" { + return nil, &RPCError{ + Code: -32000, + Message: "OPENAI_API_KEY not found in environment or .sym/.env", + } } + llmClient = llm.NewClient(apiKey) + fmt.Fprintf(os.Stderr, "✓ Using OpenAI API directly\n") } - llmClient := llm.NewClient(apiKey) - // Create unified validator that handles all engines + RBAC v := validator.NewValidator(validationPolicy, false) // verbose=false for MCP v.SetLLMClient(llmClient) @@ -496,7 +502,6 @@ func (s *Server) handleValidateCode(params map[string]interface{}) (interface{}, }() // Validate git changes using unified validator - ctx := context.Background() result, err := v.ValidateChanges(ctx, changes) if err != nil { return nil, &RPCError{ diff --git a/internal/server/server.go b/internal/server/server.go index 3ca9b76..d7f52bd 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -678,7 +678,6 @@ func (s *Server) handleConvert(w http.ResponseWriter, r *http.Request) { timeout := 30 * time.Second llmClient := llm.NewClient( apiKey, - llm.WithModel("gpt-5-mini-mini"), llm.WithTimeout(timeout), ) diff --git a/internal/validator/llm_validator.go b/internal/validator/llm_validator.go index eaf3845..b347ac8 100644 --- a/internal/validator/llm_validator.go +++ b/internal/validator/llm_validator.go @@ -192,7 +192,7 @@ Response: Analyze the code and determine if it violates the rule. 
Respond with JSON only.`, change.FilePath, rule.Desc, codeSnippet) // Call LLM with low reasoning (needs thought for code validation) - response, err := v.client.CompleteLow(ctx, systemPrompt, userPrompt) + response, err := v.client.Request(systemPrompt, userPrompt).Execute(ctx) if err != nil { return nil, err } diff --git a/internal/validator/validator.go b/internal/validator/validator.go index 393401c..0327e58 100644 --- a/internal/validator/validator.go +++ b/internal/validator/validator.go @@ -231,7 +231,7 @@ Does this code violate the convention?`, file, rule.Desc, string(content)) // Call LLM fileStartTime := time.Now() - response, err := v.llmClient.Complete(v.ctx, systemPrompt, userPrompt) + response, err := v.llmClient.Request(systemPrompt, userPrompt).Execute(v.ctx) fileExecMs := time.Since(fileStartTime).Milliseconds() // Record response in consolidated output diff --git a/tests/e2e/full_workflow_test.go b/tests/e2e/full_workflow_test.go index ab2c73b..6c1a418 100644 --- a/tests/e2e/full_workflow_test.go +++ b/tests/e2e/full_workflow_test.go @@ -75,7 +75,6 @@ func TestE2E_FullWorkflow(t *testing.T) { client := llm.NewClient( apiKey, - llm.WithModel("gpt-4o"), llm.WithTimeout(30*time.Second), ) @@ -334,7 +333,7 @@ func TestE2E_CodeGenerationFeedbackLoop(t *testing.T) { }, } - client := llm.NewClient(apiKey, llm.WithModel("gpt-4o")) + client := llm.NewClient(apiKey) v := validator.NewLLMValidator(client, policy) ctx := context.Background() diff --git a/tests/e2e/mcp_integration_test.go b/tests/e2e/mcp_integration_test.go index dd96804..6bb0187 100644 --- a/tests/e2e/mcp_integration_test.go +++ b/tests/e2e/mcp_integration_test.go @@ -143,7 +143,6 @@ func TestMCP_ValidateAIGeneratedCode(t *testing.T) { // Create LLM client client := llm.NewClient( apiKey, - llm.WithModel("gpt-4o"), llm.WithTimeout(30*time.Second), ) @@ -380,7 +379,7 @@ func TestMCP_EndToEndWorkflow(t *testing.T) { // Step 4: Validate generated code t.Log("STEP 4: Validating AI-generated code") - client := llm.NewClient(apiKey, llm.WithModel("gpt-4o")) + client := llm.NewClient(apiKey) v := validator.NewLLMValidator(client, policy) result, err := v.Validate(context.Background(), []validator.GitChange{ diff --git a/tests/e2e/validator_test.go b/tests/e2e/validator_test.go index 6d89fbd..f9643dc 100644 --- a/tests/e2e/validator_test.go +++ b/tests/e2e/validator_test.go @@ -31,7 +31,7 @@ func TestE2E_ValidatorWithPolicy(t *testing.T) { require.NotEmpty(t, policy.Rules, "Policy should have rules") // Create LLM client - client := llm.NewClient(apiKey, llm.WithModel("gpt-4o")) + client := llm.NewClient(apiKey) // Create validator v := validator.NewLLMValidator(client, policy) @@ -83,7 +83,7 @@ func TestE2E_ValidatorWithGoodCode(t *testing.T) { require.NoError(t, err) // Create LLM client - client := llm.NewClient(apiKey, llm.WithModel("gpt-4o")) + client := llm.NewClient(apiKey) // Create validator v := validator.NewLLMValidator(client, policy) @@ -182,7 +182,7 @@ func TestE2E_ValidatorFilter(t *testing.T) { require.NoError(t, err) // Create LLM client - client := llm.NewClient(apiKey, llm.WithModel("gpt-4o")) + client := llm.NewClient(apiKey) // Create validator v := validator.NewLLMValidator(client, policy) From ae0f5bd23f81b493c8b7a238c53a74f630881568 Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Mon, 1 Dec 2025 12:40:59 +0900 Subject: [PATCH 06/14] refactor: update LLM client usage --- internal/adapter/eslint/converter.go | 36 ++++++++----------- internal/adapter/prettier/converter.go | 2 +- 
internal/adapter/pylint/converter.go | 50 +++++++++++++------------- internal/adapter/tsc/converter.go | 2 +- 4 files changed, 42 insertions(+), 48 deletions(-) diff --git a/internal/adapter/eslint/converter.go b/internal/adapter/eslint/converter.go index 06af44a..42d5b86 100644 --- a/internal/adapter/eslint/converter.go +++ b/internal/adapter/eslint/converter.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "os" - "sort" "strings" "sync" @@ -143,15 +142,7 @@ func (c *Converter) ConvertRules(ctx context.Context, rules []schema.UserRule, l // convertSingleRule converts a single user rule to ESLint rule using LLM func (c *Converter) convertSingleRule(ctx context.Context, rule schema.UserRule, llmClient *llm.Client) (string, interface{}, error) { - // Build list of valid ESLint rules for the prompt - validRules := GetESLintRuleNames() - sort.Strings(validRules) - validRulesStr := strings.Join(validRules, ", ") - - systemPrompt := fmt.Sprintf(`You are an ESLint configuration expert. Convert natural language coding rules to ESLint rule configurations. - -IMPORTANT: You MUST ONLY use rules from this exact list of valid ESLint rules: -%s + systemPrompt := `You are an ESLint configuration expert. Convert natural language coding rules to ESLint rule configurations. Return ONLY a JSON object (no markdown fences) with this structure: { @@ -160,9 +151,20 @@ Return ONLY a JSON object (no markdown fences) with this structure: "options": {...} } +Available native ESLint rules: +- Console/Debug: no-console, no-debugger, no-alert +- Variables: no-unused-vars, no-undef, no-var, prefer-const +- Naming: camelcase, new-cap, id-length, id-match +- Code Quality: eqeqeq, no-eval, no-implied-eval, no-new-func +- Complexity: complexity, max-depth, max-nested-callbacks +- Length/Size: max-len, max-lines, max-lines-per-function, max-params, max-statements +- Style: indent, quotes, semi, comma-dangle, brace-style +- Imports: no-restricted-imports, no-duplicate-imports +- Best Practices: curly, dot-notation, no-else-return, no-empty, no-empty-function, no-magic-numbers, no-throw-literal, no-useless-return, require-await + CRITICAL RULES: -1. ONLY use rule names from the list above - do NOT invent or guess rule names -2. If no rule from the list can enforce this requirement, return rule_name as empty string "" +1. ONLY use native ESLint rules - do NOT invent or guess rule names +2. If no rule can enforce this requirement, return rule_name as empty string "" 3. Do NOT suggest plugin rules (e.g., @typescript-eslint/*, eslint-plugin-*) 4. 
When in doubt, return empty rule_name - it's better to skip than use wrong rule @@ -208,7 +210,7 @@ Output: "severity": "off", "options": null } -(Reason: Requires plugin or semantic analysis)`, validRulesStr) +(Reason: Requires plugin or semantic analysis)` userPrompt := fmt.Sprintf("Convert this rule to ESLint configuration:\n\n%s", rule.Say) if rule.Severity != "" { @@ -247,14 +249,6 @@ Output: return "", nil, nil } - // VALIDATION: Check if the rule actually exists in our registry - validation := ValidateESLintRule(result.RuleName, result.Options) - if !validation.Valid { - // Rule doesn't exist - skip it (will be handled by llm-validator) - fmt.Printf("⚠️ Invalid ESLint rule '%s': %s\n", result.RuleName, validation.Message) - return "", nil, nil - } - // Map user severity to ESLint severity if needed severity := mapSeverity(rule.Severity) if severity == "" { diff --git a/internal/adapter/prettier/converter.go b/internal/adapter/prettier/converter.go index 51c89c2..010d95e 100644 --- a/internal/adapter/prettier/converter.go +++ b/internal/adapter/prettier/converter.go @@ -134,7 +134,7 @@ Output: userPrompt := fmt.Sprintf("Convert this rule to Prettier configuration:\n\n%s", rule.Say) // Call LLM - response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/pylint/converter.go b/internal/adapter/pylint/converter.go index edea893..7bd6b55 100644 --- a/internal/adapter/pylint/converter.go +++ b/internal/adapter/pylint/converter.go @@ -210,7 +210,7 @@ Output: } // Call LLM - response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) if err != nil { return "", nil, fmt.Errorf("LLM call failed: %w", err) } @@ -298,38 +298,38 @@ func (c *Converter) generatePylintRC(enabledRules []string, options map[string]m // getOptionSection returns the Pylint config section for a given option func getOptionSection(option string) string { formatOptions := map[string]bool{ - "max-line-length": true, - "max-module-lines": true, - "indent-string": true, + "max-line-length": true, + "max-module-lines": true, + "indent-string": true, "indent-after-paren": true, } basicOptions := map[string]bool{ - "variable-rgx": true, - "function-rgx": true, - "class-rgx": true, - "const-rgx": true, - "argument-rgx": true, - "attr-rgx": true, - "method-rgx": true, - "module-rgx": true, - "good-names": true, - "bad-names": true, + "variable-rgx": true, + "function-rgx": true, + "class-rgx": true, + "const-rgx": true, + "argument-rgx": true, + "attr-rgx": true, + "method-rgx": true, + "module-rgx": true, + "good-names": true, + "bad-names": true, "include-naming-hint": true, } designOptions := map[string]bool{ - "max-args": true, - "max-locals": true, - "max-returns": true, - "max-branches": true, - "max-statements": true, - "max-parents": true, - "max-attributes": true, - "min-public-methods": true, - "max-public-methods": true, - "max-bool-expr": true, - "max-nested-blocks": true, + "max-args": true, + "max-locals": true, + "max-returns": true, + "max-branches": true, + "max-statements": true, + "max-parents": true, + "max-attributes": true, + "min-public-methods": true, + "max-public-methods": true, + "max-bool-expr": true, + "max-nested-blocks": true, } exceptOptions := map[string]bool{ diff --git 
a/internal/adapter/tsc/converter.go b/internal/adapter/tsc/converter.go index a4a7f11..9259e34 100644 --- a/internal/adapter/tsc/converter.go +++ b/internal/adapter/tsc/converter.go @@ -148,7 +148,7 @@ Output: userPrompt := fmt.Sprintf("Convert this rule to TypeScript compiler configuration:\n\n%s", rule.Say) // Call LLM - response, err := llmClient.Complete(ctx, systemPrompt, userPrompt) + response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } From a7f375a790228912232fa21fe3efa039800262e9 Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Mon, 1 Dec 2025 13:41:25 +0900 Subject: [PATCH 07/14] refactor: remove deprecated linter files and consolidate linter architecture --- internal/converter/linters/prettier_tsc.go | 312 ------------- internal/converter/linters/registry.go | 488 --------------------- 2 files changed, 800 deletions(-) delete mode 100644 internal/converter/linters/prettier_tsc.go delete mode 100644 internal/converter/linters/registry.go diff --git a/internal/converter/linters/prettier_tsc.go b/internal/converter/linters/prettier_tsc.go deleted file mode 100644 index 72d04b6..0000000 --- a/internal/converter/linters/prettier_tsc.go +++ /dev/null @@ -1,312 +0,0 @@ -package linters - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/DevSymphony/sym-cli/internal/llm" - "github.com/DevSymphony/sym-cli/pkg/schema" -) - -// PrettierLinterConverter converts rules to Prettier configuration -type PrettierLinterConverter struct{} - -// NewPrettierLinterConverter creates a new Prettier converter -func NewPrettierLinterConverter() *PrettierLinterConverter { - return &PrettierLinterConverter{} -} - -// Name returns the linter name -func (c *PrettierLinterConverter) Name() string { - return "prettier" -} - -// SupportedLanguages returns supported languages -func (c *PrettierLinterConverter) SupportedLanguages() []string { - return []string{"javascript", "js", "typescript", "ts", "jsx", "tsx"} -} - -// ConvertRules converts formatting rules to Prettier config using LLM -func (c *PrettierLinterConverter) ConvertRules(ctx context.Context, rules []schema.UserRule, llmClient *llm.Client) (*LinterConfig, error) { - if llmClient == nil { - return nil, fmt.Errorf("LLM client is required") - } - - // Start with default Prettier configuration - prettierConfig := map[string]interface{}{ - "semi": true, - "singleQuote": false, - "tabWidth": 2, - "useTabs": false, - "trailingComma": "es5", - "printWidth": 80, - "arrowParens": "always", - } - - // Use LLM to infer settings from rules - for _, rule := range rules { - config, err := c.convertSingleRule(ctx, rule, llmClient) - if err != nil { - continue // Skip rules that cannot be converted - } - - // Merge LLM-generated config - for key, value := range config { - prettierConfig[key] = value - } - } - - content, err := json.MarshalIndent(prettierConfig, "", " ") - if err != nil { - return nil, fmt.Errorf("failed to marshal config: %w", err) - } - - return &LinterConfig{ - Filename: ".prettierrc", - Content: content, - Format: "json", - }, nil -} - -// convertSingleRule converts a single user rule to Prettier config using LLM -func (c *PrettierLinterConverter) convertSingleRule(ctx context.Context, rule schema.UserRule, llmClient *llm.Client) (map[string]interface{}, error) { - // Build list of valid Prettier options for the prompt - validOptions := GetPrettierOptionNames() - validOptionsStr := 
strings.Join(validOptions, ", ") - - systemPrompt := fmt.Sprintf(`You are a Prettier configuration expert. Convert natural language formatting rules to Prettier configuration options. - -IMPORTANT: You MUST ONLY use options from this exact list of valid Prettier options: -%s - -Return ONLY a JSON object (no markdown fences) with Prettier options. -If the rule cannot be expressed with Prettier options, return empty object: {} - -CRITICAL RULES: -1. ONLY use option names from the list above -2. Do NOT invent new options -3. If no option can enforce this rule, return {} - -Examples: - -Input: "Use single quotes for strings" -Output: -{ - "singleQuote": true -} - -Input: "No semicolons" -Output: -{ - "semi": false -} - -Input: "Use 4 spaces for indentation" -Output: -{ - "tabWidth": 4, - "useTabs": false -} - -Input: "Maximum line length is 120 characters" -Output: -{ - "printWidth": 120 -} - -Input: "Sort imports alphabetically" -Output: -{} -(Reason: No native Prettier option for this)`, validOptionsStr) - - userPrompt := fmt.Sprintf("Convert this rule to Prettier configuration:\n\n%s", rule.Say) - - // Call LLM with power model + low reasoning - response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) - if err != nil { - return nil, fmt.Errorf("LLM call failed: %w", err) - } - - // Parse response - response = strings.TrimSpace(response) - response = strings.TrimPrefix(response, "```json") - response = strings.TrimPrefix(response, "```") - response = strings.TrimSuffix(response, "```") - response = strings.TrimSpace(response) - - var config map[string]interface{} - if err := json.Unmarshal([]byte(response), &config); err != nil { - return nil, fmt.Errorf("failed to parse LLM response: %w", err) - } - - // VALIDATION: Filter out invalid options - validConfig := make(map[string]interface{}) - for key, value := range config { - validation := ValidatePrettierOption(key, value) - if validation.Valid { - validConfig[key] = value - } else { - fmt.Printf("⚠️ Invalid Prettier option '%s': %s\n", key, validation.Message) - } - } - - return validConfig, nil -} - -// TSCLinterConverter converts rules to TypeScript compiler configuration -type TSCLinterConverter struct{} - -// NewTSCLinterConverter creates a new TSC converter -func NewTSCLinterConverter() *TSCLinterConverter { - return &TSCLinterConverter{} -} - -// Name returns the linter name -func (c *TSCLinterConverter) Name() string { - return "tsc" -} - -// SupportedLanguages returns supported languages -func (c *TSCLinterConverter) SupportedLanguages() []string { - return []string{"typescript", "ts", "tsx"} -} - -// ConvertRules converts type-checking rules to tsconfig.json using LLM -func (c *TSCLinterConverter) ConvertRules(ctx context.Context, rules []schema.UserRule, llmClient *llm.Client) (*LinterConfig, error) { - if llmClient == nil { - return nil, fmt.Errorf("LLM client is required") - } - - // Start with strict TypeScript configuration - tsConfig := map[string]interface{}{ - "compilerOptions": map[string]interface{}{ - "target": "ES2020", - "module": "commonjs", - "lib": []string{"ES2020"}, - "strict": true, - "esModuleInterop": true, - "skipLibCheck": true, - "forceConsistentCasingInFileNames": true, - "resolveJsonModule": true, - "moduleResolution": "node", - "noImplicitAny": true, - "strictNullChecks": true, - "strictFunctionTypes": true, - "noUnusedLocals": false, - "noUnusedParameters": false, - }, - } - - compilerOpts := tsConfig["compilerOptions"].(map[string]interface{}) - - // Use 
LLM to infer settings from rules - for _, rule := range rules { - config, err := c.convertSingleRule(ctx, rule, llmClient) - if err != nil { - continue // Skip rules that cannot be converted - } - - // Merge LLM-generated compiler options - for key, value := range config { - compilerOpts[key] = value - } - } - - content, err := json.MarshalIndent(tsConfig, "", " ") - if err != nil { - return nil, fmt.Errorf("failed to marshal config: %w", err) - } - - return &LinterConfig{ - Filename: "tsconfig.json", - Content: content, - Format: "json", - }, nil -} - -// convertSingleRule converts a single user rule to TypeScript compiler option using LLM -func (c *TSCLinterConverter) convertSingleRule(ctx context.Context, rule schema.UserRule, llmClient *llm.Client) (map[string]interface{}, error) { - // Build list of valid TSC options for the prompt - validOptions := GetTSCOptionNames() - validOptionsStr := strings.Join(validOptions, ", ") - - systemPrompt := fmt.Sprintf(`You are a TypeScript compiler configuration expert. Convert natural language type-checking rules to tsconfig.json compiler options. - -IMPORTANT: You MUST ONLY use options from this exact list of valid TypeScript compiler options: -%s - -Return ONLY a JSON object (no markdown fences) with TypeScript compiler options. -If the rule cannot be expressed with TypeScript compiler options, return empty object: {} - -CRITICAL RULES: -1. ONLY use option names from the list above -2. Do NOT invent new options -3. If no option can enforce this rule, return {} - -Examples: - -Input: "No implicit any types allowed" -Output: -{ - "noImplicitAny": true -} - -Input: "Check for null and undefined strictly" -Output: -{ - "strictNullChecks": true -} - -Input: "Report unused variables" -Output: -{ - "noUnusedLocals": true, - "noUnusedParameters": true -} - -Input: "Enable all strict type checks" -Output: -{ - "strict": true -} - -Input: "Functions must have return type annotations" -Output: -{} -(Reason: No native TSC option for this - requires plugin)`, validOptionsStr) - - userPrompt := fmt.Sprintf("Convert this rule to TypeScript compiler configuration:\n\n%s", rule.Say) - - // Call LLM with power model + low reasoning - response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) - if err != nil { - return nil, fmt.Errorf("LLM call failed: %w", err) - } - - // Parse response - response = strings.TrimSpace(response) - response = strings.TrimPrefix(response, "```json") - response = strings.TrimPrefix(response, "```") - response = strings.TrimSuffix(response, "```") - response = strings.TrimSpace(response) - - var config map[string]interface{} - if err := json.Unmarshal([]byte(response), &config); err != nil { - return nil, fmt.Errorf("failed to parse LLM response: %w", err) - } - - // VALIDATION: Filter out invalid options - validConfig := make(map[string]interface{}) - for key, value := range config { - validation := ValidateTSCOption(key, value) - if validation.Valid { - validConfig[key] = value - } else { - fmt.Printf("⚠️ Invalid TSC option '%s': %s\n", key, validation.Message) - } - } - - return validConfig, nil -} diff --git a/internal/converter/linters/registry.go b/internal/converter/linters/registry.go deleted file mode 100644 index b0fff0a..0000000 --- a/internal/converter/linters/registry.go +++ /dev/null @@ -1,488 +0,0 @@ -package linters - -// ESLintRuleRegistry contains all valid native ESLint rules with their options schema -// This is used to validate LLM-generated rules and prevent invalid 
configurations -var ESLintRuleRegistry = map[string]RuleDefinition{ - // Console/Debug - "no-console": { - Description: "Disallow the use of console", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "allow": {Type: "array", Items: "string"}, - }, - }, - }, - "no-debugger": { - Description: "Disallow the use of debugger", - }, - "no-alert": { - Description: "Disallow the use of alert, confirm, and prompt", - }, - - // Variables - "no-unused-vars": { - Description: "Disallow unused variables", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "vars": {Type: "string", Enum: []string{"all", "local"}}, - "varsIgnorePattern": {Type: "string"}, - "args": {Type: "string", Enum: []string{"all", "after-used", "none"}}, - "argsIgnorePattern": {Type: "string"}, - "caughtErrors": {Type: "string", Enum: []string{"all", "none"}}, - "ignoreRestSiblings": {Type: "boolean"}, - }, - }, - }, - "no-undef": { - Description: "Disallow the use of undeclared variables", - }, - "no-var": { - Description: "Require let or const instead of var", - }, - "prefer-const": { - Description: "Require const declarations for variables that are never reassigned", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "destructuring": {Type: "string", Enum: []string{"any", "all"}}, - "ignoreReadBeforeAssign": {Type: "boolean"}, - }, - }, - }, - - // Naming - "camelcase": { - Description: "Enforce camelcase naming convention", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "properties": {Type: "string", Enum: []string{"always", "never"}}, - "ignoreDestructuring": {Type: "boolean"}, - "ignoreImports": {Type: "boolean"}, - "ignoreGlobals": {Type: "boolean"}, - "allow": {Type: "array", Items: "string"}, - }, - }, - }, - "new-cap": { - Description: "Require constructor names to begin with a capital letter", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "newIsCap": {Type: "boolean"}, - "capIsNew": {Type: "boolean"}, - "newIsCapExceptions": {Type: "array", Items: "string"}, - "capIsNewExceptions": {Type: "array", Items: "string"}, - "properties": {Type: "boolean"}, - }, - }, - }, - "id-length": { - Description: "Enforce minimum and maximum identifier lengths", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "min": {Type: "number"}, - "max": {Type: "number"}, - "properties": {Type: "string", Enum: []string{"always", "never"}}, - "exceptions": {Type: "array", Items: "string"}, - }, - }, - }, - "id-match": { - Description: "Require identifiers to match a specified regular expression", - Options: OptionsSchema{ - Type: "string", // regex pattern - }, - }, - - // Code Quality - "eqeqeq": { - Description: "Require the use of === and !==", - Options: OptionsSchema{ - Type: "string", - Enum: []string{"always", "smart"}, - }, - }, - "no-eval": { - Description: "Disallow the use of eval()", - }, - "no-implied-eval": { - Description: "Disallow the use of eval()-like methods", - }, - "no-new-func": { - Description: "Disallow new operators with the Function object", - }, - - // Complexity - "complexity": { - Description: "Enforce a maximum cyclomatic complexity", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "max": {Type: "number"}, - }, - }, - }, - "max-depth": { - Description: "Enforce a maximum depth that blocks can be nested", - Options: OptionsSchema{ - Type: "object", - 
Properties: map[string]OptionProperty{ - "max": {Type: "number"}, - }, - }, - }, - "max-nested-callbacks": { - Description: "Enforce a maximum depth that callbacks can be nested", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "max": {Type: "number"}, - }, - }, - }, - - // Length/Size - "max-len": { - Description: "Enforce a maximum line length", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "code": {Type: "number"}, - "tabWidth": {Type: "number"}, - "comments": {Type: "number"}, - "ignorePattern": {Type: "string"}, - "ignoreComments": {Type: "boolean"}, - "ignoreTrailingComments": {Type: "boolean"}, - "ignoreUrls": {Type: "boolean"}, - "ignoreStrings": {Type: "boolean"}, - "ignoreTemplateLiterals": {Type: "boolean"}, - "ignoreRegExpLiterals": {Type: "boolean"}, - }, - }, - }, - "max-lines": { - Description: "Enforce a maximum number of lines per file", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "max": {Type: "number"}, - "skipBlankLines": {Type: "boolean"}, - "skipComments": {Type: "boolean"}, - }, - }, - }, - "max-lines-per-function": { - Description: "Enforce a maximum number of lines of code in a function", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "max": {Type: "number"}, - "skipBlankLines": {Type: "boolean"}, - "skipComments": {Type: "boolean"}, - "IIFEs": {Type: "boolean"}, - }, - }, - }, - "max-params": { - Description: "Enforce a maximum number of parameters in function definitions", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "max": {Type: "number"}, - }, - }, - }, - "max-statements": { - Description: "Enforce a maximum number of statements allowed in function blocks", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "max": {Type: "number"}, - "ignoreTopLevelFunctions": {Type: "boolean"}, - }, - }, - }, - - // Style - "indent": { - Description: "Enforce consistent indentation", - Options: OptionsSchema{ - Type: "mixed", // number or "tab" - }, - }, - "quotes": { - Description: "Enforce the consistent use of either backticks, double, or single quotes", - Options: OptionsSchema{ - Type: "string", - Enum: []string{"single", "double", "backtick"}, - }, - }, - "semi": { - Description: "Require or disallow semicolons instead of ASI", - Options: OptionsSchema{ - Type: "string", - Enum: []string{"always", "never"}, - }, - }, - "comma-dangle": { - Description: "Require or disallow trailing commas", - Options: OptionsSchema{ - Type: "string", - Enum: []string{"never", "always", "always-multiline", "only-multiline"}, - }, - }, - "brace-style": { - Description: "Enforce consistent brace style for blocks", - Options: OptionsSchema{ - Type: "string", - Enum: []string{"1tbs", "stroustrup", "allman"}, - }, - }, - - // Imports - "no-restricted-imports": { - Description: "Disallow specified modules when loaded by import", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "paths": {Type: "array", Items: "string"}, - "patterns": {Type: "array", Items: "string"}, - }, - }, - }, - "no-duplicate-imports": { - Description: "Disallow duplicate module imports", - }, - - // Best Practices - "curly": { - Description: "Enforce consistent brace style for all control statements", - Options: OptionsSchema{ - Type: "string", - Enum: []string{"all", "multi", "multi-line", "multi-or-nest", "consistent"}, - }, - }, - "dot-notation": 
{ - Description: "Enforce dot notation whenever possible", - }, - "no-else-return": { - Description: "Disallow else blocks after return statements in if statements", - }, - "no-empty": { - Description: "Disallow empty block statements", - }, - "no-empty-function": { - Description: "Disallow empty functions", - }, - "no-magic-numbers": { - Description: "Disallow magic numbers", - Options: OptionsSchema{ - Type: "object", - Properties: map[string]OptionProperty{ - "ignore": {Type: "array", Items: "number"}, - "ignoreArrayIndexes": {Type: "boolean"}, - "ignoreDefaultValues": {Type: "boolean"}, - "enforceConst": {Type: "boolean"}, - "detectObjects": {Type: "boolean"}, - }, - }, - }, - "no-throw-literal": { - Description: "Disallow throwing literals as exceptions", - }, - "no-useless-return": { - Description: "Disallow redundant return statements", - }, - "require-await": { - Description: "Disallow async functions which have no await expression", - }, -} - -// PrettierOptionRegistry contains all valid Prettier options -var PrettierOptionRegistry = map[string]OptionProperty{ - "printWidth": {Type: "number", Default: 80}, - "tabWidth": {Type: "number", Default: 2}, - "useTabs": {Type: "boolean", Default: false}, - "semi": {Type: "boolean", Default: true}, - "singleQuote": {Type: "boolean", Default: false}, - "quoteProps": {Type: "string", Enum: []string{"as-needed", "consistent", "preserve"}}, - "jsxSingleQuote": {Type: "boolean", Default: false}, - "trailingComma": {Type: "string", Enum: []string{"all", "es5", "none"}}, - "bracketSpacing": {Type: "boolean", Default: true}, - "bracketSameLine": {Type: "boolean", Default: false}, - "arrowParens": {Type: "string", Enum: []string{"always", "avoid"}}, - "proseWrap": {Type: "string", Enum: []string{"always", "never", "preserve"}}, - "htmlWhitespaceSensitivity": {Type: "string", Enum: []string{"css", "strict", "ignore"}}, - "endOfLine": {Type: "string", Enum: []string{"lf", "crlf", "cr", "auto"}}, - "singleAttributePerLine": {Type: "boolean", Default: false}, -} - -// TSCOptionRegistry contains all valid TypeScript compiler options for linting -var TSCOptionRegistry = map[string]OptionProperty{ - // Strict Checks - "strict": {Type: "boolean", Default: false}, - "noImplicitAny": {Type: "boolean", Default: false}, - "strictNullChecks": {Type: "boolean", Default: false}, - "strictFunctionTypes": {Type: "boolean", Default: false}, - "strictBindCallApply": {Type: "boolean", Default: false}, - "strictPropertyInitialization": {Type: "boolean", Default: false}, - "noImplicitThis": {Type: "boolean", Default: false}, - "useUnknownInCatchVariables": {Type: "boolean", Default: false}, - "alwaysStrict": {Type: "boolean", Default: false}, - - // Linting - "noUnusedLocals": {Type: "boolean", Default: false}, - "noUnusedParameters": {Type: "boolean", Default: false}, - "exactOptionalPropertyTypes": {Type: "boolean", Default: false}, - "noImplicitReturns": {Type: "boolean", Default: false}, - "noFallthroughCasesInSwitch": {Type: "boolean", Default: false}, - "noUncheckedIndexedAccess": {Type: "boolean", Default: false}, - "noImplicitOverride": {Type: "boolean", Default: false}, - "noPropertyAccessFromIndexSignature": {Type: "boolean", Default: false}, - "allowUnusedLabels": {Type: "boolean", Default: true}, - "allowUnreachableCode": {Type: "boolean", Default: true}, -} - -// RuleDefinition defines a linter rule's schema -type RuleDefinition struct { - Description string - Options OptionsSchema - Deprecated bool - Replacement string // If deprecated, which rule replaces 
it -} - -// OptionsSchema defines the schema for rule options -type OptionsSchema struct { - Type string // "object", "string", "number", "boolean", "array", "mixed" - Properties map[string]OptionProperty // For object type - Items string // For array type, element type - Enum []string // Valid values for string type -} - -// OptionProperty defines a single option property -type OptionProperty struct { - Type string - Enum []string - Items string // For arrays - Default interface{} // Default value -} - -// ValidateESLintRule checks if a rule name and options are valid -func ValidateESLintRule(ruleName string, options interface{}) ValidationError { - def, exists := ESLintRuleRegistry[ruleName] - if !exists { - return ValidationError{ - Valid: false, - Message: "unknown ESLint rule: " + ruleName, - Suggestion: "This rule may require a plugin or doesn't exist. " + - "Consider using llm-validator for this check instead.", - } - } - - if def.Deprecated { - return ValidationError{ - Valid: false, - Message: "deprecated rule: " + ruleName, - Suggestion: "Use '" + def.Replacement + "' instead.", - } - } - - // TODO: Add options validation based on OptionsSchema - return ValidationError{Valid: true} -} - -// ValidatePrettierOption checks if a Prettier option is valid -func ValidatePrettierOption(optionName string, value interface{}) ValidationError { - def, exists := PrettierOptionRegistry[optionName] - if !exists { - return ValidationError{ - Valid: false, - Message: "unknown Prettier option: " + optionName, - } - } - - // Validate type and enum if applicable - if len(def.Enum) > 0 { - strVal, ok := value.(string) - if ok { - valid := false - for _, allowed := range def.Enum { - if strVal == allowed { - valid = true - break - } - } - if !valid { - return ValidationError{ - Valid: false, - Message: "invalid value for " + optionName, - Suggestion: "Valid values: " + joinStrings(def.Enum), - } - } - } - } - - return ValidationError{Valid: true} -} - -// ValidateTSCOption checks if a TypeScript compiler option is valid -func ValidateTSCOption(optionName string, value interface{}) ValidationError { - _, exists := TSCOptionRegistry[optionName] - if !exists { - return ValidationError{ - Valid: false, - Message: "unknown TypeScript compiler option: " + optionName, - } - } - - return ValidationError{Valid: true} -} - -// ValidationError represents a validation result -type ValidationError struct { - Valid bool - Message string - Suggestion string -} - -func joinStrings(strs []string) string { - result := "" - for i, s := range strs { - if i > 0 { - result += ", " - } - result += s - } - return result -} - -// GetESLintRuleNames returns all valid ESLint rule names -func GetESLintRuleNames() []string { - names := make([]string, 0, len(ESLintRuleRegistry)) - for name := range ESLintRuleRegistry { - names = append(names, name) - } - return names -} - -// GetPrettierOptionNames returns all valid Prettier option names -func GetPrettierOptionNames() []string { - names := make([]string, 0, len(PrettierOptionRegistry)) - for name := range PrettierOptionRegistry { - names = append(names, name) - } - return names -} - -// GetTSCOptionNames returns all valid TypeScript compiler option names -func GetTSCOptionNames() []string { - names := make([]string, 0, len(TSCOptionRegistry)) - for name := range TSCOptionRegistry { - names = append(names, name) - } - return names -} From 29addccd0ac8c3e1158a7e3ce694878847820c7b Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Mon, 1 Dec 2025 13:47:17 +0900 Subject: [PATCH 08/14] 
fix: unused parameter in linter test --- cmd/test-linter/main.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/test-linter/main.go b/cmd/test-linter/main.go index f673bc9..a344d89 100644 --- a/cmd/test-linter/main.go +++ b/cmd/test-linter/main.go @@ -19,10 +19,9 @@ func main() { toolsDir := filepath.Join(homeDir, ".sym", "tools") workDir, _ := os.Getwd() - adp := eslint.NewAdapter(toolsDir, workDir) + adp := eslint.NewAdapter(toolsDir) fmt.Printf("✓ Created ESLint adapter\n") fmt.Printf(" Tools directory: %s\n", adp.ToolsDir) - fmt.Printf(" Work directory: %s\n\n", adp.WorkDir) // 2. Check availability ctx := context.Background() From 733570222ea9426f77d6fa1e60c29c4a8bdf5cf3 Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Tue, 2 Dec 2025 14:13:31 +0000 Subject: [PATCH 09/14] feat: enhance LLM engine management and configuration with using local LLM --- internal/cmd/init.go | 33 +- internal/cmd/llm.go | 497 ++++++++++++++++++ internal/llm/client.go | 443 ++++++++-------- internal/llm/complexity.go | 17 + internal/llm/config.go | 366 +++++++++++++ internal/llm/config_test.go | 160 ++++++ internal/llm/engine/api.go | 247 +++++++++ internal/llm/engine/api_test.go | 64 +++ internal/llm/engine/cli.go | 224 ++++++++ internal/llm/engine/cli_test.go | 77 +++ internal/llm/engine/cliprovider/claude.go | 30 ++ internal/llm/engine/cliprovider/gemini.go | 27 + internal/llm/engine/cliprovider/provider.go | 141 +++++ .../llm/engine/cliprovider/provider_test.go | 120 +++++ internal/llm/engine/engine.go | 113 ++++ internal/llm/engine/engine_test.go | 98 ++++ internal/llm/engine/mcp.go | 116 ++++ 17 files changed, 2525 insertions(+), 248 deletions(-) create mode 100644 internal/cmd/llm.go create mode 100644 internal/llm/complexity.go create mode 100644 internal/llm/config.go create mode 100644 internal/llm/config_test.go create mode 100644 internal/llm/engine/api.go create mode 100644 internal/llm/engine/api_test.go create mode 100644 internal/llm/engine/cli.go create mode 100644 internal/llm/engine/cli_test.go create mode 100644 internal/llm/engine/cliprovider/claude.go create mode 100644 internal/llm/engine/cliprovider/gemini.go create mode 100644 internal/llm/engine/cliprovider/provider.go create mode 100644 internal/llm/engine/cliprovider/provider_test.go create mode 100644 internal/llm/engine/engine.go create mode 100644 internal/llm/engine/engine_test.go create mode 100644 internal/llm/engine/mcp.go diff --git a/internal/cmd/init.go b/internal/cmd/init.go index e1f2415..feae6c7 100644 --- a/internal/cmd/init.go +++ b/internal/cmd/init.go @@ -33,19 +33,23 @@ This command: } var ( - initForce bool - skipMCPRegister bool - registerMCPOnly bool - skipAPIKey bool - setupAPIKeyOnly bool + initForce bool + skipMCPRegister bool + registerMCPOnly bool + skipAPIKey bool + setupAPIKeyOnly bool + skipLLMSetup bool + setupLLMOnly bool ) func init() { initCmd.Flags().BoolVarP(&initForce, "force", "f", false, "Overwrite existing roles.json") initCmd.Flags().BoolVar(&skipMCPRegister, "skip-mcp", false, "Skip MCP server registration prompt") initCmd.Flags().BoolVar(®isterMCPOnly, "register-mcp", false, "Register MCP server only (skip roles/policy init)") - initCmd.Flags().BoolVar(&skipAPIKey, "skip-api-key", false, "Skip OpenAI API key configuration prompt") - initCmd.Flags().BoolVar(&setupAPIKeyOnly, "setup-api-key", false, "Setup OpenAI API key only (skip roles/policy init)") + initCmd.Flags().BoolVar(&skipAPIKey, "skip-api-key", false, "Skip OpenAI API key configuration prompt (deprecated, 
use --skip-llm)") + initCmd.Flags().BoolVar(&setupAPIKeyOnly, "setup-api-key", false, "Setup OpenAI API key only (deprecated, use --setup-llm)") + initCmd.Flags().BoolVar(&skipLLMSetup, "skip-llm", false, "Skip LLM backend configuration prompt") + initCmd.Flags().BoolVar(&setupLLMOnly, "setup-llm", false, "Setup LLM backend only (skip roles/policy init)") } func runInit(cmd *cobra.Command, args []string) { @@ -56,13 +60,20 @@ func runInit(cmd *cobra.Command, args []string) { return } - // API key setup only mode + // API key setup only mode (deprecated) if setupAPIKeyOnly { fmt.Println("🔑 Setting up OpenAI API key...") promptAPIKeySetup() return } + // LLM setup only mode + if setupLLMOnly { + fmt.Println("🤖 Setting up LLM backend...") + promptLLMBackendSetup() + return + } + // Check if logged in if !config.IsLoggedIn() { fmt.Println("❌ Not logged in") @@ -156,9 +167,9 @@ func runInit(cmd *cobra.Command, args []string) { promptMCPRegistration() } - // API key configuration prompt - if !skipAPIKey { - promptAPIKeyIfNeeded() + // LLM backend configuration prompt + if !skipLLMSetup && !skipAPIKey { + promptLLMBackendSetup() } // Show dashboard guide after all initialization is complete diff --git a/internal/cmd/llm.go b/internal/cmd/llm.go new file mode 100644 index 0000000..040f893 --- /dev/null +++ b/internal/cmd/llm.go @@ -0,0 +1,497 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/DevSymphony/sym-cli/internal/llm" + "github.com/DevSymphony/sym-cli/internal/llm/engine" + "github.com/manifoldco/promptui" + "github.com/spf13/cobra" +) + +var llmCmd = &cobra.Command{ + Use: "llm", + Short: "Manage LLM engine configuration", + Long: `Configure and manage LLM engines for Symphony. + +Symphony supports multiple LLM engines: + - MCP Sampling: Uses the host LLM when running as MCP server + - CLI: Uses local CLI tools (claude, gemini) + - API: Uses OpenAI API directly + +The default mode is 'auto' which tries engines in this order: +MCP Sampling → CLI → API`, +} + +var llmSetupCmd = &cobra.Command{ + Use: "setup", + Short: "Interactive LLM engine setup", + Long: `Interactively configure which LLM engine to use.`, + Run: runLLMSetup, +} + +var llmStatusCmd = &cobra.Command{ + Use: "status", + Short: "Show current LLM engine status", + Long: `Display the current LLM engine configuration and availability.`, + Run: runLLMStatus, +} + +var llmTestCmd = &cobra.Command{ + Use: "test", + Short: "Test LLM engine connection", + Long: `Send a test request to verify LLM engine is working.`, + Run: runLLMTest, +} + +func init() { + rootCmd.AddCommand(llmCmd) + llmCmd.AddCommand(llmSetupCmd) + llmCmd.AddCommand(llmStatusCmd) + llmCmd.AddCommand(llmTestCmd) +} + +func runLLMSetup(_ *cobra.Command, _ []string) { + fmt.Println("🤖 LLM Engine Configuration") + fmt.Println() + + // Load current config + cfg := llm.LoadLLMConfig() + + // Show current settings + fmt.Println("Current settings:") + fmt.Printf(" Engine mode: %s\n", cfg.Backend) + if cfg.CLI != "" { + fmt.Printf(" CLI: %s\n", cfg.CLI) + } + if cfg.Model != "" { + fmt.Printf(" Model: %s\n", cfg.Model) + } + if cfg.HasAPIKey() { + fmt.Println(" API Key: configured") + } else { + fmt.Println(" API Key: not set") + } + fmt.Println() + + // Show menu + items := []string{ + "Configure CLI tool", + "Set OpenAI API key", + "Change engine mode", + "Test current configuration", + "Reset to defaults", + "Exit", + } + + templates := &promptui.SelectTemplates{ + Label: "{{ . }}?", + Active: "▸ {{ . 
| cyan }}", + Inactive: " {{ . }}", + Selected: "✓ {{ . | green }}", + } + + selectPrompt := promptui.Select{ + Label: "What would you like to configure", + Items: items, + Templates: templates, + Size: 6, + } + + index, _, err := selectPrompt.Run() + if err != nil { + fmt.Println("\nSetup cancelled") + return + } + + switch index { + case 0: + configureCLI(cfg) + case 1: + promptAPIKeySetup() + case 2: + configureEngineMode(cfg) + case 3: + runLLMTest(nil, nil) + case 4: + resetLLMConfig() + case 5: + fmt.Println("\nExiting setup") + } +} + +func configureCLI(cfg *llm.LLMConfig) { + fmt.Println("\n🔧 CLI Tool Configuration") + fmt.Println() + + // Detect available CLIs + clis := engine.DetectAvailableCLIs() + + // Build selection items + var items []string + var availableCLIs []engine.CLIInfo + + for _, cli := range clis { + status := "✗ not found" + if cli.Available { + status = "✓ available" + if cli.Version != "" { + status = fmt.Sprintf("✓ %s", cli.Version) + } + } + items = append(items, fmt.Sprintf("%s (%s)", cli.Name, status)) + availableCLIs = append(availableCLIs, cli) + } + + items = append(items, "Skip CLI configuration") + + templates := &promptui.SelectTemplates{ + Label: "{{ . }}?", + Active: "▸ {{ . | cyan }}", + Inactive: " {{ . }}", + Selected: "✓ {{ . | green }}", + } + + selectPrompt := promptui.Select{ + Label: "Select CLI tool to use", + Items: items, + Templates: templates, + Size: len(items), + } + + index, _, err := selectPrompt.Run() + if err != nil || index >= len(availableCLIs) { + fmt.Println("\nCLI configuration skipped") + return + } + + selectedCLI := availableCLIs[index] + + if !selectedCLI.Available { + fmt.Printf("\n⚠️ %s is not installed or not in PATH\n", selectedCLI.Name) + fmt.Println("Please install it first and try again") + return + } + + // Update config + cfg.CLI = string(selectedCLI.Provider) + + // Get provider for default model + provider, _ := engine.GetProvider(selectedCLI.Provider) + if provider != nil { + cfg.Model = provider.DefaultModel + cfg.LargeModel = provider.LargeModel + } + + // Save config + if err := llm.SaveLLMConfig(cfg); err != nil { + fmt.Printf("\n❌ Failed to save configuration: %v\n", err) + return + } + + fmt.Printf("\n✓ CLI engine configured: %s\n", selectedCLI.Name) + if cfg.Model != "" { + fmt.Printf(" Default model: %s\n", cfg.Model) + } + if cfg.LargeModel != "" { + fmt.Printf(" Large model: %s\n", cfg.LargeModel) + } + fmt.Println(" Configuration saved to .sym/.env") +} + +func configureEngineMode(cfg *llm.LLMConfig) { + fmt.Println("\n⚙️ Engine Mode Configuration") + fmt.Println() + + items := []string{ + "auto - Automatically select best available engine", + "mcp - Always use MCP sampling (when available)", + "cli - Always use CLI tool", + "api - Always use OpenAI API", + } + + templates := &promptui.SelectTemplates{ + Label: "{{ . }}?", + Active: "▸ {{ . | cyan }}", + Inactive: " {{ . }}", + Selected: "✓ {{ . 
| green }}", + } + + selectPrompt := promptui.Select{ + Label: "Select engine mode", + Items: items, + Templates: templates, + Size: 4, + } + + index, _, err := selectPrompt.Run() + if err != nil { + fmt.Println("\nEngine mode configuration cancelled") + return + } + + modes := []engine.Mode{ + engine.ModeAuto, + engine.ModeMCP, + engine.ModeCLI, + engine.ModeAPI, + } + + cfg.Backend = modes[index] + + // Save config + if err := llm.SaveLLMConfig(cfg); err != nil { + fmt.Printf("\n❌ Failed to save configuration: %v\n", err) + return + } + + fmt.Printf("\n✓ Engine mode set to: %s\n", cfg.Backend) +} + +func resetLLMConfig() { + fmt.Println("\n🔄 Resetting LLM Configuration") + + // Confirm + prompt := promptui.Prompt{ + Label: "Are you sure you want to reset LLM configuration", + IsConfirm: true, + } + + result, err := prompt.Run() + if err != nil || strings.ToLower(result) != "y" { + fmt.Println("\nReset cancelled") + return + } + + // Save default config + cfg := llm.DefaultLLMConfig() + if err := llm.SaveLLMConfig(cfg); err != nil { + fmt.Printf("\n❌ Failed to reset configuration: %v\n", err) + return + } + + fmt.Println("\n✓ LLM configuration reset to defaults") +} + +func runLLMStatus(_ *cobra.Command, _ []string) { + fmt.Println("🤖 LLM Engine Status") + fmt.Println() + + // Load config + cfg := llm.LoadLLMConfig() + + // Create client to check engines + client := llm.NewClient(llm.WithConfig(cfg), llm.WithVerbose(false)) + + fmt.Println("Configuration:") + fmt.Printf(" Engine mode: %s\n", cfg.Backend) + if cfg.CLI != "" { + fmt.Printf(" CLI provider: %s\n", cfg.CLI) + } + if cfg.Model != "" { + fmt.Printf(" Model: %s\n", cfg.Model) + } + fmt.Println() + + // Show engine availability + fmt.Println("Engine availability:") + + engines := client.GetEngines() + if len(engines) == 0 { + fmt.Println(" ⚠️ No engines configured") + } else { + for _, e := range engines { + status := "✗ unavailable" + if e.IsAvailable() { + status = "✓ available" + } + fmt.Printf(" %s: %s\n", e.Name(), status) + } + } + + fmt.Println() + + // Show active engine + active := client.GetActiveEngine() + if active != nil { + fmt.Printf("Active engine: %s\n", active.Name()) + + caps := active.Capabilities() + fmt.Println("Capabilities:") + fmt.Printf(" Temperature: %v\n", caps.SupportsTemperature) + fmt.Printf(" Max tokens: %v\n", caps.SupportsMaxTokens) + fmt.Printf(" Complexity hint: %v\n", caps.SupportsComplexity) + } else { + fmt.Println("⚠️ No active engine available") + } + + fmt.Println() + fmt.Println("💡 Run 'sym llm setup' to configure engines") + fmt.Println("💡 Run 'sym llm test' to verify connection") +} + +func runLLMTest(_ *cobra.Command, _ []string) { + fmt.Println("🧪 Testing LLM Engine Connection") + fmt.Println() + + // Load config + cfg := llm.LoadLLMConfig() + + // Create client + client := llm.NewClient(llm.WithConfig(cfg), llm.WithVerbose(true)) + + active := client.GetActiveEngine() + if active == nil { + fmt.Println("❌ No LLM engine available") + fmt.Println() + fmt.Println("Please configure an engine:") + fmt.Println(" sym llm setup") + return + } + + fmt.Printf("Testing engine: %s\n\n", active.Name()) + + // Create test request + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + response, err := client.Request( + "You are a helpful assistant. 
Respond with exactly one word.", + "Say 'OK' to confirm you are working.", + ).Execute(ctx) + + if err != nil { + fmt.Printf("\n❌ Test failed: %v\n", err) + os.Exit(1) + } + + fmt.Printf("\n✓ Test successful!\n") + fmt.Printf(" Response: %s\n", strings.TrimSpace(response)) +} + +// promptLLMBackendSetup is called from init command to setup LLM engine. +func promptLLMBackendSetup() { + fmt.Println("\n🤖 LLM Engine Configuration") + fmt.Println(" Symphony uses LLM for policy conversion and code validation.") + fmt.Println() + + // Detect available CLIs + clis := engine.DetectAvailableCLIs() + + // Check API key + cfg := llm.LoadLLMConfig() + hasAPIKey := cfg.HasAPIKey() + + // Show detected tools + fmt.Println(" Detected LLM tools:") + hasAnyCLI := false + for _, cli := range clis { + status := "✗" + if cli.Available { + status = "✓" + hasAnyCLI = true + } + version := "" + if cli.Version != "" { + version = fmt.Sprintf(" (%s)", cli.Version) + } + fmt.Printf(" %s %s%s\n", status, cli.Name, version) + } + + if hasAPIKey { + fmt.Println(" ✓ OpenAI API key (configured)") + } else { + fmt.Println(" ✗ OpenAI API key (not set)") + } + fmt.Println() + + // If nothing available, skip + if !hasAnyCLI && !hasAPIKey { + fmt.Println(" ⚠️ No LLM engine available") + fmt.Println(" You can configure one later with: sym llm setup") + return + } + + // Build selection items + var items []string + var modes []engine.Mode + + items = append(items, "Auto (recommended) - Use best available engine") + modes = append(modes, engine.ModeAuto) + + for _, cli := range clis { + if cli.Available { + items = append(items, fmt.Sprintf("%s CLI", cli.Name)) + modes = append(modes, engine.ModeCLI) + } + } + + if hasAPIKey { + items = append(items, "OpenAI API") + modes = append(modes, engine.ModeAPI) + } + + items = append(items, "Skip (configure later)") + modes = append(modes, "") + + templates := &promptui.SelectTemplates{ + Label: "{{ . }}?", + Active: "▸ {{ . | cyan }}", + Inactive: " {{ . }}", + Selected: "✓ {{ . 
| green }}", + } + + selectPrompt := promptui.Select{ + Label: "Select your preferred LLM engine", + Items: items, + Templates: templates, + Size: len(items), + } + + index, _, err := selectPrompt.Run() + if err != nil || modes[index] == "" { + fmt.Println("\n LLM engine configuration skipped") + fmt.Println(" Run 'sym llm setup' to configure later") + return + } + + // Update config + cfg.Backend = modes[index] + + // If CLI selected, set the specific CLI provider + if modes[index] == engine.ModeCLI { + // Find which CLI was selected + cliIndex := index - 1 // Account for "Auto" option + cliCount := 0 + for _, cli := range clis { + if cli.Available { + if cliCount == cliIndex { + cfg.CLI = string(cli.Provider) + provider, _ := engine.GetProvider(cli.Provider) + if provider != nil { + cfg.Model = provider.DefaultModel + cfg.LargeModel = provider.LargeModel + } + break + } + cliCount++ + } + } + } + + // Save config + if err := llm.SaveLLMConfig(cfg); err != nil { + fmt.Printf("\n ⚠️ Failed to save LLM configuration: %v\n", err) + return + } + + fmt.Printf("\n ✓ LLM engine set to: %s\n", cfg.Backend) + if cfg.CLI != "" { + fmt.Printf(" CLI: %s\n", cfg.CLI) + } + fmt.Println(" Configuration saved to .sym/.env") +} diff --git a/internal/llm/client.go b/internal/llm/client.go index a0ea105..74659c3 100644 --- a/internal/llm/client.go +++ b/internal/llm/client.go @@ -1,119 +1,199 @@ package llm import ( - "bytes" "context" - "encoding/json" "fmt" - "io" - "net/http" + "os" "time" "github.com/DevSymphony/sym-cli/internal/envutil" + "github.com/DevSymphony/sym-cli/internal/llm/engine" mcpsdk "github.com/modelcontextprotocol/go-sdk/mcp" ) const ( - openAIAPIURL = "https://api.openai.com/v1/chat/completions" - defaultFastModel = "gpt-4o-mini" - defaultPowerModel = "gpt-5-mini" defaultMaxTokens = 1000 defaultTemperature = 1.0 defaultTimeout = 60 * time.Second ) -// Mode defines the LLM client mode -type Mode string - const ( - ModeAPI Mode = "api" - ModeMCP Mode = "mcp" + // ModeAPI uses OpenAI API. + ModeAPI = engine.ModeAPI + // ModeMCP uses MCP sampling. + ModeMCP = engine.ModeMCP + // ModeCLI uses CLI engine. + ModeCLI = engine.ModeCLI + // ModeAuto automatically selects the best available engine. + ModeAuto = engine.ModeAuto ) -// ReasoningEffort defines the reasoning effort level for o3-mini -type ReasoningEffort string - -const ( - ReasoningMinimal ReasoningEffort = "minimal" - ReasoningLow ReasoningEffort = "low" - ReasoningMedium ReasoningEffort = "medium" - ReasoningHigh ReasoningEffort = "high" -) - -// Client represents an LLM client +// Client represents an LLM client with fallback chain support. type Client struct { - mode Mode - apiKey string - fastModel string - powerModel string - httpClient *http.Client - mcpSession *mcpsdk.ServerSession + // Engine configuration + config *LLMConfig + mode engine.Mode + engines []engine.LLMEngine + mcpSession *mcpsdk.ServerSession + + // Default request parameters maxTokens int temperature float64 verbose bool } -// ClientOption is a functional option for configuring the client +// ClientOption is a functional option for configuring the client. type ClientOption func(*Client) -// WithMaxTokens sets the default max tokens +// WithMaxTokens sets the default max tokens. func WithMaxTokens(maxTokens int) ClientOption { return func(c *Client) { c.maxTokens = maxTokens } } -// WithTemperature sets the default temperature +// WithTemperature sets the default temperature. 
func WithTemperature(temperature float64) ClientOption { return func(c *Client) { c.temperature = temperature } } -// WithTimeout sets the HTTP client timeout -func WithTimeout(timeout time.Duration) ClientOption { - return func(c *Client) { c.httpClient.Timeout = timeout } +// WithTimeout sets the HTTP client timeout (for API engine). +func WithTimeout(_ time.Duration) ClientOption { + // Note: This is handled by individual engines now + return func(_ *Client) {} } -// WithVerbose enables verbose logging +// WithVerbose enables verbose logging. func WithVerbose(verbose bool) ClientOption { return func(c *Client) { c.verbose = verbose } } -// WithMCPSession sets the MCP session for MCP mode +// WithMCPSession sets the MCP session for MCP mode. func WithMCPSession(session *mcpsdk.ServerSession) ClientOption { return func(c *Client) { c.mcpSession = session - c.mode = ModeMCP + c.mode = engine.ModeMCP + } +} + +// WithConfig sets a custom LLM configuration. +func WithConfig(cfg *LLMConfig) ClientOption { + return func(c *Client) { + c.config = cfg } } -// NewClient creates a new LLM client -func NewClient(apiKey string, opts ...ClientOption) *Client { - if apiKey == "" { - apiKey = envutil.GetAPIKey("OPENAI_API_KEY") +// WithMode sets the preferred engine mode. +func WithMode(mode engine.Mode) ClientOption { + return func(c *Client) { + c.mode = mode } +} + +// NewClient creates a new LLM client. +func NewClient(_ string, opts ...ClientOption) *Client { + // Load default config + config := LoadLLMConfig() + + apiKey := envutil.GetAPIKey("OPENAI_API_KEY") + config.APIKey = apiKey client := &Client{ - mode: ModeAPI, - apiKey: apiKey, - fastModel: defaultFastModel, - powerModel: defaultPowerModel, - httpClient: &http.Client{Timeout: defaultTimeout}, + config: config, + mode: engine.ModeAuto, maxTokens: defaultMaxTokens, temperature: defaultTemperature, verbose: false, } + // Apply options for _, opt := range opts { opt(client) } + // Initialize engine chain + client.initEngines() + return client } -// Request creates a new request builder +// initEngines initializes the engine fallback chain based on configuration. +func (c *Client) initEngines() { + c.engines = []engine.LLMEngine{} + + // Determine which engines to include based on mode + switch c.mode { + case engine.ModeMCP: + c.addMCPEngine() + case engine.ModeCLI: + c.addCLIEngine() + case engine.ModeAPI: + c.addAPIEngine() + case engine.ModeAuto: + fallthrough + default: + // add all available engines + c.addMCPEngine() + c.addCLIEngine() + c.addAPIEngine() + } +} + +// addMCPEngine adds MCP engine if session is available. +func (c *Client) addMCPEngine() { + if c.mcpSession != nil { + eng := engine.NewMCPEngine(c.mcpSession, engine.WithMCPVerbose(c.verbose)) + c.engines = append(c.engines, eng) + } +} + +// addCLIEngine adds CLI engine if configured. +func (c *Client) addCLIEngine() { + if c.config.CLI != "" { + providerType := engine.CLIProviderType(c.config.CLI) + if !providerType.IsValid() { + return + } + + opts := []engine.CLIEngineOption{} + + if c.config.CLIPath != "" { + opts = append(opts, engine.WithCLIPath(c.config.CLIPath)) + } + + if c.config.Model != "" { + opts = append(opts, engine.WithCLIModel(c.config.Model)) + } + + if c.config.LargeModel != "" { + opts = append(opts, engine.WithCLILargeModel(c.config.LargeModel)) + } + + if c.verbose { + opts = append(opts, engine.WithCLIVerbose(true)) + } + + eng, err := engine.NewCLIEngine(providerType, opts...) 
+ if err == nil && eng.IsAvailable() { + c.engines = append(c.engines, eng) + } + } +} + +// addAPIEngine adds API engine if key is available. +func (c *Client) addAPIEngine() { + apiKey := c.config.GetAPIKey() + if apiKey != "" { + eng := engine.NewAPIEngine(apiKey, engine.WithAPIVerbose(c.verbose)) + c.engines = append(c.engines, eng) + } +} + +// Request creates a new request builder. // // Usage: // -// client.Request(system, user).Execute(ctx) // fast model (gpt-4o-mini) -// client.Request(system, user).WithPower(llm.ReasoningMedium).Execute(ctx) // power model (o3-mini) +// client.Request(system, user).Execute(ctx) // default complexity +// client.Request(system, user).WithComplexity(llm.ComplexityMedium).Execute(ctx) // higher complexity +// client.Request(system, user).WithComplexity(engine.ComplexityHigh).Execute(ctx) // explicit complexity // client.Request(system, user).WithMaxTokens(2000).Execute(ctx) // custom tokens func (c *Client) Request(systemPrompt, userPrompt string) *RequestBuilder { return &RequestBuilder{ @@ -122,231 +202,120 @@ func (c *Client) Request(systemPrompt, userPrompt string) *RequestBuilder { user: userPrompt, maxTokens: c.maxTokens, temperature: c.temperature, - usePower: false, + complexity: engine.ComplexityLow, } } -// RequestBuilder builds and executes LLM requests with chain methods +// GetActiveEngine returns the first available engine. +func (c *Client) GetActiveEngine() engine.LLMEngine { + for _, e := range c.engines { + if e.IsAvailable() { + return e + } + } + return nil +} + +// GetEngines returns all configured engines. +func (c *Client) GetEngines() []engine.LLMEngine { + return c.engines +} + +// GetConfig returns the LLM configuration. +func (c *Client) GetConfig() *LLMConfig { + return c.config +} + +// CheckAvailability checks if any LLM engine is available. +func (c *Client) CheckAvailability(ctx context.Context) error { + eng := c.GetActiveEngine() + if eng == nil { + return fmt.Errorf("no available LLM engine") + } + + // For API engine, do a simple test request + if eng.Name() == "openai-api" { + _, err := c.Request("You are a test assistant.", "Say 'OK'").Execute(ctx) + if err != nil { + return fmt.Errorf("OpenAI API not available: %w", err) + } + } + + return nil +} + +// RequestBuilder builds and executes LLM requests with chain methods. type RequestBuilder struct { client *Client system string user string maxTokens int temperature float64 - usePower bool - effort ReasoningEffort + complexity engine.Complexity } -// WithPower enables power model (o3-mini) with specified reasoning effort -func (r *RequestBuilder) WithPower(effort ReasoningEffort) *RequestBuilder { - r.usePower = true - r.effort = effort +// WithComplexity sets the task complexity hint (engine-agnostic). +func (r *RequestBuilder) WithComplexity(c engine.Complexity) *RequestBuilder { + r.complexity = c return r } -// WithMaxTokens sets max tokens for this request +// WithMaxTokens sets max tokens for this request. func (r *RequestBuilder) WithMaxTokens(tokens int) *RequestBuilder { r.maxTokens = tokens return r } -// WithTemperature sets temperature for this request +// WithTemperature sets temperature for this request. func (r *RequestBuilder) WithTemperature(temp float64) *RequestBuilder { r.temperature = temp return r } -// Execute sends the request and returns the response +// Execute sends the request and returns the response. 
func (r *RequestBuilder) Execute(ctx context.Context) (string, error) { - if r.client.mode == ModeMCP { - return r.client.executeViaMCP(ctx, r) + req := &engine.Request{ + SystemPrompt: r.system, + UserPrompt: r.user, + MaxTokens: r.maxTokens, + Temperature: r.temperature, + Complexity: r.complexity, } - return r.client.executeViaAPI(ctx, r) -} - -// openAIRequest represents the OpenAI API request structure -type openAIRequest struct { - Model string `json:"model"` - Messages []openAIMessage `json:"messages"` - MaxTokens int `json:"max_completion_tokens,omitempty"` - Temperature float64 `json:"temperature,omitempty"` - ReasoningEffort string `json:"reasoning_effort,omitempty"` -} - -type openAIMessage struct { - Role string `json:"role"` - Content string `json:"content"` -} -type openAIResponse struct { - ID string `json:"id"` - Object string `json:"object"` - Created int64 `json:"created"` - Model string `json:"model"` - Choices []struct { - Index int `json:"index"` - Message struct { - Role string `json:"role"` - Content string `json:"content"` - } `json:"message"` - FinishReason string `json:"finish_reason"` - } `json:"choices"` - Usage struct { - PromptTokens int `json:"prompt_tokens"` - CompletionTokens int `json:"completion_tokens"` - TotalTokens int `json:"total_tokens"` - } `json:"usage"` - Error *struct { - Message string `json:"message"` - Type string `json:"type"` - Code string `json:"code"` - } `json:"error,omitempty"` + return r.client.executeWithFallback(ctx, req) } -// executeViaAPI sends request via OpenAI API -func (c *Client) executeViaAPI(ctx context.Context, r *RequestBuilder) (string, error) { - if c.apiKey == "" { - return "", fmt.Errorf("OpenAI API key not configured") - } - - model := c.fastModel - if r.usePower { - model = c.powerModel - } +// executeWithFallback tries engines in priority order. 
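// In auto mode the chain built by initEngines is MCP sampling, then CLI, then
// the OpenAI API; executeWithFallback below skips engines whose IsAvailable()
// returns false and returns the first successful result. For example, with a
// configured Claude CLI and an API key present, a CLI failure falls through to
// the API engine (illustrative scenario, not from this patch).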
+func (c *Client) executeWithFallback(ctx context.Context, req *engine.Request) (string, error) { + var lastErr error - reqBody := openAIRequest{ - Model: model, - Messages: []openAIMessage{ - {Role: "user", Content: r.system + "\n\n" + r.user}, - }, - MaxTokens: r.maxTokens, - Temperature: r.temperature, - } - - if r.usePower { - reqBody.ReasoningEffort = string(r.effort) - } - - jsonData, err := json.Marshal(reqBody) - if err != nil { - return "", fmt.Errorf("failed to marshal request: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, "POST", openAIAPIURL, bytes.NewBuffer(jsonData)) - if err != nil { - return "", fmt.Errorf("failed to create request: %w", err) - } - - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", "Bearer "+c.apiKey) - - if c.verbose { - if r.usePower { - fmt.Printf("OpenAI API request:\n Model: %s\n Reasoning: %s\n Prompt length: %d chars\n", - model, r.effort, len(r.user)) - } else { - fmt.Printf("OpenAI API request:\n Model: %s\n Prompt length: %d chars\n", - model, len(r.user)) + for _, eng := range c.engines { + if !eng.IsAvailable() { + continue } - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return "", fmt.Errorf("failed to send request: %w", err) - } - defer func() { _ = resp.Body.Close() }() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("failed to read response body: %w", err) - } - - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("OpenAI API error (status %d): %s", resp.StatusCode, string(body)) - } - var apiResp openAIResponse - if err := json.Unmarshal(body, &apiResp); err != nil { - return "", fmt.Errorf("failed to unmarshal response: %w", err) - } - - if apiResp.Error != nil { - return "", fmt.Errorf("OpenAI API error: %s (type: %s, code: %s)", - apiResp.Error.Message, apiResp.Error.Type, apiResp.Error.Code) - } - - if len(apiResp.Choices) == 0 { - return "", fmt.Errorf("no choices in response") - } - - content := apiResp.Choices[0].Message.Content - - if c.verbose { - fmt.Printf("OpenAI API response:\n Tokens: %d\n Content length: %d chars\n", - apiResp.Usage.TotalTokens, len(content)) - } - - return content, nil -} - -// executeViaMCP sends request via MCP sampling -func (c *Client) executeViaMCP(ctx context.Context, r *RequestBuilder) (string, error) { - if c.mcpSession == nil { - return "", fmt.Errorf("MCP session not available") - } - - if c.verbose { - fmt.Printf("MCP Sampling request:\n MaxTokens: %d\n Prompt length: %d chars\n", - r.maxTokens, len(r.user)) - } - - combinedPrompt := r.system + "\n\n" + r.user - - result, err := c.mcpSession.CreateMessage(ctx, &mcpsdk.CreateMessageParams{ - Messages: []*mcpsdk.SamplingMessage{ - { - Role: "user", - Content: &mcpsdk.TextContent{Text: combinedPrompt}, - }, - }, - MaxTokens: int64(r.maxTokens), - }) - if err != nil { - return "", fmt.Errorf("MCP sampling failed: %w", err) - } + result, err := eng.Execute(ctx, req) + if err == nil { + return result, nil + } - var response string - if textContent, ok := result.Content.(*mcpsdk.TextContent); ok { - response = textContent.Text - } else { - return "", fmt.Errorf("unexpected content type from MCP sampling") + lastErr = err + if c.verbose { + fmt.Fprintf(os.Stderr, "⚠️ %s failed: %v, trying next engine...\n", eng.Name(), err) + } } - if c.verbose { - fmt.Printf("MCP Sampling response:\n Model: %s\n Content length: %d chars\n", - result.Model, len(response)) + if lastErr != nil { + return "", fmt.Errorf("all engines failed, last error: %w", 
lastErr) } - return response, nil + return "", fmt.Errorf("no available LLM engine configured") } -// CheckAvailability checks if the LLM is available -func (c *Client) CheckAvailability(ctx context.Context) error { - if c.mode == ModeMCP { - if c.mcpSession == nil { - return fmt.Errorf("MCP session not available") - } - return nil +// ExecuteDirect executes request on a specific engine without fallback. +func (c *Client) ExecuteDirect(ctx context.Context, eng engine.LLMEngine, req *engine.Request) (string, error) { + if !eng.IsAvailable() { + return "", fmt.Errorf("engine %s is not available", eng.Name()) } - - if c.apiKey == "" { - return fmt.Errorf("OPENAI_API_KEY environment variable not set") - } - - _, err := c.Request("You are a test assistant.", "Say 'OK'").Execute(ctx) - if err != nil { - return fmt.Errorf("OpenAI API not available: %w", err) - } - - return nil + return eng.Execute(ctx, req) } diff --git a/internal/llm/complexity.go b/internal/llm/complexity.go new file mode 100644 index 0000000..243d239 --- /dev/null +++ b/internal/llm/complexity.go @@ -0,0 +1,17 @@ +package llm + +import "github.com/DevSymphony/sym-cli/internal/llm/engine" + +// Complexity re-exports engine.Complexity for backward compatibility. +type Complexity = engine.Complexity + +const ( + // ComplexityMinimal is for trivial lookups. + ComplexityMinimal Complexity = engine.ComplexityMinimal + // ComplexityLow is for simple transformations. + ComplexityLow Complexity = engine.ComplexityLow + // ComplexityMedium is for moderate reasoning. + ComplexityMedium Complexity = engine.ComplexityMedium + // ComplexityHigh is for complex reasoning. + ComplexityHigh Complexity = engine.ComplexityHigh +) diff --git a/internal/llm/config.go b/internal/llm/config.go new file mode 100644 index 0000000..fe6eca4 --- /dev/null +++ b/internal/llm/config.go @@ -0,0 +1,366 @@ +package llm + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/DevSymphony/sym-cli/internal/llm/engine" +) + +const ( + // Default .sym/.env file location relative to repo root + defaultEnvFile = ".sym/.env" + + // Environment variable keys + envKeyLLMBackend = "LLM_BACKEND" + envKeyLLMCLI = "LLM_CLI" + envKeyLLMCLIPath = "LLM_CLI_PATH" + envKeyLLMModel = "LLM_MODEL" + envKeyLLMLarge = "LLM_LARGE_MODEL" + envKeyAPIKey = "OPENAI_API_KEY" +) + +// LLMConfig holds LLM engine configuration. +type LLMConfig struct { + // Backend is the preferred engine mode (auto, mcp, cli, api). + Backend engine.Mode `json:"backend"` + + // CLI is the CLI provider type (claude, gemini). + CLI string `json:"cli"` + + // CLIPath is a custom path to the CLI executable (optional). + CLIPath string `json:"cli_path"` + + // Model is the default model name for CLI engine. + Model string `json:"model"` + + // LargeModel is the model for high complexity tasks (optional). + LargeModel string `json:"large_model"` + + // APIKey is loaded from environment (not saved to config). + APIKey string `json:"-"` +} + +// DefaultLLMConfig returns the default configuration. +func DefaultLLMConfig() *LLMConfig { + return &LLMConfig{ + Backend: engine.ModeAuto, + CLI: "", + CLIPath: "", + Model: "", + } +} + +// LoadLLMConfig loads LLM configuration from .sym/.env file and environment. 
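// An illustrative .sym/.env using the keys defined above (values are
// placeholders, not from this patch); system environment variables with the
// same names take precedence over file values:
//
//	LLM_BACKEND=cli
//	LLM_CLI=claude
//	LLM_MODEL=claude-3-opus
//	LLM_LARGE_MODEL=claude-3-opus
//	OPENAI_API_KEY=sk-...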
+func LoadLLMConfig() *LLMConfig { + cfg := DefaultLLMConfig() + + // Load from .sym/.env file first + envPath := defaultEnvFile + loadConfigFromEnvFile(envPath, cfg) + + // Override with system environment variables + loadConfigFromEnv(cfg) + + return cfg +} + +// LoadLLMConfigFromDir loads LLM configuration from a specific directory. +func LoadLLMConfigFromDir(dir string) *LLMConfig { + cfg := DefaultLLMConfig() + + // Load from .env file in the specified directory + envPath := filepath.Join(dir, ".env") + loadConfigFromEnvFile(envPath, cfg) + + // Override with system environment variables + loadConfigFromEnv(cfg) + + return cfg +} + +// loadConfigFromEnvFile reads config values from .env file. +func loadConfigFromEnvFile(envPath string, cfg *LLMConfig) { + file, err := os.Open(envPath) + if err != nil { + return // File doesn't exist, use defaults + } + defer func() { _ = file.Close() }() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // Skip comments and empty lines + if len(line) == 0 || line[0] == '#' { + continue + } + + // Parse key=value + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + continue + } + + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + + switch key { + case envKeyLLMBackend: + if engine.Mode(value).IsValid() { + cfg.Backend = engine.Mode(value) + } + case envKeyLLMCLI: + cfg.CLI = value + case envKeyLLMCLIPath: + cfg.CLIPath = value + case envKeyLLMModel: + cfg.Model = value + case envKeyLLMLarge: + cfg.LargeModel = value + case envKeyAPIKey: + cfg.APIKey = value + } + } +} + +// loadConfigFromEnv loads config from system environment variables. +func loadConfigFromEnv(cfg *LLMConfig) { + if backend := os.Getenv(envKeyLLMBackend); backend != "" { + if engine.Mode(backend).IsValid() { + cfg.Backend = engine.Mode(backend) + } + } + + if cli := os.Getenv(envKeyLLMCLI); cli != "" { + cfg.CLI = cli + } + + if cliPath := os.Getenv(envKeyLLMCLIPath); cliPath != "" { + cfg.CLIPath = cliPath + } + + if model := os.Getenv(envKeyLLMModel); model != "" { + cfg.Model = model + } + + if large := os.Getenv(envKeyLLMLarge); large != "" { + cfg.LargeModel = large + } + + if apiKey := os.Getenv(envKeyAPIKey); apiKey != "" { + cfg.APIKey = apiKey + } +} + +// SaveLLMConfig saves LLM configuration to .sym/.env file. +func SaveLLMConfig(cfg *LLMConfig) error { + return SaveLLMConfigToDir(".sym", cfg) +} + +// SaveLLMConfigToDir saves LLM configuration to a specific directory. 
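// Caller sketch (illustrative, not from this patch): load, mutate, and persist
// the configuration. Existing keys and comments in .sym/.env are preserved; only
// the LLM_* values are rewritten:
//
//	cfg := llm.LoadLLMConfig()
//	cfg.Backend = engine.ModeCLI
//	cfg.CLI = "claude"
//	if err := llm.SaveLLMConfig(cfg); err != nil {
//		// handle the write error
//	}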
+func SaveLLMConfigToDir(dir string, cfg *LLMConfig) error { + // Ensure directory exists + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + + envPath := filepath.Join(dir, ".env") + + // Read existing content + existingLines, existingKeys := readExistingEnvFile(envPath) + + // Prepare new values + newValues := map[string]string{} + + if cfg.Backend != "" && cfg.Backend != engine.ModeAuto { + newValues[envKeyLLMBackend] = string(cfg.Backend) + } + + if cfg.CLI != "" { + newValues[envKeyLLMCLI] = cfg.CLI + } + + if cfg.CLIPath != "" { + newValues[envKeyLLMCLIPath] = cfg.CLIPath + } + + if cfg.Model != "" { + newValues[envKeyLLMModel] = cfg.Model + } + + if cfg.LargeModel != "" { + newValues[envKeyLLMLarge] = cfg.LargeModel + } + + // Build output lines + var outputLines []string + + // Update existing lines + for _, line := range existingLines { + trimmed := strings.TrimSpace(line) + + // Keep comments and empty lines + if trimmed == "" || strings.HasPrefix(trimmed, "#") { + outputLines = append(outputLines, line) + continue + } + + // Parse key + parts := strings.SplitN(trimmed, "=", 2) + if len(parts) != 2 { + outputLines = append(outputLines, line) + continue + } + + key := strings.TrimSpace(parts[0]) + + // Check if we have a new value for this key + if newValue, ok := newValues[key]; ok { + outputLines = append(outputLines, fmt.Sprintf("%s=%s", key, newValue)) + delete(newValues, key) // Mark as processed + } else { + outputLines = append(outputLines, line) + } + } + + // Add LLM config section header if needed + hasLLMSection := false + for key := range existingKeys { + if strings.HasPrefix(key, "LLM_") { + hasLLMSection = true + break + } + } + + // Add new keys that weren't in the file + if len(newValues) > 0 { + if !hasLLMSection { + outputLines = append(outputLines, "", "# LLM Backend Configuration") + } + + for key, value := range newValues { + outputLines = append(outputLines, fmt.Sprintf("%s=%s", key, value)) + } + } + + // Write to file + content := strings.Join(outputLines, "\n") + if !strings.HasSuffix(content, "\n") { + content += "\n" + } + + return os.WriteFile(envPath, []byte(content), 0600) +} + +// readExistingEnvFile reads existing .env file content. +func readExistingEnvFile(envPath string) ([]string, map[string]bool) { + var lines []string + keys := make(map[string]bool) + + file, err := os.Open(envPath) + if err != nil { + return lines, keys + } + defer func() { _ = file.Close() }() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + lines = append(lines, line) + + // Track existing keys + trimmed := strings.TrimSpace(line) + if len(trimmed) > 0 && !strings.HasPrefix(trimmed, "#") { + parts := strings.SplitN(trimmed, "=", 2) + if len(parts) == 2 { + keys[strings.TrimSpace(parts[0])] = true + } + } + } + + return lines, keys +} + +// GetAPIKey returns the API key from config or environment. +func (c *LLMConfig) GetAPIKey() string { + if c.APIKey != "" { + return c.APIKey + } + return os.Getenv(envKeyAPIKey) +} + +// HasCLI returns true if CLI is configured. +func (c *LLMConfig) HasCLI() bool { + return c.CLI != "" +} + +// HasAPIKey returns true if API key is available. +func (c *LLMConfig) HasAPIKey() bool { + return c.GetAPIKey() != "" +} + +// GetEffectiveBackend returns the actual engine to use based on availability. 
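// For example (per the resolution below), a config with CLI set to "gemini" and
// no API key resolves to ModeCLI, one with only an API key resolves to ModeAPI,
// and an empty config stays at ModeAuto.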
+func (c *LLMConfig) GetEffectiveBackend() engine.Mode { + if c.Backend != engine.ModeAuto { + return c.Backend + } + + // Auto mode: prefer CLI if available, then API + if c.HasCLI() { + return engine.ModeCLI + } + + if c.HasAPIKey() { + return engine.ModeAPI + } + + return engine.ModeAuto +} + +// Validate checks if the configuration is valid. +func (c *LLMConfig) Validate() error { + if c.Backend != "" && !c.Backend.IsValid() { + return fmt.Errorf("invalid engine mode: %s", c.Backend) + } + + if c.CLI != "" && !engine.CLIProviderType(c.CLI).IsValid() { + return fmt.Errorf("unsupported CLI provider: %s", c.CLI) + } + + return nil +} + +// String returns a human-readable representation of the config. +func (c *LLMConfig) String() string { + var parts []string + + parts = append(parts, fmt.Sprintf("Backend: %s", c.Backend)) + + if c.CLI != "" { + parts = append(parts, fmt.Sprintf("CLI: %s", c.CLI)) + } + + if c.CLIPath != "" { + parts = append(parts, fmt.Sprintf("CLI Path: %s", c.CLIPath)) + } + + if c.Model != "" { + parts = append(parts, fmt.Sprintf("Model: %s", c.Model)) + } + + if c.LargeModel != "" { + parts = append(parts, fmt.Sprintf("Large Model: %s", c.LargeModel)) + } + + if c.HasAPIKey() { + parts = append(parts, "API Key: configured") + } else { + parts = append(parts, "API Key: not set") + } + + return strings.Join(parts, ", ") +} diff --git a/internal/llm/config_test.go b/internal/llm/config_test.go new file mode 100644 index 0000000..b5bc4e1 --- /dev/null +++ b/internal/llm/config_test.go @@ -0,0 +1,160 @@ +package llm + +import ( + "os" + "path/filepath" + "testing" + + "github.com/DevSymphony/sym-cli/internal/llm/engine" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDefaultLLMConfig(t *testing.T) { + cfg := DefaultLLMConfig() + + assert.Equal(t, engine.ModeAuto, cfg.Backend) + assert.Empty(t, cfg.CLI) + assert.Empty(t, cfg.CLIPath) + assert.Empty(t, cfg.Model) +} + +func TestLLMConfig_HasCLI(t *testing.T) { + t.Run("with CLI", func(t *testing.T) { + cfg := &LLMConfig{CLI: "claude"} + assert.True(t, cfg.HasCLI()) + }) + + t.Run("without CLI", func(t *testing.T) { + cfg := &LLMConfig{} + assert.False(t, cfg.HasCLI()) + }) +} + +func TestLLMConfig_HasAPIKey(t *testing.T) { + t.Run("with API key in config", func(t *testing.T) { + cfg := &LLMConfig{APIKey: "sk-test"} + assert.True(t, cfg.HasAPIKey()) + }) + + t.Run("without API key", func(t *testing.T) { + cfg := &LLMConfig{} + assert.False(t, cfg.HasAPIKey()) + }) +} + +func TestLLMConfig_GetEffectiveBackend(t *testing.T) { + t.Run("explicit mode", func(t *testing.T) { + cfg := &LLMConfig{Backend: engine.ModeCLI} + assert.Equal(t, engine.ModeCLI, cfg.GetEffectiveBackend()) + }) + + t.Run("auto with CLI", func(t *testing.T) { + cfg := &LLMConfig{Backend: engine.ModeAuto, CLI: "claude"} + assert.Equal(t, engine.ModeCLI, cfg.GetEffectiveBackend()) + }) + + t.Run("auto with API key", func(t *testing.T) { + cfg := &LLMConfig{Backend: engine.ModeAuto, APIKey: "sk-test"} + assert.Equal(t, engine.ModeAPI, cfg.GetEffectiveBackend()) + }) + + t.Run("auto with nothing", func(t *testing.T) { + cfg := &LLMConfig{Backend: engine.ModeAuto} + assert.Equal(t, engine.ModeAuto, cfg.GetEffectiveBackend()) + }) +} + +func TestLLMConfig_Validate(t *testing.T) { + t.Run("valid config", func(t *testing.T) { + cfg := &LLMConfig{ + Backend: engine.ModeAuto, + CLI: "claude", + } + assert.NoError(t, cfg.Validate()) + }) + + t.Run("invalid backend", func(t *testing.T) { + cfg := &LLMConfig{Backend: 
engine.Mode("invalid")} + assert.Error(t, cfg.Validate()) + }) + + t.Run("invalid CLI provider", func(t *testing.T) { + cfg := &LLMConfig{CLI: "invalid-cli"} + assert.Error(t, cfg.Validate()) + }) + + t.Run("empty config is valid", func(t *testing.T) { + cfg := &LLMConfig{} + assert.NoError(t, cfg.Validate()) + }) +} + +func TestLLMConfig_String(t *testing.T) { + cfg := &LLMConfig{ + Backend: engine.ModeAuto, + CLI: "claude", + Model: "claude-3-opus", + } + + str := cfg.String() + assert.Contains(t, str, "Backend: auto") + assert.Contains(t, str, "CLI: claude") + assert.Contains(t, str, "Model: claude-3-opus") +} + +func TestSaveLLMConfig(t *testing.T) { + tmpDir := t.TempDir() + + cfg := &LLMConfig{ + Backend: engine.ModeCLI, + CLI: "claude", + Model: "claude-3-opus", + LargeModel: "claude-3-opus", + } + + err := SaveLLMConfigToDir(tmpDir, cfg) + require.NoError(t, err) + + // Verify file was created + envPath := filepath.Join(tmpDir, ".env") + _, err = os.Stat(envPath) + require.NoError(t, err) + + // Read and verify content + content, err := os.ReadFile(envPath) + require.NoError(t, err) + + assert.Contains(t, string(content), "LLM_BACKEND=cli") + assert.Contains(t, string(content), "LLM_CLI=claude") + assert.Contains(t, string(content), "LLM_MODEL=claude-3-opus") +} + +func TestLoadLLMConfigFromDir(t *testing.T) { + tmpDir := t.TempDir() + + // Create .env file + envContent := `# Test config +LLM_BACKEND=cli +LLM_CLI=gemini +LLM_MODEL=gemini-pro +` + envPath := filepath.Join(tmpDir, ".env") + err := os.WriteFile(envPath, []byte(envContent), 0600) + require.NoError(t, err) + + cfg := LoadLLMConfigFromDir(tmpDir) + + assert.Equal(t, engine.ModeCLI, cfg.Backend) + assert.Equal(t, "gemini", cfg.CLI) + assert.Equal(t, "gemini-pro", cfg.Model) +} + +func TestLoadLLMConfigFromDir_NonExistent(t *testing.T) { + cfg := LoadLLMConfigFromDir("/nonexistent/path") + + // Should return defaults + assert.Equal(t, engine.ModeAuto, cfg.Backend) + assert.Empty(t, cfg.CLI) +} + diff --git a/internal/llm/engine/api.go b/internal/llm/engine/api.go new file mode 100644 index 0000000..6c4227f --- /dev/null +++ b/internal/llm/engine/api.go @@ -0,0 +1,247 @@ +package engine + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "time" +) + +const ( + openAIAPIURL = "https://api.openai.com/v1/chat/completions" + defaultAPIFastModel = "gpt-4o-mini" + defaultAPIPowerModel = "gpt-5-mini" + defaultAPITimeout = 60 * time.Second + defaultAPIMaxTokens = 1000 + defaultAPITemperature = 1.0 +) + +// APIEngine implements LLMEngine interface for OpenAI API. +type APIEngine struct { + apiKey string + fastModel string + powerModel string + httpClient *http.Client + maxTokens int + temperature float64 + verbose bool +} + +// APIEngineOption is a functional option for APIEngine. +type APIEngineOption func(*APIEngine) + +// WithAPIFastModel sets the fast model. +func WithAPIFastModel(model string) APIEngineOption { + return func(e *APIEngine) { e.fastModel = model } +} + +// WithAPIPowerModel sets the power model. +func WithAPIPowerModel(model string) APIEngineOption { + return func(e *APIEngine) { e.powerModel = model } +} + +// WithAPITimeout sets the HTTP client timeout. +func WithAPITimeout(timeout time.Duration) APIEngineOption { + return func(e *APIEngine) { e.httpClient.Timeout = timeout } +} + +// WithAPIVerbose enables verbose logging. 
+func WithAPIVerbose(verbose bool) APIEngineOption { + return func(e *APIEngine) { e.verbose = verbose } +} + +// NewAPIEngine creates a new OpenAI API engine. +func NewAPIEngine(apiKey string, opts ...APIEngineOption) *APIEngine { + e := &APIEngine{ + apiKey: apiKey, + fastModel: defaultAPIFastModel, + powerModel: defaultAPIPowerModel, + httpClient: &http.Client{Timeout: defaultAPITimeout}, + maxTokens: defaultAPIMaxTokens, + temperature: defaultAPITemperature, + verbose: false, + } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// Name returns the engine identifier. +func (e *APIEngine) Name() string { + return "openai-api" +} + +// IsAvailable checks if the engine can be used. +func (e *APIEngine) IsAvailable() bool { + return e.apiKey != "" +} + +// Capabilities returns engine capabilities. +func (e *APIEngine) Capabilities() Capabilities { + return Capabilities{ + SupportsTemperature: true, + SupportsMaxTokens: true, + SupportsComplexity: true, + SupportsStreaming: true, + MaxContextLength: 128000, + Models: []string{e.fastModel, e.powerModel}, + } +} + +// Execute sends the request via OpenAI API. +func (e *APIEngine) Execute(ctx context.Context, req *Request) (string, error) { + if e.apiKey == "" { + return "", fmt.Errorf("OpenAI API key not configured") + } + + // Select model based on complexity + model := e.fastModel + var reasoningEffort string + + switch req.Complexity { + case ComplexityMinimal: + model = e.fastModel + reasoningEffort = "minimal" + case ComplexityLow: + model = e.fastModel + case ComplexityMedium: + model = e.powerModel + reasoningEffort = "low" + case ComplexityHigh: + model = e.powerModel + reasoningEffort = "medium" + } + + // Build request body + maxTokens := req.MaxTokens + if maxTokens == 0 { + maxTokens = e.maxTokens + } + + temperature := req.Temperature + if temperature == 0 { + temperature = e.temperature + } + + apiReq := openAIAPIRequest{ + Model: model, + Messages: []openAIAPIMessage{ + {Role: "user", Content: req.CombinedPrompt()}, + }, + MaxTokens: maxTokens, + Temperature: temperature, + } + + if reasoningEffort != "" { + apiReq.ReasoningEffort = reasoningEffort + } + + jsonData, err := json.Marshal(apiReq) + if err != nil { + return "", fmt.Errorf("failed to marshal request: %w", err) + } + + httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, openAIAPIURL, bytes.NewBuffer(jsonData)) + if err != nil { + return "", fmt.Errorf("failed to create request: %w", err) + } + + httpReq.Header.Set("Content-Type", "application/json") + httpReq.Header.Set("Authorization", "Bearer "+e.apiKey) + + if e.verbose { + fmt.Fprintf(os.Stderr, "OpenAI API request:\n Model: %s\n Complexity: %s\n Prompt length: %d chars\n", + model, req.Complexity, len(req.UserPrompt)) + } + + resp, err := e.httpClient.Do(httpReq) + if err != nil { + return "", fmt.Errorf("failed to send request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read response body: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("OpenAI API error (status %d): %s", resp.StatusCode, string(body)) + } + + var apiResp openAIAPIResponse + if err := json.Unmarshal(body, &apiResp); err != nil { + return "", fmt.Errorf("failed to unmarshal response: %w", err) + } + + if apiResp.Error != nil { + return "", fmt.Errorf("OpenAI API error: %s (type: %s, code: %s)", + apiResp.Error.Message, apiResp.Error.Type, apiResp.Error.Code) + } + + if 
len(apiResp.Choices) == 0 { + return "", fmt.Errorf("no choices in response") + } + + content := apiResp.Choices[0].Message.Content + + if e.verbose { + fmt.Fprintf(os.Stderr, "OpenAI API response:\n Tokens: %d\n Content length: %d chars\n", + apiResp.Usage.TotalTokens, len(content)) + } + + return content, nil +} + +// SetVerbose sets verbose mode. +func (e *APIEngine) SetVerbose(verbose bool) { + e.verbose = verbose +} + +// openAIAPIRequest represents the OpenAI API request structure. +type openAIAPIRequest struct { + Model string `json:"model"` + Messages []openAIAPIMessage `json:"messages"` + MaxTokens int `json:"max_completion_tokens,omitempty"` + Temperature float64 `json:"temperature,omitempty"` + ReasoningEffort string `json:"reasoning_effort,omitempty"` +} + +// openAIAPIMessage represents a message in the OpenAI API request. +type openAIAPIMessage struct { + Role string `json:"role"` + Content string `json:"content"` +} + +// openAIAPIResponse represents the OpenAI API response structure. +type openAIAPIResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Model string `json:"model"` + Choices []struct { + Index int `json:"index"` + Message struct { + Role string `json:"role"` + Content string `json:"content"` + } `json:"message"` + FinishReason string `json:"finish_reason"` + } `json:"choices"` + Usage struct { + PromptTokens int `json:"prompt_tokens"` + CompletionTokens int `json:"completion_tokens"` + TotalTokens int `json:"total_tokens"` + } `json:"usage"` + Error *struct { + Message string `json:"message"` + Type string `json:"type"` + Code string `json:"code"` + } `json:"error,omitempty"` +} diff --git a/internal/llm/engine/api_test.go b/internal/llm/engine/api_test.go new file mode 100644 index 0000000..121df05 --- /dev/null +++ b/internal/llm/engine/api_test.go @@ -0,0 +1,64 @@ +package engine + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewAPIEngine(t *testing.T) { + t.Run("with api key", func(t *testing.T) { + engine := NewAPIEngine("sk-test-key") + assert.NotNil(t, engine) + assert.Equal(t, "openai-api", engine.Name()) + assert.True(t, engine.IsAvailable()) + }) + + t.Run("without api key", func(t *testing.T) { + engine := NewAPIEngine("") + assert.NotNil(t, engine) + assert.False(t, engine.IsAvailable()) + }) + + t.Run("with options", func(t *testing.T) { + engine := NewAPIEngine("sk-test-key", + WithAPIFastModel("gpt-4o"), + WithAPIPowerModel("o3-mini"), + WithAPIVerbose(true), + ) + assert.NotNil(t, engine) + caps := engine.Capabilities() + assert.Contains(t, caps.Models, "gpt-4o") + assert.Contains(t, caps.Models, "o3-mini") + }) +} + +func TestAPIEngine_Capabilities(t *testing.T) { + engine := NewAPIEngine("sk-test-key") + caps := engine.Capabilities() + + assert.True(t, caps.SupportsTemperature) + assert.True(t, caps.SupportsMaxTokens) + assert.True(t, caps.SupportsComplexity) + assert.True(t, caps.SupportsStreaming) + assert.Equal(t, 128000, caps.MaxContextLength) + assert.Len(t, caps.Models, 2) +} + +func TestAPIEngine_Name(t *testing.T) { + engine := NewAPIEngine("sk-test-key") + assert.Equal(t, "openai-api", engine.Name()) +} + +func TestAPIEngine_IsAvailable(t *testing.T) { + t.Run("available with key", func(t *testing.T) { + engine := NewAPIEngine("sk-test-key") + assert.True(t, engine.IsAvailable()) + }) + + t.Run("not available without key", func(t *testing.T) { + engine := NewAPIEngine("") + assert.False(t, engine.IsAvailable()) + }) +} + diff --git 
a/internal/llm/engine/cli.go b/internal/llm/engine/cli.go new file mode 100644 index 0000000..55ae047 --- /dev/null +++ b/internal/llm/engine/cli.go @@ -0,0 +1,224 @@ +package engine + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "time" + + "github.com/DevSymphony/sym-cli/internal/llm/engine/cliprovider" +) + +const ( + defaultCLITimeout = 120 * time.Second +) + +// Re-export CLI provider types for backward compatibility. +type CLIProviderType = cliprovider.Type + +const ( + // ProviderClaude is the Claude CLI provider. + ProviderClaude CLIProviderType = cliprovider.TypeClaude + // ProviderGemini is the Gemini CLI provider. + ProviderGemini CLIProviderType = cliprovider.TypeGemini +) + +// CLIProvider is an alias to cliprovider.Provider. +type CLIProvider = cliprovider.Provider + +// CLIInfo is an alias to cliprovider.Info. +type CLIInfo = cliprovider.Info + +// SupportedProviders returns all supported CLI providers. +func SupportedProviders() map[CLIProviderType]*CLIProvider { + return cliprovider.Supported() +} + +// GetProvider returns the provider for the given type. +func GetProvider(providerType CLIProviderType) (*CLIProvider, error) { + return cliprovider.Get(providerType) +} + +// DetectAvailableCLIs scans for installed CLI tools. +func DetectAvailableCLIs() []CLIInfo { + return cliprovider.Detect() +} + +// GetProviderByCommand finds a provider by its command name. +func GetProviderByCommand(command string) (*CLIProvider, error) { + return cliprovider.GetByCommand(command) +} + +// CLIEngine implements LLMEngine interface for CLI-based LLM tools. +type CLIEngine struct { + provider *cliprovider.Provider + model string + largeModel string + timeout time.Duration + verbose bool + customPath string +} + +// CLIEngineOption is a functional option for CLIEngine. +type CLIEngineOption func(*CLIEngine) + +// WithCLIModel sets the default model. +func WithCLIModel(model string) CLIEngineOption { + return func(e *CLIEngine) { e.model = model } +} + +// WithCLILargeModel sets the model for high complexity tasks. +func WithCLILargeModel(model string) CLIEngineOption { + return func(e *CLIEngine) { e.largeModel = model } +} + +// WithCLITimeout sets the execution timeout. +func WithCLITimeout(timeout time.Duration) CLIEngineOption { + return func(e *CLIEngine) { e.timeout = timeout } +} + +// WithCLIVerbose enables verbose logging. +func WithCLIVerbose(verbose bool) CLIEngineOption { + return func(e *CLIEngine) { e.verbose = verbose } +} + +// WithCLIPath sets a custom path to the CLI executable. +func WithCLIPath(path string) CLIEngineOption { + return func(e *CLIEngine) { e.customPath = path } +} + +// NewCLIEngine creates a new CLI engine for the given provider. +func NewCLIEngine(providerType CLIProviderType, opts ...CLIEngineOption) (*CLIEngine, error) { + provider, err := cliprovider.Get(providerType) + if err != nil { + return nil, err + } + + e := &CLIEngine{ + provider: provider, + model: provider.DefaultModel, + largeModel: provider.LargeModel, + timeout: defaultCLITimeout, + verbose: false, + } + + for _, opt := range opts { + opt(e) + } + + return e, nil +} + +// Name returns the engine identifier. +func (e *CLIEngine) Name() string { + return fmt.Sprintf("cli-%s", e.provider.Type) +} + +// IsAvailable checks if the engine can be used. +func (e *CLIEngine) IsAvailable() bool { + cmdPath := e.getCommandPath() + _, err := exec.LookPath(cmdPath) + return err == nil +} + +// Capabilities returns engine capabilities. 
+func (e *CLIEngine) Capabilities() Capabilities { + models := []string{e.model} + if e.largeModel != "" && e.largeModel != e.model { + models = append(models, e.largeModel) + } + + return Capabilities{ + SupportsTemperature: e.provider.SupportsTemperature, + SupportsMaxTokens: e.provider.SupportsMaxTokens, + SupportsComplexity: e.largeModel != "", + SupportsStreaming: false, + MaxContextLength: 0, + Models: models, + } +} + +// Execute sends the request via CLI. +func (e *CLIEngine) Execute(ctx context.Context, req *Request) (string, error) { + model := e.model + if req.Complexity >= ComplexityHigh && e.largeModel != "" { + model = e.largeModel + } + + prompt := req.CombinedPrompt() + args := e.provider.BuildArgs(model, prompt) + args = e.appendOptionalFlags(args, req) + + if e.verbose { + fmt.Fprintf(os.Stderr, "CLI Engine (%s) request:\n Model: %s\n Complexity: %s\n Prompt length: %d chars\n", + e.provider.Type, model, req.Complexity, len(prompt)) + } + + cmdCtx, cancel := context.WithTimeout(ctx, e.timeout) + defer cancel() + + cmdPath := e.getCommandPath() + cmd := exec.CommandContext(cmdCtx, cmdPath, args...) + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + if cmdCtx.Err() == context.DeadlineExceeded { + return "", fmt.Errorf("CLI command timed out after %v", e.timeout) + } + return "", fmt.Errorf("CLI command failed: %w\nstderr: %s", err, stderr.String()) + } + + response, err := e.provider.ParseResponse(stdout.Bytes()) + if err != nil { + return "", fmt.Errorf("failed to parse CLI response: %w", err) + } + + if e.verbose { + fmt.Fprintf(os.Stderr, "CLI Engine (%s) response:\n Content length: %d chars\n", + e.provider.Type, len(response)) + } + + return response, nil +} + +// getCommandPath returns the path to the CLI executable. +func (e *CLIEngine) getCommandPath() string { + if e.customPath != "" { + return e.customPath + } + return e.provider.Command +} + +// appendOptionalFlags adds optional flags based on request parameters. +func (e *CLIEngine) appendOptionalFlags(args []string, req *Request) []string { + if e.provider.SupportsMaxTokens && e.provider.MaxTokensFlag != "" && req.MaxTokens > 0 { + args = append(args, e.provider.MaxTokensFlag, fmt.Sprintf("%d", req.MaxTokens)) + } + + if e.provider.SupportsTemperature && e.provider.TemperatureFlag != "" && req.Temperature > 0 { + args = append(args, e.provider.TemperatureFlag, fmt.Sprintf("%.2f", req.Temperature)) + } + + return args +} + +// GetProvider returns the underlying provider. +func (e *CLIEngine) GetProvider() *CLIProvider { + return e.provider +} + +// GetModel returns the current model. +func (e *CLIEngine) GetModel() string { + return e.model +} + +// SetVerbose sets verbose mode. 
+func (e *CLIEngine) SetVerbose(verbose bool) { + e.verbose = verbose +} diff --git a/internal/llm/engine/cli_test.go b/internal/llm/engine/cli_test.go new file mode 100644 index 0000000..da9ec38 --- /dev/null +++ b/internal/llm/engine/cli_test.go @@ -0,0 +1,77 @@ +package engine + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewCLIEngine(t *testing.T) { + t.Run("valid provider", func(t *testing.T) { + engine, err := NewCLIEngine(ProviderClaude) + require.NoError(t, err) + assert.NotNil(t, engine) + assert.Equal(t, "cli-claude", engine.Name()) + }) + + t.Run("with options", func(t *testing.T) { + engine, err := NewCLIEngine( + ProviderClaude, + WithCLIModel("custom-model"), + WithCLILargeModel("large-model"), + WithCLIVerbose(true), + ) + require.NoError(t, err) + assert.Equal(t, "custom-model", engine.GetModel()) + }) + + t.Run("invalid provider", func(t *testing.T) { + _, err := NewCLIEngine(CLIProviderType("invalid")) + assert.Error(t, err) + }) +} + +func TestCLIEngine_Capabilities(t *testing.T) { + engine, err := NewCLIEngine(ProviderClaude) + require.NoError(t, err) + + caps := engine.Capabilities() + + assert.True(t, caps.SupportsMaxTokens) + assert.False(t, caps.SupportsStreaming) + assert.True(t, caps.SupportsComplexity) // Has LargeModel + assert.NotEmpty(t, caps.Models) +} + +func TestDetectAvailableCLIs(t *testing.T) { + clis := DetectAvailableCLIs() + + // Should return info for all supported providers + assert.Len(t, clis, 2) + + // Each CLI should have provider and name set + for _, cli := range clis { + assert.NotEmpty(t, cli.Provider) + assert.NotEmpty(t, cli.Name) + } +} + +func TestGetProviderByCommand(t *testing.T) { + t.Run("claude command", func(t *testing.T) { + provider, err := GetProviderByCommand("claude") + require.NoError(t, err) + assert.Equal(t, ProviderClaude, provider.Type) + }) + + t.Run("gemini command", func(t *testing.T) { + provider, err := GetProviderByCommand("gemini") + require.NoError(t, err) + assert.Equal(t, ProviderGemini, provider.Type) + }) + + t.Run("unknown command", func(t *testing.T) { + _, err := GetProviderByCommand("unknown") + assert.Error(t, err) + }) +} diff --git a/internal/llm/engine/cliprovider/claude.go b/internal/llm/engine/cliprovider/claude.go new file mode 100644 index 0000000..ba86d97 --- /dev/null +++ b/internal/llm/engine/cliprovider/claude.go @@ -0,0 +1,30 @@ +package cliprovider + +import "strings" + +func newClaudeProvider() *Provider { + return &Provider{ + Type: TypeClaude, + DisplayName: "Claude CLI", + Command: "claude", + DefaultModel: "claude-sonnet-4-20250514", + LargeModel: "claude-sonnet-4-20250514", + BuildArgs: func(model string, prompt string) []string { + args := []string{ + "-p", prompt, + "--output-format", "text", + } + if model != "" { + args = append(args, "--model", model) + } + return args + }, + ParseResponse: func(output []byte) (string, error) { + return strings.TrimSpace(string(output)), nil + }, + SupportsMaxTokens: true, + MaxTokensFlag: "--max-tokens", + SupportsTemperature: false, + TemperatureFlag: "", + } +} diff --git a/internal/llm/engine/cliprovider/gemini.go b/internal/llm/engine/cliprovider/gemini.go new file mode 100644 index 0000000..f299a8e --- /dev/null +++ b/internal/llm/engine/cliprovider/gemini.go @@ -0,0 +1,27 @@ +package cliprovider + +import "strings" + +func newGeminiProvider() *Provider { + return &Provider{ + Type: TypeGemini, + DisplayName: "Gemini CLI", + Command: "gemini", + DefaultModel: 
"gemini-2.0-flash", + LargeModel: "gemini-2.5-pro-preview-06-05", + BuildArgs: func(model string, prompt string) []string { + return []string{ + "prompt", + "-m", model, + prompt, + } + }, + ParseResponse: func(output []byte) (string, error) { + return strings.TrimSpace(string(output)), nil + }, + SupportsMaxTokens: true, + MaxTokensFlag: "--max-tokens", + SupportsTemperature: true, + TemperatureFlag: "--temperature", + } +} diff --git a/internal/llm/engine/cliprovider/provider.go b/internal/llm/engine/cliprovider/provider.go new file mode 100644 index 0000000..7922d3f --- /dev/null +++ b/internal/llm/engine/cliprovider/provider.go @@ -0,0 +1,141 @@ +package cliprovider + +import ( + "fmt" + "os/exec" + "strings" +) + +// Type represents supported CLI provider types. +type Type string + +const ( + // TypeClaude is the Claude CLI provider. + TypeClaude Type = "claude" + // TypeGemini is the Gemini CLI provider. + TypeGemini Type = "gemini" +) + +// IsValid checks if the provider type is valid. +func (t Type) IsValid() bool { + switch t { + case TypeClaude, TypeGemini: + return true + default: + return false + } +} + +// Provider defines how to interact with a specific CLI tool. +type Provider struct { + // Type is the provider identifier. + Type Type + + // DisplayName is the human-readable name. + DisplayName string + + // Command is the executable name or path. + Command string + + // DefaultModel is the default model to use. + DefaultModel string + + // LargeModel is the model for high complexity tasks (optional). + LargeModel string + + // BuildArgs constructs CLI arguments for the given request. + BuildArgs func(model string, prompt string) []string + + // ParseResponse extracts text from CLI output. + ParseResponse func(output []byte) (string, error) + + // SupportsMaxTokens indicates if --max-tokens or similar is supported. + SupportsMaxTokens bool + + // MaxTokensFlag is the flag name for max tokens (e.g., "--max-tokens"). + MaxTokensFlag string + + // SupportsTemperature indicates if temperature is supported. + SupportsTemperature bool + + // TemperatureFlag is the flag name for temperature. + TemperatureFlag string +} + +// Info represents detected CLI information. +type Info struct { + Provider Type + Name string + Path string + Version string + Available bool +} + +// Supported returns all supported CLI providers. +func Supported() map[Type]*Provider { + return map[Type]*Provider{ + TypeClaude: newClaudeProvider(), + TypeGemini: newGeminiProvider(), + } +} + +// Get returns the provider for the given type. +func Get(providerType Type) (*Provider, error) { + providers := Supported() + provider, ok := providers[providerType] + if !ok { + return nil, fmt.Errorf("unsupported CLI provider: %s", providerType) + } + return provider, nil +} + +// Detect scans for installed CLI tools. +func Detect() []Info { + var results []Info + + providers := Supported() + for providerType, provider := range providers { + info := Info{ + Provider: providerType, + Name: provider.DisplayName, + Available: false, + } + + path, err := exec.LookPath(provider.Command) + if err == nil { + info.Path = path + info.Available = true + info.Version = getProviderVersion(provider) + } + + results = append(results, info) + } + + return results +} + +// GetByCommand finds a provider by its command name. 
+func GetByCommand(command string) (*Provider, error) { + providers := Supported() + for _, provider := range providers { + if provider.Command == command { + return provider, nil + } + } + return nil, fmt.Errorf("no provider found for command: %s", command) +} + +func getProviderVersion(provider *Provider) string { + cmd := exec.Command(provider.Command, "--version") // #nosec G204 + output, err := cmd.Output() + if err != nil { + return "" + } + + lines := strings.Split(strings.TrimSpace(string(output)), "\n") + if len(lines) > 0 { + return strings.TrimSpace(lines[0]) + } + + return "" +} diff --git a/internal/llm/engine/cliprovider/provider_test.go b/internal/llm/engine/cliprovider/provider_test.go new file mode 100644 index 0000000..48e637e --- /dev/null +++ b/internal/llm/engine/cliprovider/provider_test.go @@ -0,0 +1,120 @@ +package cliprovider + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestType_IsValid(t *testing.T) { + tests := []struct { + name string + typ Type + want bool + }{ + {"claude", TypeClaude, true}, + {"gemini", TypeGemini, true}, + {"invalid", Type("invalid"), false}, + {"empty", Type(""), false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, tt.typ.IsValid()) + }) + } +} + +func TestSupported(t *testing.T) { + providers := Supported() + + assert.Len(t, providers, 2) + assert.Contains(t, providers, TypeClaude) + assert.Contains(t, providers, TypeGemini) +} + +func TestGet(t *testing.T) { + t.Run("claude", func(t *testing.T) { + provider, err := Get(TypeClaude) + require.NoError(t, err) + assert.Equal(t, TypeClaude, provider.Type) + assert.Equal(t, "Claude CLI", provider.DisplayName) + assert.Equal(t, "claude", provider.Command) + }) + + t.Run("gemini", func(t *testing.T) { + provider, err := Get(TypeGemini) + require.NoError(t, err) + assert.Equal(t, TypeGemini, provider.Type) + assert.Equal(t, "Gemini CLI", provider.DisplayName) + assert.Equal(t, "gemini", provider.Command) + }) + + t.Run("invalid", func(t *testing.T) { + _, err := Get(Type("invalid")) + assert.Error(t, err) + }) +} + +func TestBuildArgs(t *testing.T) { + t.Run("claude", func(t *testing.T) { + provider := newClaudeProvider() + args := provider.BuildArgs("claude-3-opus", "Hello!") + + assert.Contains(t, args, "-p") + assert.Contains(t, args, "Hello!") + assert.Contains(t, args, "--model") + assert.Contains(t, args, "claude-3-opus") + }) + + t.Run("gemini", func(t *testing.T) { + provider := newGeminiProvider() + args := provider.BuildArgs("gemini-pro", "Hello!") + + assert.Contains(t, args, "prompt") + assert.Contains(t, args, "-m") + assert.Contains(t, args, "gemini-pro") + }) +} + +func TestParseResponse(t *testing.T) { + providers := Supported() + + for typ, provider := range providers { + t.Run(string(typ), func(t *testing.T) { + resp, err := provider.ParseResponse([]byte(" trimmed response \n")) + require.NoError(t, err) + assert.Equal(t, "trimmed response", resp) + }) + } +} + +func TestDetect(t *testing.T) { + info := Detect() + assert.Len(t, info, 2) + + for _, cli := range info { + assert.NotEmpty(t, cli.Provider) + assert.NotEmpty(t, cli.Name) + } +} + +func TestGetByCommand(t *testing.T) { + t.Run("claude", func(t *testing.T) { + provider, err := GetByCommand("claude") + require.NoError(t, err) + assert.Equal(t, TypeClaude, provider.Type) + }) + + t.Run("gemini", func(t *testing.T) { + provider, err := GetByCommand("gemini") + require.NoError(t, err) + 
assert.Equal(t, TypeGemini, provider.Type) + }) + + t.Run("invalid", func(t *testing.T) { + _, err := GetByCommand("unknown") + assert.Error(t, err) + }) +} diff --git a/internal/llm/engine/engine.go b/internal/llm/engine/engine.go new file mode 100644 index 0000000..99756cc --- /dev/null +++ b/internal/llm/engine/engine.go @@ -0,0 +1,113 @@ +package engine + +import "context" + +// Complexity represents task complexity hint (engine-agnostic). +// This allows callers to express intent without coupling to specific engine features. +type Complexity int + +const ( + // ComplexityMinimal is for trivial lookups or boilerplate prompts. + ComplexityMinimal Complexity = iota + // ComplexityLow is for simple transformations, parsing, basic formatting. + ComplexityLow + // ComplexityMedium is for analysis, routing decisions, moderate reasoning. + ComplexityMedium + // ComplexityHigh is for complex reasoning, code generation, deep analysis. + ComplexityHigh +) + +// String returns human-readable complexity name. +func (c Complexity) String() string { + switch c { + case ComplexityMinimal: + return "minimal" + case ComplexityLow: + return "low" + case ComplexityMedium: + return "medium" + case ComplexityHigh: + return "high" + default: + return "unknown" + } +} + +// Request represents an engine-agnostic LLM request. +// All engines receive this unified request format and interpret it according to their capabilities. +type Request struct { + SystemPrompt string + UserPrompt string + MaxTokens int + Temperature float64 + Complexity Complexity +} + +// CombinedPrompt returns system and user prompts combined. +func (r *Request) CombinedPrompt() string { + if r.SystemPrompt == "" { + return r.UserPrompt + } + return r.SystemPrompt + "\n\n" + r.UserPrompt +} + +// LLMEngine is the interface for LLM execution engines. +type LLMEngine interface { + // Execute sends request and returns response text. + Execute(ctx context.Context, req *Request) (string, error) + + // Name returns engine identifier. + Name() string + + // IsAvailable checks if this engine can currently be used. + IsAvailable() bool + + // Capabilities returns what features this engine supports. + Capabilities() Capabilities +} + +// Capabilities describes what features an engine supports. +// This enables graceful degradation when features aren't available. +type Capabilities struct { + // SupportsTemperature indicates if temperature parameter is respected. + SupportsTemperature bool + + // SupportsMaxTokens indicates if max_tokens parameter is respected. + SupportsMaxTokens bool + + // SupportsComplexity indicates if complexity hint affects model selection. + SupportsComplexity bool + + // SupportsStreaming indicates if streaming responses are supported. + SupportsStreaming bool + + // MaxContextLength is the maximum input context length (0 = unknown). + MaxContextLength int + + // Models lists available models for this engine. + Models []string +} + +// Mode represents the preferred engine selection mode. +type Mode string + +const ( + // ModeAuto automatically selects the best available engine. + ModeAuto Mode = "auto" + // ModeMCP forces MCP sampling engine. + ModeMCP Mode = "mcp" + // ModeCLI forces CLI engine. + ModeCLI Mode = "cli" + // ModeAPI forces API engine. + ModeAPI Mode = "api" +) + +// IsValid checks if the engine mode is valid. 
+func (m Mode) IsValid() bool { + switch m { + case ModeAuto, ModeMCP, ModeCLI, ModeAPI: + return true + default: + return false + } +} diff --git a/internal/llm/engine/engine_test.go b/internal/llm/engine/engine_test.go new file mode 100644 index 0000000..2954b45 --- /dev/null +++ b/internal/llm/engine/engine_test.go @@ -0,0 +1,98 @@ +package engine + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestComplexity_String(t *testing.T) { + tests := []struct { + name string + complexity Complexity + want string + }{ + {"low", ComplexityLow, "low"}, + {"medium", ComplexityMedium, "medium"}, + {"high", ComplexityHigh, "high"}, + {"unknown", Complexity(99), "unknown"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, tt.complexity.String()) + }) + } +} + +func TestRequest_CombinedPrompt(t *testing.T) { + tests := []struct { + name string + req Request + want string + }{ + { + name: "with system and user prompt", + req: Request{ + SystemPrompt: "You are a helpful assistant.", + UserPrompt: "Hello!", + }, + want: "You are a helpful assistant.\n\nHello!", + }, + { + name: "only user prompt", + req: Request{ + SystemPrompt: "", + UserPrompt: "Hello!", + }, + want: "Hello!", + }, + { + name: "empty prompts", + req: Request{ + SystemPrompt: "", + UserPrompt: "", + }, + want: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, tt.req.CombinedPrompt()) + }) + } +} + +func TestMode_IsValid(t *testing.T) { + tests := []struct { + name string + mode Mode + valid bool + }{ + {"auto", ModeAuto, true}, + {"mcp", ModeMCP, true}, + {"cli", ModeCLI, true}, + {"api", ModeAPI, true}, + {"invalid", Mode("invalid"), false}, + {"empty", Mode(""), false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.valid, tt.mode.IsValid()) + }) + } +} + +func TestCapabilities_Default(t *testing.T) { + caps := Capabilities{} + + assert.False(t, caps.SupportsTemperature) + assert.False(t, caps.SupportsMaxTokens) + assert.False(t, caps.SupportsComplexity) + assert.False(t, caps.SupportsStreaming) + assert.Equal(t, 0, caps.MaxContextLength) + assert.Nil(t, caps.Models) +} + diff --git a/internal/llm/engine/mcp.go b/internal/llm/engine/mcp.go new file mode 100644 index 0000000..909f231 --- /dev/null +++ b/internal/llm/engine/mcp.go @@ -0,0 +1,116 @@ +package engine + +import ( + "context" + "fmt" + "os" + + mcpsdk "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// MCPEngine implements LLMEngine interface for MCP sampling. +// It delegates LLM calls to the host application via MCP's CreateMessage. +type MCPEngine struct { + session *mcpsdk.ServerSession + verbose bool +} + +// MCPEngineOption is a functional option for MCPEngine. +type MCPEngineOption func(*MCPEngine) + +// WithMCPVerbose enables verbose logging. +func WithMCPVerbose(verbose bool) MCPEngineOption { + return func(e *MCPEngine) { e.verbose = verbose } +} + +// NewMCPEngine creates a new MCP sampling engine. +func NewMCPEngine(session *mcpsdk.ServerSession, opts ...MCPEngineOption) *MCPEngine { + e := &MCPEngine{ + session: session, + verbose: false, + } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// Name returns the engine identifier. +func (e *MCPEngine) Name() string { + return "mcp-sampling" +} + +// IsAvailable checks if the engine can be used. +func (e *MCPEngine) IsAvailable() bool { + return e.session != nil +} + +// Capabilities returns engine capabilities. 
+// MCP sampling capabilities depend on the host LLM, so we're conservative here. +func (e *MCPEngine) Capabilities() Capabilities { + return Capabilities{ + SupportsTemperature: false, // Host decides + SupportsMaxTokens: true, // Passed to CreateMessage + SupportsComplexity: false, // Host decides model + SupportsStreaming: false, // Not implemented + MaxContextLength: 0, // Unknown + Models: nil, // Host decides + } +} + +// Execute sends the request via MCP sampling. +func (e *MCPEngine) Execute(ctx context.Context, req *Request) (string, error) { + if e.session == nil { + return "", fmt.Errorf("MCP session not available") + } + + if e.verbose { + fmt.Fprintf(os.Stderr, "MCP Sampling request:\n MaxTokens: %d\n Prompt length: %d chars\n", + req.MaxTokens, len(req.UserPrompt)) + } + + maxTokens := req.MaxTokens + if maxTokens == 0 { + maxTokens = defaultAPIMaxTokens + } + + result, err := e.session.CreateMessage(ctx, &mcpsdk.CreateMessageParams{ + Messages: []*mcpsdk.SamplingMessage{ + { + Role: "user", + Content: &mcpsdk.TextContent{Text: req.CombinedPrompt()}, + }, + }, + MaxTokens: int64(maxTokens), + }) + if err != nil { + return "", fmt.Errorf("MCP sampling failed: %w", err) + } + + var response string + if textContent, ok := result.Content.(*mcpsdk.TextContent); ok { + response = textContent.Text + } else { + return "", fmt.Errorf("unexpected content type from MCP sampling") + } + + if e.verbose { + fmt.Fprintf(os.Stderr, "MCP Sampling response:\n Model: %s\n Content length: %d chars\n", + result.Model, len(response)) + } + + return response, nil +} + +// GetSession returns the underlying MCP session. +func (e *MCPEngine) GetSession() *mcpsdk.ServerSession { + return e.session +} + +// SetVerbose sets verbose mode. +func (e *MCPEngine) SetVerbose(verbose bool) { + e.verbose = verbose +} + From 6daa442cf43df012eff56c8f1836c45c2fa27215 Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Tue, 2 Dec 2025 14:16:02 +0000 Subject: [PATCH 10/14] feat: implement changes of LLM client --- internal/adapter/checkstyle/converter.go | 24 ++--- internal/adapter/eslint/converter.go | 4 +- internal/adapter/pmd/converter.go | 4 +- internal/adapter/prettier/converter.go | 2 +- internal/adapter/pylint/converter.go | 2 +- internal/adapter/tsc/converter.go | 2 +- internal/cmd/api_key.go | 5 -- internal/cmd/convert.go | 15 ++-- internal/cmd/validate.go | 17 ++-- internal/converter/converter.go | 4 +- internal/llm/client.go | 2 +- internal/llm/client_test.go | 110 +++++++++++++++++++++++ internal/mcp/server.go | 26 ++---- internal/server/server.go | 19 +--- tests/e2e/full_workflow_test.go | 5 +- tests/e2e/mcp_integration_test.go | 5 +- tests/e2e/validator_test.go | 6 +- 17 files changed, 164 insertions(+), 88 deletions(-) create mode 100644 internal/llm/client_test.go diff --git a/internal/adapter/checkstyle/converter.go b/internal/adapter/checkstyle/converter.go index d4ac1c8..c8d12f2 100644 --- a/internal/adapter/checkstyle/converter.go +++ b/internal/adapter/checkstyle/converter.go @@ -121,17 +121,17 @@ func (c *Converter) ConvertRules(ctx context.Context, rules []schema.UserRule, l // Separate modules into Checker-level and TreeWalker-level // Checker-level modules (NOT under TreeWalker) checkerLevelModules := map[string]bool{ - "LineLength": true, - "FileLength": true, - "FileTabCharacter": true, - "NewlineAtEndOfFile": true, - "UniqueProperties": true, - "OrderedProperties": true, - "Translation": true, - "SuppressWarningsFilter": true, + "LineLength": true, + "FileLength": true, + "FileTabCharacter": 
true, + "NewlineAtEndOfFile": true, + "UniqueProperties": true, + "OrderedProperties": true, + "Translation": true, + "SuppressWarningsFilter": true, "BeforeExecutionExclusionFileFilter": true, - "SuppressionFilter": true, - "SuppressionCommentFilter": true, + "SuppressionFilter": true, + "SuppressionCommentFilter": true, } var checkerModules []checkstyleModule @@ -255,8 +255,8 @@ Output: userPrompt := fmt.Sprintf("Convert this Java rule to Checkstyle module:\n\n%s", rule.Say) - // Call LLM with power model + low reasoning - response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) + // Call LLM with minimal complexity + response, err := llmClient.Request(systemPrompt, userPrompt).WithComplexity(llm.ComplexityMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/eslint/converter.go b/internal/adapter/eslint/converter.go index 42d5b86..66decfb 100644 --- a/internal/adapter/eslint/converter.go +++ b/internal/adapter/eslint/converter.go @@ -217,8 +217,8 @@ Output: userPrompt += fmt.Sprintf("\nSeverity: %s", rule.Severity) } - // Call LLM with power model + low reasoning - response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) + // Call LLM with minimal complexity + response, err := llmClient.Request(systemPrompt, userPrompt).WithComplexity(llm.ComplexityMinimal).Execute(ctx) if err != nil { return "", nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/pmd/converter.go b/internal/adapter/pmd/converter.go index 09116c8..55a76e5 100644 --- a/internal/adapter/pmd/converter.go +++ b/internal/adapter/pmd/converter.go @@ -188,8 +188,8 @@ IMPORTANT: Return ONLY the JSON object. 
Do NOT include description, message, or userPrompt := fmt.Sprintf("Convert this Java rule to PMD rule reference:\n\n%s", rule.Say) - // Call LLM with power model + low reasoning - response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) + // Call LLM with minimal complexity + response, err := llmClient.Request(systemPrompt, userPrompt).WithComplexity(llm.ComplexityMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/prettier/converter.go b/internal/adapter/prettier/converter.go index 010d95e..2d951f9 100644 --- a/internal/adapter/prettier/converter.go +++ b/internal/adapter/prettier/converter.go @@ -134,7 +134,7 @@ Output: userPrompt := fmt.Sprintf("Convert this rule to Prettier configuration:\n\n%s", rule.Say) // Call LLM - response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) + response, err := llmClient.Request(systemPrompt, userPrompt).WithComplexity(llm.ComplexityMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/pylint/converter.go b/internal/adapter/pylint/converter.go index 7bd6b55..e07ece1 100644 --- a/internal/adapter/pylint/converter.go +++ b/internal/adapter/pylint/converter.go @@ -210,7 +210,7 @@ Output: } // Call LLM - response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) + response, err := llmClient.Request(systemPrompt, userPrompt).WithComplexity(llm.ComplexityMinimal).Execute(ctx) if err != nil { return "", nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/adapter/tsc/converter.go b/internal/adapter/tsc/converter.go index 9259e34..7dc4a9f 100644 --- a/internal/adapter/tsc/converter.go +++ b/internal/adapter/tsc/converter.go @@ -148,7 +148,7 @@ Output: userPrompt := fmt.Sprintf("Convert this rule to TypeScript compiler configuration:\n\n%s", rule.Say) // Call LLM - response, err := llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMinimal).Execute(ctx) + response, err := llmClient.Request(systemPrompt, userPrompt).WithComplexity(llm.ComplexityMinimal).Execute(ctx) if err != nil { return nil, fmt.Errorf("LLM call failed: %w", err) } diff --git a/internal/cmd/api_key.go b/internal/cmd/api_key.go index 43fb924..ec5c33e 100644 --- a/internal/cmd/api_key.go +++ b/internal/cmd/api_key.go @@ -16,11 +16,6 @@ func promptAPIKeySetup() { promptAPIKeyConfiguration(false) } -// promptAPIKeyIfNeeded checks if OpenAI API key is configured and prompts if not -func promptAPIKeyIfNeeded() { - promptAPIKeyConfiguration(true) -} - // promptAPIKeyConfiguration handles API key configuration with optional existence check func promptAPIKeyConfiguration(checkExisting bool) { envPath := filepath.Join(".sym", ".env") diff --git a/internal/cmd/convert.go b/internal/cmd/convert.go index 6787c9b..8de835d 100644 --- a/internal/cmd/convert.go +++ b/internal/cmd/convert.go @@ -131,18 +131,19 @@ func runNewConverter(userPolicy *schema.UserPolicy) error { convertOutputDir = ".sym" } - // Setup OpenAI client - apiKey, err := getAPIKey() - if err != nil { - return fmt.Errorf("OpenAI API key required: %w", err) - } - timeout := time.Duration(convertTimeout) * time.Second llmClient := llm.NewClient( - apiKey, llm.WithTimeout(timeout), ) + // Ensure at least one backend is available (MCP/CLI/API) + availabilityCtx, cancelAvailability := context.WithTimeout(context.Background(), timeout) + 
	defer cancelAvailability()
+
+	if err := llmClient.CheckAvailability(availabilityCtx); err != nil {
+		return fmt.Errorf("no available LLM backend for convert: %w\nTip: run 'sym init --setup-llm' or configure LLM_BACKEND / LLM_CLI / OPENAI_API_KEY in .sym/.env", err)
+	}
+
 	// Create new converter
 	conv := converter.NewConverter(llmClient, convertOutputDir)
diff --git a/internal/cmd/validate.go b/internal/cmd/validate.go
index 7184b54..b805eea 100644
--- a/internal/cmd/validate.go
+++ b/internal/cmd/validate.go
@@ -73,18 +73,19 @@ func runValidate(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("failed to parse policy: %w", err)
 	}
 
-	// Get OpenAI API key
-	apiKey, err := getAPIKey()
-	if err != nil {
-		return fmt.Errorf("OpenAI API key not configured: %w\nTip: Run 'sym init' or set OPENAI_API_KEY in .sym/.env", err)
-	}
-
 	// Create LLM client
 	llmClient := llm.NewClient(
-		apiKey,
-		llm.WithTimeout(time.Duration(validateTimeout)*time.Second),
+		llm.WithTimeout(time.Duration(validateTimeout) * time.Second),
 	)
 
+	// Ensure at least one backend is available (MCP/CLI/API)
+	availabilityCtx, cancel := context.WithTimeout(context.Background(), time.Duration(validateTimeout)*time.Second)
+	defer cancel()
+
+	if err := llmClient.CheckAvailability(availabilityCtx); err != nil {
+		return fmt.Errorf("no available LLM backend for validate: %w\nTip: run 'sym init --setup-llm' or configure LLM_BACKEND / LLM_CLI / OPENAI_API_KEY in .sym/.env", err)
+	}
+
 	var changes []validator.GitChange
 	if validateStaged {
 		changes, err = validator.GetStagedChanges()
diff --git a/internal/converter/converter.go b/internal/converter/converter.go
index 94053f5..e32836e 100644
--- a/internal/converter/converter.go
+++ b/internal/converter/converter.go
@@ -405,8 +405,8 @@ Reason: Requires knowing which packages are "large"`, linterDescriptions, routin
 
 	userPrompt := fmt.Sprintf("Rule: %s\nCategory: %s", rule.Say, rule.Category)
 
-	// Call LLM with power model + low reasoning (needs some thought for linter selection)
-	response, err := c.llmClient.Request(systemPrompt, userPrompt).WithPower(llm.ReasoningMedium).Execute(ctx)
+	// Call LLM with low complexity (needs some thought for linter selection)
+	response, err := c.llmClient.Request(systemPrompt, userPrompt).WithComplexity(llm.ComplexityLow).Execute(ctx)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Warning: LLM routing failed for rule %s: %v\n", rule.ID, err)
 		return []string{} // Will fall back to llm-validator
diff --git a/internal/llm/client.go b/internal/llm/client.go
index 74659c3..a1d6f28 100644
--- a/internal/llm/client.go
+++ b/internal/llm/client.go
@@ -89,7 +89,7 @@ func WithMode(mode engine.Mode) ClientOption {
 }
 
 // NewClient creates a new LLM client.
-func NewClient(_ string, opts ...ClientOption) *Client { +func NewClient(opts ...ClientOption) *Client { // Load default config config := LoadLLMConfig() diff --git a/internal/llm/client_test.go b/internal/llm/client_test.go new file mode 100644 index 0000000..7257a88 --- /dev/null +++ b/internal/llm/client_test.go @@ -0,0 +1,110 @@ +package llm + +import ( + "testing" + + "github.com/DevSymphony/sym-cli/internal/llm/engine" + "github.com/stretchr/testify/assert" +) + +func TestNewClient(t *testing.T) { + t.Run("default_config", func(t *testing.T) { + client := NewClient() + assert.NotNil(t, client) + assert.NotNil(t, client.GetConfig()) + }) + + t.Run("with_options_and_config", func(t *testing.T) { + cfg := &LLMConfig{ + Backend: engine.ModeAPI, + APIKey: "sk-test", + } + client := NewClient(WithConfig(cfg), WithVerbose(true)) + assert.NotNil(t, client) + assert.Equal(t, engine.ModeAPI, client.config.Backend) + }) + + t.Run("with_mode_option", func(t *testing.T) { + client := NewClient(WithMode(engine.ModeAPI)) + assert.NotNil(t, client) + }) +} + +func TestClient_GetActiveEngine(t *testing.T) { + t.Run("with API engine", func(t *testing.T) { + cfg := &LLMConfig{ + Backend: engine.ModeAPI, + APIKey: "sk-test", + } + client := NewClient(WithConfig(cfg)) + eng := client.GetActiveEngine() + assert.NotNil(t, eng) + assert.Equal(t, "openai-api", eng.Name()) + }) + + t.Run("no engine available", func(t *testing.T) { + cfg := &LLMConfig{ + Backend: engine.ModeAPI, + // No API key + } + client := NewClient(WithConfig(cfg)) + eng := client.GetActiveEngine() + assert.Nil(t, eng) + }) +} + +func TestClient_GetEngines(t *testing.T) { + cfg := &LLMConfig{ + Backend: engine.ModeAuto, + APIKey: "sk-test", + CLI: "claude", + } + client := NewClient(WithConfig(cfg)) + engines := client.GetEngines() + + // Should have at least API engine (CLI might not be available) + assert.NotEmpty(t, engines) +} + +func TestRequestBuilder(t *testing.T) { + client := NewClient() + + t.Run("basic request", func(t *testing.T) { + builder := client.Request("system", "user") + assert.NotNil(t, builder) + }) + + t.Run("with complexity", func(t *testing.T) { + builder := client.Request("system", "user"). + WithComplexity(engine.ComplexityHigh) + assert.NotNil(t, builder) + }) + + t.Run("with max tokens", func(t *testing.T) { + builder := client.Request("system", "user"). + WithMaxTokens(2000) + assert.NotNil(t, builder) + }) + + t.Run("with temperature", func(t *testing.T) { + builder := client.Request("system", "user"). + WithTemperature(0.7) + assert.NotNil(t, builder) + }) + + t.Run("chained options", func(t *testing.T) { + builder := client.Request("system", "user"). + WithComplexity(engine.ComplexityMedium). + WithMaxTokens(1500). 
+ WithTemperature(0.8) + assert.NotNil(t, builder) + }) +} + +func TestModeConstants(t *testing.T) { + // Verify backward compatibility + assert.Equal(t, engine.ModeAPI, ModeAPI) + assert.Equal(t, engine.ModeMCP, ModeMCP) + assert.Equal(t, engine.ModeCLI, ModeCLI) + assert.Equal(t, engine.ModeAuto, ModeAuto) +} diff --git a/internal/mcp/server.go b/internal/mcp/server.go index d2f927a..1a41c37 100644 --- a/internal/mcp/server.go +++ b/internal/mcp/server.go @@ -34,14 +34,9 @@ func ConvertPolicyWithLLM(userPolicyPath, codePolicyPath string) error { return fmt.Errorf("failed to parse user policy: %w", err) } - // Setup LLM client - apiKey := envutil.GetAPIKey("OPENAI_API_KEY") - if apiKey == "" { - return fmt.Errorf("OPENAI_API_KEY not found in environment or .sym/.env") - } - - llmClient := llm.NewClient(apiKey, - llm.WithTimeout(30*time.Second), + // Setup LLM client (backend auto-selection via @llm) + llmClient := llm.NewClient( + llm.WithTimeout(30 * time.Second), ) // Create converter with output directory @@ -479,19 +474,12 @@ func (s *Server) handleValidateCode(ctx context.Context, session *sdkmcp.ServerS var llmClient *llm.Client if session != nil { // MCP mode: use host LLM via sampling - llmClient = llm.NewClient("", llm.WithMCPSession(session)) + llmClient = llm.NewClient(llm.WithMCPSession(session)) fmt.Fprintf(os.Stderr, "✓ Using host LLM via MCP sampling\n") } else { - // API mode: use OpenAI API directly - apiKey := envutil.GetAPIKey("OPENAI_API_KEY") - if apiKey == "" { - return nil, &RPCError{ - Code: -32000, - Message: "OPENAI_API_KEY not found in environment or .sym/.env", - } - } - llmClient = llm.NewClient(apiKey) - fmt.Fprintf(os.Stderr, "✓ Using OpenAI API directly\n") + // Auto mode: use configured LLM backend (CLI/API) + llmClient = llm.NewClient() + fmt.Fprintf(os.Stderr, "✓ Using configured LLM backend\n") } // Create unified validator that handles all engines + RBAC diff --git a/internal/server/server.go b/internal/server/server.go index d7f52bd..b6947b3 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -667,17 +667,9 @@ func (s *Server) handleConvert(w http.ResponseWriter, r *http.Request) { // Determine output directory (same as input file) outputDir := filepath.Dir(policyPath) - // Get API key - apiKey, err := s.getAPIKey() - if err != nil { - fmt.Printf("Warning: %v, conversion may be limited\n", err) - apiKey = "" - } - - // Setup LLM client + // Setup LLM client (backend auto-selection via @llm) timeout := 30 * time.Second llmClient := llm.NewClient( - apiKey, llm.WithTimeout(timeout), ) @@ -719,12 +711,3 @@ func (s *Server) handleConvert(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") _ = json.NewEncoder(w).Encode(result) } - -// getAPIKey retrieves the OpenAI API key from environment or .sym/.env -func (s *Server) getAPIKey() (string, error) { - key := envutil.GetAPIKey("OPENAI_API_KEY") - if key == "" { - return "", fmt.Errorf("OPENAI_API_KEY not found in environment or .sym/.env") - } - return key, nil -} diff --git a/tests/e2e/full_workflow_test.go b/tests/e2e/full_workflow_test.go index 6c1a418..696eb28 100644 --- a/tests/e2e/full_workflow_test.go +++ b/tests/e2e/full_workflow_test.go @@ -74,8 +74,7 @@ func TestE2E_FullWorkflow(t *testing.T) { t.Log("STEP 2: Converting user policy using LLM") client := llm.NewClient( - apiKey, - llm.WithTimeout(30*time.Second), + llm.WithTimeout(30 * time.Second), ) outputDir := filepath.Join(testDir, ".sym") @@ -333,7 +332,7 @@ func 
TestE2E_CodeGenerationFeedbackLoop(t *testing.T) { }, } - client := llm.NewClient(apiKey) + client := llm.NewClient() v := validator.NewLLMValidator(client, policy) ctx := context.Background() diff --git a/tests/e2e/mcp_integration_test.go b/tests/e2e/mcp_integration_test.go index 6bb0187..3767d7e 100644 --- a/tests/e2e/mcp_integration_test.go +++ b/tests/e2e/mcp_integration_test.go @@ -142,8 +142,7 @@ func TestMCP_ValidateAIGeneratedCode(t *testing.T) { // Create LLM client client := llm.NewClient( - apiKey, - llm.WithTimeout(30*time.Second), + llm.WithTimeout(30 * time.Second), ) // Create validator @@ -379,7 +378,7 @@ func TestMCP_EndToEndWorkflow(t *testing.T) { // Step 4: Validate generated code t.Log("STEP 4: Validating AI-generated code") - client := llm.NewClient(apiKey) + client := llm.NewClient() v := validator.NewLLMValidator(client, policy) result, err := v.Validate(context.Background(), []validator.GitChange{ diff --git a/tests/e2e/validator_test.go b/tests/e2e/validator_test.go index f9643dc..23e890a 100644 --- a/tests/e2e/validator_test.go +++ b/tests/e2e/validator_test.go @@ -31,7 +31,7 @@ func TestE2E_ValidatorWithPolicy(t *testing.T) { require.NotEmpty(t, policy.Rules, "Policy should have rules") // Create LLM client - client := llm.NewClient(apiKey) + client := llm.NewClient() // Create validator v := validator.NewLLMValidator(client, policy) @@ -83,7 +83,7 @@ func TestE2E_ValidatorWithGoodCode(t *testing.T) { require.NoError(t, err) // Create LLM client - client := llm.NewClient(apiKey) + client := llm.NewClient() // Create validator v := validator.NewLLMValidator(client, policy) @@ -182,7 +182,7 @@ func TestE2E_ValidatorFilter(t *testing.T) { require.NoError(t, err) // Create LLM client - client := llm.NewClient(apiKey) + client := llm.NewClient() // Create validator v := validator.NewLLMValidator(client, policy) From 3e997b8d0da764b4678339e115836b79c55891de Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Wed, 3 Dec 2025 02:34:49 +0000 Subject: [PATCH 11/14] fix: handle LLM configuration error --- internal/llm/client.go | 8 +++++++- internal/llm/engine/cli.go | 2 +- internal/llm/engine/cli_test.go | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/internal/llm/client.go b/internal/llm/client.go index a1d6f28..800f190 100644 --- a/internal/llm/client.go +++ b/internal/llm/client.go @@ -77,7 +77,13 @@ func WithMCPSession(session *mcpsdk.ServerSession) ClientOption { // WithConfig sets a custom LLM configuration. 
func WithConfig(cfg *LLMConfig) ClientOption { return func(c *Client) { + if cfg == nil { + return + } c.config = cfg + if mode := cfg.GetEffectiveBackend(); mode != "" { + c.mode = mode + } } } @@ -98,7 +104,7 @@ func NewClient(opts ...ClientOption) *Client { client := &Client{ config: config, - mode: engine.ModeAuto, + mode: config.GetEffectiveBackend(), maxTokens: defaultMaxTokens, temperature: defaultTemperature, verbose: false, diff --git a/internal/llm/engine/cli.go b/internal/llm/engine/cli.go index 55ae047..1d605bc 100644 --- a/internal/llm/engine/cli.go +++ b/internal/llm/engine/cli.go @@ -171,7 +171,7 @@ func (e *CLIEngine) Execute(ctx context.Context, req *Request) (string, error) { if cmdCtx.Err() == context.DeadlineExceeded { return "", fmt.Errorf("CLI command timed out after %v", e.timeout) } - return "", fmt.Errorf("CLI command failed: %w\nstderr: %s", err, stderr.String()) + return "", fmt.Errorf("CLI command failed: %w\nstdout: %s\nstderr: %s", err, stdout.String(), stderr.String()) } response, err := e.provider.ParseResponse(stdout.Bytes()) diff --git a/internal/llm/engine/cli_test.go b/internal/llm/engine/cli_test.go index da9ec38..bfe3dfe 100644 --- a/internal/llm/engine/cli_test.go +++ b/internal/llm/engine/cli_test.go @@ -38,7 +38,7 @@ func TestCLIEngine_Capabilities(t *testing.T) { caps := engine.Capabilities() - assert.True(t, caps.SupportsMaxTokens) + assert.False(t, caps.SupportsMaxTokens) assert.False(t, caps.SupportsStreaming) assert.True(t, caps.SupportsComplexity) // Has LargeModel assert.NotEmpty(t, caps.Models) From 5ef16ecb7453a6f3bd7a70d9a35e5a627bc5812d Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Wed, 3 Dec 2025 02:34:58 +0000 Subject: [PATCH 12/14] fix: update Claude model configurations and disable max tokens support --- internal/llm/engine/cliprovider/claude.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/llm/engine/cliprovider/claude.go b/internal/llm/engine/cliprovider/claude.go index ba86d97..4961c98 100644 --- a/internal/llm/engine/cliprovider/claude.go +++ b/internal/llm/engine/cliprovider/claude.go @@ -7,8 +7,8 @@ func newClaudeProvider() *Provider { Type: TypeClaude, DisplayName: "Claude CLI", Command: "claude", - DefaultModel: "claude-sonnet-4-20250514", - LargeModel: "claude-sonnet-4-20250514", + DefaultModel: "claude-haiku-4-5-20251001", + LargeModel: "claude-sonnet-4-5-20250929", BuildArgs: func(model string, prompt string) []string { args := []string{ "-p", prompt, @@ -22,8 +22,8 @@ func newClaudeProvider() *Provider { ParseResponse: func(output []byte) (string, error) { return strings.TrimSpace(string(output)), nil }, - SupportsMaxTokens: true, - MaxTokensFlag: "--max-tokens", + SupportsMaxTokens: false, + MaxTokensFlag: "", SupportsTemperature: false, TemperatureFlag: "", } From 0596901718b138cf9809f8130fdec59e0e7ba832 Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Wed, 3 Dec 2025 03:25:49 +0000 Subject: [PATCH 13/14] refactor: clean up duplicated code --- internal/adapter/checkstyle/converter.go | 200 ----------------------- internal/adapter/eslint/converter.go | 34 ---- internal/adapter/pmd/converter.go | 55 ------- internal/adapter/pylint/converter.go | 24 --- internal/converter/converter.go | 40 ----- internal/llm/client.go | 58 ------- internal/mcp/server.go | 2 - 7 files changed, 413 deletions(-) diff --git a/internal/adapter/checkstyle/converter.go b/internal/adapter/checkstyle/converter.go index db466f9..c8d12f2 100644 --- a/internal/adapter/checkstyle/converter.go +++ 
b/internal/adapter/checkstyle/converter.go @@ -289,9 +289,6 @@ Output: // Filter properties to only include valid ones for this module filteredProps := filterValidProperties(result.ModuleName, result.Properties) - // Filter properties to only include valid ones for this module - filteredProps := filterValidProperties(result.ModuleName, result.Properties) - // Build module module := &checkstyleModule{ Name: result.ModuleName, @@ -304,11 +301,6 @@ Output: Value: mapCheckstyleSeverity(result.Severity), }) - // Add filtered properties - for key, value := range filteredProps { - if key == "severity" { - continue // Already added above - } // Add filtered properties for key, value := range filteredProps { if key == "severity" { @@ -528,195 +520,3 @@ func filterValidProperties(moduleName string, properties map[string]string) map[ } return result } - -// validCheckstyleProperties defines valid properties for each Checkstyle module -// This prevents LLM from generating invalid properties that cause runtime errors -var validCheckstyleProperties = map[string]map[string]bool{ - "TypeName": { - "severity": true, - "format": true, - "tokens": true, - }, - "MethodName": { - "severity": true, - "format": true, - "allowClassName": true, - "applyToPublic": true, - "applyToProtected": true, - "applyToPackage": true, - "applyToPrivate": true, - "tokens": true, - }, - "ParameterName": { - "severity": true, - "format": true, - "ignoreOverridden": true, - "accessModifiers": true, - }, - "LocalVariableName": { - "severity": true, - "format": true, - "allowOneCharVarInForLoop": true, - }, - "ConstantName": { - "severity": true, - "format": true, - "applyToPublic": true, - "applyToProtected": true, - "applyToPackage": true, - "applyToPrivate": true, - }, - "LineLength": { - "severity": true, - "max": true, - "ignorePattern": true, - "fileExtensions": true, - }, - "MethodLength": { - "severity": true, - "max": true, - "countEmpty": true, - "tokens": true, - }, - "ParameterNumber": { - "severity": true, - "max": true, - "ignoreOverriddenMethods": true, - "tokens": true, - }, - "FileLength": { - "severity": true, - "max": true, - "fileExtensions": true, - }, - "Indentation": { - "severity": true, - "basicOffset": true, - "braceAdjustment": true, - "caseIndent": true, - "throwsIndent": true, - "arrayInitIndent": true, - "lineWrappingIndentation": true, - "forceStrictCondition": true, - }, - "WhitespaceAround": { - "severity": true, - "allowEmptyConstructors": true, - "allowEmptyMethods": true, - "allowEmptyTypes": true, - "allowEmptyLoops": true, - "allowEmptyLambdas": true, - "allowEmptyCatches": true, - "ignoreEnhancedForColon": true, - "tokens": true, - }, - "NeedBraces": { - "severity": true, - "allowSingleLineStatement": true, - "allowEmptyLoopBody": true, - "tokens": true, - }, - "LeftCurly": { - "severity": true, - "option": true, - "ignoreEnums": true, - "tokens": true, - }, - "RightCurly": { - "severity": true, - "option": true, - "tokens": true, - }, - "AvoidStarImport": { - "severity": true, - "excludes": true, - "allowClassImports": true, - "allowStaticMemberImports": true, - }, - "IllegalImport": { - "severity": true, - "illegalPkgs": true, - "illegalClasses": true, - "regexp": true, - }, - "UnusedImports": { - "severity": true, - "processJavadoc": true, - }, - "CyclomaticComplexity": { - "severity": true, - "max": true, - "switchBlockAsSingleDecisionPoint": true, - "tokens": true, - }, - "NPathComplexity": { - "severity": true, - "max": true, - }, - "JavadocMethod": { - "severity": true, - 
"accessModifiers": true, - "allowMissingParamTags": true, - "allowMissingReturnTag": true, - "allowedAnnotations": true, - "validateThrows": true, - "tokens": true, - }, - "JavadocType": { - "severity": true, - "scope": true, - "excludeScope": true, - "authorFormat": true, - "versionFormat": true, - "allowMissingParamTags": true, - "allowUnknownTags": true, - "allowedAnnotations": true, - "tokens": true, - }, - "MissingJavadocMethod": { - "severity": true, - "minLineCount": true, - "allowedAnnotations": true, - "scope": true, - "excludeScope": true, - "allowMissingPropertyJavadoc": true, - "ignoreMethodNamesRegex": true, - "tokens": true, - }, - "EmptyBlock": { - "severity": true, - "option": true, - "tokens": true, - }, - "MagicNumber": { - "severity": true, - "ignoreNumbers": true, - "ignoreHashCodeMethod": true, - "ignoreAnnotation": true, - "ignoreFieldDeclaration": true, - "ignoreAnnotationElementDefaults": true, - "constantWaiverParentToken": true, - "tokens": true, - }, -} - -// filterValidProperties filters out invalid properties for a given module -func filterValidProperties(moduleName string, properties map[string]string) map[string]string { - validProps, hasDefinedProps := validCheckstyleProperties[moduleName] - if !hasDefinedProps { - // If module is not in our whitelist, only allow severity - result := make(map[string]string) - if sev, ok := properties["severity"]; ok { - result["severity"] = sev - } - return result - } - - result := make(map[string]string) - for key, value := range properties { - if validProps[key] { - result[key] = value - } - } - return result -} diff --git a/internal/adapter/eslint/converter.go b/internal/adapter/eslint/converter.go index a9a09be..66decfb 100644 --- a/internal/adapter/eslint/converter.go +++ b/internal/adapter/eslint/converter.go @@ -151,13 +151,6 @@ Return ONLY a JSON object (no markdown fences) with this structure: "options": {...} } -Available native ESLint rules: -- Console/Debug: no-console, no-debugger, no-alert -- Variables: no-unused-vars, no-undef, no-var, prefer-const -- Naming: camelcase, new-cap, id-length, id-match -- Code Quality: eqeqeq, no-eval, no-implied-eval, no-new-func -- Complexity: complexity, max-depth, max-nested-callbacks -- Length/Size: max-len, max-lines, max-lines-per-function, max-params, max-statements Available native ESLint rules: - Console/Debug: no-console, no-debugger, no-alert - Variables: no-unused-vars, no-undef, no-var, prefer-const @@ -169,14 +162,6 @@ Available native ESLint rules: - Imports: no-restricted-imports, no-duplicate-imports - Best Practices: curly, dot-notation, no-else-return, no-empty, no-empty-function, no-magic-numbers, no-throw-literal, no-useless-return, require-await -CRITICAL RULES: -1. ONLY use native ESLint rules - do NOT invent or guess rule names -2. If no rule can enforce this requirement, return rule_name as empty string "" -3. Do NOT suggest plugin rules (e.g., @typescript-eslint/*, eslint-plugin-*) -4. When in doubt, return empty rule_name - it's better to skip than use wrong rule -- Imports: no-restricted-imports, no-duplicate-imports -- Best Practices: curly, dot-notation, no-else-return, no-empty, no-empty-function, no-magic-numbers, no-throw-literal, no-useless-return, require-await - CRITICAL RULES: 1. ONLY use native ESLint rules - do NOT invent or guess rule names 2. 
If no rule can enforce this requirement, return rule_name as empty string "" @@ -218,25 +203,6 @@ Output: } (Reason: No native ESLint rule for file naming) -Input: "No hardcoded API keys" -Output: -{ - "rule_name": "", - "severity": "off", - "options": null -} -(Reason: Requires plugin or semantic analysis)` -} - -Input: "File names must be kebab-case" -Output: -{ - "rule_name": "", - "severity": "off", - "options": null -} -(Reason: No native ESLint rule for file naming) - Input: "No hardcoded API keys" Output: { diff --git a/internal/adapter/pmd/converter.go b/internal/adapter/pmd/converter.go index 432ce0d..55a76e5 100644 --- a/internal/adapter/pmd/converter.go +++ b/internal/adapter/pmd/converter.go @@ -147,39 +147,12 @@ func (c *Converter) ConvertRules(ctx context.Context, rules []schema.UserRule, l func (c *Converter) convertSingleRule(ctx context.Context, rule schema.UserRule, llmClient *llm.Client) (*pmdRule, error) { systemPrompt := `You are a PMD 7.x configuration expert. Convert natural language Java coding rules to PMD rule references. -Return ONLY a JSON object with exactly these two fields (no other fields): Return ONLY a JSON object with exactly these two fields (no other fields): { "rule_ref": "category/java/category.xml/RuleName", "priority": 1 } -Valid PMD 7.x categories and rules: -- category/java/bestpractices.xml/UnusedPrivateMethod -- category/java/bestpractices.xml/UnusedLocalVariable -- category/java/bestpractices.xml/UnusedFormalParameter -- category/java/bestpractices.xml/AvoidReassigningParameters -- category/java/codestyle.xml/ShortVariable -- category/java/codestyle.xml/LongVariable -- category/java/codestyle.xml/ShortMethodName -- category/java/codestyle.xml/ClassNamingConventions -- category/java/codestyle.xml/MethodNamingConventions -- category/java/codestyle.xml/FieldNamingConventions -- category/java/codestyle.xml/UnnecessaryImport -- category/java/design.xml/TooManyMethods -- category/java/design.xml/ExcessiveMethodLength -- category/java/design.xml/ExcessiveParameterList -- category/java/design.xml/CyclomaticComplexity -- category/java/design.xml/NPathComplexity -- category/java/design.xml/GodClass -- category/java/errorprone.xml/EmptyCatchBlock -- category/java/errorprone.xml/AvoidCatchingNPE -- category/java/errorprone.xml/EmptyIfStmt -- category/java/security.xml/HardCodedCryptoKey - "rule_ref": "category/java/category.xml/RuleName", - "priority": 1 -} - Valid PMD 7.x categories and rules: - category/java/bestpractices.xml/UnusedPrivateMethod - category/java/bestpractices.xml/UnusedLocalVariable @@ -203,17 +176,14 @@ Valid PMD 7.x categories and rules: - category/java/errorprone.xml/EmptyIfStmt - category/java/security.xml/HardCodedCryptoKey -Priority: 1=High, 2=Medium-High, 3=Medium, 4=Low, 5=Info Priority: 1=High, 2=Medium-High, 3=Medium, 4=Low, 5=Info -If the rule cannot be mapped to a valid PMD rule, return: If the rule cannot be mapped to a valid PMD rule, return: { "rule_ref": "", "priority": 3 } -IMPORTANT: Return ONLY the JSON object. Do NOT include description, message, or any other fields.` IMPORTANT: Return ONLY the JSON object. Do NOT include description, message, or any other fields.` userPrompt := fmt.Sprintf("Convert this Java rule to PMD rule reference:\n\n%s", rule.Say) @@ -224,7 +194,6 @@ IMPORTANT: Return ONLY the JSON object. 
Do NOT include description, message, or return nil, fmt.Errorf("LLM call failed: %w", err) } - // Parse response - extract JSON object // Parse response - extract JSON object response = strings.TrimSpace(response) response = strings.TrimPrefix(response, "```json") @@ -232,11 +201,6 @@ IMPORTANT: Return ONLY the JSON object. Do NOT include description, message, or response = strings.TrimSuffix(response, "```") response = strings.TrimSpace(response) - // Find JSON object boundaries to handle extra text - startIdx := strings.Index(response, "{") - endIdx := strings.LastIndex(response, "}") - if startIdx == -1 || endIdx == -1 || endIdx <= startIdx { - return nil, fmt.Errorf("no valid JSON object found in response") // Find JSON object boundaries to handle extra text startIdx := strings.Index(response, "{") endIdx := strings.LastIndex(response, "}") @@ -244,7 +208,6 @@ IMPORTANT: Return ONLY the JSON object. Do NOT include description, message, or return nil, fmt.Errorf("no valid JSON object found in response") } response = response[startIdx : endIdx+1] - response = response[startIdx : endIdx+1] var result struct { RuleRef string `json:"rule_ref"` @@ -277,24 +240,6 @@ IMPORTANT: Return ONLY the JSON object. Do NOT include description, message, or result.Priority = 5 } - // Validate rule_ref format: must start with "category/java/" - if !strings.HasPrefix(result.RuleRef, "category/java/") { - // Try to fix old format (rulesets/java/...) to new format (category/java/...) - if strings.HasPrefix(result.RuleRef, "rulesets/java/") { - result.RuleRef = strings.Replace(result.RuleRef, "rulesets/java/", "category/java/", 1) - } else { - return nil, nil // Invalid format, skip this rule - } - } - - // Validate priority range - if result.Priority < 1 { - result.Priority = 3 - } - if result.Priority > 5 { - result.Priority = 5 - } - return &pmdRule{ Ref: result.RuleRef, Priority: result.Priority, diff --git a/internal/adapter/pylint/converter.go b/internal/adapter/pylint/converter.go index f07405b..e07ece1 100644 --- a/internal/adapter/pylint/converter.go +++ b/internal/adapter/pylint/converter.go @@ -298,9 +298,6 @@ func (c *Converter) generatePylintRC(enabledRules []string, options map[string]m // getOptionSection returns the Pylint config section for a given option func getOptionSection(option string) string { formatOptions := map[string]bool{ - "max-line-length": true, - "max-module-lines": true, - "indent-string": true, "max-line-length": true, "max-module-lines": true, "indent-string": true, @@ -308,16 +305,6 @@ func getOptionSection(option string) string { } basicOptions := map[string]bool{ - "variable-rgx": true, - "function-rgx": true, - "class-rgx": true, - "const-rgx": true, - "argument-rgx": true, - "attr-rgx": true, - "method-rgx": true, - "module-rgx": true, - "good-names": true, - "bad-names": true, "variable-rgx": true, "function-rgx": true, "class-rgx": true, @@ -343,17 +330,6 @@ func getOptionSection(option string) string { "max-public-methods": true, "max-bool-expr": true, "max-nested-blocks": true, - "max-args": true, - "max-locals": true, - "max-returns": true, - "max-branches": true, - "max-statements": true, - "max-parents": true, - "max-attributes": true, - "min-public-methods": true, - "max-public-methods": true, - "max-bool-expr": true, - "max-nested-blocks": true, } exceptOptions := map[string]bool{ diff --git a/internal/converter/converter.go b/internal/converter/converter.go index 0f6be19..e32836e 100644 --- a/internal/converter/converter.go +++ 
b/internal/converter/converter.go @@ -37,13 +37,10 @@ func NewConverter(llmClient *llm.Client, outputDir string) *Converter { // ConvertResult represents the result of conversion type ConvertResult struct { - GeneratedFiles []string // List of generated file paths (including code-policy.json) GeneratedFiles []string // List of generated file paths (including code-policy.json) CodePolicy *schema.CodePolicy // Generated code policy Errors map[string]error // Errors per linter Warnings []string // Conversion warnings - Errors map[string]error // Errors per linter - Warnings []string // Conversion warnings } // Convert is the main entry point for converting user policy to linter configs @@ -254,7 +251,6 @@ func (c *Converter) Convert(ctx context.Context, userPolicy *schema.UserPolicy) // routeRulesWithLLM uses LLM to determine which linters are appropriate for each rule // Rules are processed in parallel for better performance -// Rules are processed in parallel for better performance func (c *Converter) routeRulesWithLLM(ctx context.Context, userPolicy *schema.UserPolicy) map[string][]schema.UserRule { type routeResult struct { rule schema.UserRule @@ -264,15 +260,6 @@ func (c *Converter) routeRulesWithLLM(ctx context.Context, userPolicy *schema.Us results := make(chan routeResult, len(userPolicy.Rules)) var wg sync.WaitGroup - // Process rules in parallel - type routeResult struct { - rule schema.UserRule - linters []string - } - - results := make(chan routeResult, len(userPolicy.Rules)) - var wg sync.WaitGroup - // Process rules in parallel for _, rule := range userPolicy.Rules { // Get languages for this rule @@ -286,7 +273,6 @@ func (c *Converter) routeRulesWithLLM(ctx context.Context, userPolicy *schema.Us if len(availableLinters) == 0 { // No language-specific linters, use llm-validator results <- routeResult{rule: rule, linters: []string{llmValidatorEngine}} - results <- routeResult{rule: rule, linters: []string{llmValidatorEngine}} continue } @@ -294,12 +280,6 @@ func (c *Converter) routeRulesWithLLM(ctx context.Context, userPolicy *schema.Us go func(r schema.UserRule, linters []string) { defer wg.Done() - wg.Add(1) - go func(r schema.UserRule, linters []string) { - defer wg.Done() - - // Ask LLM which linters are appropriate for this rule - selectedLinters := c.selectLintersForRule(ctx, r, linters) // Ask LLM which linters are appropriate for this rule selectedLinters := c.selectLintersForRule(ctx, r, linters) @@ -318,26 +298,6 @@ func (c *Converter) routeRulesWithLLM(ctx context.Context, userPolicy *schema.Us close(results) }() - // Collect results - linterRules := make(map[string][]schema.UserRule) - for result := range results { - for _, linter := range result.linters { - linterRules[linter] = append(linterRules[linter], result.rule) - if len(selectedLinters) == 0 { - // LLM couldn't map to any linter, use llm-validator - results <- routeResult{rule: r, linters: []string{llmValidatorEngine}} - } else { - results <- routeResult{rule: r, linters: selectedLinters} - } - }(rule, availableLinters) - } - - // Close results channel after all goroutines complete - go func() { - wg.Wait() - close(results) - }() - // Collect results linterRules := make(map[string][]schema.UserRule) for result := range results { diff --git a/internal/llm/client.go b/internal/llm/client.go index f76a764..800f190 100644 --- a/internal/llm/client.go +++ b/internal/llm/client.go @@ -15,8 +15,6 @@ const ( defaultMaxTokens = 1000 defaultTemperature = 1.0 defaultTimeout = 60 * time.Second - defaultTemperature = 1.0 
- defaultTimeout = 60 * time.Second ) const ( @@ -123,62 +121,6 @@ func NewClient(opts ...ClientOption) *Client { return client } -// Request creates a new request builder -// -// Usage: -// -// client.Request(system, user).Execute(ctx) // fast model (gpt-4o-mini) -// client.Request(system, user).WithPower(llm.ReasoningMedium).Execute(ctx) // power model (o3-mini) -// client.Request(system, user).WithMaxTokens(2000).Execute(ctx) // custom tokens -func (c *Client) Request(systemPrompt, userPrompt string) *RequestBuilder { - return &RequestBuilder{ - client: c, - system: systemPrompt, - user: userPrompt, - maxTokens: c.maxTokens, - temperature: c.temperature, - usePower: false, - } -} - -// RequestBuilder builds and executes LLM requests with chain methods -type RequestBuilder struct { - client *Client - system string - user string - maxTokens int - temperature float64 - usePower bool - effort ReasoningEffort -} - -// WithPower enables power model (o3-mini) with specified reasoning effort -func (r *RequestBuilder) WithPower(effort ReasoningEffort) *RequestBuilder { - r.usePower = true - r.effort = effort - return r -} - -// WithMaxTokens sets max tokens for this request -func (r *RequestBuilder) WithMaxTokens(tokens int) *RequestBuilder { - r.maxTokens = tokens - return r -} - -// WithTemperature sets temperature for this request -func (r *RequestBuilder) WithTemperature(temp float64) *RequestBuilder { - r.temperature = temp - return r -} - -// Execute sends the request and returns the response -func (r *RequestBuilder) Execute(ctx context.Context) (string, error) { - if r.client.mode == ModeMCP { - return r.client.executeViaMCP(ctx, r) - } - return r.client.executeViaAPI(ctx, r) -} - // initEngines initializes the engine fallback chain based on configuration. func (c *Client) initEngines() { c.engines = []engine.LLMEngine{} diff --git a/internal/mcp/server.go b/internal/mcp/server.go index 98b74d3..1a41c37 100644 --- a/internal/mcp/server.go +++ b/internal/mcp/server.go @@ -226,7 +226,6 @@ func (s *Server) runStdioWithSDK(ctx context.Context) error { "role": input.Role, } result, rpcErr := s.handleValidateCode(ctx, req.Session, params) - result, rpcErr := s.handleValidateCode(ctx, req.Session, params) if rpcErr != nil { return &sdkmcp.CallToolResult{IsError: true}, nil, fmt.Errorf("%s", rpcErr.Message) } @@ -422,7 +421,6 @@ type ViolationItem struct { // handleValidateCode handles code validation requests. // It validates git changes (diff) instead of entire files for efficiency. 
-func (s *Server) handleValidateCode(ctx context.Context, session *sdkmcp.ServerSession, params map[string]interface{}) (interface{}, *RPCError) { func (s *Server) handleValidateCode(ctx context.Context, session *sdkmcp.ServerSession, params map[string]interface{}) (interface{}, *RPCError) { // Get policy for validation (convert UserPolicy if needed) validationPolicy, err := s.getValidationPolicy() From fb31d183c3eac6c8482e4d91f152c89ea8824b2a Mon Sep 17 00:00:00 2001 From: sehwan505 Date: Wed, 3 Dec 2025 07:29:44 +0000 Subject: [PATCH 14/14] refactor: remove unused TestClient_GetEngines --- internal/llm/client_test.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/internal/llm/client_test.go b/internal/llm/client_test.go index 7257a88..c462d48 100644 --- a/internal/llm/client_test.go +++ b/internal/llm/client_test.go @@ -53,19 +53,6 @@ func TestClient_GetActiveEngine(t *testing.T) { }) } -func TestClient_GetEngines(t *testing.T) { - cfg := &LLMConfig{ - Backend: engine.ModeAuto, - APIKey: "sk-test", - CLI: "claude", - } - client := NewClient(WithConfig(cfg)) - engines := client.GetEngines() - - // Should have at least API engine (CLI might not be available) - assert.NotEmpty(t, engines) -} - func TestRequestBuilder(t *testing.T) { client := NewClient()