diff --git a/.githooks/commit-msg b/.githooks/commit-msg
new file mode 100755
index 0000000..df8064e
--- /dev/null
+++ b/.githooks/commit-msg
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Commit message hook: Validates conventional commit format
+# Format: type(scope): description
+
+set -e
+
+COMMIT_MSG_FILE="$1"
+COMMIT_MSG=$(cat "$COMMIT_MSG_FILE")
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Skip merge commits and fixup commits
+if echo "$COMMIT_MSG" | grep -qE '^(Merge|fixup!|squash!)'; then
+ exit 0
+fi
+
+# Conventional commit regex
+# Format: type(scope): description
+# type is required, scope is optional
+PATTERN='^(feat|fix|docs|style|refactor|perf|test|build|ci|chore|revert|security)(\([a-z0-9_-]+\))?: .{1,100}$'
+
+# Get first line
+FIRST_LINE=$(echo "$COMMIT_MSG" | head -n1)
+
+if echo "$FIRST_LINE" | grep -qE "$PATTERN"; then
+ exit 0
+else
+ echo -e "${RED}Invalid commit message format!${NC}"
+ echo ""
+ echo "Expected format: type(scope): description"
+ echo ""
+ echo "Valid types: feat, fix, docs, style, refactor, perf, test, build, ci, chore, revert, security"
+ echo "Valid scopes: ai, cli, core, tui, scanner, runbook, plugin, mcp, security, config, git, env, deps, ci, docs, release"
+ echo ""
+ echo "Examples:"
+ echo " feat(ai): add Azure OpenAI provider"
+ echo " fix(tui): resolve input handling issue"
+ echo " docs: update README with new features"
+ echo ""
+ echo "Your message: $FIRST_LINE"
+ exit 1
+fi
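
The hook's whole grammar lives in that one anchored regex. Here is a minimal Rust sketch of the same check, useful as a reference for which messages pass; the `regex` crate dependency is an assumption for illustration, since the hook itself only shells out to `grep -E`:

```rust
// Same pattern the commit-msg hook enforces, as testable Rust.
use regex::Regex;

fn is_conventional(first_line: &str) -> bool {
    Regex::new(
        r"^(feat|fix|docs|style|refactor|perf|test|build|ci|chore|revert|security)(\([a-z0-9_-]+\))?: .{1,100}$",
    )
    .expect("pattern is valid")
    .is_match(first_line)
}

fn main() {
    assert!(is_conventional("feat(ai): add Azure OpenAI provider"));
    assert!(is_conventional("docs: update README with new features"));
    assert!(!is_conventional("Added Azure provider")); // missing type prefix
    assert!(!is_conventional("feat(AI): uppercase scope")); // scope must be [a-z0-9_-]
}
```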
diff --git a/.githooks/install.sh b/.githooks/install.sh
new file mode 100755
index 0000000..cf1dd56
--- /dev/null
+++ b/.githooks/install.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# Git hooks installation script for Palrun
+#
+# This script installs the git hooks from .githooks/ to .git/hooks/
+# Run this once after cloning the repository.
+
+set -e
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+GIT_HOOKS_DIR="$REPO_ROOT/.git/hooks"
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+echo -e "${BLUE}Installing Palrun git hooks...${NC}"
+echo ""
+
+# Ensure .git/hooks directory exists
+if [ ! -d "$GIT_HOOKS_DIR" ]; then
+ echo -e "${RED}Error: .git/hooks directory not found${NC}"
+ echo "Are you in a git repository?"
+ exit 1
+fi
+
+# List of hooks to install
+HOOKS=("pre-commit" "pre-push" "commit-msg")
+
+for hook in "${HOOKS[@]}"; do
+ src="$SCRIPT_DIR/$hook"
+ dst="$GIT_HOOKS_DIR/$hook"
+
+ if [ -f "$src" ]; then
+        # Back up an existing hook unless it is already a symlink from a previous install
+ if [ -f "$dst" ] && [ ! -L "$dst" ]; then
+ echo -e "${YELLOW}Backing up existing $hook hook to $hook.backup${NC}"
+ mv "$dst" "$dst.backup"
+ fi
+
+ # Create symlink
+ ln -sf "$src" "$dst"
+ chmod +x "$src"
+ echo -e "${GREEN}✓${NC} Installed $hook"
+ else
+ echo -e "${YELLOW}⚠${NC} $hook hook not found in .githooks/"
+ fi
+done
+
+echo ""
+echo -e "${GREEN}Git hooks installed successfully!${NC}"
+echo ""
+echo "Installed hooks:"
+echo " pre-commit - Format check, clippy, build verification"
+echo " pre-push - Full test suite, security audit, license check"
+echo " commit-msg - Conventional commit format validation"
+echo ""
+echo "To skip hooks temporarily, use:"
+echo " git commit --no-verify"
+echo " git push --no-verify"
+echo ""
+
+# Check for optional tooling used by the hooks
+echo -e "${BLUE}Checking for optional tools...${NC}"
+
+check_tool() {
+ if command -v "$1" &> /dev/null; then
+ echo -e "${GREEN}✓${NC} $1 found"
+ return 0
+ else
+ echo -e "${YELLOW}⚠${NC} $1 not found - install with: $2"
+ return 1
+ fi
+}
+
+check_tool "cargo-audit" "cargo install cargo-audit"
+check_tool "cargo-deny" "cargo install cargo-deny"
+
+echo ""
+echo -e "${GREEN}Setup complete!${NC}"
diff --git a/.githooks/pre-commit b/.githooks/pre-commit
new file mode 100755
index 0000000..dacb6e5
--- /dev/null
+++ b/.githooks/pre-commit
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Pre-commit hook: Fast quality checks
+# Runs: format check, lint (fast mode), basic tests
+
+set -e
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+echo -e "${YELLOW}Running pre-commit checks...${NC}"
+
+# 1. Format check (fast)
+echo -n " Checking format... "
+if cargo fmt --all --check > /dev/null 2>&1; then
+ echo -e "${GREEN}OK${NC}"
+else
+ echo -e "${RED}FAILED${NC}"
+ echo ""
+ echo "Format issues found. Run 'cargo fmt' to fix."
+ exit 1
+fi
+
+# 2. Clippy lint (whole workspace; a warm cache keeps this fast even without per-file filtering)
+echo -n " Running clippy... "
+if cargo clippy --all-features --quiet -- -D clippy::correctness -D clippy::suspicious 2>/dev/null; then
+ echo -e "${GREEN}OK${NC}"
+else
+ echo -e "${RED}FAILED${NC}"
+ echo ""
+ echo "Clippy found issues. Run 'cargo clippy --all-features' to see details."
+ exit 1
+fi
+
+# 3. Quick compile check (catches type errors)
+echo -n " Checking build... "
+if cargo check --all-features --quiet 2>/dev/null; then
+ echo -e "${GREEN}OK${NC}"
+else
+ echo -e "${RED}FAILED${NC}"
+ echo ""
+ echo "Build check failed. Run 'cargo check --all-features' to see errors."
+ exit 1
+fi
+
+echo -e "${GREEN}All pre-commit checks passed!${NC}"
diff --git a/.githooks/pre-push b/.githooks/pre-push
new file mode 100755
index 0000000..d484001
--- /dev/null
+++ b/.githooks/pre-push
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Pre-push hook: Full quality + security checks
+# Runs: tests, security audit, license check
+
+set -e
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+echo -e "${YELLOW}Running pre-push checks (this may take a moment)...${NC}"
+
+# 1. Run tests
+echo -e "${BLUE}Running tests...${NC}"
+if cargo test --all-features --workspace --quiet 2>/dev/null; then
+ echo -e " Tests: ${GREEN}OK${NC}"
+else
+ echo -e " Tests: ${RED}FAILED${NC}"
+ echo ""
+ echo "Tests failed. Run 'cargo test --all-features' to see failures."
+ exit 1
+fi
+
+# 2. Security audit (if cargo-audit is installed)
+if command -v cargo-audit &> /dev/null; then
+ echo -n " Security audit... "
+ if cargo audit --quiet 2>/dev/null; then
+ echo -e "${GREEN}OK${NC}"
+ else
+ echo -e "${YELLOW}WARNINGS${NC} (check 'cargo audit' for details)"
+ # Don't fail on warnings - just inform
+ fi
+else
+ echo -e " Security audit: ${YELLOW}SKIPPED${NC} (install with: cargo install cargo-audit)"
+fi
+
+# 3. License/dependency check (if cargo-deny is installed)
+if command -v cargo-deny &> /dev/null; then
+ echo -n " License check... "
+ if cargo deny check licenses --quiet 2>/dev/null; then
+ echo -e "${GREEN}OK${NC}"
+ else
+ echo -e "${RED}FAILED${NC}"
+ echo ""
+ echo "License check failed. Run 'cargo deny check' for details."
+ exit 1
+ fi
+
+ echo -n " Dependency check... "
+ if cargo deny check bans --quiet 2>/dev/null; then
+ echo -e "${GREEN}OK${NC}"
+ else
+ echo -e "${YELLOW}WARNINGS${NC}"
+ # Don't fail - just inform
+ fi
+else
+ echo -e " License check: ${YELLOW}SKIPPED${NC} (install with: cargo install cargo-deny)"
+fi
+
+echo ""
+echo -e "${GREEN}All pre-push checks passed!${NC}"
diff --git a/.gitignore b/.gitignore
index 0f14168..d0abe6c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -75,3 +75,7 @@ Cargo.lock.bak
# Internal documentation (keep local)
/local-docs/
+
+# Claude Code local files
+CLAUDE.md
+.claude/
diff --git a/Cargo.lock b/Cargo.lock
index 913278c..b7a746a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -934,7 +934,7 @@ dependencies = [
"libc",
"option-ext",
"redox_users 0.5.2",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -1056,7 +1056,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
dependencies = [
"libc",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -1765,7 +1765,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46"
dependencies = [
"hermit-abi",
"libc",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -2138,7 +2138,7 @@ version = "0.50.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -2311,7 +2311,7 @@ dependencies = [
[[package]]
name = "palrun"
-version = "0.1.0-beta.1"
+version = "0.3.0"
dependencies = [
"anyhow",
"assert_cmd",
@@ -2343,6 +2343,7 @@ dependencies = [
"serde",
"serde_json",
"serde_yaml",
+ "serial_test",
"sha2",
"shellexpand",
"tempfile",
@@ -2637,7 +2638,7 @@ dependencies = [
"once_cell",
"socket2",
"tracing",
- "windows-sys 0.59.0",
+ "windows-sys 0.60.2",
]
[[package]]
@@ -2898,7 +2899,7 @@ dependencies = [
"errno",
"libc",
"linux-raw-sys 0.11.0",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -2957,12 +2958,27 @@ dependencies = [
"winapi-util",
]
+[[package]]
+name = "scc"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc"
+dependencies = [
+ "sdd",
+]
+
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
+[[package]]
+name = "sdd"
+version = "3.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca"
+
[[package]]
name = "semver"
version = "1.0.27"
@@ -3061,6 +3077,32 @@ dependencies = [
"unsafe-libyaml",
]
+[[package]]
+name = "serial_test"
+version = "3.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555"
+dependencies = [
+ "futures-executor",
+ "futures-util",
+ "log",
+ "once_cell",
+ "parking_lot",
+ "scc",
+ "serial_test_derive",
+]
+
+[[package]]
+name = "serial_test_derive"
+version = "3.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
[[package]]
name = "sha2"
version = "0.10.9"
@@ -3275,7 +3317,7 @@ dependencies = [
"getrandom 0.3.4",
"once_cell",
"rustix 1.1.3",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -4278,7 +4320,7 @@ version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
- "windows-sys 0.48.0",
+ "windows-sys 0.61.2",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 56f26d7..ce845f6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "palrun"
-version = "0.2.0-beta.2"
+version = "0.3.0"
edition = "2021"
rust-version = "1.82"
default-run = "palrun"
@@ -101,6 +101,9 @@ insta = { version = "1", features = ["yaml"] }
# Async Testing
tokio-test = "0.4"
+# Serial test execution for env var tests
+serial_test = "3"
+
# Benchmarking
criterion = { version = "0.5", features = ["html_reports"] }
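
The new `serial_test` dev-dependency exists because environment variables are process-global while `cargo test` runs tests on parallel threads. A minimal sketch of the pattern the Grok tests later in this diff rely on (names here are illustrative):

```rust
// Two tests touching the same env var race unless forced to run
// one at a time; serial_test's #[serial] attribute does exactly that.
#[cfg(test)]
mod tests {
    use serial_test::serial;

    #[test]
    #[serial(xai_env)] // all tests sharing the "xai_env" key serialize
    fn env_roundtrip() {
        std::env::set_var("XAI_API_KEY", "test-key");
        assert_eq!(std::env::var("XAI_API_KEY").unwrap(), "test-key");
        std::env::remove_var("XAI_API_KEY");
    }
}
```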
diff --git a/README.md b/README.md
index 483c40f..7f5f493 100644
--- a/README.md
+++ b/README.md
@@ -1,324 +1,336 @@
+
+
# PALRUN
-Project-aware command palette for your terminal with AI-powered intelligence.
+**Stop memorizing commands. Start shipping.**
+
+A blazing-fast command palette for your terminal with multi-provider AI intelligence.
+
+[![Crates.io](https://img.shields.io/crates/v/palrun.svg)](https://crates.io/crates/palrun)
+[![Downloads](https://img.shields.io/crates/d/palrun.svg)](https://crates.io/crates/palrun)
+[![CI](https://github.com/GLINCKER/palrun/actions/workflows/ci.yml/badge.svg)](https://github.com/GLINCKER/palrun/actions)
+[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
-[](https://github.com/GLINCKER/palrun/actions/workflows/ci.yml)
-[](https://github.com/GLINCKER/palrun/actions/workflows/release.yml)
-[](https://opensource.org/licenses/MIT)
-[](https://www.rust-lang.org)
-[](https://crates.io/crates/palrun)
+
+
+```bash
+brew install GLINCKER/palrun/palrun
+```
-## Why Palrun?
+**Works on Mac, Windows, and Linux.**
-Stop memorizing commands. Palrun automatically discovers every command available in your project and presents them in a blazing-fast fuzzy-searchable interface. Whether you're working with npm, cargo, make, docker, or any of 9+ supported project types, Palrun knows what you can run.
+
```
-┌─────────────────────────────────────────────────────────────────────────────┐
-│ PALRUN v0.1.0 │
-├─────────────────────────────────────────────────────────────────────────────┤
-│ │
-│ Project Scan ──► Command Discovery ──► Fuzzy Search ──► Execute │
-│ (9+ types) (nucleo) (context-aware) │
-│ │
-│ Cargo.toml ──► cargo build, test ──► "bui" ──► cargo build │
-│ package.json ──► npm run dev, test ──► "dev" ──► npm run dev │
-│ Makefile ──► make all, clean ──► "cle" ──► make clean │
-│ │
-└─────────────────────────────────────────────────────────────────────────────┘
+┌────────────────────────────────────────────────────────────────────┐
+│ PALRUN [rust] main ✓ │
+├────────────────────────────────────────────────────────────────────┤
+│ > build │
+├────────────────────────────────────────────────────────────────────┤
+│ → cargo build Build the project │
+│ cargo build --release Build optimized binary │
+│ npm run build Bundle frontend │
+│ make build Run makefile target │
+├────────────────────────────────────────────────────────────────────┤
+│ ↑↓ navigate ⏎ execute tab preview esc quit │
+└────────────────────────────────────────────────────────────────────┘
```
-## Features
+
+
+*"Finally stopped grepping through package.json to find scripts."*
+
+*"The AI diagnostics saved me 2 hours debugging a cryptic npm error."*
+
+*"Just type 3 letters and hit enter. That's it."*
+
+
+
+[Why Palrun](#why-palrun) · [Install](#install) · [How It Works](#how-it-works) · [AI Features](#ai-features) · [Commands](#commands)
-### Core Capabilities
+
-- **Project-Aware Discovery**: Automatically detects commands from 9+ project types
-- **Fuzzy Search**: Lightning-fast fuzzy matching powered by nucleo engine
-- **Context-Aware Sorting**: Commands sorted by proximity to your current directory
-- **Cross-Platform**: Works on macOS, Linux, and Windows
-- **Shell Integration**: Keyboard shortcuts for instant access
-- **TUI Interface**: Beautiful terminal UI with keyboard navigation
-- **Plugin System**: Extensible architecture for custom scanners
+---
-### Supported Project Types
+## Why Palrun
-| Project Type | Config Files | Commands Generated |
-|-------------|--------------|-------------------|
-| NPM/Yarn/PNPM/Bun | `package.json` | npm/yarn/pnpm/bun scripts |
-| Rust | `Cargo.toml` | cargo build, test, run, clippy |
-| Go | `go.mod` | go build, test, run |
-| Python | `pyproject.toml`, `requirements.txt` | pytest, pip, poetry, pdm |
-| Make | `Makefile` | make targets |
-| Task | `Taskfile.yml` | task commands |
-| Docker | `docker-compose.yml` | docker compose up/down/logs |
-| Nx | `nx.json` | nx build, serve, test |
-| Turborepo | `turbo.json` | turbo run tasks |
+Every project has commands scattered everywhere. npm scripts in package.json. Cargo commands in Cargo.toml. Make targets. Docker compose. Task runners. You end up:
-## Installation
+- Scrolling through 50 npm scripts to find the right one
+- Forgetting that obscure cargo command you used last week
+- Grepping through config files looking for targets
+- Context-switching to docs constantly
-### Using Cargo
+Palrun fixes this. It scans your project, finds every command, and gives you a fuzzy-searchable interface. Type 2-3 characters, hit enter, done.
+
+The AI features are optional but powerful — generate commands from natural language, explain what complex commands do, diagnose errors without leaving your terminal.
+
+---
+
+## Install
```bash
+# Homebrew (macOS/Linux) - Recommended
+brew install GLINCKER/palrun/palrun
+
+# Cargo
cargo install palrun
-```
-### From Source
+# NPM
+npm install -g @glincker/palrun
-```bash
-git clone https://github.com/GLINCKER/palrun.git
-cd palrun
-cargo install --path .
+# Download binary
+# https://github.com/GLINCKER/palrun/releases
```
-### Homebrew (macOS/Linux)
+Then just run:
```bash
-brew tap GLINCKER/tap
-brew install palrun
+palrun
```
-### NPM (Node.js users)
+---
-```bash
-npm install -g @glinr/palrun
-```
+## How It Works
-### Quick Install Script
+### 1. Auto-Discovery
-```bash
-curl -fsSL https://raw.githubusercontent.com/GLINCKER/palrun/main/scripts/install.sh | bash
-```
+Palrun scans your project and finds commands from:
-## Quick Start
+| Source | Files | What It Finds |
+|--------|-------|---------------|
+| **Node.js** | `package.json` | npm/yarn/pnpm/bun scripts |
+| **Rust** | `Cargo.toml` | cargo build, test, run, clippy |
+| **Go** | `go.mod` | go build, test, run |
+| **Python** | `pyproject.toml` | pytest, poetry, pdm commands |
+| **Make** | `Makefile` | All make targets |
+| **Docker** | `docker-compose.yml` | compose up/down/logs |
+| **Task** | `Taskfile.yml` | task commands |
+| **Monorepos** | `nx.json`, `turbo.json` | nx/turbo commands |
-### 1. Set Up Your Project
+### 2. Fuzzy Search
-Initialize Palrun in your project with intelligent detection:
+Type a few characters, palrun finds the match:
-```bash
-palrun setup
-```
+- `bui` → `cargo build`
+- `td` → `npm run test:debug`
+- `dcu` → `docker compose up`
-This will:
-- Detect your project type (Node.js, Rust, Python, etc.)
-- Create `.palrun.toml` with recommended settings
-- Generate `.palrun/runbooks/` with sample workflows
-- Suggest relevant configurations
+Powered by [nucleo](https://github.com/helix-editor/nucleo) — the same engine behind Helix editor.
-Options:
-```bash
-palrun setup --dry-run # Preview what would be created
-palrun setup --force # Overwrite existing files
-palrun setup --non-interactive # Use defaults without prompts
-```
+### 3. Context-Aware
-### 2. Interactive Mode
+Commands are ranked by proximity to your current directory. Working in `src/api/`? API-related commands appear first.
-Launch the command palette:
+---
-```bash
-palrun
-```
+## AI Features
-Use arrow keys to navigate, type to search, and press Enter to execute.
+Palrun supports multiple AI providers with automatic fallback:
-### List Commands
+| Provider | API Key Env Var | Best For |
+|----------|-----------------|----------|
+| **Claude** | `ANTHROPIC_API_KEY` | Complex reasoning |
+| **OpenAI** | `OPENAI_API_KEY` | Fast, general purpose |
+| **Azure OpenAI** | `AZURE_OPENAI_API_KEY` | Enterprise deployments |
+| **Grok** | `XAI_API_KEY` | Alternative option |
+| **Ollama** | None (local) | Offline, privacy |
-Show all discovered commands:
+### Generate Commands
```bash
-palrun list
+palrun ai "run tests with coverage"
+# → cargo test --all-features -- --nocapture
```
-Output as JSON:
+### Explain Commands
```bash
-palrun list --format json
+palrun ai explain "git rebase -i HEAD~5"
+# Explains what interactive rebase does
```
-Filter by source type:
+### Diagnose Errors
```bash
-palrun list --source cargo
-palrun list --source npm
+palrun ai diagnose "npm ERR! peer dep missing: react@18"
+# Suggests: npm install react@18 --save-peer
```
-### Scan Project
+### Configuration
-Preview what commands would be discovered:
+Set keys via environment variables or config file:
```bash
-palrun scan
-palrun scan --recursive
+# Environment (recommended)
+export ANTHROPIC_API_KEY="sk-ant-..."
+
+# Or in ~/.config/palrun/palrun.toml
+[ai.claude]
+api_key = "sk-ant-..."
```
-### Execute Directly
+---
-Run a command by name:
+## Commands
+
+### Interactive Mode
```bash
-palrun exec build
-palrun exec "npm test"
+palrun # Launch TUI
+palrun list # List all commands
+palrun list --json # JSON output for scripting
```
-Skip confirmation:
+### Direct Execution
```bash
-palrun exec build -y
+palrun exec build # Run by name
+palrun exec "npm test" # Run specific command
+palrun exec build -y # Skip confirmation
```
-## Shell Integration
+### Project Setup
+
+```bash
+palrun setup # Initialize for your project
+palrun setup --dry-run # Preview changes
+```
-Add to your shell configuration for keyboard shortcuts:
+### IDE Integration
-### Bash
+Generate slash commands for AI coding tools:
```bash
-eval "$(palrun init bash)"
+palrun slash generate claude # For Claude Code
+palrun slash generate cursor # For Cursor
+palrun slash generate aider # For Aider
```
-### Zsh
+---
+
+## Shell Integration
+
+Add keyboard shortcuts to your shell:
```bash
-eval "$(palrun init zsh)"
-```
+# Bash
+eval "$(palrun init bash)"
-### Fish
+# Zsh
+eval "$(palrun init zsh)"
-```fish
+# Fish
palrun init fish | source
-```
-
-### PowerShell
-```powershell
+# PowerShell
palrun init powershell | Invoke-Expression
```
-## Keyboard Shortcuts
-
-| Key | Action |
-|-----|--------|
-| `Enter` | Execute selected command |
-| `Up/Down` | Navigate command list |
-| `Ctrl+N/P` | Navigate (vim-style) |
-| `Ctrl+U` | Clear search input |
-| `Escape` | Quit |
-| `Tab` | Toggle preview |
-| `Ctrl+Space` | Toggle context-aware filtering |
+---
## Configuration
-Configuration file location: `~/.config/palrun/config.toml`
+Create `~/.config/palrun/palrun.toml`:
```toml
-# Theme settings
-[theme]
-highlight_color = "cyan"
+[general]
+confirm_dangerous = true
-# Shell settings
-[shell]
-default = "bash"
-
-# Scanner settings
-[scanner]
-exclude_patterns = ["node_modules", "target", ".git"]
-```
+[ui]
+theme = "default"
+show_preview = true
+show_icons = true
-Show config path:
+[ai]
+provider = "claude"
+fallback_enabled = true
-```bash
-palrun config --path
+[ai.claude]
+model = "claude-sonnet-4-20250514"
```
-## Shell Completions
-
-Generate shell completions:
-
-```bash
-# Bash
-palrun completions bash > /etc/bash_completion.d/palrun
-
-# Zsh
-palrun completions zsh > ~/.zfunc/_palrun
+For API keys, use environment variables or the system config file — never commit secrets to your repo.
-# Fish
-palrun completions fish > ~/.config/fish/completions/palrun.fish
-```
+---
-## Plugin System
+## Why Not Just Use...
-Palrun supports custom scanners through a plugin architecture. Example plugins are included:
+| Alternative | Palrun Advantage |
+|-------------|------------------|
+| `cat package.json \| jq` | One command, fuzzy search, instant |
+| fzf + custom scripts | Zero setup, auto-discovers everything |
+| IDE command palette | Works in terminal, any project type |
+| Memorizing commands | You have better things to remember |
-- **cargo-scanner**: Enhanced Cargo.toml scanning
-- **composer-scanner**: PHP Composer support
-- **gradle-scanner**: Gradle build tool support
-- **maven-scanner**: Maven build tool support
-- **poetry-scanner**: Python Poetry support
+**For AI tools:** Pre-computed command index saves ~1500 tokens per query. AI doesn't need to scan your project.
-See `examples/plugins/` for implementation details.
+---
## Development
-### Building
-
```bash
-cargo build
+# Build
cargo build --release
-```
-
-### Testing
-```bash
-cargo test
+# Test (527 tests)
cargo test --all-features
+
+# Run locally
+cargo run -- list
```
-### Running
+### Git Hooks
+
+Local quality gates (one-time install):
```bash
-cargo run
-cargo run -- list
-cargo run -- scan
+./.githooks/install.sh
+# pre-commit: format, clippy, build
+# pre-push: tests, security audit
+# commit-msg: conventional commits
```
-## Features Status
-
-### Completed
-- [x] AI-powered command suggestions (Claude, OpenAI, Ollama)
-- [x] Runbook system for team workflows
-- [x] Command history and analytics
-- [x] Git integration (branch switching, status)
-- [x] Environment management (nvm, pyenv, etc.)
-- [x] Plugin system with SDK
-- [x] MCP (Model Context Protocol) integration
-- [x] Advanced search and filtering
-- [x] Theme support (multiple built-in themes)
-
-### Coming Soon
-- [ ] Cloud sync and team collaboration
+---
+
+## Roadmap
+
+- [x] Multi-provider AI (Claude, OpenAI, Azure, Grok, Ollama)
+- [x] Agentic workflow system
+- [x] IDE slash command generation
+- [x] Hierarchical config with secrets management
+- [ ] MCP server mode for AI agents
+- [ ] Chat history and session persistence
+- [ ] Streaming AI responses
- [ ] VS Code extension
-- [ ] Signed binaries for macOS/Windows
-- [ ] More IDE integrations
+
+---
## Contributing
-We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
+Contributions welcome! See [CONTRIBUTING.md](CONTRIBUTING.md).
+
+```bash
+git clone https://github.com/GLINCKER/palrun.git
+cd palrun
+cargo test
+cargo run
+```
-- Bug reports and fixes
-- New project type scanners
-- Performance improvements
-- Documentation updates
+---
## License
-MIT License - free for personal and commercial use.
+MIT License — free for personal and commercial use.
-See [LICENSE](LICENSE) for details.
+---
-## Support
+
-- Documentation: [GitHub Wiki](https://github.com/GLINCKER/palrun/wiki)
-- Issues: [GitHub Issues](https://github.com/GLINCKER/palrun/issues)
-- Discussions: [GitHub Discussions](https://github.com/GLINCKER/palrun/discussions)
+**Your terminal has hundreds of commands. Palrun finds the right one instantly.**
+
+[GitHub](https://github.com/GLINCKER/palrun) · [Issues](https://github.com/GLINCKER/palrun/issues) · [Discussions](https://github.com/GLINCKER/palrun/discussions)
Built by [GLINCKER](https://glincker.com)
+
+
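
The README credits nucleo for the fuzzy matching. As a rough sketch of what that engine does with a query like `bui`, assuming the `nucleo-matcher` crate's `Pattern`/`Matcher` API; palrun's actual integration may differ:

```rust
use nucleo_matcher::{
    pattern::{CaseMatching, Normalization, Pattern},
    Config, Matcher,
};

fn main() {
    let mut matcher = Matcher::new(Config::DEFAULT);
    let candidates = vec!["cargo build", "cargo build --release", "npm run build"];
    // "bui" matches all three; results come back ranked best-first.
    let ranked = Pattern::parse("bui", CaseMatching::Ignore, Normalization::Smart)
        .match_list(candidates, &mut matcher);
    for (command, score) in ranked {
        println!("{score:>4}  {command}");
    }
}
```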
diff --git a/examples/config/PALRUN.md b/examples/config/PALRUN.md
new file mode 100644
index 0000000..e1d90ec
--- /dev/null
+++ b/examples/config/PALRUN.md
@@ -0,0 +1,72 @@
+# PALRUN.md - Project Rules Example
+
+This file is read by Palrun's AI when you're working in this project.
+Place it in your project root as `PALRUN.md` or `.palrun/agent.md`.
+
+## Project Overview
+
+Describe your project here so the AI understands the context:
+
+- **Project Name**: My Awesome App
+- **Tech Stack**: Node.js, TypeScript, React, PostgreSQL
+- **Build System**: npm
+
+## Coding Standards
+
+### Style Guide
+
+- Use TypeScript strict mode
+- Prefer `const` over `let`
+- Use async/await over raw promises
+- Use functional components with hooks in React
+
+### File Organization
+
+```
+src/
+ components/ # React components
+ hooks/ # Custom hooks
+ services/ # API services
+ utils/ # Utility functions
+ types/ # TypeScript types
+```
+
+## Common Commands
+
+The AI can help you run these commands:
+
+- `npm run dev` - Start development server
+- `npm run build` - Build for production
+- `npm test` - Run tests
+- `npm run lint` - Check code quality
+
+## Dependencies
+
+Key dependencies to be aware of:
+
+- React 18 with concurrent features
+- TanStack Query for data fetching
+- Zod for validation
+- Tailwind CSS for styling
+
+## AI Behavior Guidelines
+
+When using AI in this project:
+
+1. **Testing**: Always suggest running tests after code changes
+2. **Types**: Prefer strict TypeScript types, avoid `any`
+3. **Commits**: Use conventional commits format (feat:, fix:, etc.)
+4. **Security**: Never commit secrets, use environment variables
+
+## Environment Variables
+
+Required environment variables (add to `.env.local`):
+
+```env
+DATABASE_URL=postgresql://...
+API_KEY=your-api-key
+```
+
+## Additional Notes
+
+Add any project-specific notes here that would help the AI assist you better.
diff --git a/examples/config/ai-claude.toml b/examples/config/ai-claude.toml
new file mode 100644
index 0000000..924f9f6
--- /dev/null
+++ b/examples/config/ai-claude.toml
@@ -0,0 +1,52 @@
+# Palrun AI Configuration - Claude (Anthropic)
+#
+# To use Claude as your AI provider:
+# 1. Get an API key from https://console.anthropic.com/
+# 2. Set the environment variable: export ANTHROPIC_API_KEY="your-key-here"
+# 3. Copy this file to ~/.config/palrun/palrun.toml or your project root
+#
+# Claude models available:
+# - claude-3-opus-20240229 (most capable, highest cost)
+# - claude-3-sonnet-20240229 (balanced)
+# - claude-3-haiku-20240307 (fastest, lowest cost)
+# - claude-3-5-sonnet-20241022 (latest, recommended)
+
+[ai]
+enabled = true
+provider = "claude"
+
+# Optional: specify model (defaults to claude-3-5-sonnet)
+# model = "claude-3-5-sonnet-20241022"
+
+# Note: Set ANTHROPIC_API_KEY in your environment
+# Example: export ANTHROPIC_API_KEY="sk-ant-api03-..."
+
+# ============================================================================
+# MCP Servers for Claude
+# ============================================================================
+# MCP (Model Context Protocol) allows Claude to use external tools
+# See: https://docs.anthropic.com/claude/docs/model-context-protocol
+
+[mcp]
+enabled = true
+
+# File system access (read/write files)
+[[mcp.servers]]
+name = "filesystem"
+command = "npx"
+args = ["-y", "@anthropic/mcp-server-filesystem", "/path/to/your/project"]
+
+# Git operations
+[[mcp.servers]]
+name = "git"
+command = "npx"
+args = ["-y", "@anthropic/mcp-server-git"]
+cwd = "/path/to/your/repo"
+
+# GitHub integration (issues, PRs, etc.)
+# [[mcp.servers]]
+# name = "github"
+# command = "npx"
+# args = ["-y", "@anthropic/mcp-server-github"]
+# [mcp.servers.env]
+# GITHUB_TOKEN = "ghp_..."
diff --git a/examples/config/ai-openai.toml b/examples/config/ai-openai.toml
new file mode 100644
index 0000000..f75466a
--- /dev/null
+++ b/examples/config/ai-openai.toml
@@ -0,0 +1,49 @@
+# Palrun AI Configuration - OpenAI / GPT-4
+#
+# To use OpenAI as your AI provider:
+# 1. Get an API key from https://platform.openai.com/api-keys
+# 2. Set the environment variable: export OPENAI_API_KEY="your-key-here"
+# 3. Copy this file to ~/.config/palrun/palrun.toml or your project root
+#
+# OpenAI models available:
+# - gpt-4o (most capable, multimodal)
+# - gpt-4o-mini (faster, cheaper)
+# - gpt-4-turbo (GPT-4 with better instruction following)
+# - gpt-3.5-turbo (fastest, most affordable)
+# - o1-preview (reasoning model)
+# - o1-mini (faster reasoning)
+
+[ai]
+enabled = true
+provider = "openai"
+
+# Optional: specify model (defaults to gpt-4o)
+# model = "gpt-4o"
+
+# Note: Set OPENAI_API_KEY in your environment
+# Example: export OPENAI_API_KEY="sk-proj-..."
+
+# ============================================================================
+# Azure OpenAI Configuration
+# ============================================================================
+# To use Azure OpenAI instead:
+# 1. Set AZURE_OPENAI_API_KEY
+# 2. Set AZURE_OPENAI_ENDPOINT (e.g., "https://your-resource.openai.azure.com")
+# 3. Set AZURE_OPENAI_DEPLOYMENT_NAME
+
+# [ai]
+# enabled = true
+# provider = "azure-openai"
+# model = "gpt-4o" # Your deployment name
+
+# ============================================================================
+# Grok (xAI) Configuration
+# ============================================================================
+# To use Grok:
+# 1. Get an API key from https://console.x.ai/
+# 2. Set: export XAI_API_KEY="your-key-here"
+
+# [ai]
+# enabled = true
+# provider = "grok"
+# model = "grok-beta"
diff --git a/examples/config/palrun-secrets.toml.example b/examples/config/palrun-secrets.toml.example
new file mode 100644
index 0000000..be597c4
--- /dev/null
+++ b/examples/config/palrun-secrets.toml.example
@@ -0,0 +1,83 @@
+# Palrun Secrets Configuration
+#
+# COPY THIS FILE TO: ~/.config/palrun/palrun.toml
+# OR TO YOUR PROJECT: .palrun.local.toml (gitignored)
+#
+# DO NOT commit this file with real API keys!
+# This file is for your personal machine only.
+
+# ============================================================================
+# AI Provider API Keys
+# ============================================================================
+# Only configure the providers you want to use.
+# Environment variables take precedence over config file values.
+
+# Claude (Anthropic)
+# Get your key at: https://console.anthropic.com/
+[ai.claude]
+api_key = "sk-ant-api03-YOUR-KEY-HERE"
+model = "claude-sonnet-4-20250514" # or claude-3-5-haiku-20241022 for cheaper
+
+# OpenAI
+# Get your key at: https://platform.openai.com/api-keys
+[ai.openai]
+api_key = "sk-YOUR-KEY-HERE"
+model = "gpt-4o" # or gpt-4o-mini for cheaper
+
+# Azure OpenAI
+# Get credentials from Azure Portal > Your OpenAI Resource > Keys and Endpoint
+[ai.azure]
+endpoint = "https://YOUR-RESOURCE-NAME.openai.azure.com"
+api_key = "YOUR-AZURE-KEY-HERE"
+deployment = "gpt-4" # Your deployment name
+api_version = "2024-02-01"
+
+# Grok (xAI)
+# Get your key at: https://console.x.ai/
+[ai.grok]
+api_key = "xai-YOUR-KEY-HERE"
+model = "grok-beta"
+
+# Ollama (Local - no API key needed)
+[ai.ollama]
+base_url = "http://localhost:11434"
+model = "llama3.2" # or qwen2.5-coder, codellama, etc.
+
+# ============================================================================
+# Provider Selection
+# ============================================================================
+[ai]
+# Default provider (claude, openai, azure, grok, ollama)
+provider = "claude"
+
+# Enable automatic fallback if primary provider fails
+fallback_enabled = true
+
+# Fallback order (tries each until one works)
+fallback_chain = ["claude", "openai", "grok", "ollama"]
+
+# ============================================================================
+# Cost Controls (Optional)
+# ============================================================================
+[ai.budget]
+# Daily spending limit in USD (0 = unlimited)
+daily_limit = 5.00
+
+# Warn when approaching limit (percentage)
+warn_at_percent = 80
+
+# Preferred model for quick tasks (saves money)
+quick_model = "gpt-4o-mini" # or claude-3-5-haiku
+
+# ============================================================================
+# External Service Tokens (Optional)
+# ============================================================================
+[tokens]
+# GitHub Personal Access Token (for issues, PRs)
+# github = "ghp_YOUR-TOKEN-HERE"
+
+# Linear API Key (for issue tracking)
+# linear = "lin_api_YOUR-KEY-HERE"
+
+# Slack Webhook (for notifications)
+# slack_webhook = "https://hooks.slack.com/services/..."
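
The comment above states that environment variables take precedence over config file values. A sketch of that resolution order for one provider; `config_api_key` stands in for whatever palrun parsed out of this file, and the real logic lives in palrun's config module:

```rust
fn resolve_claude_key(config_api_key: Option<String>) -> Option<String> {
    // 1. The environment wins over anything in palrun.toml.
    if let Ok(key) = std::env::var("ANTHROPIC_API_KEY") {
        return Some(key);
    }
    // 2. Otherwise fall back to the [ai.claude] api_key from the config file.
    config_api_key
}

fn main() {
    let from_file = Some("sk-ant-from-config".to_string());
    println!("{:?}", resolve_claude_key(from_file));
}
```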
diff --git a/examples/config/palrun.toml b/examples/config/palrun.toml
new file mode 100644
index 0000000..40ea8d3
--- /dev/null
+++ b/examples/config/palrun.toml
@@ -0,0 +1,179 @@
+# Palrun Configuration Example
+# Copy this file to ~/.config/palrun/palrun.toml or to your project root
+
+# ============================================================================
+# General Settings
+# ============================================================================
+[general]
+# Show hidden commands (those starting with _)
+show_hidden = false
+
+# Ask for confirmation before running dangerous commands
+confirm_dangerous = true
+
+# Maximum number of history entries to keep
+max_history = 1000
+
+# Default shell (optional, defaults to system shell)
+# shell = "/bin/zsh"
+
+# ============================================================================
+# UI Settings
+# ============================================================================
+[ui]
+# Color theme: default, dracula, nord, solarized-dark, gruvbox, monokai
+theme = "default"
+
+# Show command preview panel
+show_preview = true
+
+# Show source icons (npm, cargo, make, etc.)
+show_icons = true
+
+# Maximum commands to display in the list
+max_display = 20
+
+# Enable mouse support
+mouse = true
+
+# Custom color overrides (hex format)
+# [ui.custom_colors]
+# primary = "#61afef"
+# secondary = "#98c379"
+# accent = "#c678dd"
+# highlight = "#e5c07b"
+# text = "#abb2bf"
+# text_muted = "#5c6370"
+# text_dim = "#4b5263"
+# background = "#282c34"
+# surface = "#21252b"
+# border = "#3e4451"
+# success = "#98c379"
+# error = "#e06c75"
+# warning = "#e5c07b"
+
+# ============================================================================
+# Scanner Settings
+# ============================================================================
+[scanner]
+# Additional paths to scan for commands
+additional_paths = []
+
+# File patterns to ignore
+ignore_patterns = ["node_modules", "target", ".git", "build"]
+
+# Enable specific scanners
+enable_npm = true
+enable_cargo = true
+enable_make = true
+enable_composer = true
+enable_gradle = true
+enable_maven = true
+enable_poetry = true
+enable_docker = true
+enable_just = true
+enable_deno = true
+enable_bun = true
+enable_custom = true
+
+# ============================================================================
+# AI Settings (requires --features ai)
+# ============================================================================
+[ai]
+# Enable AI features
+enabled = true
+
+# Default AI provider: "ollama", "claude", "openai", "azure", "grok"
+# API keys should be in ~/.config/palrun/palrun.toml or .palrun.local.toml
+# NOT in this file (which may be committed to git)
+provider = "ollama"
+
+# Enable automatic fallback if primary provider fails
+fallback_enabled = true
+
+# Ollama settings (local LLM, no API key needed)
+[ai.ollama]
+base_url = "http://localhost:11434"
+model = "llama3.2"
+
+# NOTE: For API keys, create one of these files:
+# - ~/.config/palrun/palrun.toml (system-wide, recommended)
+# - .palrun.local.toml (project-local, gitignored)
+#
+# See examples/config/palrun-secrets.toml.example for the format
+
+# ============================================================================
+# Keybindings
+# ============================================================================
+[keys]
+quit = "esc"
+select = "enter"
+up = "up"
+down = "down"
+clear = "ctrl+u"
+favorite = "ctrl+s"
+background = "ctrl+b"
+multi_select = "ctrl+space"
+help = "?"
+history = "ctrl+h"
+analytics = "ctrl+g"
+palette = "ctrl+p"
+ai_toggle = "ctrl+t"
+
+# ============================================================================
+# Git Hooks (requires --features git)
+# ============================================================================
+[hooks]
+# Enable git hooks integration
+enabled = true
+
+# Auto-commit after successful hook completion
+auto_commit = false
+
+# ============================================================================
+# Command Aliases
+# ============================================================================
+# Define shortcuts for frequently used commands
+
+[[aliases]]
+name = "build"
+command = "npm run build"
+description = "Build the project"
+
+[[aliases]]
+name = "test"
+command = "npm test"
+description = "Run tests"
+
+[[aliases]]
+name = "lint"
+command = "npm run lint"
+description = "Run linter"
+
+# ============================================================================
+# MCP (Model Context Protocol) Servers
+# ============================================================================
+[mcp]
+# Enable MCP for AI agent tools
+enabled = false
+
+# Example: File system server
+# [[mcp.servers]]
+# name = "filesystem"
+# command = "npx"
+# args = ["-y", "@anthropic/mcp-server-filesystem", "/path/to/allowed/dir"]
+
+# Example: Git server
+# [[mcp.servers]]
+# name = "git"
+# command = "npx"
+# args = ["-y", "@anthropic/mcp-server-git"]
+# cwd = "/path/to/repo"
+
+# Example: GitHub server
+# [[mcp.servers]]
+# name = "github"
+# command = "npx"
+# args = ["-y", "@modelcontextprotocol/server-github"]
+# [mcp.servers.env]
+# GITHUB_TOKEN = "your-github-token"
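
A sketch of how a table like `[ai]` above typically maps onto Rust types via serde and the `toml` crate. The struct and field names here are illustrative, not palrun's actual config types:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct AiConfig {
    enabled: bool,
    provider: String,
    fallback_enabled: bool,
}

fn main() -> Result<(), toml::de::Error> {
    let raw = r#"
        enabled = true
        provider = "ollama"
        fallback_enabled = true
    "#;
    let ai: AiConfig = toml::from_str(raw)?;
    println!("{ai:?}"); // AiConfig { enabled: true, provider: "ollama", .. }
    Ok(())
}
```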
diff --git a/src/ai/agent.rs b/src/ai/agent.rs
index c3e5383..87019b3 100644
--- a/src/ai/agent.rs
+++ b/src/ai/agent.rs
@@ -387,13 +387,9 @@ mod tests {
#[test]
fn test_build_system_prompt() {
- let context = ProjectContext {
- project_name: "my-app".to_string(),
- project_type: "node".to_string(),
- available_commands: vec!["npm run build".to_string()],
- current_directory: PathBuf::from("/project"),
- recent_commands: vec![],
- };
+ let mut context = ProjectContext::new("my-app", PathBuf::from("/project"));
+ context.project_type = "node".to_string();
+ context.available_commands = vec!["npm run build".to_string()];
let tools = vec![AgentTool {
name: "read_file".to_string(),
diff --git a/src/ai/azure.rs b/src/ai/azure.rs
new file mode 100644
index 0000000..e2a6d3b
--- /dev/null
+++ b/src/ai/azure.rs
@@ -0,0 +1,261 @@
+//! Azure OpenAI API integration.
+//!
+//! Implements the AIProvider trait for Azure OpenAI deployments.
+
+use async_trait::async_trait;
+use reqwest::Client;
+use serde::{Deserialize, Serialize};
+
+use super::{AIProvider, ProjectContext};
+
+/// Azure OpenAI API provider.
+pub struct AzureOpenAIProvider {
+ client: Client,
+ endpoint: String,
+ api_key: String,
+ deployment: String,
+ api_version: String,
+}
+
+impl AzureOpenAIProvider {
+ /// Create a new Azure OpenAI provider from config or environment variables.
+ ///
+ /// Environment variables:
+ /// - AZURE_OPENAI_ENDPOINT
+ /// - AZURE_OPENAI_API_KEY
+ /// - AZURE_OPENAI_DEPLOYMENT
+    pub fn new() -> anyhow::Result<Self> {
+ let endpoint = std::env::var("AZURE_OPENAI_ENDPOINT")
+ .map_err(|_| anyhow::anyhow!("AZURE_OPENAI_ENDPOINT not set"))?;
+ let api_key = std::env::var("AZURE_OPENAI_API_KEY")
+ .map_err(|_| anyhow::anyhow!("AZURE_OPENAI_API_KEY not set"))?;
+ let deployment = std::env::var("AZURE_OPENAI_DEPLOYMENT")
+ .map_err(|_| anyhow::anyhow!("AZURE_OPENAI_DEPLOYMENT not set"))?;
+
+ Ok(Self {
+ client: Client::new(),
+ endpoint,
+ api_key,
+ deployment,
+ api_version: "2024-02-01".to_string(),
+ })
+ }
+
+ /// Create from explicit config values.
+ pub fn from_config(
+        endpoint: impl Into<String>,
+        api_key: impl Into<String>,
+        deployment: impl Into<String>,
+ ) -> Self {
+ Self {
+ client: Client::new(),
+ endpoint: endpoint.into(),
+ api_key: api_key.into(),
+ deployment: deployment.into(),
+ api_version: "2024-02-01".to_string(),
+ }
+ }
+
+ /// Set the API version.
+    pub fn with_api_version(mut self, version: impl Into<String>) -> Self {
+ self.api_version = version.into();
+ self
+ }
+
+ /// Make a request to the Azure OpenAI API.
+    async fn request(&self, system: &str, user_message: &str) -> anyhow::Result<String> {
+ let request = AzureOpenAIRequest {
+ messages: vec![
+ ChatMessage { role: "system".to_string(), content: system.to_string() },
+ ChatMessage { role: "user".to_string(), content: user_message.to_string() },
+ ],
+ max_tokens: Some(1024),
+ temperature: Some(0.7),
+ };
+
+ // Azure OpenAI URL format:
+ // {endpoint}/openai/deployments/{deployment}/chat/completions?api-version={api_version}
+ let url = format!(
+ "{}/openai/deployments/{}/chat/completions?api-version={}",
+ self.endpoint.trim_end_matches('/'),
+ self.deployment,
+ self.api_version
+ );
+
+ let response = self
+ .client
+ .post(&url)
+ .header("api-key", &self.api_key) // Azure uses api-key header, not Bearer
+ .header("Content-Type", "application/json")
+ .json(&request)
+ .send()
+ .await?;
+
+ if !response.status().is_success() {
+ let status = response.status();
+ let body = response.text().await.unwrap_or_default();
+ anyhow::bail!("Azure OpenAI API error ({}): {}", status, body);
+ }
+
+ let response: AzureOpenAIResponse = response.json().await?;
+
+ response
+ .choices
+ .first()
+ .map(|c| c.message.content.clone())
+ .ok_or_else(|| anyhow::anyhow!("No response from Azure OpenAI"))
+ }
+}
+
+#[async_trait]
+impl AIProvider for AzureOpenAIProvider {
+ async fn generate_command(
+ &self,
+ prompt: &str,
+ context: &ProjectContext,
+    ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Your task is to generate the exact shell command the user needs.
+
+Current directory: {}
+Project type: {}
+Available commands: {}
+
+Rules:
+1. Output ONLY the command, nothing else
+2. Use the correct package manager for this project
+3. If multiple commands are needed, join with && or ;
+4. Never explain, just output the command",
+ context.current_directory.display(),
+ context.project_type,
+ context.available_commands.join(", ")
+ );
+
+ self.request(&system, prompt).await
+ }
+
+ async fn explain_command(
+ &self,
+ command: &str,
+ context: &ProjectContext,
+    ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Explain what this command does in plain English.
+
+Current directory: {}
+Project type: {}
+
+Be concise but thorough. Explain each part of the command.",
+ context.current_directory.display(),
+ context.project_type
+ );
+
+ self.request(&system, &format!("Explain: {}", command)).await
+ }
+
+ async fn diagnose_error(
+ &self,
+ command: &str,
+ error: &str,
+ context: &ProjectContext,
+    ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Diagnose why this command failed and suggest a fix.
+
+Current directory: {}
+Project type: {}
+
+Be concise. Focus on the most likely cause and solution.",
+ context.current_directory.display(),
+ context.project_type
+ );
+
+ let user_message = format!("Command: {}\n\nError:\n{}", command, error);
+
+ self.request(&system, &user_message).await
+ }
+
+ fn name(&self) -> &str {
+ "azure"
+ }
+
+ async fn is_available(&self) -> bool {
+ // Check if we can reach the API by making a simple request
+ // Azure doesn't have a /models endpoint like OpenAI, so we just check connectivity
+ let url = format!(
+ "{}/openai/deployments?api-version={}",
+ self.endpoint.trim_end_matches('/'),
+ self.api_version
+ );
+
+ let response = self
+ .client
+ .get(&url)
+ .header("api-key", &self.api_key)
+ .timeout(std::time::Duration::from_secs(5))
+ .send()
+ .await;
+
+ // Accept 200 OK or 404 (deployment list may not be accessible)
+ // The key thing is the API responds, not a network error
+ response.map(|r| r.status().is_success() || r.status().as_u16() == 404).unwrap_or(false)
+ }
+}
+
+// Request/Response types
+
+#[derive(Debug, Serialize)]
+struct AzureOpenAIRequest {
+    messages: Vec<ChatMessage>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    max_tokens: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    temperature: Option<f32>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct ChatMessage {
+ role: String,
+ content: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct AzureOpenAIResponse {
+    choices: Vec<Choice>,
+}
+
+#[derive(Debug, Deserialize)]
+struct Choice {
+ message: ChatMessage,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_azure_provider_from_config() {
+ let provider = AzureOpenAIProvider::from_config(
+ "https://my-resource.openai.azure.com",
+ "test-key",
+ "gpt-4",
+ );
+ assert_eq!(provider.endpoint, "https://my-resource.openai.azure.com");
+ assert_eq!(provider.deployment, "gpt-4");
+ assert_eq!(provider.api_version, "2024-02-01");
+ }
+
+ #[test]
+ fn test_azure_provider_with_api_version() {
+ let provider = AzureOpenAIProvider::from_config(
+ "https://my-resource.openai.azure.com",
+ "test-key",
+ "gpt-4",
+ )
+ .with_api_version("2024-06-01");
+ assert_eq!(provider.api_version, "2024-06-01");
+ }
+}
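
A usage sketch for the provider above, assuming `AzureOpenAIProvider`, `ProjectContext`, and the `AIProvider` trait are in scope. The endpoint and deployment values are placeholders; with these inputs, `request` posts to `{endpoint}/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01`:

```rust
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let provider = AzureOpenAIProvider::from_config(
        "https://my-resource.openai.azure.com", // placeholder resource
        std::env::var("AZURE_OPENAI_API_KEY")?,
        "gpt-4", // deployment name, not a model name
    )
    .with_api_version("2024-06-01");

    let context = ProjectContext::new("my-app", std::path::PathBuf::from("."));
    let command = provider.generate_command("run the tests", &context).await?;
    println!("{command}");
    Ok(())
}
```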
diff --git a/src/ai/claude.rs b/src/ai/claude.rs
index a947fdc..3a4f631 100644
--- a/src/ai/claude.rs
+++ b/src/ai/claude.rs
@@ -188,13 +188,9 @@ mod tests {
#[test]
fn test_project_context_creation() {
- let context = ProjectContext {
- project_name: "test".to_string(),
- project_type: "node".to_string(),
- available_commands: vec!["npm run build".to_string()],
- current_directory: PathBuf::from("."),
- recent_commands: vec![],
- };
+ let mut context = ProjectContext::new("test", PathBuf::from("."));
+ context.project_type = "node".to_string();
+ context.available_commands = vec!["npm run build".to_string()];
assert_eq!(context.project_name, "test");
}
diff --git a/src/ai/context.rs b/src/ai/context.rs
index aeae015..c5db46f 100644
--- a/src/ai/context.rs
+++ b/src/ai/context.rs
@@ -21,17 +21,38 @@ pub struct ProjectContext {
/// Recent commands run in this project
pub recent_commands: Vec<String>,
+
+ /// Current date (YYYY-MM-DD)
+ pub current_date: String,
+
+ /// Current time (HH:MM)
+ pub current_time: String,
+
+ /// Git branch name (if in a git repo)
+    pub git_branch: Option<String>,
+
+ /// Git status summary (e.g., "3 modified, 2 untracked")
+    pub git_status: Option<String>,
+
+ /// Whether the repo has uncommitted changes
+ pub git_dirty: bool,
}
impl ProjectContext {
/// Create a new project context.
pub fn new(project_name: impl Into<String>, current_directory: PathBuf) -> Self {
+ let now = chrono::Local::now();
Self {
project_name: project_name.into(),
project_type: "unknown".to_string(),
available_commands: Vec::new(),
current_directory,
recent_commands: Vec::new(),
+ current_date: now.format("%Y-%m-%d").to_string(),
+ current_time: now.format("%H:%M").to_string(),
+ git_branch: None,
+ git_status: None,
+ git_dirty: false,
}
}
@@ -46,9 +67,67 @@ impl ProjectContext {
// Detect project type
context.project_type = detect_project_type(&cwd);
+ // Get git info if available
+ context.populate_git_info(&cwd);
+
Ok(context)
}
+ /// Populate git information from the current directory.
+ fn populate_git_info(&mut self, path: &PathBuf) {
+ // Try to get git branch
+ if let Ok(output) = std::process::Command::new("git")
+ .args(["rev-parse", "--abbrev-ref", "HEAD"])
+ .current_dir(path)
+ .output()
+ {
+ if output.status.success() {
+ let branch = String::from_utf8_lossy(&output.stdout).trim().to_string();
+ if !branch.is_empty() {
+ self.git_branch = Some(branch);
+ }
+ }
+ }
+
+ // Try to get git status
+ if let Ok(output) = std::process::Command::new("git")
+ .args(["status", "--porcelain"])
+ .current_dir(path)
+ .output()
+ {
+ if output.status.success() {
+ let status = String::from_utf8_lossy(&output.stdout);
+ let lines: Vec<&str> = status.lines().collect();
+
+ if !lines.is_empty() {
+ self.git_dirty = true;
+
+ // Count modified, untracked, etc.
+ let modified =
+ lines.iter().filter(|l| l.starts_with(" M") || l.starts_with("M ")).count();
+ let untracked = lines.iter().filter(|l| l.starts_with("??")).count();
+ let staged =
+ lines.iter().filter(|l| l.starts_with("A ") || l.starts_with("D ")).count();
+
+ let mut parts = Vec::new();
+ if modified > 0 {
+ parts.push(format!("{}M", modified));
+ }
+ if staged > 0 {
+ parts.push(format!("{}S", staged));
+ }
+ if untracked > 0 {
+ parts.push(format!("{}?", untracked));
+ }
+
+ if !parts.is_empty() {
+ self.git_status = Some(parts.join(" "));
+ }
+ }
+ }
+ }
+ }
+
/// Set the available commands.
pub fn with_commands(mut self, commands: Vec<String>) -> Self {
// Limit to avoid token overflow
@@ -72,6 +151,82 @@ impl ProjectContext {
self.recent_commands.len()
)
}
+
+ /// Build a rich system prompt for AI chat.
+ pub fn build_system_prompt(&self) -> String {
+ let mut prompt = String::new();
+
+ // Expert framing (like Cursor/Claude Code)
+ prompt.push_str(
+ "You are an expert software developer assistant. You have deep knowledge of:\n",
+ );
+ prompt.push_str("- Terminal commands, git, and shell scripting\n");
+ prompt.push_str("- The current project's tech stack and patterns\n");
+ prompt.push_str("- Best practices for clean, maintainable code\n\n");
+
+ // Response style
+ prompt.push_str("Response style:\n");
+ prompt.push_str("- Be direct and concise\n");
+ prompt.push_str("- Use `backticks` for commands and code\n");
+ prompt.push_str("- Use ```language blocks for multi-line code\n");
+ prompt.push_str("- Give working solutions, not just explanations\n");
+ prompt
+ .push_str("- If asked 'how to X', show the command first, then explain if needed\n\n");
+
+ // Project context
+ prompt.push_str("Current project:\n");
+ prompt.push_str(&format!("- Name: {}\n", self.project_name));
+ prompt.push_str(&format!("- Type: {}\n", self.project_type));
+ prompt.push_str(&format!("- Path: {}\n", self.current_directory.display()));
+
+ if let Some(ref branch) = self.git_branch {
+ let status = if self.git_dirty {
+ format!("with changes ({})", self.git_status.as_deref().unwrap_or("modified"))
+ } else {
+ "clean".to_string()
+ };
+ prompt.push_str(&format!("- Git: {} ({})\n", branch, status));
+ }
+
+ if !self.available_commands.is_empty() {
+ prompt.push_str(&format!("- Commands: {} available\n", self.available_commands.len()));
+ }
+
+ // Load project-specific rules if they exist
+ if let Some(rules) = self.load_project_rules() {
+ prompt.push_str("\nProject rules:\n");
+ prompt.push_str(&rules);
+ prompt.push('\n');
+ }
+
+ prompt
+ }
+
+ /// Load project-specific AI rules from .palrun/ai.md or PALRUN.md
+    fn load_project_rules(&self) -> Option<String> {
+ let palrun_ai = self.current_directory.join(".palrun/ai.md");
+ let palrun_md = self.current_directory.join("PALRUN.md");
+
+ // Try .palrun/ai.md first, then PALRUN.md
+ let rules_path = if palrun_ai.exists() {
+ Some(palrun_ai)
+ } else if palrun_md.exists() {
+ Some(palrun_md)
+ } else {
+ None
+ };
+
+ if let Some(path) = rules_path {
+ if let Ok(content) = std::fs::read_to_string(&path) {
+                // Limit to roughly the first 500 bytes to avoid token overflow,
+                // backing off to a char boundary so the slice cannot panic on UTF-8.
+                let truncated = if content.len() > 500 {
+                    let mut end = 500;
+                    while !content.is_char_boundary(end) {
+                        end -= 1;
+                    }
+                    format!("{}...", &content[..end])
+                } else {
+                    content
+                };
+ return Some(truncated);
+ }
+ }
+
+ None
+ }
}
/// Detect the project type from files in the directory.
@@ -115,12 +270,18 @@ fn detect_project_type(path: &PathBuf) -> String {
impl Default for ProjectContext {
fn default() -> Self {
+ let now = chrono::Local::now();
Self {
project_name: "unknown".to_string(),
project_type: "unknown".to_string(),
available_commands: Vec::new(),
current_directory: PathBuf::from("."),
recent_commands: Vec::new(),
+ current_date: now.format("%Y-%m-%d").to_string(),
+ current_time: now.format("%H:%M").to_string(),
+ git_branch: None,
+ git_status: None,
+ git_dirty: false,
}
}
}
@@ -134,6 +295,9 @@ mod tests {
let context = ProjectContext::new("test", PathBuf::from("/tmp/test"));
assert_eq!(context.project_name, "test");
assert_eq!(context.project_type, "unknown");
+ // Check that date/time are populated
+ assert!(!context.current_date.is_empty());
+ assert!(!context.current_time.is_empty());
}
#[test]
@@ -153,4 +317,32 @@ mod tests {
assert!(summary.contains("test"));
assert!(summary.contains('2'));
}
+
+ #[test]
+ fn test_build_system_prompt() {
+ let mut context = ProjectContext::new("my-project", PathBuf::from("/home/user/project"));
+ context.project_type = "rust".to_string();
+ context.available_commands = vec!["cargo build".to_string(), "cargo test".to_string()];
+ context.git_branch = Some("main".to_string());
+ context.git_dirty = true;
+ context.git_status = Some("2M 1?".to_string());
+
+ let prompt = context.build_system_prompt();
+
+ // Check that key information is included
+ assert!(prompt.contains("expert software developer"));
+ assert!(prompt.contains("my-project"));
+ assert!(prompt.contains("rust"));
+ assert!(prompt.contains("main"));
+ assert!(prompt.contains("Response style"));
+ }
+
+ #[test]
+ fn test_default_context() {
+ let context = ProjectContext::default();
+ assert_eq!(context.project_name, "unknown");
+ assert_eq!(context.project_type, "unknown");
+ // Date/time should be set
+ assert!(!context.current_date.is_empty());
+ }
}
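
A usage sketch mirroring the new test: build a context by hand and render the system prompt that providers receive. Assumes `ProjectContext` from this module is in scope:

```rust
fn main() {
    let mut context = ProjectContext::new("my-project", std::path::PathBuf::from("."));
    context.project_type = "rust".to_string();
    context.git_branch = Some("main".to_string());
    context.git_dirty = true;
    context.git_status = Some("2M 1?".to_string());

    let prompt = context.build_system_prompt();
    // Carries the expert framing, the response-style rules, and a
    // "Git: main (with changes (2M 1?))" line for the provider.
    println!("{prompt}");
}
```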
diff --git a/src/ai/grok.rs b/src/ai/grok.rs
new file mode 100644
index 0000000..b13f7a3
--- /dev/null
+++ b/src/ai/grok.rs
@@ -0,0 +1,228 @@
+//! Grok (xAI) API integration.
+//!
+//! Implements the AIProvider trait for xAI's Grok models.
+//! Grok uses an OpenAI-compatible API.
+
+use async_trait::async_trait;
+use reqwest::Client;
+use serde::{Deserialize, Serialize};
+
+use super::{AIProvider, ProjectContext};
+
+/// Grok (xAI) API provider.
+pub struct GrokProvider {
+ client: Client,
+ api_key: String,
+ model: String,
+}
+
+impl GrokProvider {
+ /// Create a new Grok provider.
+ ///
+ /// Reads API key from XAI_API_KEY environment variable.
+    pub fn new() -> anyhow::Result<Self> {
+ let api_key =
+ std::env::var("XAI_API_KEY").map_err(|_| anyhow::anyhow!("XAI_API_KEY not set"))?;
+
+ Ok(Self { client: Client::new(), api_key, model: "grok-2".to_string() })
+ }
+
+ /// Create with a specific model.
+    pub fn with_model(mut self, model: impl Into<String>) -> Self {
+ self.model = model.into();
+ self
+ }
+
+ /// Make a request to the Grok API.
+    async fn request(&self, system: &str, user_message: &str) -> anyhow::Result<String> {
+ let request = GrokRequest {
+ model: self.model.clone(),
+ messages: vec![
+ ChatMessage { role: "system".to_string(), content: system.to_string() },
+ ChatMessage { role: "user".to_string(), content: user_message.to_string() },
+ ],
+ max_tokens: Some(1024),
+ temperature: Some(0.7),
+ };
+
+ let response = self
+ .client
+ .post("https://api.x.ai/v1/chat/completions")
+ .header("Authorization", format!("Bearer {}", self.api_key))
+ .header("Content-Type", "application/json")
+ .json(&request)
+ .send()
+ .await?;
+
+ if !response.status().is_success() {
+ let status = response.status();
+ let body = response.text().await.unwrap_or_default();
+ anyhow::bail!("Grok API error ({}): {}", status, body);
+ }
+
+ let response: GrokResponse = response.json().await?;
+
+ response
+ .choices
+ .first()
+ .map(|c| c.message.content.clone())
+ .ok_or_else(|| anyhow::anyhow!("No response from Grok"))
+ }
+}
+
+#[async_trait]
+impl AIProvider for GrokProvider {
+ async fn generate_command(
+ &self,
+ prompt: &str,
+ context: &ProjectContext,
+    ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Your task is to generate the exact shell command the user needs.
+
+Current directory: {}
+Project type: {}
+Available commands: {}
+
+Rules:
+1. Output ONLY the command, nothing else
+2. Use the correct package manager for this project
+3. If multiple commands are needed, join with && or ;
+4. Never explain, just output the command",
+ context.current_directory.display(),
+ context.project_type,
+ context.available_commands.join(", ")
+ );
+
+ self.request(&system, prompt).await
+ }
+
+ async fn explain_command(
+ &self,
+ command: &str,
+ context: &ProjectContext,
+ ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Explain what this command does in plain English.
+
+Current directory: {}
+Project type: {}
+
+Be concise but thorough. Explain each part of the command.",
+ context.current_directory.display(),
+ context.project_type
+ );
+
+ self.request(&system, &format!("Explain: {}", command)).await
+ }
+
+ async fn diagnose_error(
+ &self,
+ command: &str,
+ error: &str,
+ context: &ProjectContext,
+ ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Diagnose why this command failed and suggest a fix.
+
+Current directory: {}
+Project type: {}
+
+Be concise. Focus on the most likely cause and solution.",
+ context.current_directory.display(),
+ context.project_type
+ );
+
+ let user_message = format!("Command: {}\n\nError:\n{}", command, error);
+
+ self.request(&system, &user_message).await
+ }
+
+ fn name(&self) -> &str {
+ "grok"
+ }
+
+ async fn is_available(&self) -> bool {
+ // Check if we can reach the API
+ let response = self
+ .client
+ .get("https://api.x.ai/v1/models")
+ .header("Authorization", format!("Bearer {}", self.api_key))
+ .timeout(std::time::Duration::from_secs(5))
+ .send()
+ .await;
+
+ response.map(|r| r.status().is_success()).unwrap_or(false)
+ }
+}
+
+// Request/Response types (OpenAI-compatible)
+
+#[derive(Debug, Serialize)]
+struct GrokRequest {
+ model: String,
+ messages: Vec<ChatMessage>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ max_tokens: Option<u32>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ temperature: Option<f32>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct ChatMessage {
+ role: String,
+ content: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct GrokResponse {
+ choices: Vec<Choice>,
+}
+
+#[derive(Debug, Deserialize)]
+struct Choice {
+ message: ChatMessage,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use serial_test::serial;
+
+ #[test]
+ #[serial(xai_env)]
+ fn test_grok_provider_requires_api_key() {
+ // Save current value
+ let original = std::env::var("XAI_API_KEY").ok();
+ std::env::remove_var("XAI_API_KEY");
+
+ let result = GrokProvider::new();
+
+ // Restore original value
+ if let Some(val) = original {
+ std::env::set_var("XAI_API_KEY", val);
+ }
+
+ assert!(result.is_err());
+ }
+
+ #[test]
+ #[serial(xai_env)]
+ fn test_grok_provider_with_model() {
+ // Save current value
+ let original = std::env::var("XAI_API_KEY").ok();
+ std::env::set_var("XAI_API_KEY", "test-key");
+
+ let provider = GrokProvider::new().unwrap().with_model("grok-2-mini");
+ assert_eq!(provider.model, "grok-2-mini");
+
+ // Restore or remove
+ match original {
+ Some(val) => std::env::set_var("XAI_API_KEY", val),
+ None => std::env::remove_var("XAI_API_KEY"),
+ }
+ }
+}
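Since Grok speaks the OpenAI-compatible API, the provider above is driven like any other `AIProvider`. A minimal end-to-end sketch follows; the `palrun::ai` import path and the `grok-2-mini` model name are assumptions for illustration, not confirmed by this patch.

```rust
use std::path::PathBuf;

use palrun::ai::{AIProvider, GrokProvider, ProjectContext}; // assumed re-export path

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Requires XAI_API_KEY in the environment; fails fast otherwise.
    let provider = GrokProvider::new()?.with_model("grok-2-mini");

    let context = ProjectContext::new("demo", PathBuf::from("."));
    let command = provider.generate_command("run the test suite", &context).await?;
    println!("{command}");
    Ok(())
}
```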
diff --git a/src/ai/mod.rs b/src/ai/mod.rs
index 5fb3b70..d3fef24 100644
--- a/src/ai/mod.rs
+++ b/src/ai/mod.rs
@@ -8,21 +8,30 @@
//! - Command explanation
//! - Error diagnosis
//! - **Agentic tool use** - AI can use MCP tools autonomously
+//! - **Model routing** - Route tasks to optimal models
mod agent;
+mod azure;
mod claude;
mod context;
mod executor;
+mod grok;
mod ollama;
+mod openai;
+mod routing;
pub use agent::{
mcp_tools_to_agent_tools, Agent, AgentMessage, AgentProvider, AgentResponse, AgentState,
AgentStopReason, AgentTool, AgentToolCall, AgentToolResult, ToolExecutor,
};
+pub use azure::AzureOpenAIProvider;
pub use claude::ClaudeProvider;
pub use context::ProjectContext;
pub use executor::{CompositeExecutor, MCPToolExecutor, ShellExecutor};
+pub use grok::GrokProvider;
pub use ollama::OllamaProvider;
+pub use openai::OpenAIProvider;
+pub use routing::{FallbackChain, ModelRouter, RoutingConfig, RoutingDecision, TaskCategory};
use async_trait::async_trait;
@@ -79,7 +88,12 @@ pub enum AIError {
/// AI provider manager with fallback support.
///
-/// Tries providers in order: Claude (if API key available) -> Ollama (if running) -> None
+/// Tries providers in order based on availability:
+/// 1. Claude (if ANTHROPIC_API_KEY set)
+/// 2. OpenAI (if OPENAI_API_KEY set)
+/// 3. Azure (if AZURE_OPENAI_* vars set)
+/// 4. Grok (if XAI_API_KEY set)
+/// 5. Ollama (if running locally)
pub struct AIManager {
providers: Vec<Box<dyn AIProvider>>,
}
@@ -96,7 +110,28 @@ impl AIManager {
}
}
- // Then Ollama (local LLM)
+ // Then OpenAI (requires API key)
+ if let Ok(openai) = OpenAIProvider::new() {
+ if openai.is_available().await {
+ providers.push(Box::new(openai));
+ }
+ }
+
+ // Then Azure OpenAI (requires endpoint + key + deployment)
+ if let Ok(azure) = AzureOpenAIProvider::new() {
+ if azure.is_available().await {
+ providers.push(Box::new(azure));
+ }
+ }
+
+ // Then Grok (requires API key)
+ if let Ok(grok) = GrokProvider::new() {
+ if grok.is_available().await {
+ providers.push(Box::new(grok));
+ }
+ }
+
+ // Finally Ollama (local LLM; no API key needed, used when the daemon is running)
let ollama = OllamaProvider::new();
if ollama.is_available().await {
providers.push(Box::new(ollama));
@@ -105,11 +140,30 @@ impl AIManager {
Self { providers }
}
+ /// Create with a specific provider.
+ pub fn with_provider(provider: impl Into<String>) -> anyhow::Result<Self> {
+ let provider_name = provider.into();
+ let provider: Box<dyn AIProvider> = match provider_name.as_str() {
+ "claude" => Box::new(ClaudeProvider::new()?),
+ "openai" => Box::new(OpenAIProvider::new()?),
+ "azure" => Box::new(AzureOpenAIProvider::new()?),
+ "grok" => Box::new(GrokProvider::new()?),
+ "ollama" => Box::new(OllamaProvider::new()),
+ other => anyhow::bail!("Unknown provider: {}", other),
+ };
+ Ok(Self { providers: vec![provider] })
+ }
+
/// Create with only Ollama (for local-only usage).
pub fn ollama_only() -> Self {
Self { providers: vec![Box::new(OllamaProvider::new())] }
}
+ /// List all available providers.
+ pub fn available_providers(&self) -> Vec<&str> {
+ self.providers.iter().map(|p| p.name()).collect()
+ }
+
/// Check if any AI provider is available.
pub fn is_available(&self) -> bool {
!self.providers.is_empty()
@@ -185,4 +239,24 @@ mod tests {
let manager = AIManager::ollama_only();
assert_eq!(manager.active_provider(), Some("ollama"));
}
+
+ #[test]
+ fn test_ai_manager_with_provider() {
+ // Test with ollama (doesn't require API key)
+ let manager = AIManager::with_provider("ollama").unwrap();
+ assert_eq!(manager.active_provider(), Some("ollama"));
+ }
+
+ #[test]
+ fn test_ai_manager_with_invalid_provider() {
+ let result = AIManager::with_provider("invalid");
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn test_available_providers() {
+ let manager = AIManager::ollama_only();
+ let providers = manager.available_providers();
+ assert_eq!(providers, vec!["ollama"]);
+ }
}
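The manager's probe order only matters when several API keys are set at once; `with_provider` bypasses the probing entirely. A minimal sketch, assuming the crate root re-exports the `ai` module:

```rust
use palrun::ai::AIManager; // assumed import path

fn main() -> anyhow::Result<()> {
    // Pin a single provider instead of probing; construction errors (e.g. a
    // missing API key for "claude") surface immediately rather than falling back.
    let manager = AIManager::with_provider("ollama")?;
    println!("providers: {:?}", manager.available_providers());
    Ok(())
}
```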
diff --git a/src/ai/ollama.rs b/src/ai/ollama.rs
index c6e1791..0e3eac2 100644
--- a/src/ai/ollama.rs
+++ b/src/ai/ollama.rs
@@ -438,13 +438,9 @@ mod tests {
#[test]
fn test_command_prompt_building() {
- let context = ProjectContext {
- project_name: "test-project".to_string(),
- project_type: "node".to_string(),
- available_commands: vec!["npm run build".to_string(), "npm test".to_string()],
- current_directory: PathBuf::from("/project"),
- recent_commands: vec![],
- };
+ let mut context = ProjectContext::new("test-project", PathBuf::from("/project"));
+ context.project_type = "node".to_string();
+ context.available_commands = vec!["npm run build".to_string(), "npm test".to_string()];
let prompt = OllamaProvider::build_command_prompt("run tests", &context);
diff --git a/src/ai/openai.rs b/src/ai/openai.rs
new file mode 100644
index 0000000..9bc659f
--- /dev/null
+++ b/src/ai/openai.rs
@@ -0,0 +1,239 @@
+//! OpenAI API integration.
+//!
+//! Implements the AIProvider trait for OpenAI GPT models.
+
+use async_trait::async_trait;
+use reqwest::Client;
+use serde::{Deserialize, Serialize};
+
+use super::{AIProvider, ProjectContext};
+
+/// OpenAI API provider.
+pub struct OpenAIProvider {
+ client: Client,
+ api_key: String,
+ model: String,
+ base_url: String,
+}
+
+impl OpenAIProvider {
+ /// Create a new OpenAI provider.
+ ///
+ /// Reads API key from OPENAI_API_KEY environment variable.
+ pub fn new() -> anyhow::Result<Self> {
+ let api_key = std::env::var("OPENAI_API_KEY")
+ .map_err(|_| anyhow::anyhow!("OPENAI_API_KEY not set"))?;
+
+ Ok(Self {
+ client: Client::new(),
+ api_key,
+ model: "gpt-4o".to_string(),
+ base_url: "https://api.openai.com/v1".to_string(),
+ })
+ }
+
+ /// Create with a specific model.
+ pub fn with_model(mut self, model: impl Into<String>) -> Self {
+ self.model = model.into();
+ self
+ }
+
+ /// Create with a custom base URL (for Azure OpenAI or compatible APIs).
+ pub fn with_base_url(mut self, url: impl Into<String>) -> Self {
+ self.base_url = url.into();
+ self
+ }
+
+ /// Make a request to the OpenAI API.
+ async fn request(&self, system: &str, user_message: &str) -> anyhow::Result<String> {
+ let request = OpenAIRequest {
+ model: self.model.clone(),
+ messages: vec![
+ ChatMessage { role: "system".to_string(), content: system.to_string() },
+ ChatMessage { role: "user".to_string(), content: user_message.to_string() },
+ ],
+ max_tokens: Some(1024),
+ temperature: Some(0.7),
+ };
+
+ let response = self
+ .client
+ .post(format!("{}/chat/completions", self.base_url))
+ .header("Authorization", format!("Bearer {}", self.api_key))
+ .header("Content-Type", "application/json")
+ .json(&request)
+ .send()
+ .await?;
+
+ if !response.status().is_success() {
+ let status = response.status();
+ let body = response.text().await.unwrap_or_default();
+ anyhow::bail!("OpenAI API error ({}): {}", status, body);
+ }
+
+ let response: OpenAIResponse = response.json().await?;
+
+ response
+ .choices
+ .first()
+ .map(|c| c.message.content.clone())
+ .ok_or_else(|| anyhow::anyhow!("No response from OpenAI"))
+ }
+}
+
+#[async_trait]
+impl AIProvider for OpenAIProvider {
+ async fn generate_command(
+ &self,
+ prompt: &str,
+ context: &ProjectContext,
+ ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Your task is to generate the exact shell command the user needs.
+
+Current directory: {}
+Project type: {}
+Available commands: {}
+
+Rules:
+1. Output ONLY the command, nothing else
+2. Use the correct package manager for this project
+3. If multiple commands are needed, join with && or ;
+4. Never explain, just output the command",
+ context.current_directory.display(),
+ context.project_type,
+ context.available_commands.join(", ")
+ );
+
+ self.request(&system, prompt).await
+ }
+
+ async fn explain_command(
+ &self,
+ command: &str,
+ context: &ProjectContext,
+ ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Explain what this command does in plain English.
+
+Current directory: {}
+Project type: {}
+
+Be concise but thorough. Explain each part of the command.",
+ context.current_directory.display(),
+ context.project_type
+ );
+
+ self.request(&system, &format!("Explain: {}", command)).await
+ }
+
+ async fn diagnose_error(
+ &self,
+ command: &str,
+ error: &str,
+ context: &ProjectContext,
+ ) -> anyhow::Result<String> {
+ let system = format!(
+ r"You are Palrun, an AI assistant for terminal commands.
+Diagnose why this command failed and suggest a fix.
+
+Current directory: {}
+Project type: {}
+
+Be concise. Focus on the most likely cause and solution.",
+ context.current_directory.display(),
+ context.project_type
+ );
+
+ let user_message = format!("Command: {}\n\nError:\n{}", command, error);
+
+ self.request(&system, &user_message).await
+ }
+
+ fn name(&self) -> &str {
+ "openai"
+ }
+
+ async fn is_available(&self) -> bool {
+ // Check if we can reach the API
+ let response = self
+ .client
+ .get(format!("{}/models", self.base_url))
+ .header("Authorization", format!("Bearer {}", self.api_key))
+ .timeout(std::time::Duration::from_secs(5))
+ .send()
+ .await;
+
+ response.map(|r| r.status().is_success()).unwrap_or(false)
+ }
+}
+
+// Request/Response types
+
+#[derive(Debug, Serialize)]
+struct OpenAIRequest {
+ model: String,
+ messages: Vec<ChatMessage>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ max_tokens: Option<u32>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ temperature: Option<f32>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct ChatMessage {
+ role: String,
+ content: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct OpenAIResponse {
+ choices: Vec<Choice>,
+}
+
+#[derive(Debug, Deserialize)]
+struct Choice {
+ message: ChatMessage,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use serial_test::serial;
+
+ #[test]
+ #[serial(openai_env)]
+ fn test_openai_provider_requires_api_key() {
+ // Save current value
+ let original = std::env::var("OPENAI_API_KEY").ok();
+ std::env::remove_var("OPENAI_API_KEY");
+
+ let result = OpenAIProvider::new();
+
+ // Restore original value
+ if let Some(val) = original {
+ std::env::set_var("OPENAI_API_KEY", val);
+ }
+
+ assert!(result.is_err());
+ }
+
+ #[test]
+ #[serial(openai_env)]
+ fn test_openai_provider_with_model() {
+ // Save current value
+ let original = std::env::var("OPENAI_API_KEY").ok();
+ std::env::set_var("OPENAI_API_KEY", "test-key");
+
+ let provider = OpenAIProvider::new().unwrap().with_model("gpt-4-turbo");
+ assert_eq!(provider.model, "gpt-4-turbo");
+
+ // Restore or remove
+ match original {
+ Some(val) => std::env::set_var("OPENAI_API_KEY", val),
+ None => std::env::remove_var("OPENAI_API_KEY"),
+ }
+ }
+}
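Because the base URL is injectable, the same provider can target any OpenAI-compatible endpoint. A sketch under stated assumptions — the local proxy URL and model name below are placeholders, and `palrun::ai` is an assumed path:

```rust
use palrun::ai::OpenAIProvider; // assumed import path

fn build_provider() -> anyhow::Result<OpenAIProvider> {
    // OPENAI_API_KEY must be set; the local proxy URL is illustrative only.
    Ok(OpenAIProvider::new()?
        .with_model("gpt-4o-mini")
        .with_base_url("http://localhost:8080/v1"))
}
```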
diff --git a/src/ai/routing.rs b/src/ai/routing.rs
new file mode 100644
index 0000000..46d8261
--- /dev/null
+++ b/src/ai/routing.rs
@@ -0,0 +1,385 @@
+//! AI Model Routing Engine.
+//!
+//! Intelligently routes tasks to the optimal AI model based on task type,
+//! cost, performance, and availability.
+
+use serde::{Deserialize, Serialize};
+
+use super::{AIProvider, ClaudeProvider, GrokProvider, OllamaProvider, OpenAIProvider};
+
+/// Task category for routing decisions.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub enum TaskCategory {
+ /// Strategic planning (roadmap, architecture)
+ Planning,
+
+ /// Writing new code
+ CodeGeneration,
+
+ /// Reviewing/analyzing existing code
+ CodeReview,
+
+ /// Quick tasks (simple queries, short responses)
+ QuickTask,
+
+ /// Writing documentation
+ Documentation,
+
+ /// Error diagnosis
+ ErrorDiagnosis,
+}
+
+impl TaskCategory {
+ /// Infer category from prompt content.
+ pub fn from_prompt(prompt: &str) -> Self {
+ let lower = prompt.to_lowercase();
+
+ if lower.contains("plan") || lower.contains("roadmap") || lower.contains("architect") {
+ Self::Planning
+ } else if lower.contains("review") || lower.contains("analyze") || lower.contains("check") {
+ Self::CodeReview
+ } else if lower.contains("document")
+ || lower.contains("readme")
+ || lower.contains("explain")
+ {
+ Self::Documentation
+ } else if lower.contains("error") || lower.contains("fix") || lower.contains("debug") {
+ Self::ErrorDiagnosis
+ } else if lower.contains("write") || lower.contains("implement") || lower.contains("create")
+ {
+ Self::CodeGeneration
+ } else if prompt.len() < 100 {
+ Self::QuickTask
+ } else {
+ Self::CodeGeneration
+ }
+ }
+
+ /// Get the default model for this category.
+ pub fn default_model(&self) -> &'static str {
+ match self {
+ Self::Planning => "claude",
+ Self::CodeGeneration => "claude",
+ Self::CodeReview => "claude",
+ Self::QuickTask => "ollama",
+ Self::Documentation => "claude",
+ Self::ErrorDiagnosis => "claude",
+ }
+ }
+}
+
+/// Routing configuration.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RoutingConfig {
+ /// Model for planning tasks
+ #[serde(default = "default_planning")]
+ pub planning: String,
+
+ /// Model for code generation
+ #[serde(default = "default_code_generation")]
+ pub code_generation: String,
+
+ /// Model for code review
+ #[serde(default = "default_code_review")]
+ pub code_review: String,
+
+ /// Model for quick tasks
+ #[serde(default = "default_quick_tasks")]
+ pub quick_tasks: String,
+
+ /// Model for documentation
+ #[serde(default = "default_documentation")]
+ pub documentation: String,
+
+ /// Model for error diagnosis
+ #[serde(default = "default_error_diagnosis")]
+ pub error_diagnosis: String,
+
+ /// Fallback model when primary fails
+ #[serde(default = "default_fallback")]
+ pub fallback: String,
+
+ /// Local model for offline/budget mode
+ #[serde(default = "default_local")]
+ pub local: String,
+}
+
+fn default_planning() -> String {
+ "claude".to_string()
+}
+fn default_code_generation() -> String {
+ "claude".to_string()
+}
+fn default_code_review() -> String {
+ "claude".to_string()
+}
+fn default_quick_tasks() -> String {
+ "ollama".to_string()
+}
+fn default_documentation() -> String {
+ "claude".to_string()
+}
+fn default_error_diagnosis() -> String {
+ "claude".to_string()
+}
+fn default_fallback() -> String {
+ "openai".to_string()
+}
+fn default_local() -> String {
+ "ollama".to_string()
+}
+
+impl Default for RoutingConfig {
+ fn default() -> Self {
+ Self {
+ planning: default_planning(),
+ code_generation: default_code_generation(),
+ code_review: default_code_review(),
+ quick_tasks: default_quick_tasks(),
+ documentation: default_documentation(),
+ error_diagnosis: default_error_diagnosis(),
+ fallback: default_fallback(),
+ local: default_local(),
+ }
+ }
+}
+
+impl RoutingConfig {
+ /// Get the model name for a task category.
+ pub fn model_for(&self, category: TaskCategory) -> &str {
+ match category {
+ TaskCategory::Planning => &self.planning,
+ TaskCategory::CodeGeneration => &self.code_generation,
+ TaskCategory::CodeReview => &self.code_review,
+ TaskCategory::QuickTask => &self.quick_tasks,
+ TaskCategory::Documentation => &self.documentation,
+ TaskCategory::ErrorDiagnosis => &self.error_diagnosis,
+ }
+ }
+}
+
+/// Model router that selects the best provider for each task.
+pub struct ModelRouter {
+ config: RoutingConfig,
+ providers: Vec<(String, Box<dyn AIProvider>)>,
+}
+
+impl ModelRouter {
+ /// Create a new router with default configuration.
+ pub async fn new() -> Self {
+ Self::with_config(RoutingConfig::default()).await
+ }
+
+ /// Create a router with custom configuration.
+ pub async fn with_config(config: RoutingConfig) -> Self {
+ let mut providers: Vec<(String, Box<dyn AIProvider>)> = Vec::new();
+
+ // Try to initialize each provider
+ if let Ok(claude) = ClaudeProvider::new() {
+ if claude.is_available().await {
+ providers.push(("claude".to_string(), Box::new(claude)));
+ }
+ }
+
+ if let Ok(openai) = OpenAIProvider::new() {
+ if openai.is_available().await {
+ providers.push(("openai".to_string(), Box::new(openai)));
+ }
+ }
+
+ if let Ok(grok) = GrokProvider::new() {
+ if grok.is_available().await {
+ providers.push(("grok".to_string(), Box::new(grok)));
+ }
+ }
+
+ let ollama = OllamaProvider::new();
+ if ollama.is_available().await {
+ providers.push(("ollama".to_string(), Box::new(ollama)));
+ }
+
+ Self { config, providers }
+ }
+
+ /// Select the best provider for a task category.
+ pub fn select(&self, category: TaskCategory) -> Option<&dyn AIProvider> {
+ let model_name = self.config.model_for(category);
+ self.get_provider(model_name)
+ }
+
+ /// Get a provider by name.
+ pub fn get_provider(&self, name: &str) -> Option<&dyn AIProvider> {
+ self.providers.iter().find(|(n, _)| n == name).map(|(_, p)| p.as_ref())
+ }
+
+ /// Get a fallback chain for a task category.
+ pub fn fallback_chain(&self, category: TaskCategory) -> FallbackChain<'_> {
+ let mut chain = Vec::new();
+
+ // Primary model for this category
+ if let Some(primary) = self.select(category) {
+ chain.push(primary);
+ }
+
+ // Fallback model
+ if let Some(fallback) = self.get_provider(&self.config.fallback) {
+ if !chain.iter().any(|p| p.name() == fallback.name()) {
+ chain.push(fallback);
+ }
+ }
+
+ // Local model as last resort
+ if let Some(local) = self.get_provider(&self.config.local) {
+ if !chain.iter().any(|p| p.name() == local.name()) {
+ chain.push(local);
+ }
+ }
+
+ FallbackChain::new(chain)
+ }
+
+ /// List available providers.
+ pub fn available_providers(&self) -> Vec<&str> {
+ self.providers.iter().map(|(n, _)| n.as_str()).collect()
+ }
+
+ /// Check if a specific provider is available.
+ pub fn has_provider(&self, name: &str) -> bool {
+ self.providers.iter().any(|(n, _)| n == name)
+ }
+}
+
+/// A chain of providers to try in order.
+pub struct FallbackChain<'a> {
+ providers: Vec<&'a dyn AIProvider>,
+ current_index: usize,
+}
+
+impl<'a> FallbackChain<'a> {
+ /// Create a new fallback chain.
+ pub fn new(providers: Vec<&'a dyn AIProvider>) -> Self {
+ Self { providers, current_index: 0 }
+ }
+
+ /// Get the current provider.
+ pub fn current(&self) -> Option<&'a dyn AIProvider> {
+ self.providers.get(self.current_index).copied()
+ }
+
+ /// Move to the next provider.
+ pub fn next(&mut self) -> Option<&'a dyn AIProvider> {
+ self.current_index += 1;
+ self.current()
+ }
+
+ /// Reset to the first provider.
+ pub fn reset(&mut self) {
+ self.current_index = 0;
+ }
+
+ /// Get all providers in the chain.
+ pub fn providers(&self) -> &[&'a dyn AIProvider] {
+ &self.providers
+ }
+
+ /// Execute a request with fallback.
+ pub async fn execute<F, T, E>(&mut self, mut f: F) -> Result<T, E>
+ where
+ F: FnMut(
+ &'a dyn AIProvider,
+ )
+ -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, E>> + Send + 'a>>,
+ E: std::fmt::Display,
+ {
+ self.reset();
+ while let Some(provider) = self.current() {
+ match f(provider).await {
+ Ok(result) => return Ok(result),
+ Err(e) => {
+ tracing::warn!(
+ provider = provider.name(),
+ error = %e,
+ "Provider failed, trying next"
+ );
+ if self.next().is_none() {
+ return Err(e);
+ }
+ }
+ }
+ }
+ unreachable!("Chain should have at least one provider")
+ }
+}
+
+/// Routing decision with metadata.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RoutingDecision {
+ /// Selected provider name
+ pub provider: String,
+
+ /// Task category
+ pub category: TaskCategory,
+
+ /// Reason for selection
+ pub reason: String,
+
+ /// Alternative providers available
+ pub alternatives: Vec<String>,
+}
+
+impl RoutingDecision {
+ /// Create a new routing decision.
+ pub fn new(provider: &str, category: TaskCategory, alternatives: Vec<&str>) -> Self {
+ Self {
+ provider: provider.to_string(),
+ category,
+ reason: format!("{} is the configured model for {:?} tasks", provider, category),
+ alternatives: alternatives.into_iter().map(String::from).collect(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_task_category_from_prompt() {
+ assert_eq!(TaskCategory::from_prompt("plan the architecture"), TaskCategory::Planning);
+ assert_eq!(TaskCategory::from_prompt("review this code"), TaskCategory::CodeReview);
+ assert_eq!(TaskCategory::from_prompt("write documentation"), TaskCategory::Documentation);
+ assert_eq!(TaskCategory::from_prompt("fix this error"), TaskCategory::ErrorDiagnosis);
+ assert_eq!(TaskCategory::from_prompt("implement feature"), TaskCategory::CodeGeneration);
+ assert_eq!(TaskCategory::from_prompt("hi"), TaskCategory::QuickTask);
+ }
+
+ #[test]
+ fn test_routing_config_default() {
+ let config = RoutingConfig::default();
+ assert_eq!(config.planning, "claude");
+ assert_eq!(config.quick_tasks, "ollama");
+ assert_eq!(config.fallback, "openai");
+ }
+
+ #[test]
+ fn test_routing_config_model_for() {
+ let config = RoutingConfig::default();
+ assert_eq!(config.model_for(TaskCategory::Planning), "claude");
+ assert_eq!(config.model_for(TaskCategory::QuickTask), "ollama");
+ }
+
+ #[test]
+ fn test_task_category_default_model() {
+ assert_eq!(TaskCategory::Planning.default_model(), "claude");
+ assert_eq!(TaskCategory::QuickTask.default_model(), "ollama");
+ }
+
+ #[test]
+ fn test_routing_decision() {
+ let decision =
+ RoutingDecision::new("claude", TaskCategory::Planning, vec!["openai", "ollama"]);
+ assert_eq!(decision.provider, "claude");
+ assert_eq!(decision.category, TaskCategory::Planning);
+ assert_eq!(decision.alternatives.len(), 2);
+ }
+}
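The `execute` callback hands back a pinned, boxed future so the chain can poll providers of different concrete types uniformly. A minimal sketch of driving a fallback chain, assuming at least one provider is reachable (the chain panics when empty) and that the `palrun::ai` paths exist as assumed:

```rust
use palrun::ai::{ModelRouter, ProjectContext, TaskCategory}; // assumed paths

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let router = ModelRouter::new().await;
    let category = TaskCategory::from_prompt("fix this error: linker failure");

    let mut chain = router.fallback_chain(category);
    let answer = chain
        .execute(|provider| {
            // Box::pin satisfies the Pin<Box<dyn Future ...>> bound.
            Box::pin(async move {
                provider
                    .explain_command("cargo clean && cargo build", &ProjectContext::default())
                    .await
                    .map_err(|e| e.to_string()) // E: Display is all the chain needs
            })
        })
        .await
        .map_err(anyhow::Error::msg)?;
    println!("{answer}");
    Ok(())
}
```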
diff --git a/src/app.rs b/src/app.rs
index 1b90839..98ad602 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -9,7 +9,7 @@ use std::path::PathBuf;
use crate::core::{
send_notification, BackgroundEvent, BackgroundManager, CaptureManager, ChainExecutor,
ChainStepStatus, Command, CommandChain, CommandContext, CommandRegistry, Config, ContextFilter,
- HistoryManager, ParsedQuery,
+ HistoryManager, ParsedQuery, TrustStore,
};
use crate::tui::Theme;
@@ -139,6 +139,67 @@ pub struct App {
/// Resilience manager for retry and circuit breaker logic
pub resilience: crate::core::ResilienceManager,
+
+ /// Workflow context for GSD-style project management
+ pub workflow_context: Option<crate::workflow::WorkflowContext>,
+
+ /// AI chat input buffer
+ #[cfg(feature = "ai")]
+ pub ai_chat_input: String,
+
+ /// AI chat history for current session
+ #[cfg(feature = "ai")]
+ pub ai_chat_history: Vec<(String, String)>,
+
+ /// Available AI models (from Ollama)
+ #[cfg(feature = "ai")]
+ pub ai_models: Vec<OllamaModel>,
+
+ /// Whether AI models are being loaded
+ #[cfg(feature = "ai")]
+ pub ai_models_loading: bool,
+
+ /// Selected model index in AI setup
+ #[cfg(feature = "ai")]
+ pub ai_model_selected: usize,
+
+ /// Pull progress message for downloading models
+ #[cfg(feature = "ai")]
+ pub ai_pull_progress: Option<String>,
+
+ /// Input for pulling new models
+ #[cfg(feature = "ai")]
+ pub ai_model_input: String,
+
+ /// Pending delete confirmation (model name to delete)
+ #[cfg(feature = "ai")]
+ pub ai_delete_pending: Option<String>,
+
+ /// Whether AI is currently thinking/processing
+ #[cfg(feature = "ai")]
+ pub ai_thinking: bool,
+
+ /// Scroll position for AI chat history
+ #[cfg(feature = "ai")]
+ pub ai_chat_scroll: usize,
+
+ /// Animation frame for spinner (increments on tick)
+ pub spinner_frame: usize,
+
+ /// Trust store for managing directory trust
+ pub trust_store: TrustStore,
+
+ /// Selected option in trust confirmation dialog (0 = Yes, 1 = No)
+ pub trust_selected: usize,
+}
+
+/// Represents an Ollama model
+#[cfg(feature = "ai")]
+#[derive(Debug, Clone)]
+pub struct OllamaModel {
+ pub name: String,
+ pub size: u64,
+ pub modified_at: String,
}
/// A slash command entry
@@ -161,6 +222,13 @@ fn get_slash_commands() -> Vec<SlashCommand> {
SlashCommand::new("/history", "View command history"),
SlashCommand::new("/analytics", "View usage analytics"),
SlashCommand::new("/favorites", "Show favorite commands"),
+ SlashCommand::new("/workflow", "Open workflow dashboard"),
+ SlashCommand::new("/plan", "Show current task plan"),
+ SlashCommand::new("/roadmap", "Show project roadmap"),
+ #[cfg(feature = "ai")]
+ SlashCommand::new("/ai", "Open AI chat mode"),
+ #[cfg(feature = "ai")]
+ SlashCommand::new("/models", "Manage AI models"),
SlashCommand::new("/settings", "Open settings"),
SlashCommand::new("/theme", "Change color theme"),
SlashCommand::new("/quit", "Exit palrun"),
@@ -231,6 +299,20 @@ pub enum AppMode {
/// Context menu for selected command
ContextMenu,
+
+ /// AI chat mode for natural language interaction
+ #[cfg(feature = "ai")]
+ AiChat,
+
+ /// AI setup mode for managing models
+ #[cfg(feature = "ai")]
+ AiSetup,
+
+ /// Workflow mode showing project context and tasks
+ Workflow,
+
+ /// Trust confirmation dialog for new directories
+ TrustConfirmation,
}
impl App {
@@ -254,6 +336,13 @@ impl App {
// Resolve theme from config
let theme = Self::resolve_theme(&config);
+ // Load trust store and check if directory is trusted
+ let trust_store = TrustStore::load().unwrap_or_default();
+ let is_trusted = trust_store.is_trusted(&cwd);
+
+ // Start in trust confirmation mode if directory not trusted
+ let initial_mode = if is_trusted { AppMode::default() } else { AppMode::TrustConfirmation };
+
Ok(Self {
input: String::new(),
cursor_position: 0,
@@ -264,7 +353,7 @@ impl App {
command_selected: false,
cwd,
config,
- mode: AppMode::default(),
+ mode: initial_mode,
status_message: None,
context,
context_aware: true,
@@ -294,6 +383,30 @@ impl App {
degradation: crate::core::DegradationManager::new(),
offline_manager: crate::core::OfflineManager::new(),
resilience: crate::core::ResilienceManager::new(),
+ workflow_context: None,
+ #[cfg(feature = "ai")]
+ ai_chat_input: String::new(),
+ #[cfg(feature = "ai")]
+ ai_chat_history: Vec::new(),
+ #[cfg(feature = "ai")]
+ ai_models: Vec::new(),
+ #[cfg(feature = "ai")]
+ ai_models_loading: false,
+ #[cfg(feature = "ai")]
+ ai_model_selected: 0,
+ #[cfg(feature = "ai")]
+ ai_pull_progress: None,
+ #[cfg(feature = "ai")]
+ ai_model_input: String::new(),
+ #[cfg(feature = "ai")]
+ ai_delete_pending: None,
+ #[cfg(feature = "ai")]
+ ai_thinking: false,
+ #[cfg(feature = "ai")]
+ ai_chat_scroll: 0,
+ spinner_frame: 0,
+ trust_store,
+ trust_selected: 0,
})
}
@@ -420,6 +533,30 @@ impl App {
degradation: crate::core::DegradationManager::new(),
offline_manager: crate::core::OfflineManager::new(),
resilience: crate::core::ResilienceManager::new(),
+ workflow_context: None,
+ #[cfg(feature = "ai")]
+ ai_chat_input: String::new(),
+ #[cfg(feature = "ai")]
+ ai_chat_history: Vec::new(),
+ #[cfg(feature = "ai")]
+ ai_models: Vec::new(),
+ #[cfg(feature = "ai")]
+ ai_models_loading: false,
+ #[cfg(feature = "ai")]
+ ai_model_selected: 0,
+ #[cfg(feature = "ai")]
+ ai_pull_progress: None,
+ #[cfg(feature = "ai")]
+ ai_model_input: String::new(),
+ #[cfg(feature = "ai")]
+ ai_delete_pending: None,
+ #[cfg(feature = "ai")]
+ ai_thinking: false,
+ #[cfg(feature = "ai")]
+ ai_chat_scroll: 0,
+ spinner_frame: 0,
+ trust_store: TrustStore::default(),
+ trust_selected: 0,
}
}
@@ -753,6 +890,11 @@ impl App {
"/analytics" => self.show_analytics(),
"/quit" => self.quit(),
"/favorites" => self.set_status("Favorites: coming soon!"),
+ "/workflow" | "/plan" | "/roadmap" => self.show_workflow(),
+ #[cfg(feature = "ai")]
+ "/ai" => self.show_ai_chat(),
+ #[cfg(feature = "ai")]
+ "/models" => self.show_ai_setup(),
"/settings" => self.set_status("Settings: coming soon!"),
"/theme" => self.set_status("Theme picker: coming soon!"),
_ => {}
@@ -904,9 +1046,31 @@ impl App {
/// Perform periodic updates (called on tick).
pub fn tick(&mut self) {
+ // Update spinner animation frame
+ self.spinner_frame = self.spinner_frame.wrapping_add(1);
// Future: Update file watchers, refresh commands, etc.
}
+ /// Get the current spinner character for loading animations.
+ pub fn spinner_char(&self) -> char {
+ const SPINNER: &[char] = &['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
+ SPINNER[self.spinner_frame % SPINNER.len()]
+ }
+
+ /// Get dynamic thinking message (rotates through different messages).
+ #[cfg(feature = "ai")]
+ pub fn thinking_message(&self) -> &'static str {
+ const MESSAGES: &[&str] = &[
+ "Thinking...",
+ "Processing...",
+ "Analyzing...",
+ "Generating response...",
+ "Working on it...",
+ ];
+ // Change message every ~20 frames (slower rotation)
+ MESSAGES[(self.spinner_frame / 20) % MESSAGES.len()]
+ }
+
/// Refresh Git information.
#[cfg(feature = "git")]
pub fn refresh_git_info(&mut self) {
@@ -1891,6 +2055,366 @@ impl App {
pub fn next_tip(&mut self) {
self.tip_index = (self.tip_index + 1) % 6;
}
+
+ // --- Workflow mode methods ---
+
+ /// Show the workflow dashboard.
+ pub fn show_workflow(&mut self) {
+ // Load workflow context if not already loaded
+ if self.workflow_context.is_none() {
+ self.load_workflow_context();
+ }
+ self.mode = AppMode::Workflow;
+ }
+
+ /// Dismiss the workflow dashboard and return to normal mode.
+ pub fn dismiss_workflow(&mut self) {
+ self.mode = AppMode::Normal;
+ }
+
+ /// Check if workflow is currently shown.
+ pub fn is_workflow_shown(&self) -> bool {
+ matches!(self.mode, AppMode::Workflow)
+ }
+
+ /// Load workflow context from current directory.
+ pub fn load_workflow_context(&mut self) {
+ match crate::workflow::WorkflowContext::load(&self.cwd) {
+ Ok(ctx) => {
+ self.workflow_context = Some(ctx);
+ }
+ Err(_) => {
+ // No workflow documents found, that's OK
+ self.workflow_context = None;
+ }
+ }
+ }
+
+ /// Get workflow status summary for display.
+ pub fn workflow_summary(&self) -> Option<String> {
+ self.workflow_context.as_ref().map(|ctx| {
+ let mut summary = String::new();
+ if let Some(ref project) = ctx.project {
+ summary.push_str(&format!("Project: {}\n", project.name));
+ }
+ if let Some(ref roadmap) = ctx.roadmap {
+ let completed = roadmap
+ .phases
+ .iter()
+ .filter(|p| matches!(p.status, crate::workflow::PhaseStatus::Completed))
+ .count();
+ summary.push_str(&format!(
+ "Roadmap: {}/{} phases\n",
+ completed,
+ roadmap.phases.len()
+ ));
+ }
+ if let Some(ref state) = ctx.state {
+ summary.push_str(&format!("Current: Phase {}\n", state.current_phase));
+ }
+ summary
+ })
+ }
+
+ // --- AI Chat mode methods ---
+
+ /// Show the AI chat mode.
+ #[cfg(feature = "ai")]
+ pub fn show_ai_chat(&mut self) {
+ self.ai_chat_input.clear();
+ self.mode = AppMode::AiChat;
+ }
+
+ /// Dismiss the AI chat mode and return to normal.
+ #[cfg(feature = "ai")]
+ pub fn dismiss_ai_chat(&mut self) {
+ self.mode = AppMode::Normal;
+ }
+
+ /// Check if AI chat is currently shown.
+ #[cfg(feature = "ai")]
+ pub fn is_ai_chat_shown(&self) -> bool {
+ matches!(self.mode, AppMode::AiChat)
+ }
+
+ /// Scroll AI chat up.
+ #[cfg(feature = "ai")]
+ pub fn ai_chat_scroll_up(&mut self) {
+ self.ai_chat_scroll = self.ai_chat_scroll.saturating_add(1);
+ }
+
+ /// Scroll AI chat down.
+ #[cfg(feature = "ai")]
+ pub fn ai_chat_scroll_down(&mut self) {
+ self.ai_chat_scroll = self.ai_chat_scroll.saturating_sub(1);
+ }
+
+ /// Auto-scroll AI chat to bottom (latest message).
+ #[cfg(feature = "ai")]
+ pub fn ai_chat_scroll_to_bottom(&mut self) {
+ self.ai_chat_scroll = 0;
+ }
+
+ // --- AI Setup mode methods ---
+
+ /// Show the AI setup mode (model management).
+ #[cfg(feature = "ai")]
+ pub fn show_ai_setup(&mut self) {
+ self.ai_model_input.clear();
+ self.ai_model_selected = 0;
+ self.ai_pull_progress = None;
+ self.mode = AppMode::AiSetup;
+ // Trigger model list refresh
+ self.refresh_ai_models();
+ }
+
+ /// Dismiss the AI setup mode and return to normal.
+ #[cfg(feature = "ai")]
+ pub fn dismiss_ai_setup(&mut self) {
+ self.mode = AppMode::Normal;
+ }
+
+ /// Check if AI setup is currently shown.
+ #[cfg(feature = "ai")]
+ pub fn is_ai_setup_shown(&self) -> bool {
+ matches!(self.mode, AppMode::AiSetup)
+ }
+
+ /// Refresh the list of available AI models from Ollama.
+ #[cfg(feature = "ai")]
+ pub fn refresh_ai_models(&mut self) {
+ self.ai_models_loading = true;
+ self.ai_models.clear();
+
+ // Create runtime for async call
+ let rt = tokio::runtime::Builder::new_current_thread().enable_all().build();
+
+ match rt {
+ Ok(runtime) => {
+ let result = runtime.block_on(async { list_ollama_models().await });
+
+ match result {
+ Ok(models) => {
+ self.ai_models = models;
+ self.ai_models_loading = false;
+ }
+ Err(e) => {
+ self.ai_models_loading = false;
+ self.set_status(format!("Failed to list models: {}", e));
+ }
+ }
+ }
+ Err(_) => {
+ self.ai_models_loading = false;
+ self.set_status("Failed to create async runtime");
+ }
+ }
+ }
+
+ /// Pull/download a new AI model.
+ #[cfg(feature = "ai")]
+ pub fn pull_ai_model(&mut self, model_name: &str) {
+ if model_name.is_empty() {
+ self.set_status("Please enter a model name");
+ return;
+ }
+
+ let model = model_name.to_string();
+ self.ai_pull_progress = Some(format!("Pulling {}...", model));
+ self.set_status(format!("Starting download of {}...", model));
+
+ // Create runtime for async call
+ let rt = tokio::runtime::Builder::new_current_thread().enable_all().build();
+
+ match rt {
+ Ok(runtime) => {
+ let result = runtime.block_on(async { pull_ollama_model(&model).await });
+
+ match result {
+ Ok(_) => {
+ self.ai_pull_progress = None;
+ self.set_status(format!("Successfully pulled {}", model));
+ self.ai_model_input.clear();
+ // Refresh the model list
+ self.refresh_ai_models();
+ }
+ Err(e) => {
+ self.ai_pull_progress = None;
+ self.set_status(format!("Failed to pull {}: {}", model, e));
+ }
+ }
+ }
+ Err(_) => {
+ self.ai_pull_progress = None;
+ self.set_status("Failed to create async runtime");
+ }
+ }
+ }
+
+ /// Request to delete an AI model (requires confirmation).
+ #[cfg(feature = "ai")]
+ pub fn request_delete_ai_model(&mut self) {
+ if let Some(model) = self.ai_models.get(self.ai_model_selected) {
+ // Set pending delete - user must press 'd' again to confirm
+ self.ai_delete_pending = Some(model.name.clone());
+ self.set_status(format!("Press 'd' again to delete {}, Esc to cancel", model.name));
+ }
+ }
+
+ /// Cancel pending delete.
+ #[cfg(feature = "ai")]
+ pub fn cancel_delete_ai_model(&mut self) {
+ if self.ai_delete_pending.is_some() {
+ self.ai_delete_pending = None;
+ self.set_status("Delete cancelled");
+ }
+ }
+
+ /// Confirm and execute the pending delete.
+ #[cfg(feature = "ai")]
+ pub fn confirm_delete_ai_model(&mut self) {
+ if let Some(model_name) = self.ai_delete_pending.take() {
+ // Create runtime for async call
+ let rt = tokio::runtime::Builder::new_current_thread().enable_all().build();
+
+ match rt {
+ Ok(runtime) => {
+ let result = runtime.block_on(async { delete_ollama_model(&model_name).await });
+
+ match result {
+ Ok(_) => {
+ self.set_status(format!("Deleted {}", model_name));
+ // Refresh the model list
+ self.refresh_ai_models();
+ // Adjust selection
+ if self.ai_model_selected > 0 {
+ self.ai_model_selected -= 1;
+ }
+ }
+ Err(e) => {
+ self.set_status(format!("Failed to delete {}: {}", model_name, e));
+ }
+ }
+ }
+ Err(_) => {
+ self.set_status("Failed to create async runtime");
+ }
+ }
+ }
+ }
+
+ /// Set the selected model as the default OLLAMA_MODEL.
+ #[cfg(feature = "ai")]
+ pub fn use_selected_model(&mut self) {
+ if let Some(model) = self.ai_models.get(self.ai_model_selected).cloned() {
+ std::env::set_var("OLLAMA_MODEL", &model.name);
+ self.set_status(format!("Now using {} as default model", model.name));
+ // Update AI status
+ self.ai_status = Some(format!("Ollama ({})", model.name));
+ }
+ }
+}
+
+/// List available Ollama models.
+#[cfg(feature = "ai")]
+ async fn list_ollama_models() -> anyhow::Result<Vec<OllamaModel>> {
+ let client = reqwest::Client::new();
+ let base_url =
+ std::env::var("OLLAMA_HOST").unwrap_or_else(|_| "http://localhost:11434".to_string());
+
+ #[derive(serde::Deserialize)]
+ struct OllamaListResponse {
+ models: Vec<OllamaModelInfo>,
+ }
+
+ #[derive(serde::Deserialize)]
+ struct OllamaModelInfo {
+ name: String,
+ size: u64,
+ modified_at: String,
+ }
+
+ let response = client
+ .get(format!("{}/api/tags", base_url))
+ .timeout(std::time::Duration::from_secs(10))
+ .send()
+ .await?;
+
+ if !response.status().is_success() {
+ if response.status() == reqwest::StatusCode::NOT_FOUND {
+ anyhow::bail!("Ollama not running. Start with: ollama serve");
+ }
+ anyhow::bail!("Ollama error: {}", response.status());
+ }
+
+ let result: OllamaListResponse = response.json().await?;
+
+ Ok(result
+ .models
+ .into_iter()
+ .map(|m| OllamaModel { name: m.name, size: m.size, modified_at: m.modified_at })
+ .collect())
+}
+
+/// Pull/download an Ollama model.
+#[cfg(feature = "ai")]
+async fn pull_ollama_model(model: &str) -> anyhow::Result<()> {
+ let client = reqwest::Client::new();
+ let base_url =
+ std::env::var("OLLAMA_HOST").unwrap_or_else(|_| "http://localhost:11434".to_string());
+
+ #[derive(serde::Serialize)]
+ struct PullRequest {
+ name: String,
+ stream: bool,
+ }
+
+ let request = PullRequest { name: model.to_string(), stream: false };
+
+ let response = client
+ .post(format!("{}/api/pull", base_url))
+ .json(&request)
+ .timeout(std::time::Duration::from_secs(600)) // 10 minutes for large models
+ .send()
+ .await?;
+
+ if !response.status().is_success() {
+ let status = response.status();
+ let text = response.text().await.unwrap_or_default();
+ anyhow::bail!("Pull failed ({}): {}", status, text);
+ }
+
+ Ok(())
+}
+
+/// Delete an Ollama model.
+#[cfg(feature = "ai")]
+async fn delete_ollama_model(model: &str) -> anyhow::Result<()> {
+ let client = reqwest::Client::new();
+ let base_url =
+ std::env::var("OLLAMA_HOST").unwrap_or_else(|_| "http://localhost:11434".to_string());
+
+ #[derive(serde::Serialize)]
+ struct DeleteRequest {
+ name: String,
+ }
+
+ let request = DeleteRequest { name: model.to_string() };
+
+ let response = client
+ .delete(format!("{}/api/delete", base_url))
+ .json(&request)
+ .timeout(std::time::Duration::from_secs(30))
+ .send()
+ .await?;
+
+ if !response.status().is_success() {
+ let status = response.status();
+ let text = response.text().await.unwrap_or_default();
+ anyhow::bail!("Delete failed ({}): {}", status, text);
+ }
+
+ Ok(())
}
impl Default for App {
@@ -1937,6 +2461,30 @@ impl Default for App {
degradation: crate::core::DegradationManager::new(),
offline_manager: crate::core::OfflineManager::new(),
resilience: crate::core::ResilienceManager::new(),
+ workflow_context: None,
+ #[cfg(feature = "ai")]
+ ai_chat_input: String::new(),
+ #[cfg(feature = "ai")]
+ ai_chat_history: Vec::new(),
+ #[cfg(feature = "ai")]
+ ai_models: Vec::new(),
+ #[cfg(feature = "ai")]
+ ai_models_loading: false,
+ #[cfg(feature = "ai")]
+ ai_model_selected: 0,
+ #[cfg(feature = "ai")]
+ ai_pull_progress: None,
+ #[cfg(feature = "ai")]
+ ai_model_input: String::new(),
+ #[cfg(feature = "ai")]
+ ai_delete_pending: None,
+ #[cfg(feature = "ai")]
+ ai_thinking: false,
+ #[cfg(feature = "ai")]
+ ai_chat_scroll: 0,
+ spinner_frame: 0,
+ trust_store: TrustStore::default(),
+ trust_selected: 0,
}
})
}
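All three model-management helpers above are thin wrappers over Ollama's local REST API. For reference, the listing call can be reproduced standalone — a sketch assuming a default-port Ollama daemon and `reqwest` built with its `json` feature:

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct Tags {
    models: Vec<Model>,
}

#[derive(Deserialize)]
struct Model {
    name: String,
    size: u64,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Same endpoint list_ollama_models() hits; assumes `ollama serve` is running.
    let tags: Tags = reqwest::get("http://localhost:11434/api/tags")
        .await?
        .error_for_status()?
        .json()
        .await?;
    for m in tags.models {
        println!("{:>8} MiB  {}", m.size / (1024 * 1024), m.name);
    }
    Ok(())
}
```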
diff --git a/src/commands/mod.rs b/src/commands/mod.rs
new file mode 100644
index 0000000..ca436dd
--- /dev/null
+++ b/src/commands/mod.rs
@@ -0,0 +1,32 @@
+//! Universal slash command system for AI IDEs.
+//!
+//! This module provides the infrastructure to generate and install
+//! Palrun commands as slash commands in any AI IDE (Claude Code,
+//! Cursor, Windsurf, Continue.dev, Aider, etc.).
+//!
+//! ## Architecture
+//!
+//! - `CommandTarget` trait: Abstracts over IDE-specific command formats
+//! - `SlashCommandRegistry`: Manages available commands and targets
+//! - `PalrunCommand`: Represents a command that can be exposed to IDEs
+//!
+//! ## Usage
+//!
+//! ```bash
+//! # Install commands to all detected IDEs
+//! palrun commands install --all
+//!
+//! # Install to specific IDE
+//! palrun commands install --target claude
+//!
+//! # List available targets
+//! palrun commands list
+//! ```
+
+mod registry;
+mod target;
+pub mod targets;
+
+pub use registry::{SlashCommandRegistry, PALRUN_COMMANDS};
+pub use target::{CommandArg, CommandCategory, CommandTarget, PalrunCommand};
+pub use targets::default_registry;
diff --git a/src/commands/registry.rs b/src/commands/registry.rs
new file mode 100644
index 0000000..c5cf0a2
--- /dev/null
+++ b/src/commands/registry.rs
@@ -0,0 +1,293 @@
+//! Command registry for managing IDE targets.
+//!
+//! The registry maintains a list of all Palrun commands and
+//! can install them to any detected IDE.
+
+use std::path::Path;
+
+use once_cell::sync::Lazy;
+
+use super::target::{CommandArg, CommandCategory, CommandTarget, PalrunCommand};
+
+/// All available Palrun commands.
+ pub static PALRUN_COMMANDS: Lazy<Vec<PalrunCommand>> = Lazy::new(|| {
+ vec![
+ // Project commands
+ PalrunCommand::new(
+ "new-project",
+ "Initialize new Palrun project with PROJECT.md and STATE.md",
+ "palrun project new",
+ CommandCategory::Project,
+ ),
+ PalrunCommand::new(
+ "analyze",
+ "Analyze codebase and generate CODEBASE.md",
+ "palrun analyze",
+ CommandCategory::Project,
+ ),
+ // Planning commands
+ PalrunCommand::new(
+ "create-roadmap",
+ "Create ROADMAP.md from PROJECT.md",
+ "palrun roadmap create",
+ CommandCategory::Planning,
+ ),
+ PalrunCommand::new(
+ "plan-phase",
+ "Create PLAN.md for a specific phase",
+ "palrun plan phase",
+ CommandCategory::Planning,
+ )
+ .with_arg(CommandArg::required("phase", "Phase number to plan")),
+ // Execution commands
+ PalrunCommand::new(
+ "execute",
+ "Execute the current plan",
+ "palrun execute",
+ CommandCategory::Execution,
+ )
+ .with_arg(CommandArg::optional("task", "Specific task number to execute")),
+ PalrunCommand::new(
+ "run",
+ "Run a specific command from the palette",
+ "palrun run",
+ CommandCategory::Execution,
+ )
+ .with_arg(CommandArg::required("command", "Command to run")),
+ // Status commands
+ PalrunCommand::new(
+ "status",
+ "Show current project status",
+ "palrun status",
+ CommandCategory::Status,
+ ),
+ PalrunCommand::new(
+ "verify",
+ "Run verification steps for current task",
+ "palrun verify",
+ CommandCategory::Status,
+ ),
+ // Utility commands
+ PalrunCommand::new(
+ "ai-ask",
+ "Ask AI a question about the codebase",
+ "palrun ai ask",
+ CommandCategory::Utility,
+ )
+ .with_arg(CommandArg::required("question", "Question to ask")),
+ PalrunCommand::new(
+ "ai-generate",
+ "Generate a command from natural language",
+ "palrun ai generate",
+ CommandCategory::Utility,
+ )
+ .with_arg(CommandArg::required("prompt", "What you want to do")),
+ ]
+});
+
+/// Registry for managing command targets.
+pub struct SlashCommandRegistry {
+ targets: Vec<Box<dyn CommandTarget>>,
+}
+
+impl SlashCommandRegistry {
+ /// Create a new empty registry.
+ pub fn new() -> Self {
+ Self { targets: Vec::new() }
+ }
+
+ /// Register a command target.
+ pub fn register(&mut self, target: Box<dyn CommandTarget>) {
+ self.targets.push(target);
+ }
+
+ /// Get all registered targets.
+ pub fn targets(&self) -> &[Box<dyn CommandTarget>] {
+ &self.targets
+ }
+
+ /// Detect which IDEs are installed.
+ pub fn detect_installed(&self) -> Vec<&dyn CommandTarget> {
+ self.targets.iter().filter(|t| t.detect()).map(|t| t.as_ref()).collect()
+ }
+
+ /// Get a target by name.
+ pub fn get(&self, name: &str) -> Option<&dyn CommandTarget> {
+ self.targets.iter().find(|t| t.name() == name).map(|t| t.as_ref())
+ }
+
+ /// Install commands to all detected IDEs.
+ pub fn install_all(&self) -> anyhow::Result<Vec<String>> {
+ let mut installed = Vec::new();
+ for target in self.detect_installed() {
+ self.install_target(target)?;
+ installed.push(target.name().to_string());
+ }
+ Ok(installed)
+ }
+
+ /// Install commands to a specific target.
+ pub fn install_to(&self, name: &str) -> anyhow::Result<()> {
+ let target = self.get(name).ok_or_else(|| anyhow::anyhow!("Unknown target: {}", name))?;
+
+ if !target.detect() {
+ anyhow::bail!("{} is not installed on this system", target.display_name());
+ }
+
+ self.install_target(target)
+ }
+
+ /// Install commands to a target.
+ fn install_target(&self, target: &dyn CommandTarget) -> anyhow::Result<()> {
+ let path = target.install_path()?;
+ std::fs::create_dir_all(&path)?;
+
+ for cmd in PALRUN_COMMANDS.iter() {
+ let content = target.generate(cmd)?;
+ let filename = target.filename(cmd);
+ std::fs::write(path.join(&filename), content)?;
+ }
+
+ Ok(())
+ }
+
+ /// List installed commands for a target.
+ pub fn list_installed(&self, name: &str) -> anyhow::Result<Vec<String>> {
+ let target = self.get(name).ok_or_else(|| anyhow::anyhow!("Unknown target: {}", name))?;
+
+ let path = target.install_path()?;
+ if !path.exists() {
+ return Ok(Vec::new());
+ }
+
+ let mut commands = Vec::new();
+ for entry in std::fs::read_dir(&path)? {
+ let entry = entry?;
+ if let Some(name) = entry.file_name().to_str() {
+ if name.starts_with("palrun-") {
+ commands.push(name.to_string());
+ }
+ }
+ }
+
+ Ok(commands)
+ }
+
+ /// Sync commands (reinstall all).
+ pub fn sync(&self) -> anyhow::Result<Vec<String>> {
+ self.install_all()
+ }
+
+ /// Uninstall commands from a target.
+ pub fn uninstall(&self, name: &str) -> anyhow::Result<()> {
+ let target = self.get(name).ok_or_else(|| anyhow::anyhow!("Unknown target: {}", name))?;
+
+ let path = target.install_path()?;
+ if path.exists() {
+ // Only remove palrun command files
+ for entry in std::fs::read_dir(&path)? {
+ let entry = entry?;
+ if let Some(name) = entry.file_name().to_str() {
+ if name.starts_with("palrun-") {
+ std::fs::remove_file(entry.path())?;
+ }
+ }
+ }
+
+ // Remove directory if empty
+ if is_dir_empty(&path)? {
+ std::fs::remove_dir(&path)?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Default for SlashCommandRegistry {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+ fn is_dir_empty(path: &Path) -> anyhow::Result<bool> {
+ Ok(std::fs::read_dir(path)?.next().is_none())
+}
+
+#[cfg(test)]
+mod tests {
+ use std::path::PathBuf;
+
+ use super::*;
+
+ struct MockTarget {
+ installed: bool,
+ }
+
+ impl MockTarget {
+ fn new(installed: bool) -> Self {
+ Self { installed }
+ }
+ }
+
+ impl CommandTarget for MockTarget {
+ fn name(&self) -> &str {
+ "mock"
+ }
+
+ fn detect(&self) -> bool {
+ self.installed
+ }
+
+ fn install_path(&self) -> anyhow::Result<PathBuf> {
+ Ok(PathBuf::from("/tmp/mock-commands"))
+ }
+
+ fn generate(&self, cmd: &PalrunCommand) -> anyhow::Result<String> {
+ Ok(format!(
+ "# {}\n\n{}\n\n```bash\n{}\n```",
+ cmd.name, cmd.description, cmd.palrun_command
+ ))
+ }
+ }
+
+ #[test]
+ fn test_palrun_commands_defined() {
+ assert!(!PALRUN_COMMANDS.is_empty());
+ assert!(PALRUN_COMMANDS.iter().any(|c| c.name == "new-project"));
+ assert!(PALRUN_COMMANDS.iter().any(|c| c.name == "analyze"));
+ }
+
+ #[test]
+ fn test_registry_new() {
+ let registry = SlashCommandRegistry::new();
+ assert!(registry.targets().is_empty());
+ }
+
+ #[test]
+ fn test_registry_register() {
+ let mut registry = SlashCommandRegistry::new();
+ registry.register(Box::new(MockTarget::new(true)));
+ assert_eq!(registry.targets().len(), 1);
+ }
+
+ #[test]
+ fn test_registry_detect_installed() {
+ let mut registry = SlashCommandRegistry::new();
+ registry.register(Box::new(MockTarget::new(true)));
+ registry.register(Box::new(MockTarget::new(false)));
+
+ // Only the target whose detect() returns true is reported
+ let installed = registry.detect_installed();
+ assert_eq!(installed.len(), 1);
+ }
+
+ #[test]
+ fn test_registry_get() {
+ let mut registry = SlashCommandRegistry::new();
+ registry.register(Box::new(MockTarget::new(true)));
+
+ assert!(registry.get("mock").is_some());
+ assert!(registry.get("nonexistent").is_none());
+ }
+}
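In practice the registry is obtained from `default_registry()` (re-exported by `commands::targets`) and pointed at every detected IDE in one call. A sketch, with the crate path assumed:

```rust
use palrun::commands::default_registry; // assumed import path

fn main() -> anyhow::Result<()> {
    let registry = default_registry();
    let installed = registry.install_all()?;
    if installed.is_empty() {
        println!("No supported AI IDEs detected.");
    } else {
        println!("Installed palrun commands for: {}", installed.join(", "));
    }
    Ok(())
}
```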
diff --git a/src/commands/target.rs b/src/commands/target.rs
new file mode 100644
index 0000000..04e7a8e
--- /dev/null
+++ b/src/commands/target.rs
@@ -0,0 +1,217 @@
+//! Command target abstraction for IDE-agnostic command generation.
+//!
+//! Defines the `CommandTarget` trait that allows Palrun to generate
+//! slash commands for any AI IDE (Claude Code, Cursor, Windsurf, etc.).
+
+use std::path::PathBuf;
+
+use serde::{Deserialize, Serialize};
+
+/// Trait for IDE-specific command targets.
+///
+/// Each supported IDE implements this trait to define how
+/// Palrun commands should be generated and installed.
+pub trait CommandTarget: Send + Sync {
+ /// Name of the target IDE (e.g., "claude", "cursor").
+ fn name(&self) -> &str;
+
+ /// Human-readable display name.
+ fn display_name(&self) -> &str {
+ self.name()
+ }
+
+ /// Check if this IDE is installed on the system.
+ fn detect(&self) -> bool;
+
+ /// Where to install commands for this IDE.
+ fn install_path(&self) -> anyhow::Result<PathBuf>;
+
+ /// Generate command file content in IDE-specific format.
+ fn generate(&self, command: &PalrunCommand) -> anyhow::Result<String>;
+
+ /// File extension for command files.
+ fn file_extension(&self) -> &str {
+ "md"
+ }
+
+ /// Generate filename for a command.
+ fn filename(&self, command: &PalrunCommand) -> String {
+ format!("palrun-{}.{}", command.name, self.file_extension())
+ }
+}
+
+/// A Palrun command that can be exposed to IDEs.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PalrunCommand {
+ /// Command name (used in slash command, e.g., "new-project").
+ pub name: String,
+
+ /// Human-readable description.
+ pub description: String,
+
+ /// The actual palrun CLI command to run.
+ pub palrun_command: String,
+
+ /// Command category.
+ pub category: CommandCategory,
+
+ /// Optional arguments the command accepts.
+ pub args: Vec<CommandArg>,
+}
+
+impl PalrunCommand {
+ /// Create a new command.
+ pub fn new(
+ name: impl Into<String>,
+ description: impl Into<String>,
+ palrun_command: impl Into<String>,
+ category: CommandCategory,
+ ) -> Self {
+ Self {
+ name: name.into(),
+ description: description.into(),
+ palrun_command: palrun_command.into(),
+ category,
+ args: Vec::new(),
+ }
+ }
+
+ /// Add an argument.
+ pub fn with_arg(mut self, arg: CommandArg) -> Self {
+ self.args.push(arg);
+ self
+ }
+}
+
+/// Command category for organization.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum CommandCategory {
+ /// Project commands (new, analyze).
+ Project,
+ /// Planning commands (plan, roadmap).
+ Planning,
+ /// Execution commands (execute, run).
+ Execution,
+ /// Status commands (status, verify).
+ Status,
+ /// Utility commands.
+ Utility,
+}
+
+impl CommandCategory {
+ /// Get display name for the category.
+ pub fn display_name(&self) -> &str {
+ match self {
+ Self::Project => "Project",
+ Self::Planning => "Planning",
+ Self::Execution => "Execution",
+ Self::Status => "Status",
+ Self::Utility => "Utility",
+ }
+ }
+}
+
+/// A command argument.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CommandArg {
+ /// Argument name.
+ pub name: String,
+
+ /// Description.
+ pub description: String,
+
+ /// Whether the argument is required.
+ pub required: bool,
+
+ /// Default value if any.
+ pub default: Option<String>,
+}
+
+impl CommandArg {
+ /// Create a new required argument.
+ pub fn required(name: impl Into<String>, description: impl Into<String>) -> Self {
+ Self { name: name.into(), description: description.into(), required: true, default: None }
+ }
+
+ /// Create a new optional argument.
+ pub fn optional(name: impl Into<String>, description: impl Into<String>) -> Self {
+ Self { name: name.into(), description: description.into(), required: false, default: None }
+ }
+
+ /// Set a default value.
+ pub fn with_default(mut self, default: impl Into<String>) -> Self {
+ self.default = Some(default.into());
+ self
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ struct MockTarget;
+
+ impl CommandTarget for MockTarget {
+ fn name(&self) -> &str {
+ "mock"
+ }
+
+ fn detect(&self) -> bool {
+ true
+ }
+
+ fn install_path(&self) -> anyhow::Result<PathBuf> {
+ Ok(PathBuf::from("/tmp/mock"))
+ }
+
+ fn generate(&self, cmd: &PalrunCommand) -> anyhow::Result<String> {
+ Ok(format!("# {}\n{}", cmd.name, cmd.description))
+ }
+ }
+
+ #[test]
+ fn test_mock_target() {
+ let target = MockTarget;
+ assert_eq!(target.name(), "mock");
+ assert!(target.detect());
+ assert_eq!(target.file_extension(), "md");
+ }
+
+ #[test]
+ fn test_mock_target_generate() {
+ let target = MockTarget;
+ let cmd =
+ PalrunCommand::new("test", "Test command", "palrun test", CommandCategory::Utility);
+ let content = target.generate(&cmd).unwrap();
+ assert!(content.contains("# test"));
+ assert!(content.contains("Test command"));
+ }
+
+ #[test]
+ fn test_palrun_command() {
+ let cmd = PalrunCommand::new(
+ "analyze",
+ "Analyze the codebase",
+ "palrun analyze",
+ CommandCategory::Project,
+ )
+ .with_arg(CommandArg::optional("verbose", "Enable verbose output"));
+
+ assert_eq!(cmd.name, "analyze");
+ assert_eq!(cmd.args.len(), 1);
+ }
+
+ #[test]
+ fn test_command_category() {
+ assert_eq!(CommandCategory::Project.display_name(), "Project");
+ assert_eq!(CommandCategory::Planning.display_name(), "Planning");
+ }
+
+ #[test]
+ fn test_command_arg() {
+ let arg = CommandArg::required("name", "The name").with_default("default");
+
+ assert!(arg.required);
+ assert_eq!(arg.default, Some("default".to_string()));
+ }
+}
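Adding support for a new IDE means implementing `name`, `detect`, `install_path`, and `generate`; the remaining methods have defaults. A hypothetical target for the Zed editor, purely to illustrate the trait surface — the detection heuristic and paths are invented, not Zed's real command layout:

```rust
use std::path::PathBuf;

use palrun::commands::{CommandTarget, PalrunCommand}; // assumed import path

/// Hypothetical Zed target -- illustrative only.
struct ZedTarget;

impl CommandTarget for ZedTarget {
    fn name(&self) -> &str {
        "zed"
    }

    fn detect(&self) -> bool {
        // Invented heuristic: treat a Zed config directory as "installed".
        dirs::home_dir().map(|h| h.join(".config/zed").exists()).unwrap_or(false)
    }

    fn install_path(&self) -> anyhow::Result<PathBuf> {
        let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("No home directory found"))?;
        Ok(home.join(".config/zed/commands/palrun"))
    }

    fn generate(&self, cmd: &PalrunCommand) -> anyhow::Result<String> {
        let fence = "```";
        Ok(format!(
            "# /palrun-{}\n\n{}\n\n{fence}bash\n{}\n{fence}\n",
            cmd.name, cmd.description, cmd.palrun_command
        ))
    }
}
```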
diff --git a/src/commands/targets/aider.rs b/src/commands/targets/aider.rs
new file mode 100644
index 0000000..710911e
--- /dev/null
+++ b/src/commands/targets/aider.rs
@@ -0,0 +1,150 @@
+//! Aider command target.
+//!
+//! Generates slash commands for Aider in Markdown format.
+
+use std::path::PathBuf;
+
+use super::super::target::{CommandTarget, PalrunCommand};
+
+/// Aider command target.
+///
+/// Aider is an AI pair programming tool that works in the terminal.
+/// It uses markdown files for custom commands, loaded from `.aider/commands/`.
+pub struct AiderTarget;
+
+impl CommandTarget for AiderTarget {
+ fn name(&self) -> &str {
+ "aider"
+ }
+
+ fn display_name(&self) -> &str {
+ "Aider"
+ }
+
+ fn detect(&self) -> bool {
+ // Check for .aider directory in current project
+ let local = std::env::current_dir().map(|cwd| cwd.join(".aider").exists()).unwrap_or(false);
+
+ // Check for global .aider directory
+ let global = dirs::home_dir().map(|h| h.join(".aider").exists()).unwrap_or(false);
+
+ local || global
+ }
+
+ fn install_path(&self) -> anyhow::Result<PathBuf> {
+ // Prefer project-level if .aider exists
+ if let Ok(cwd) = std::env::current_dir() {
+ let project_aider = cwd.join(".aider");
+ if project_aider.exists() {
+ return Ok(project_aider.join("commands/palrun"));
+ }
+ }
+
+ // Fall back to global
+ let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("No home directory found"))?;
+ Ok(home.join(".aider/commands/palrun"))
+ }
+
+ fn generate(&self, cmd: &PalrunCommand) -> anyhow::Result<String> {
+ let mut content = String::new();
+
+ // Command header
+ content.push_str(&format!("# /palrun-{}\n\n", cmd.name));
+
+ // Description
+ content.push_str(&format!("{}\n\n", cmd.description));
+
+ // Arguments section
+ if !cmd.args.is_empty() {
+ content.push_str("## Arguments\n\n");
+ for arg in &cmd.args {
+ let req = if arg.required { "required" } else { "optional" };
+ if let Some(default) = &arg.default {
+ content.push_str(&format!(
+ "- `{}` ({}) - {} [default: {}]\n",
+ arg.name, req, arg.description, default
+ ));
+ } else {
+ content
+ .push_str(&format!("- `{}` ({}) - {}\n", arg.name, req, arg.description));
+ }
+ }
+ content.push('\n');
+ }
+
+ // Usage section
+ content.push_str("## Usage\n\n");
+ content.push_str("Run this command in your terminal:\n\n");
+ content.push_str("```bash\n");
+ content.push_str(&cmd.palrun_command);
+ content.push_str("\n```\n\n");
+
+ // Category tag
+ content.push_str(&format!("---\n*Category: {}*\n", cmd.category.display_name()));
+
+ Ok(content)
+ }
+
+ fn file_extension(&self) -> &str {
+ "md"
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::commands::{CommandArg, CommandCategory};
+
+ use super::*;
+
+ #[test]
+ fn test_aider_target_name() {
+ let target = AiderTarget;
+ assert_eq!(target.name(), "aider");
+ assert_eq!(target.display_name(), "Aider");
+ }
+
+ #[test]
+ fn test_aider_target_extension() {
+ let target = AiderTarget;
+ assert_eq!(target.file_extension(), "md");
+ }
+
+ #[test]
+ fn test_aider_target_generate() {
+ let target = AiderTarget;
+ let cmd = PalrunCommand {
+ name: "test".to_string(),
+ description: "Test command".to_string(),
+ palrun_command: "palrun test".to_string(),
+ category: CommandCategory::Utility,
+ args: Vec::new(),
+ };
+
+ let content = target.generate(&cmd).unwrap();
+ assert!(content.contains("# /palrun-test"));
+ assert!(content.contains("Test command"));
+ assert!(content.contains("```bash"));
+ assert!(content.contains("palrun test"));
+ assert!(content.contains("Category: Utility"));
+ }
+
+ #[test]
+ fn test_aider_target_generate_with_args() {
+ let target = AiderTarget;
+ let cmd = PalrunCommand {
+ name: "build".to_string(),
+ description: "Build the project".to_string(),
+ palrun_command: "palrun build".to_string(),
+ category: CommandCategory::Execution,
+ args: vec![
+ CommandArg::optional("target", "Build target"),
+ CommandArg::required("config", "Config file"),
+ ],
+ };
+
+ let content = target.generate(&cmd).unwrap();
+ assert!(content.contains("## Arguments"));
+ assert!(content.contains("`target` (optional)"));
+ assert!(content.contains("`config` (required)"));
+ }
+}
diff --git a/src/commands/targets/claude.rs b/src/commands/targets/claude.rs
new file mode 100644
index 0000000..5ec2b80
--- /dev/null
+++ b/src/commands/targets/claude.rs
@@ -0,0 +1,125 @@
+//! Claude Code command target.
+//!
+//! Generates slash commands for Claude Code in Markdown + YAML frontmatter format.
+
+use std::path::PathBuf;
+
+use super::super::target::{CommandTarget, PalrunCommand};
+
+/// Claude Code command target.
+///
+/// Claude Code uses Markdown files with YAML frontmatter for slash commands.
+/// Commands are installed to `~/.claude/commands/palrun/`.
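+///
+/// An abbreviated sketch of a generated file (illustrative):
+///
+/// ```text
+/// ---
+/// name: palrun:analyze
+/// description: Analyze the codebase
+/// ---
+///
+/// # analyze
+/// ...
+/// ```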
+pub struct ClaudeCodeTarget;
+
+impl CommandTarget for ClaudeCodeTarget {
+ fn name(&self) -> &str {
+ "claude"
+ }
+
+ fn display_name(&self) -> &str {
+ "Claude Code"
+ }
+
+ fn detect(&self) -> bool {
+ dirs::home_dir().map(|h| h.join(".claude").exists()).unwrap_or(false)
+ }
+
+ fn install_path(&self) -> anyhow::Result<PathBuf> {
+ let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("No home directory found"))?;
+ Ok(home.join(".claude/commands/palrun"))
+ }
+
+ fn generate(&self, cmd: &PalrunCommand) -> anyhow::Result<String> {
+ let mut content = String::new();
+
+ // YAML frontmatter
+ content.push_str("---\n");
+ content.push_str(&format!("name: palrun:{}\n", cmd.name));
+ content.push_str(&format!("description: {}\n", cmd.description));
+ content.push_str("---\n\n");
+
+ // Command title
+ content.push_str(&format!("# {}\n\n", cmd.name));
+
+ // Description
+ content.push_str(&format!("{}\n\n", cmd.description));
+
+ // Arguments if any
+ if !cmd.args.is_empty() {
+ content.push_str("## Arguments\n\n");
+ for arg in &cmd.args {
+ let req = if arg.required { "(required)" } else { "(optional)" };
+ content.push_str(&format!("- `{}` {} - {}\n", arg.name, req, arg.description));
+ }
+ content.push('\n');
+ }
+
+ // Command to run
+ content.push_str("## Command\n\n");
+ content.push_str("```bash\n");
+ content.push_str(&cmd.palrun_command);
+ content.push_str("\n```\n");
+
+ Ok(content)
+ }
+
+ fn file_extension(&self) -> &str {
+ "md"
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::commands::CommandCategory;
+
+ use super::*;
+
+ #[test]
+ fn test_claude_target_name() {
+ let target = ClaudeCodeTarget;
+ assert_eq!(target.name(), "claude");
+ assert_eq!(target.display_name(), "Claude Code");
+ }
+
+ #[test]
+ fn test_claude_target_extension() {
+ let target = ClaudeCodeTarget;
+ assert_eq!(target.file_extension(), "md");
+ }
+
+ #[test]
+ fn test_claude_target_generate() {
+ let target = ClaudeCodeTarget;
+ let cmd = PalrunCommand {
+ name: "test".to_string(),
+ description: "Test command".to_string(),
+ palrun_command: "palrun test".to_string(),
+ category: CommandCategory::Utility,
+ args: Vec::new(),
+ };
+
+ let content = target.generate(&cmd).unwrap();
+ assert!(content.contains("---"));
+ assert!(content.contains("name: palrun:test"));
+ assert!(content.contains("description: Test command"));
+ assert!(content.contains("```bash"));
+ assert!(content.contains("palrun test"));
+ }
+
+ #[test]
+ fn test_claude_target_generate_with_args() {
+ let target = ClaudeCodeTarget;
+ let cmd = PalrunCommand {
+ name: "analyze".to_string(),
+ description: "Analyze codebase".to_string(),
+ palrun_command: "palrun analyze".to_string(),
+ category: CommandCategory::Project,
+ args: vec![crate::commands::CommandArg::optional("verbose", "Enable verbose output")],
+ };
+
+ let content = target.generate(&cmd).unwrap();
+ assert!(content.contains("## Arguments"));
+ assert!(content.contains("`verbose`"));
+ }
+}
diff --git a/src/commands/targets/continue_dev.rs b/src/commands/targets/continue_dev.rs
new file mode 100644
index 0000000..4843033
--- /dev/null
+++ b/src/commands/targets/continue_dev.rs
@@ -0,0 +1,108 @@
+//! Continue.dev command target.
+//!
+//! Generates slash commands for Continue.dev in its configuration format.
+
+use std::path::PathBuf;
+
+use serde_json::json;
+
+use super::super::target::{CommandTarget, PalrunCommand};
+
+/// Continue.dev command target.
+///
+/// Continue is an open-source AI code assistant that works with any LLM.
+/// Commands are defined in `~/.continue/config.json` under the `slashCommands` array.
+/// We generate individual command files that can be imported.
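+///
+/// An abbreviated sketch of a generated command file (illustrative; key order
+/// may differ in serialized output):
+///
+/// ```text
+/// {
+///   "name": "analyze",
+///   "description": "Analyze project",
+///   "command": { "type": "shell", "command": "palrun analyze" },
+///   "params": [],
+///   "source": "palrun"
+/// }
+/// ```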
+pub struct ContinueDevTarget;
+
+impl CommandTarget for ContinueDevTarget {
+ fn name(&self) -> &str {
+ "continue"
+ }
+
+ fn display_name(&self) -> &str {
+ "Continue.dev"
+ }
+
+ fn detect(&self) -> bool {
+ // Check for ~/.continue directory
+ dirs::home_dir().map(|h| h.join(".continue").exists()).unwrap_or(false)
+ }
+
+ fn install_path(&self) -> anyhow::Result<PathBuf> {
+ let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("No home directory found"))?;
+ Ok(home.join(".continue/commands/palrun"))
+ }
+
+ fn generate(&self, cmd: &PalrunCommand) -> anyhow::Result<String> {
+ // Continue.dev uses a specific format for slash commands
+ // The command runs a shell command and streams the output
+ let args: Vec<_> = cmd
+ .args
+ .iter()
+ .map(|a| {
+ json!({
+ "name": a.name,
+ "description": a.description,
+ "required": a.required
+ })
+ })
+ .collect();
+
+ let command = json!({
+ "name": cmd.name,
+ "description": cmd.description,
+ "command": {
+ "type": "shell",
+ "command": cmd.palrun_command
+ },
+ "params": args,
+ "source": "palrun"
+ });
+
+ Ok(serde_json::to_string_pretty(&command)?)
+ }
+
+ fn file_extension(&self) -> &str {
+ "json"
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::commands::CommandCategory;
+
+ use super::*;
+
+ #[test]
+ fn test_continue_target_name() {
+ let target = ContinueDevTarget;
+ assert_eq!(target.name(), "continue");
+ assert_eq!(target.display_name(), "Continue.dev");
+ }
+
+ #[test]
+ fn test_continue_target_extension() {
+ let target = ContinueDevTarget;
+ assert_eq!(target.file_extension(), "json");
+ }
+
+ #[test]
+ fn test_continue_target_generate() {
+ let target = ContinueDevTarget;
+ let cmd = PalrunCommand {
+ name: "analyze".to_string(),
+ description: "Analyze project".to_string(),
+ palrun_command: "palrun analyze".to_string(),
+ category: CommandCategory::Project,
+ args: Vec::new(),
+ };
+
+ let content = target.generate(&cmd).unwrap();
+ assert!(content.contains("\"name\": \"analyze\""));
+ assert!(content.contains("\"description\": \"Analyze project\""));
+ assert!(content.contains("\"type\": \"shell\""));
+ assert!(content.contains("\"command\": \"palrun analyze\""));
+ assert!(content.contains("\"source\": \"palrun\""));
+ }
+}
diff --git a/src/commands/targets/cursor.rs b/src/commands/targets/cursor.rs
new file mode 100644
index 0000000..480443a
--- /dev/null
+++ b/src/commands/targets/cursor.rs
@@ -0,0 +1,117 @@
+//! Cursor command target.
+//!
+//! Generates slash commands for Cursor in JSON format.
+
+use std::path::PathBuf;
+
+use serde_json::json;
+
+use super::super::target::{CommandTarget, PalrunCommand};
+
+/// Cursor IDE command target.
+///
+/// Cursor uses JSON files for custom commands.
+/// Commands are installed to `.cursor/commands/` in the project directory
+/// or `~/.cursor/commands/` for global commands.
+pub struct CursorTarget;
+
+impl CommandTarget for CursorTarget {
+ fn name(&self) -> &str {
+ "cursor"
+ }
+
+ fn display_name(&self) -> &str {
+ "Cursor"
+ }
+
+ fn detect(&self) -> bool {
+ // Check for global .cursor directory
+ let global = dirs::home_dir().map(|h| h.join(".cursor").exists()).unwrap_or(false);
+
+ // Check for project-level .cursor directory
+ let local =
+ std::env::current_dir().map(|cwd| cwd.join(".cursor").exists()).unwrap_or(false);
+
+ global || local
+ }
+
+ fn install_path(&self) -> anyhow::Result<PathBuf> {
+ // Prefer project-level if .cursor exists, otherwise use global
+ if let Ok(cwd) = std::env::current_dir() {
+ let project_cursor = cwd.join(".cursor");
+ if project_cursor.exists() {
+ return Ok(project_cursor.join("commands/palrun"));
+ }
+ }
+
+ // Fall back to global
+ let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("No home directory found"))?;
+ Ok(home.join(".cursor/commands/palrun"))
+ }
+
+ fn generate(&self, cmd: &PalrunCommand) -> anyhow::Result<String> {
+ let args: Vec<_> = cmd
+ .args
+ .iter()
+ .map(|a| {
+ json!({
+ "name": a.name,
+ "description": a.description,
+ "required": a.required,
+ "default": a.default
+ })
+ })
+ .collect();
+
+ let command = json!({
+ "name": format!("palrun:{}", cmd.name),
+ "description": cmd.description,
+ "command": cmd.palrun_command,
+ "category": cmd.category.display_name(),
+ "arguments": args
+ });
+
+ Ok(serde_json::to_string_pretty(&command)?)
+ }
+
+ fn file_extension(&self) -> &str {
+ "json"
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::commands::CommandCategory;
+
+ use super::*;
+
+ #[test]
+ fn test_cursor_target_name() {
+ let target = CursorTarget;
+ assert_eq!(target.name(), "cursor");
+ assert_eq!(target.display_name(), "Cursor");
+ }
+
+ #[test]
+ fn test_cursor_target_extension() {
+ let target = CursorTarget;
+ assert_eq!(target.file_extension(), "json");
+ }
+
+ #[test]
+ fn test_cursor_target_generate() {
+ let target = CursorTarget;
+ let cmd = PalrunCommand {
+ name: "test".to_string(),
+ description: "Test command".to_string(),
+ palrun_command: "palrun test".to_string(),
+ category: CommandCategory::Utility,
+ args: Vec::new(),
+ };
+
+ let content = target.generate(&cmd).unwrap();
+ assert!(content.contains("\"name\": \"palrun:test\""));
+ assert!(content.contains("\"description\": \"Test command\""));
+ assert!(content.contains("\"command\": \"palrun test\""));
+ }
+}
diff --git a/src/commands/targets/mod.rs b/src/commands/targets/mod.rs
new file mode 100644
index 0000000..2868fec
--- /dev/null
+++ b/src/commands/targets/mod.rs
@@ -0,0 +1,28 @@
+//! IDE-specific command target implementations.
+//!
+//! Each supported IDE has its own implementation of the `CommandTarget` trait.
+
+mod aider;
+mod claude;
+mod continue_dev;
+mod cursor;
+mod windsurf;
+
+pub use aider::AiderTarget;
+pub use claude::ClaudeCodeTarget;
+pub use continue_dev::ContinueDevTarget;
+pub use cursor::CursorTarget;
+pub use windsurf::WindsurfTarget;
+
+use super::SlashCommandRegistry;
+
+/// Create a registry with all built-in targets.
+pub fn default_registry() -> SlashCommandRegistry {
+ let mut registry = SlashCommandRegistry::new();
+ registry.register(Box::new(ClaudeCodeTarget));
+ registry.register(Box::new(CursorTarget));
+ registry.register(Box::new(WindsurfTarget));
+ registry.register(Box::new(ContinueDevTarget));
+ registry.register(Box::new(AiderTarget));
+ registry
+}
diff --git a/src/commands/targets/windsurf.rs b/src/commands/targets/windsurf.rs
new file mode 100644
index 0000000..6a122e8
--- /dev/null
+++ b/src/commands/targets/windsurf.rs
@@ -0,0 +1,119 @@
+//! Windsurf command target.
+//!
+//! Generates slash commands for Windsurf (Codeium's AI IDE) in JSON format.
+
+use std::path::PathBuf;
+
+use serde_json::json;
+
+use super::super::target::{CommandTarget, PalrunCommand};
+
+/// Windsurf IDE command target.
+///
+/// Windsurf is Codeium's AI-powered IDE, based on VSCode.
+/// Commands are installed to `.windsurf/commands/` in the project directory
+/// or `~/.windsurf/commands/` for global commands.
+pub struct WindsurfTarget;
+
+impl CommandTarget for WindsurfTarget {
+ fn name(&self) -> &str {
+ "windsurf"
+ }
+
+ fn display_name(&self) -> &str {
+ "Windsurf"
+ }
+
+ fn detect(&self) -> bool {
+ // Check for global .windsurf directory
+ let global = dirs::home_dir().map(|h| h.join(".windsurf").exists()).unwrap_or(false);
+
+ // Check for project-level .windsurf directory
+ let local =
+ std::env::current_dir().map(|cwd| cwd.join(".windsurf").exists()).unwrap_or(false);
+
+ global || local
+ }
+
+ fn install_path(&self) -> anyhow::Result<PathBuf> {
+ // Prefer project-level if .windsurf exists, otherwise use global
+ if let Ok(cwd) = std::env::current_dir() {
+ let project_windsurf = cwd.join(".windsurf");
+ if project_windsurf.exists() {
+ return Ok(project_windsurf.join("commands/palrun"));
+ }
+ }
+
+ // Fall back to global
+ let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("No home directory found"))?;
+ Ok(home.join(".windsurf/commands/palrun"))
+ }
+
+ fn generate(&self, cmd: &PalrunCommand) -> anyhow::Result<String> {
+ let args: Vec<_> = cmd
+ .args
+ .iter()
+ .map(|a| {
+ json!({
+ "name": a.name,
+ "description": a.description,
+ "required": a.required,
+ "default": a.default
+ })
+ })
+ .collect();
+
+ let command = json!({
+ "name": format!("palrun:{}", cmd.name),
+ "description": cmd.description,
+ "command": cmd.palrun_command,
+ "category": cmd.category.display_name(),
+ "arguments": args,
+ "source": "palrun"
+ });
+
+ Ok(serde_json::to_string_pretty(&command)?)
+ }
+
+ fn file_extension(&self) -> &str {
+ "json"
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::commands::CommandCategory;
+
+ use super::*;
+
+ #[test]
+ fn test_windsurf_target_name() {
+ let target = WindsurfTarget;
+ assert_eq!(target.name(), "windsurf");
+ assert_eq!(target.display_name(), "Windsurf");
+ }
+
+ #[test]
+ fn test_windsurf_target_extension() {
+ let target = WindsurfTarget;
+ assert_eq!(target.file_extension(), "json");
+ }
+
+ #[test]
+ fn test_windsurf_target_generate() {
+ let target = WindsurfTarget;
+ let cmd = PalrunCommand {
+ name: "test".to_string(),
+ description: "Test command".to_string(),
+ palrun_command: "palrun test".to_string(),
+ category: CommandCategory::Utility,
+ args: Vec::new(),
+ };
+
+ let content = target.generate(&cmd).unwrap();
+ assert!(content.contains("\"name\": \"palrun:test\""));
+ assert!(content.contains("\"description\": \"Test command\""));
+ assert!(content.contains("\"command\": \"palrun test\""));
+ assert!(content.contains("\"source\": \"palrun\""));
+ }
+}
diff --git a/src/core/config.rs b/src/core/config.rs
index 1f09962..bfe6ed4 100644
--- a/src/core/config.rs
+++ b/src/core/config.rs
@@ -150,14 +150,44 @@ pub struct AiConfig {
/// Whether AI features are enabled
pub enabled: bool,
- /// AI provider (claude, ollama, openai)
+ /// Default AI provider (claude, ollama, openai, azure, grok)
pub provider: String,
- /// Model to use
+ /// Model to use (overrides provider-specific model)
 pub model: Option<String>,
+ /// Enable automatic fallback if primary provider fails
+ #[serde(default = "default_true")]
+ pub fallback_enabled: bool,
+
+ /// Fallback order: providers to try, in sequence, if the primary fails.
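+ ///
+ /// A minimal sketch of how this might look in `palrun.toml` (illustrative):
+ ///
+ /// ```toml
+ /// [ai]
+ /// fallback_enabled = true
+ /// fallback_chain = ["claude", "ollama"]
+ /// ```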
+ #[serde(default)]
+ pub fallback_chain: Vec<String>,
+
/// Ollama-specific settings
+ #[serde(default)]
pub ollama: OllamaConfig,
+
+ /// Claude-specific settings
+ #[serde(default)]
+ pub claude: ClaudeConfig,
+
+ /// OpenAI-specific settings
+ #[serde(default)]
+ pub openai: OpenAIConfig,
+
+ /// Azure OpenAI-specific settings
+ #[serde(default)]
+ pub azure: AzureOpenAIConfig,
+
+ /// Grok-specific settings
+ #[serde(default)]
+ pub grok: GrokConfig,
+}
+
+#[cfg(feature = "ai")]
+fn default_true() -> bool {
+ true
}
/// Ollama configuration.
@@ -172,6 +202,94 @@ pub struct OllamaConfig {
pub model: String,
}
+/// Claude (Anthropic) configuration.
+#[cfg(feature = "ai")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(default)]
+pub struct ClaudeConfig {
+ /// API key (prefer env var ANTHROPIC_API_KEY)
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub api_key: Option<String>,
+
+ /// Model to use
+ #[serde(default = "default_claude_model")]
+ pub model: String,
+}
+
+#[cfg(feature = "ai")]
+fn default_claude_model() -> String {
+ "claude-sonnet-4-20250514".to_string()
+}
+
+/// OpenAI configuration.
+#[cfg(feature = "ai")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(default)]
+pub struct OpenAIConfig {
+ /// API key (prefer env var OPENAI_API_KEY)
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub api_key: Option<String>,
+
+ /// Model to use
+ #[serde(default = "default_openai_model")]
+ pub model: String,
+
+ /// Base URL (for OpenAI-compatible APIs)
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub base_url: Option<String>,
+}
+
+#[cfg(feature = "ai")]
+fn default_openai_model() -> String {
+ "gpt-4o".to_string()
+}
+
+/// Azure OpenAI configuration.
+#[cfg(feature = "ai")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(default)]
+pub struct AzureOpenAIConfig {
+ /// Azure OpenAI endpoint (e.g., https://your-resource.openai.azure.com)
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub endpoint: Option<String>,
+
+ /// API key (prefer env var AZURE_OPENAI_API_KEY)
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub api_key: Option<String>,
+
+ /// Deployment name
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub deployment: Option<String>,
+
+ /// API version
+ #[serde(default = "default_azure_api_version")]
+ pub api_version: String,
+}
+
+#[cfg(feature = "ai")]
+fn default_azure_api_version() -> String {
+ "2024-02-01".to_string()
+}
+
+/// Grok (xAI) configuration.
+#[cfg(feature = "ai")]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(default)]
+pub struct GrokConfig {
+ /// API key (prefer env var XAI_API_KEY)
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub api_key: Option<String>,
+
+ /// Model to use
+ #[serde(default = "default_grok_model")]
+ pub model: String,
+}
+
+#[cfg(feature = "ai")]
+fn default_grok_model() -> String {
+ "grok-beta".to_string()
+}
+
/// Keybinding configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
@@ -316,29 +434,69 @@ impl HooksConfig {
}
impl Config {
- /// Load configuration from the default location.
+ /// Load configuration with hierarchical merging.
///
- /// Looks for config in:
- /// 1. `.palrun.toml` in current directory
- /// 2. `~/.config/palrun/config.toml`
- /// 3. Falls back to defaults
+ /// Loading order (later overrides earlier):
+ /// 1. Defaults
+ /// 2. `~/.config/palrun/palrun.toml` (system - can have secrets)
+ /// 3. `palrun.toml` in current directory (project - NO secrets)
+ /// 4. `.palrun.local.toml` in current directory (local - can have secrets, gitignored)
+ /// 5. Environment variables (highest priority)
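+ ///
+ /// For example (illustrative), a gitignored `.palrun.local.toml` might hold
+ /// only machine-local overrides:
+ ///
+ /// ```toml
+ /// [ai.ollama]
+ /// base_url = "http://192.168.1.10:11434"
+ /// ```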
 pub fn load() -> anyhow::Result<Self> {
- // Try local config first
- let local_config = PathBuf::from(".palrun.toml");
- if local_config.exists() {
- return Self::load_from_file(&local_config);
- }
+ let mut config = Self::default();
- // Try global config
+ // 1. Load system config (can have secrets)
if let Some(config_dir) = dirs::config_dir() {
- let global_config = config_dir.join("palrun").join("config.toml");
- if global_config.exists() {
- return Self::load_from_file(&global_config);
+ let system_config = config_dir.join("palrun").join("palrun.toml");
+ if system_config.exists() {
+ if let Ok(system) = Self::load_from_file(&system_config) {
+ config = config.merge(system);
+ tracing::debug!("Loaded system config from {}", system_config.display());
+ }
+ }
+ // Also check legacy path
+ let legacy_config = config_dir.join("palrun").join("config.toml");
+ if legacy_config.exists() && !system_config.exists() {
+ if let Ok(legacy) = Self::load_from_file(&legacy_config) {
+ config = config.merge(legacy);
+ tracing::debug!("Loaded legacy config from {}", legacy_config.display());
+ }
}
}
- // Return defaults
- Ok(Self::default())
+ // 2. Load project config (NO secrets - may be committed)
+ let project_config = PathBuf::from("palrun.toml");
+ if project_config.exists() {
+ if let Ok(project) = Self::load_from_file(&project_config) {
+ config = config.merge(project);
+ tracing::debug!("Loaded project config from palrun.toml");
+ }
+ }
+ // Also check .palrun.toml (legacy project config)
+ let legacy_project = PathBuf::from(".palrun.toml");
+ if legacy_project.exists() {
+ if let Ok(legacy) = Self::load_from_file(&legacy_project) {
+ config = config.merge(legacy);
+ tracing::debug!("Loaded legacy project config from .palrun.toml");
+ }
+ }
+
+ // 3. Load local config (can have secrets - gitignored)
+ let local_config = PathBuf::from(".palrun.local.toml");
+ if local_config.exists() {
+ if let Ok(local) = Self::load_from_file(&local_config) {
+ config = config.merge(local);
+ tracing::debug!("Loaded local config from .palrun.local.toml");
+ }
+ }
+
+ // 4. Apply environment variable overrides
+ #[cfg(feature = "ai")]
+ {
+ config = config.apply_env_overrides();
+ }
+
+ Ok(config)
}
/// Load configuration from a specific file.
@@ -348,6 +506,130 @@ impl Config {
Ok(config)
}
+ /// Merge another config into this one (other takes precedence).
+ pub fn merge(mut self, other: Self) -> Self {
+ // General - use other's values if they differ from default
+ if other.general.show_hidden {
+ self.general.show_hidden = true;
+ }
+ if !other.general.confirm_dangerous {
+ self.general.confirm_dangerous = false;
+ }
+ if other.general.max_history != 1000 {
+ self.general.max_history = other.general.max_history;
+ }
+ if other.general.shell.is_some() {
+ self.general.shell = other.general.shell;
+ }
+
+ // UI
+ if other.ui.theme != "default" {
+ self.ui.theme = other.ui.theme;
+ }
+ if !other.ui.show_preview {
+ self.ui.show_preview = false;
+ }
+ if !other.ui.show_icons {
+ self.ui.show_icons = false;
+ }
+ if other.ui.max_display != 50 {
+ self.ui.max_display = other.ui.max_display;
+ }
+ if !other.ui.mouse {
+ self.ui.mouse = false;
+ }
+ if other.ui.custom_colors.is_some() {
+ self.ui.custom_colors = other.ui.custom_colors;
+ }
+
+ // Scanner
+ if !other.scanner.enabled.is_empty() {
+ self.scanner.enabled = other.scanner.enabled;
+ }
+ if !other.scanner.ignore_dirs.is_empty() {
+ self.scanner.ignore_dirs = other.scanner.ignore_dirs;
+ }
+
+ // AI config
+ #[cfg(feature = "ai")]
+ {
+ self.ai = self.ai.merge(other.ai);
+ }
+
+ // Keys - use other if different from default
+ let default_keys = KeyConfig::default();
+ if other.keys.quit != default_keys.quit {
+ self.keys.quit = other.keys.quit;
+ }
+ if other.keys.select != default_keys.select {
+ self.keys.select = other.keys.select;
+ }
+
+ // Aliases - append
+ if !other.aliases.is_empty() {
+ self.aliases.extend(other.aliases);
+ }
+
+ // MCP
+ if other.mcp.enabled {
+ self.mcp.enabled = true;
+ }
+ if !other.mcp.servers.is_empty() {
+ self.mcp.servers.extend(other.mcp.servers);
+ }
+
+ // Hooks
+ #[cfg(feature = "git")]
+ {
+ if other.hooks.pre_commit.is_some() {
+ self.hooks.pre_commit = other.hooks.pre_commit;
+ }
+ if other.hooks.commit_msg.is_some() {
+ self.hooks.commit_msg = other.hooks.commit_msg;
+ }
+ // ... other hooks follow the same pattern
+ }
+
+ self
+ }
+
+ /// Apply environment variable overrides to AI config.
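+ ///
+ /// Environment variables win over every file layer, e.g. (illustrative):
+ ///
+ /// ```sh
+ /// export ANTHROPIC_API_KEY=sk-ant-...   # overrides any file-based key
+ /// palrun ai chat "hello"
+ /// ```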
+ #[cfg(feature = "ai")]
+ fn apply_env_overrides(mut self) -> Self {
+ // Claude
+ if let Ok(key) = std::env::var("ANTHROPIC_API_KEY") {
+ self.ai.claude.api_key = Some(key);
+ }
+
+ // OpenAI
+ if let Ok(key) = std::env::var("OPENAI_API_KEY") {
+ self.ai.openai.api_key = Some(key);
+ }
+
+ // Azure OpenAI
+ if let Ok(key) = std::env::var("AZURE_OPENAI_API_KEY") {
+ self.ai.azure.api_key = Some(key);
+ }
+ if let Ok(endpoint) = std::env::var("AZURE_OPENAI_ENDPOINT") {
+ self.ai.azure.endpoint = Some(endpoint);
+ }
+ if let Ok(deployment) = std::env::var("AZURE_OPENAI_DEPLOYMENT") {
+ self.ai.azure.deployment = Some(deployment);
+ }
+
+ // Grok
+ if let Ok(key) = std::env::var("XAI_API_KEY") {
+ self.ai.grok.api_key = Some(key);
+ }
+
+ // Ollama
+ if let Ok(url) = std::env::var("OLLAMA_HOST") {
+ self.ai.ollama.base_url = url;
+ }
+
+ self
+ }
+
/// Save configuration to the global config file.
pub fn save(&self) -> anyhow::Result<()> {
let config_dir = dirs::config_dir()
@@ -442,7 +724,153 @@ impl Default for AiConfig {
enabled: true,
provider: "claude".to_string(),
model: None,
+ fallback_enabled: true,
+ fallback_chain: vec![
+ "claude".to_string(),
+ "openai".to_string(),
+ "azure".to_string(),
+ "grok".to_string(),
+ "ollama".to_string(),
+ ],
ollama: OllamaConfig::default(),
+ claude: ClaudeConfig::default(),
+ openai: OpenAIConfig::default(),
+ azure: AzureOpenAIConfig::default(),
+ grok: GrokConfig::default(),
+ }
+ }
+}
+
+#[cfg(feature = "ai")]
+impl Default for ClaudeConfig {
+ fn default() -> Self {
+ Self { api_key: None, model: default_claude_model() }
+ }
+}
+
+#[cfg(feature = "ai")]
+impl Default for OpenAIConfig {
+ fn default() -> Self {
+ Self { api_key: None, model: default_openai_model(), base_url: None }
+ }
+}
+
+#[cfg(feature = "ai")]
+impl Default for AzureOpenAIConfig {
+ fn default() -> Self {
+ Self {
+ endpoint: None,
+ api_key: None,
+ deployment: None,
+ api_version: default_azure_api_version(),
+ }
+ }
+}
+
+#[cfg(feature = "ai")]
+impl Default for GrokConfig {
+ fn default() -> Self {
+ Self { api_key: None, model: default_grok_model() }
+ }
+}
+
+#[cfg(feature = "ai")]
+impl AiConfig {
+ /// Merge another AI config into this one (other takes precedence for non-default values).
+ pub fn merge(mut self, other: Self) -> Self {
+ // Basic settings
+ if !other.enabled {
+ self.enabled = false;
+ }
+ if other.provider != "claude" {
+ self.provider = other.provider;
+ }
+ if other.model.is_some() {
+ self.model = other.model;
+ }
+ if !other.fallback_enabled {
+ self.fallback_enabled = false;
+ }
+ if !other.fallback_chain.is_empty() {
+ self.fallback_chain = other.fallback_chain;
+ }
+
+ // Ollama
+ if other.ollama.base_url != "http://localhost:11434" {
+ self.ollama.base_url = other.ollama.base_url;
+ }
+ if other.ollama.model != "codellama:7b" {
+ self.ollama.model = other.ollama.model;
+ }
+
+ // Claude
+ if other.claude.api_key.is_some() {
+ self.claude.api_key = other.claude.api_key;
+ }
+ if other.claude.model != default_claude_model() {
+ self.claude.model = other.claude.model;
+ }
+
+ // OpenAI
+ if other.openai.api_key.is_some() {
+ self.openai.api_key = other.openai.api_key;
+ }
+ if other.openai.model != default_openai_model() {
+ self.openai.model = other.openai.model;
+ }
+ if other.openai.base_url.is_some() {
+ self.openai.base_url = other.openai.base_url;
+ }
+
+ // Azure
+ if other.azure.endpoint.is_some() {
+ self.azure.endpoint = other.azure.endpoint;
+ }
+ if other.azure.api_key.is_some() {
+ self.azure.api_key = other.azure.api_key;
+ }
+ if other.azure.deployment.is_some() {
+ self.azure.deployment = other.azure.deployment;
+ }
+ if other.azure.api_version != default_azure_api_version() {
+ self.azure.api_version = other.azure.api_version;
+ }
+
+ // Grok
+ if other.grok.api_key.is_some() {
+ self.grok.api_key = other.grok.api_key;
+ }
+ if other.grok.model != default_grok_model() {
+ self.grok.model = other.grok.model;
+ }
+
+ self
+ }
+
+ /// Check if a provider has credentials configured.
+ pub fn has_credentials(&self, provider: &str) -> bool {
+ match provider {
+ "claude" => self.claude.api_key.is_some(),
+ "openai" => self.openai.api_key.is_some(),
+ "azure" => {
+ self.azure.api_key.is_some()
+ && self.azure.endpoint.is_some()
+ && self.azure.deployment.is_some()
+ }
+ "grok" => self.grok.api_key.is_some(),
+ "ollama" => true, // Ollama doesn't need credentials
+ _ => false,
+ }
+ }
+
+ /// Get the API key for a provider (from config, not env).
+ pub fn get_api_key(&self, provider: &str) -> Option<&str> {
+ match provider {
+ "claude" => self.claude.api_key.as_deref(),
+ "openai" => self.openai.api_key.as_deref(),
+ "azure" => self.azure.api_key.as_deref(),
+ "grok" => self.grok.api_key.as_deref(),
+ _ => None,
}
}
}
diff --git a/src/core/mod.rs b/src/core/mod.rs
index 0cceea3..385dca1 100644
--- a/src/core/mod.rs
+++ b/src/core/mod.rs
@@ -20,6 +20,7 @@ mod parallel;
mod registry;
mod resilience;
mod retry;
+mod trust;
pub use analytics::{
Analytics, AnalyticsReport, CommandStats, Insight, InsightCategory, TimePeriod,
@@ -39,6 +40,10 @@ pub use command::{Command, CommandSource};
pub use config::Config;
#[cfg(feature = "git")]
pub use config::HooksConfig;
+#[cfg(feature = "ai")]
+pub use config::{
+ AiConfig, AzureOpenAIConfig, ClaudeConfig, GrokConfig, OllamaConfig, OpenAIConfig,
+};
pub use context::{CommandContext, ContextFilter, LocationIndicator};
pub use degradation::{
with_fallback, DegradationManager, DegradationReason, DegradedFeature, FallbackResult, Feature,
@@ -58,3 +63,4 @@ pub use parallel::{
pub use registry::CommandRegistry;
pub use resilience::{execute_resilient, FeatureResilience, ResilienceManager, ResilientResult};
pub use retry::{retry, retry_async, CircuitBreaker, CircuitState, RetryConfig, RetryResult};
+pub use trust::{trust_warning_message, TrustDecision, TrustStore};
diff --git a/src/core/trust.rs b/src/core/trust.rs
new file mode 100644
index 0000000..06b483b
--- /dev/null
+++ b/src/core/trust.rs
@@ -0,0 +1,206 @@
+//! Directory trust management for Palrun.
+//!
+//! Implements a trust system, similar to Claude Code's, where users must
+//! explicitly trust a directory before Palrun will execute commands in it.
+
+use std::collections::HashSet;
+use std::path::{Path, PathBuf};
+
+use serde::{Deserialize, Serialize};
+
+/// Trust store for managing trusted directories.
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct TrustStore {
+ /// Set of trusted directory paths (canonical paths)
+ #[serde(default)]
+ pub trusted_directories: HashSet<PathBuf>,
+
+ /// Whether to skip trust check for home directory subpaths
+ #[serde(default)]
+ pub trust_home_subdirs: bool,
+}
+
+impl TrustStore {
+ /// Load the trust store from the default location.
+ ///
+ /// Location: `~/.config/palrun/trust.json`
+ pub fn load() -> anyhow::Result<Self> {
+ let path = Self::store_path()?;
+
+ if !path.exists() {
+ return Ok(Self::default());
+ }
+
+ let content = std::fs::read_to_string(&path)?;
+ let store: Self = serde_json::from_str(&content)?;
+ Ok(store)
+ }
+
+ /// Save the trust store to disk.
+ pub fn save(&self) -> anyhow::Result<()> {
+ let path = Self::store_path()?;
+
+ // Ensure parent directory exists
+ if let Some(parent) = path.parent() {
+ std::fs::create_dir_all(parent)?;
+ }
+
+ let content = serde_json::to_string_pretty(self)?;
+ std::fs::write(&path, content)?;
+
+ Ok(())
+ }
+
+ /// Get the path to the trust store file.
+ fn store_path() -> anyhow::Result<PathBuf> {
+ let config_dir = dirs::config_dir()
+ .ok_or_else(|| anyhow::anyhow!("Could not determine config directory"))?;
+ Ok(config_dir.join("palrun").join("trust.json"))
+ }
+
+ /// Check if a directory is trusted.
+ ///
+ /// A directory is trusted if:
+ /// 1. It's in the trusted_directories set, OR
+ /// 2. It's a parent of a trusted directory, OR
+ /// 3. trust_home_subdirs is true and it's under the home directory
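+ ///
+ /// A minimal usage sketch (illustrative):
+ ///
+ /// ```ignore
+ /// let mut store = TrustStore::load()?;
+ /// let cwd = std::env::current_dir()?;
+ /// if !store.is_trusted(&cwd) {
+ ///     // prompt the user, then on acceptance:
+ ///     store.trust_directory(&cwd)?;
+ /// }
+ /// ```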
+ pub fn is_trusted(&self, path: &Path) -> bool {
+ // Canonicalize the path
+ let canonical = match path.canonicalize() {
+ Ok(p) => p,
+ Err(_) => path.to_path_buf(),
+ };
+
+ // Check if exactly in trusted set
+ if self.trusted_directories.contains(&canonical) {
+ return true;
+ }
+
+ // Check if any trusted directory is a subdirectory of this path
+ // (i.e., if we've trusted a child, trust the parent too)
+ for trusted in &self.trusted_directories {
+ if trusted.starts_with(&canonical) {
+ return true;
+ }
+ }
+
+ // Check home directory option
+ if self.trust_home_subdirs {
+ if let Some(home) = dirs::home_dir() {
+ if canonical.starts_with(&home) {
+ return true;
+ }
+ }
+ }
+
+ false
+ }
+
+ /// Add a directory to the trusted set.
+ pub fn trust_directory(&mut self, path: &Path) -> anyhow::Result<()> {
+ let canonical = path.canonicalize()?;
+ self.trusted_directories.insert(canonical);
+ self.save()
+ }
+
+ /// Remove a directory from the trusted set.
+ #[allow(dead_code)]
+ pub fn untrust_directory(&mut self, path: &Path) -> anyhow::Result<()> {
+ let canonical = path.canonicalize()?;
+ self.trusted_directories.remove(&canonical);
+ self.save()
+ }
+
+ /// Enable trusting all home subdirectories.
+ #[allow(dead_code)]
+ pub fn trust_all_home_subdirs(&mut self) -> anyhow::Result<()> {
+ self.trust_home_subdirs = true;
+ self.save()
+ }
+}
+
+/// Trust confirmation result.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum TrustDecision {
+ /// User trusts the directory
+ Trust,
+ /// User declined to trust (exit)
+ Decline,
+}
+
+/// Information about what trusting a directory means.
+pub fn trust_warning_message(path: &Path) -> Vec<String> {
+ vec![
+ "Do you trust the files in this folder?".to_string(),
+ String::new(),
+ format!(" {}", path.display()),
+ String::new(),
+ "Palrun may read files and execute commands in this".to_string(),
+ "directory. Only trust folders with code you trust.".to_string(),
+ ]
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::fs;
+ use tempfile::tempdir;
+
+ #[test]
+ fn test_trust_store_default() {
+ let store = TrustStore::default();
+ assert!(store.trusted_directories.is_empty());
+ assert!(!store.trust_home_subdirs);
+ }
+
+ #[test]
+ fn test_trust_directory() {
+ let temp = tempdir().unwrap();
+ // Use canonical path to handle symlinks (e.g., /tmp -> /private/tmp on macOS)
+ let path = temp.path().canonicalize().unwrap();
+
+ let mut store = TrustStore::default();
+ // Don't save to disk in test - use canonical path
+ store.trusted_directories.insert(path.clone());
+
+ assert!(store.is_trusted(&path));
+ }
+
+ #[test]
+ fn test_untrusted_directory() {
+ let temp = tempdir().unwrap();
+ let path = temp.path().canonicalize().unwrap();
+
+ let store = TrustStore::default();
+ assert!(!store.is_trusted(&path));
+ }
+
+ #[test]
+ fn test_child_trusts_parent() {
+ let temp = tempdir().unwrap();
+ let parent = temp.path().canonicalize().unwrap();
+ let child = parent.join("subdir");
+ fs::create_dir(&child).unwrap();
+ let child = child.canonicalize().unwrap();
+
+ let mut store = TrustStore::default();
+ store.trusted_directories.insert(child.clone());
+
+ // Parent should be trusted if child is trusted
+ assert!(store.is_trusted(&parent));
+ assert!(store.is_trusted(&child));
+ }
+
+ #[test]
+ fn test_serialization() {
+ let mut store = TrustStore::default();
+ store.trusted_directories.insert(PathBuf::from("/tmp/test"));
+ store.trust_home_subdirs = true;
+
+ let json = serde_json::to_string(&store).unwrap();
+ let loaded: TrustStore = serde_json::from_str(&json).unwrap();
+
+ assert_eq!(loaded.trusted_directories.len(), 1);
+ assert!(loaded.trust_home_subdirs);
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 4986fed..8a66445 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -101,7 +101,19 @@ pub use ai::{
OllamaProvider, ProjectContext, ShellExecutor, ToolExecutor,
};
+pub mod commands;
pub mod runbook;
+pub mod workflow;
+
+pub use commands::{
+ default_registry, CommandArg, CommandCategory, CommandTarget, PalrunCommand,
+ SlashCommandRegistry, PALRUN_COMMANDS,
+};
+pub use workflow::{
+ analyze_codebase, CodebaseAnalysis, Decision, ExecutionSummary, ExecutorConfig, Phase,
+ PhaseStatus, PlanDoc, PlanGenerator, ProjectDoc, RoadmapDoc, StateDoc, Task, TaskExecutor,
+ TaskResult, TaskStatus, TaskType, VerificationResult, WorkflowContext,
+};
#[cfg(feature = "git")]
pub mod git;
diff --git a/src/main.rs b/src/main.rs
index 30032bd..ea289bf 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -88,7 +88,7 @@ enum Commands {
dry_run: bool,
/// Variable assignments (key=value)
- #[arg(short, long)]
+ #[arg(long)]
 var: Vec<String>,
},
@@ -130,6 +130,13 @@ enum Commands {
path: bool,
},
+ /// Project workflow management (GSD-style)
+ Workflow {
+ /// Workflow operation
+ #[command(subcommand)]
+ operation: WorkflowOperation,
+ },
+
/// AI-powered command generation
#[cfg(feature = "ai")]
Ai {
@@ -210,6 +217,20 @@ enum Commands {
operation: McpOperation,
},
+ /// Generate and install slash commands for AI IDEs
+ Slash {
+ /// Slash command operation
+ #[command(subcommand)]
+ operation: SlashOperation,
+ },
+
+ /// Set up Claude AI integration for your project
+ Claude {
+ /// Claude operation
+ #[command(subcommand)]
+ operation: ClaudeOperation,
+ },
+
/// Debug and inspect Palrun internals
Debug {
/// Debug operation
@@ -293,6 +314,126 @@ enum McpOperation {
Config,
}
+/// Slash command operations.
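+///
+/// Typical invocations (illustrative):
+///
+/// ```sh
+/// palrun slash list
+/// palrun slash generate claude --dry-run
+/// palrun slash install --force
+/// ```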
+#[derive(Subcommand)]
+enum SlashOperation {
+ /// List all available palrun slash commands
+ List,
+
+ /// Show available IDE targets
+ Targets,
+
+ /// Generate slash commands for a specific IDE
+ Generate {
+ /// Target IDE (claude, cursor, windsurf, continue, aider)
+ target: String,
+
+ /// Output directory (uses default if not specified)
+ #[arg(short, long)]
+ output: Option<String>,
+
+ /// Dry run - show what would be generated
+ #[arg(short, long)]
+ dry_run: bool,
+ },
+
+ /// Install slash commands to all detected IDEs
+ Install {
+ /// Force overwrite existing files
+ #[arg(short, long)]
+ force: bool,
+
+ /// Dry run - show what would be installed
+ #[arg(short, long)]
+ dry_run: bool,
+ },
+
+ /// Show the generated content for a specific command
+ Show {
+ /// Command name
+ command: String,
+
+ /// Target IDE (defaults to claude)
+ #[arg(short, long, default_value = "claude")]
+ target: String,
+ },
+}
+
+/// Claude AI setup operations.
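+///
+/// Typical invocations (illustrative):
+///
+/// ```sh
+/// palrun claude init --recursive
+/// palrun claude status
+/// ```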
+#[derive(Subcommand)]
+enum ClaudeOperation {
+ /// Initialize Claude AI configuration for your project
+ Init {
+ /// Force overwrite existing CLAUDE.md files
+ #[arg(short, long)]
+ force: bool,
+
+ /// Dry run - show what would be created
+ #[arg(short, long)]
+ dry_run: bool,
+
+ /// Include directory-specific CLAUDE.md files for key directories
+ #[arg(short, long)]
+ recursive: bool,
+ },
+
+ /// Show current Claude configuration status
+ Status,
+}
+
+/// Workflow operations (GSD-style planning and execution).
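+///
+/// Typical lifecycle (illustrative):
+///
+/// ```sh
+/// palrun workflow init        # create PROJECT.md, ROADMAP.md, STATE.md
+/// palrun workflow plan 1      # generate PLAN.md from roadmap phase 1
+/// palrun workflow execute     # run pending tasks
+/// palrun workflow verify      # verify the current task
+/// ```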
+#[derive(Subcommand)]
+enum WorkflowOperation {
+ /// Initialize project documents (PROJECT.md, STATE.md)
+ Init {
+ /// Force overwrite existing files
+ #[arg(short, long)]
+ force: bool,
+ },
+
+ /// Show current project status
+ Status,
+
+ /// Create a plan from a roadmap phase
+ Plan {
+ /// Phase number to plan
+ phase: usize,
+
+ /// Dry run - show plan without saving
+ #[arg(short, long)]
+ dry_run: bool,
+ },
+
+ /// Execute the current plan
+ Execute {
+ /// Specific task ID to execute (runs all if not specified)
+ #[arg(short, long)]
+ task: Option,
+
+ /// Dry run - show what would be executed
+ #[arg(short, long)]
+ dry_run: bool,
+
+ /// AI provider to use
+ #[arg(short, long)]
+ provider: Option<String>,
+ },
+
+ /// Verify the current task or plan
+ Verify {
+ /// Specific task ID to verify
+ #[arg(short, long)]
+ task: Option<usize>,
+ },
+
+ /// Analyze the codebase and generate CODEBASE.md
+ Analyze {
+ /// Output path (defaults to .palrun/CODEBASE.md)
+ #[arg(short, long)]
+ output: Option<String>,
+ },
+}
+
/// Environment operations.
#[derive(Subcommand)]
enum EnvOperation {
@@ -543,6 +684,12 @@ enum AiOperation {
#[arg(long)]
local: bool,
},
+
+ /// Open interactive AI chat mode
+ Chat {
+ /// Initial prompt to start the conversation
+ prompt: Option<String>,
+ },
}
/// CI/CD operations.
@@ -872,6 +1019,9 @@ fn main() -> Result<()> {
Some(Commands::Config { path }) => {
cmd_config(path)?;
}
+ Some(Commands::Workflow { operation }) => {
+ cmd_workflow(operation)?;
+ }
#[cfg(feature = "ai")]
Some(Commands::Ai { operation }) => {
cmd_ai(operation)?;
@@ -908,6 +1058,12 @@ fn main() -> Result<()> {
Some(Commands::Mcp { operation }) => {
cmd_mcp(operation)?;
}
+ Some(Commands::Slash { operation }) => {
+ cmd_slash(operation)?;
+ }
+ Some(Commands::Claude { operation }) => {
+ cmd_claude(operation)?;
+ }
Some(Commands::Debug { operation }) => {
cmd_debug(operation)?;
}
@@ -1129,11 +1285,284 @@ fn cmd_config(show_path: bool) -> Result<()> {
Ok(())
}
+/// Handle workflow commands.
+fn cmd_workflow(operation: WorkflowOperation) -> Result<()> {
+ use palrun::{
+ analyze_codebase, ExecutorConfig, PlanDoc, PlanGenerator, ProjectDoc, RoadmapDoc, StateDoc,
+ TaskExecutor,
+ };
+ use std::fs;
+
+ let cwd = std::env::current_dir()?;
+ let palrun_dir = cwd.join(".palrun");
+
+ match operation {
+ WorkflowOperation::Init { force } => {
+ println!("Initializing Palrun workflow documents...\n");
+
+ // Create .palrun directory
+ fs::create_dir_all(&palrun_dir)?;
+
+ // Create PROJECT.md
+ let project_path = palrun_dir.join("PROJECT.md");
+ if project_path.exists() && !force {
+ println!(" PROJECT.md already exists (use --force to overwrite)");
+ } else {
+ let project_name = cwd.file_name().and_then(|n| n.to_str()).unwrap_or("My Project");
+ let content = ProjectDoc::template(project_name);
+ fs::write(&project_path, content)?;
+ println!(" Created: {}", project_path.display());
+ }
+
+ // Create ROADMAP.md
+ let roadmap_path = palrun_dir.join("ROADMAP.md");
+ if roadmap_path.exists() && !force {
+ println!(" ROADMAP.md already exists (use --force to overwrite)");
+ } else {
+ let project_name = cwd.file_name().and_then(|n| n.to_str()).unwrap_or("Project");
+ let content = RoadmapDoc::template(project_name);
+ fs::write(&roadmap_path, content)?;
+ println!(" Created: {}", roadmap_path.display());
+ }
+
+ // Create STATE.md
+ let state_path = palrun_dir.join("STATE.md");
+ if state_path.exists() && !force {
+ println!(" STATE.md already exists (use --force to overwrite)");
+ } else {
+ let content = StateDoc::template();
+ fs::write(&state_path, content)?;
+ println!(" Created: {}", state_path.display());
+ }
+
+ println!("\nWorkflow initialized! Edit the files in .palrun/ to get started.");
+ println!("\nNext steps:");
+ println!(" 1. Edit .palrun/PROJECT.md with your project vision");
+ println!(" 2. Edit .palrun/ROADMAP.md with your phases");
+ println!(" 3. Run: palrun workflow plan 1");
+ }
+
+ WorkflowOperation::Status => {
+ println!("Project Workflow Status\n");
+
+ // Load and show state
+ let state_path = palrun_dir.join("STATE.md");
+ if state_path.exists() {
+ let state = StateDoc::load(&state_path)?;
+ println!("Current Position:");
+ println!(" Phase: {}", state.current_phase);
+ println!(" Plan: {}", state.current_plan.as_deref().unwrap_or("none"));
+ println!(" Task: {}", state.current_task);
+ println!(" Status: {}", state.status);
+
+ if !state.blockers.is_empty() {
+ println!("\nBlockers:");
+ for blocker in &state.blockers {
+ println!(" - {}", blocker);
+ }
+ }
+ } else {
+ println!("No STATE.md found. Run: palrun workflow init");
+ }
+
+ // Load and show plan if exists
+ let plan_path = palrun_dir.join("PLAN.md");
+ if plan_path.exists() {
+ let plan = PlanDoc::load(&plan_path)?;
+ let (completed, total) = plan.progress();
+ println!("\nCurrent Plan: {}", plan.name);
+ println!(" Progress: {}/{} tasks", completed, total);
+
+ if let Some(next) = plan.next_task() {
+ println!(" Next Task: {} - {}", next.id, next.name);
+ }
+ }
+ }
+
+ WorkflowOperation::Plan { phase, dry_run } => {
+ println!("Creating plan for Phase {}...\n", phase);
+
+ // Load roadmap
+ let roadmap_path = palrun_dir.join("ROADMAP.md");
+ if !roadmap_path.exists() {
+ anyhow::bail!("No ROADMAP.md found. Run: palrun workflow init");
+ }
+
+ let roadmap = RoadmapDoc::load(&roadmap_path)?;
+
+ // Find the phase
+ let phase_data = roadmap
+ .phases
+ .iter()
+ .find(|p| p.number == phase)
+ .ok_or_else(|| anyhow::anyhow!("Phase {} not found in roadmap", phase))?;
+
+ // Generate plan
+ let generator = PlanGenerator::new();
+ let plan = generator.generate_detailed(phase_data, phase, None);
+
+ if dry_run {
+ println!("Plan Preview:\n");
+ println!("{}", plan.to_markdown());
+ } else {
+ // Save plan
+ let plan_path = palrun_dir.join("PLAN.md");
+ fs::write(&plan_path, plan.to_markdown())?;
+ println!("Plan created: {}", plan_path.display());
+ println!("\nPlan: {} ({} tasks)", plan.name, plan.tasks.len());
+ for task in &plan.tasks {
+ println!(" Task {}: {}", task.id, task.name);
+ }
+ println!("\nRun: palrun workflow execute");
+ }
+ }
+
+ WorkflowOperation::Execute { task, dry_run, provider: _ } => {
+ println!("Executing plan...\n");
+
+ // Load plan
+ let plan_path = palrun_dir.join("PLAN.md");
+ if !plan_path.exists() {
+ anyhow::bail!("No PLAN.md found. Run: palrun workflow plan ");
+ }
+
+ let mut plan = PlanDoc::load(&plan_path)?;
+
+ // Create executor
+ let config = ExecutorConfig { dry_run, working_dir: cwd.clone(), ..Default::default() };
+ let executor = TaskExecutor::with_config(config);
+
+ // Execute
+ let results = if let Some(task_id) = task {
+ // Execute specific task
+ match executor.execute_task_by_id(&mut plan, task_id) {
+ Some(result) => vec![result],
+ None => {
+ anyhow::bail!("Task {} not found", task_id);
+ }
+ }
+ } else {
+ // Execute all pending tasks
+ executor.execute_plan(&mut plan)
+ };
+
+ // Show results
+ println!("Execution Results:\n");
+ for result in &results {
+ let status = if result.success { "✓" } else { "✗" };
+ println!(" {} Task {}: {}", status, result.task_id, result.output);
+ }
+
+ // Save updated plan
+ if !dry_run {
+ fs::write(&plan_path, plan.to_markdown())?;
+
+ let (completed, total) = plan.progress();
+ println!("\nProgress: {}/{} tasks completed", completed, total);
+
+ if plan.is_complete() {
+ println!("\nPlan complete!");
+ }
+ }
+ }
+
+ WorkflowOperation::Verify { task } => {
+ println!("Running verification...\n");
+
+ // Load plan
+ let plan_path = palrun_dir.join("PLAN.md");
+ if !plan_path.exists() {
+ anyhow::bail!("No PLAN.md found. Run: palrun workflow plan ");
+ }
+
+ let plan = PlanDoc::load(&plan_path)?;
+ let executor = TaskExecutor::new();
+
+ let tasks_to_verify: Vec<_> = if let Some(task_id) = task {
+ plan.tasks.iter().filter(|t| t.id == task_id).collect()
+ } else if let Some(current) = plan.next_task() {
+ vec![current]
+ } else {
+ vec![]
+ };
+
+ if tasks_to_verify.is_empty() {
+ println!("No tasks to verify.");
+ return Ok(());
+ }
+
+ for task in tasks_to_verify {
+ println!("Verifying Task {}: {}\n", task.id, task.name);
+
+ let results = executor.verify_task(task);
+ for result in results {
+ let status = if result.passed { "✓" } else { "✗" };
+ println!(" {} {}", status, result.step);
+ if !result.passed && !result.output.is_empty() {
+ println!(" {}", result.output);
+ }
+ }
+ }
+ }
+
+ WorkflowOperation::Analyze { output } => {
+ println!("Analyzing codebase...\n");
+
+ let analysis = analyze_codebase(&cwd)?;
+
+ // Convert to markdown
+ let md = analysis.to_markdown();
+
+ let output_path = if let Some(ref path) = output {
+ std::path::PathBuf::from(path)
+ } else {
+ fs::create_dir_all(&palrun_dir)?;
+ palrun_dir.join("CODEBASE.md")
+ };
+
+ fs::write(&output_path, &md)?;
+ println!("Analysis saved to: {}", output_path.display());
+
+ // Show summary
+ println!("\nStack:");
+ for item in &analysis.stack {
+ println!(" {} ({})", item.name, item.category);
+ }
+
+ if !analysis.patterns.is_empty() {
+ println!("\nPatterns:");
+ for pattern in &analysis.patterns {
+ println!(" - {}", pattern);
+ }
+ }
+ }
+ }
+
+ Ok(())
+}
+
/// Handle AI commands.
#[cfg(feature = "ai")]
fn cmd_ai(operation: AiOperation) -> Result<()> {
use palrun::ai::{AIManager, ProjectContext};
+ // Handle Chat operation - use inline mode for native terminal scrolling
+ if let AiOperation::Chat { prompt } = operation {
+ let mut app = palrun::App::new()?;
+
+ // Set initial prompt if provided
+ if let Some(p) = prompt {
+ app.ai_chat_input = p;
+ }
+
+ // Check Ollama status for AI
+ let status = check_ollama_status();
+ app.ai_status = Some(status);
+
+ // Run inline AI chat (Claude-like native scrolling)
+ return palrun::tui::run_ai_chat_inline(app);
+ }
+
// Create tokio runtime for async operations
let rt = tokio::runtime::Runtime::new()?;
@@ -1313,12 +1742,35 @@ fn cmd_ai(operation: AiOperation) -> Result<()> {
);
}
}
+
+ AiOperation::Chat { .. } => {
+ // Chat is handled before the async block with an early return
+ unreachable!("Chat operation should be handled before async block");
+ }
}
Ok(())
})
}
+/// Check Ollama status for AI chat.
+#[cfg(feature = "ai")]
+fn check_ollama_status() -> String {
+ let base_url =
+ std::env::var("OLLAMA_HOST").unwrap_or_else(|_| "http://localhost:11434".to_string());
+
+ // Quick sync check
+ let client = reqwest::blocking::Client::new();
+ match client
+ .get(format!("{}/api/tags", base_url))
+ .timeout(std::time::Duration::from_secs(2))
+ .send()
+ {
+ Ok(resp) if resp.status().is_success() => "Ollama ready".to_string(),
+ _ => "Ollama not available".to_string(),
+ }
+}
+
/// Handle Git hooks commands.
#[cfg(feature = "git")]
fn cmd_hooks(operation: HooksOperation) -> Result<()> {
@@ -3469,6 +3921,338 @@ fn cmd_mcp(operation: McpOperation) -> Result<()> {
Ok(())
}
+/// Handle slash command operations.
+fn cmd_slash(operation: SlashOperation) -> Result<()> {
+ use palrun::commands::{default_registry, PALRUN_COMMANDS};
+ use std::fs;
+
+ let registry = default_registry();
+
+ match operation {
+ SlashOperation::List => {
+ println!("Available Palrun Slash Commands:\n");
+
+ for cmd in PALRUN_COMMANDS.iter() {
+ println!(" /{}", cmd.name);
+ println!(" {}", cmd.description);
+ if !cmd.args.is_empty() {
+ print!(" Args: ");
+ let args: Vec<_> = cmd.args.iter().map(|a| a.name.as_str()).collect();
+ println!("{}", args.join(", "));
+ }
+ println!();
+ }
+
+ println!("Total: {} commands", PALRUN_COMMANDS.len());
+ }
+
+ SlashOperation::Targets => {
+ println!("Available IDE Targets:\n");
+
+ for target in registry.targets() {
+ let detected = if target.detect() { " (detected)" } else { "" };
+ println!(" {} - {}{}", target.name(), target.display_name(), detected);
+ if let Ok(path) = target.install_path() {
+ println!(" Install path: {}", path.display());
+ }
+ }
+
+ println!("\nTotal: {} targets", registry.targets().len());
+ }
+
+ SlashOperation::Generate { target, output, dry_run } => {
+ let target_impl = registry.get(&target).ok_or_else(|| {
+ anyhow::anyhow!(
+ "Unknown target '{}'. Available: {}",
+ target,
+ registry.targets().iter().map(|t| t.name()).collect::<Vec<_>>().join(", ")
+ )
+ })?;
+
+ let output_dir = if let Some(ref out) = output {
+ std::path::PathBuf::from(out)
+ } else {
+ target_impl.install_path()?
+ };
+
+ println!(
+ "Generating {} slash commands for {}...\n",
+ PALRUN_COMMANDS.len(),
+ target_impl.display_name()
+ );
+
+ for cmd in PALRUN_COMMANDS.iter() {
+ let content = target_impl.generate(cmd)?;
+ let filename = format!("{}.{}", cmd.name, target_impl.file_extension());
+ let path = output_dir.join(&filename);
+
+ if dry_run {
+ println!("Would create: {}", path.display());
+ println!("---");
+ println!("{}", content);
+ println!();
+ } else {
+ fs::create_dir_all(&output_dir)?;
+ fs::write(&path, &content)?;
+ println!(" Created: {}", path.display());
+ }
+ }
+
+ if !dry_run {
+ println!(
+ "\nGenerated {} commands to {}",
+ PALRUN_COMMANDS.len(),
+ output_dir.display()
+ );
+ }
+ }
+
+ SlashOperation::Install { force, dry_run } => {
+ println!("Installing slash commands to detected IDEs...\n");
+
+ let mut installed_count = 0;
+
+ for target in registry.targets() {
+ if !target.detect() {
+ continue;
+ }
+
+ let output_dir = match target.install_path() {
+ Ok(p) => p,
+ Err(e) => {
+ println!(" Skipping {} ({})", target.display_name(), e);
+ continue;
+ }
+ };
+
+ println!(" {} ({})", target.display_name(), output_dir.display());
+
+ for cmd in PALRUN_COMMANDS.iter() {
+ let content = match target.generate(cmd) {
+ Ok(c) => c,
+ Err(e) => {
+ println!(" Error generating {}: {}", cmd.name, e);
+ continue;
+ }
+ };
+
+ let filename = format!("{}.{}", cmd.name, target.file_extension());
+ let path = output_dir.join(&filename);
+
+ if path.exists() && !force {
+ if dry_run {
+ println!(" Would skip (exists): {}", filename);
+ }
+ continue;
+ }
+
+ if dry_run {
+ println!(" Would create: {}", filename);
+ } else {
+ if let Err(e) = fs::create_dir_all(&output_dir) {
+ println!(" Error creating directory: {}", e);
+ continue;
+ }
+ if let Err(e) = fs::write(&path, &content) {
+ println!(" Error writing {}: {}", filename, e);
+ continue;
+ }
+ installed_count += 1;
+ }
+ }
+ }
+
+ if !dry_run {
+ println!("\nInstalled {} command files", installed_count);
+ }
+ }
+
+ SlashOperation::Show { command, target } => {
+ let cmd = PALRUN_COMMANDS.iter().find(|c| c.name == command).ok_or_else(|| {
+ anyhow::anyhow!(
+ "Unknown command '{}'. Use 'palrun slash list' to see available commands.",
+ command
+ )
+ })?;
+
+ let target_impl = registry.get(&target).ok_or_else(|| {
+ anyhow::anyhow!(
+ "Unknown target '{}'. Use 'palrun slash targets' to see available targets.",
+ target
+ )
+ })?;
+
+ let content = target_impl.generate(cmd)?;
+ println!("Command: /{} (for {})\n", cmd.name, target_impl.display_name());
+ println!("{}", content);
+ }
+ }
+
+ Ok(())
+}
+
+/// Handle Claude AI setup commands.
+fn cmd_claude(operation: ClaudeOperation) -> Result<()> {
+ use std::fs;
+
+ let cwd = std::env::current_dir()?;
+
+ match operation {
+ ClaudeOperation::Init { force, dry_run, recursive } => {
+ println!("Initializing Claude AI configuration...\n");
+
+ // Detect project info
+ let project_name = cwd
+ .file_name()
+ .map(|n| n.to_string_lossy().to_string())
+ .unwrap_or_else(|| "project".to_string());
+
+ // Check for existing project files to determine type
+ let is_rust = cwd.join("Cargo.toml").exists();
+ let is_node = cwd.join("package.json").exists();
+ let is_python = cwd.join("pyproject.toml").exists() || cwd.join("setup.py").exists();
+ let is_go = cwd.join("go.mod").exists();
+
+ let lang = if is_rust {
+ "Rust"
+ } else if is_node {
+ "TypeScript/JavaScript"
+ } else if is_python {
+ "Python"
+ } else if is_go {
+ "Go"
+ } else {
+ "Unknown"
+ };
+
+ // Generate root CLAUDE.md content
+ let root_content = format!(
+ r#"# {}
+
+## Project Overview
+This is a {} project.
+
+## Key Commands
+- `palrun` - Open the command palette
+- `palrun list` - List all available commands
+- `palrun ai agent` - Start an AI agent session
+
+## Project Structure
+Describe your project's main directories and their purposes here.
+
+## Development Guidelines
+Add your coding standards and best practices here.
+
+## Important Files
+List key files that Claude should know about.
+"#,
+ project_name, lang
+ );
+
+ let claude_md_path = cwd.join("CLAUDE.md");
+
+ if claude_md_path.exists() && !force {
+ println!(" CLAUDE.md already exists (use --force to overwrite)");
+ } else if dry_run {
+ println!(" Would create: CLAUDE.md");
+ println!("\n--- Content Preview ---\n{}", root_content);
+ } else {
+ fs::write(&claude_md_path, &root_content)?;
+ println!(" Created: CLAUDE.md");
+ }
+
+ // Create directory-specific CLAUDE.md files if recursive
+ if recursive {
+ let key_dirs = ["src", "docs", "tests", "examples", "lib"];
+
+ for dir in key_dirs {
+ let dir_path = cwd.join(dir);
+ if dir_path.exists() && dir_path.is_dir() {
+ let dir_claude_md = dir_path.join("CLAUDE.md");
+
+ let dir_content = format!(
+ r#"# {} Directory
+
+## Purpose
+Describe what this directory contains.
+
+## Key Files
+List important files in this directory.
+
+## Guidelines
+Add directory-specific guidelines here.
+"#,
+ dir
+ );
+
+ if dir_claude_md.exists() && !force {
+ println!(
+ " {}/CLAUDE.md already exists (use --force to overwrite)",
+ dir
+ );
+ } else if dry_run {
+ println!(" Would create: {}/CLAUDE.md", dir);
+ } else {
+ fs::write(&dir_claude_md, &dir_content)?;
+ println!(" Created: {}/CLAUDE.md", dir);
+ }
+ }
+ }
+ }
+
+ if !dry_run {
+ println!("\nClaude AI configuration initialized!");
+ println!("\nNext steps:");
+ println!(" 1. Edit CLAUDE.md to describe your project");
+ println!(" 2. Run 'palrun ai agent' to start working with Claude");
+ }
+ }
+
+ ClaudeOperation::Status => {
+ println!("Claude AI Configuration Status:\n");
+
+ let claude_md_path = cwd.join("CLAUDE.md");
+ let claude_dir = cwd.join(".claude");
+
+ if claude_md_path.exists() {
+ println!(" CLAUDE.md: Found");
+ if let Ok(metadata) = fs::metadata(&claude_md_path) {
+ println!(" Size: {} bytes", metadata.len());
+ }
+ } else {
+ println!(" CLAUDE.md: Not found");
+ println!(" Run 'palrun claude init' to create one");
+ }
+
+ if claude_dir.exists() {
+ println!(" .claude/: Found (Claude project directory)");
+ } else {
+ println!(" .claude/: Not found");
+ }
+
+ // Check for directory-specific CLAUDE.md files
+ let key_dirs = ["src", "docs", "tests", "examples", "lib"];
+ let mut found_dirs = Vec::new();
+
+ for dir in key_dirs {
+ let dir_claude_md = cwd.join(dir).join("CLAUDE.md");
+ if dir_claude_md.exists() {
+ found_dirs.push(dir);
+ }
+ }
+
+ if !found_dirs.is_empty() {
+ println!("\n Directory-specific CLAUDE.md files:");
+ for dir in found_dirs {
+ println!(" - {}/CLAUDE.md", dir);
+ }
+ }
+ }
+ }
+
+ Ok(())
+}
+
/// Handle debug commands.
fn cmd_debug(operation: DebugOperation) -> Result<()> {
use palrun::Config;
diff --git a/src/tui/app.rs b/src/tui/app.rs
index aecf0eb..0253e31 100644
--- a/src/tui/app.rs
+++ b/src/tui/app.rs
@@ -11,7 +11,7 @@ use crossterm::{
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
-use ratatui::{backend::CrosstermBackend, Terminal};
+use ratatui::{backend::CrosstermBackend, Terminal, TerminalOptions, Viewport};
use super::{draw, handle_events};
use crate::App;
@@ -126,3 +126,243 @@ pub fn run_tui_and_get_command(mut app: App) -> Result