diff --git a/.claude/settings.json b/.claude/settings.json index dec0ed4c..e11a8b41 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -1,5 +1,6 @@ { "enabledPlugins": { - "test-automation": true + "test-automation": true, + "doc-enforcement": true } } diff --git a/.launcher-config.template.yaml b/.launcher-config.template.yaml new file mode 100644 index 00000000..53865fed --- /dev/null +++ b/.launcher-config.template.yaml @@ -0,0 +1,43 @@ +# Launcher Configuration Template +# Copy this to your project root as .launcher-config.yaml + +project: + name: myproject # Internal project name (lowercase, no spaces) + display_name: My Project # Human-readable name shown in UI + +prerequisites: + required: + - docker + - git + optional: + - python + - uv + +setup: + command: ./setup.sh # Command to run when setting up the project + env_vars: + - PROJECT_ROOT + - WORKTREE_PATH + +infrastructure: + compose_file: docker-compose.yml # Path to compose file for shared infrastructure + project_name: myproject-infra # Docker compose project name + profile: default # Optional: compose profile to use + +containers: + naming_pattern: "{env_name}-{service}" # How containers are named + primary_service: backend # Main service to health-check + health_endpoint: /health # Health check endpoint + tailscale_project_prefix: myproject # Optional: Tailscale prefix + +ports: + allocation_strategy: offset # or: fixed + base_port: 8000 # Starting port for services + offset: + min: 0 + max: 100 + step: 10 + +worktrees: + default_parent: ../worktrees/{project_name} # Where worktrees are created + branch_prefix: env/ # Optional prefix for environment branches diff --git a/.launcher-config.yaml b/.launcher-config.yaml new file mode 100644 index 00000000..a2c50000 --- /dev/null +++ b/.launcher-config.yaml @@ -0,0 +1,80 @@ +# Ushadow Launcher Configuration +# This file defines how the launcher manages environments for this project + +project: + name: "ushadow" + display_name: "Ushadow" + 
+# Prerequisites required for this project +prerequisites: + required: + - docker + - git + - python3 + optional: + - tailscale + - uv + +# Environment setup configuration +setup: + # Command to run when creating a new environment + # Available variables: {ENV_NAME}, {PORT_OFFSET}, {WORKING_DIR} + command: "uv run --with pyyaml setup/run.py --dev --quick --skip-admin" + + # Environment variables passed during setup + env_vars: + - ENV_NAME + - PORT_OFFSET + - USHADOW_NO_BROWSER=1 + +# Docker infrastructure configuration (shared services) +infrastructure: + # Path to infrastructure compose file (relative to project root) + compose_file: "compose/docker-compose.infra.yml" + + # Docker Compose project name for infrastructure + project_name: "infra" + + # Profile to use when starting infrastructure (optional) + profile: "infra" + +# Container naming and discovery +containers: + # Naming pattern for environment containers + # Variables: {project_name}, {env_name}, {service_name} + # Note: {env_name} will be empty for "default" environment + naming_pattern: "{project_name}{env_name}-{service_name}" + + # Primary service that exposes the main application port + # This service's port is used to calculate other service ports + primary_service: "backend" + + # Health check endpoint (relative to primary service) + health_endpoint: "/api/unodes/leader/info" + + # Optional: Project prefix for Tailscale hostnames (for multi-project setups) + # If not set, Tailscale URLs will be: https://{env}.{tailnet} + # If set to "ushadow", URLs will be: https://ushadow-{env}.{tailnet} + # tailscale_project_prefix: "ushadow" + +# Port management +ports: + # Strategy: "hash" (deterministic from env name) | "sequential" | "random" + allocation_strategy: "hash" + + # Base port for primary service (default environment) + base_port: 8000 + + # Port offset configuration (for hash-based allocation) + offset: + min: 0 + max: 500 + step: 10 # Offsets are multiples of 10: 0, 10, 20, 30... 
+ +# Worktree configuration +worktrees: + # Default parent directory for worktrees (expandable via ~) + default_parent: "~/repos/worktrees/{project_name}" + + # Optional prefix for branch names (e.g., "env/" creates "env/staging") + branch_prefix: "" diff --git a/.workmux.yaml b/.workmux.yaml index 17d84c34..223f0483 100644 --- a/.workmux.yaml +++ b/.workmux.yaml @@ -53,8 +53,6 @@ panes: # Main pane - ready for commands or agent interaction - command: "echo 'πŸš€ Ushadow Environment: $(basename $(pwd))' && echo '' && echo 'Quick commands:' && echo ' ./dev.sh - Start in dev mode' && echo ' ./go.sh - Start in prod mode' && echo ' make test - Run tests' && echo ' code . - Open in VSCode' && echo '' && $SHELL" focus: true - split: horizontal - size: 75% # Agent status icons (shown in tmux status bar) status_icons: diff --git a/ENV_CONFIG_GUIDE.md b/ENV_CONFIG_GUIDE.md new file mode 100644 index 00000000..628f00fb --- /dev/null +++ b/ENV_CONFIG_GUIDE.md @@ -0,0 +1,135 @@ +# Environment Configuration System + +## Overview + +The launcher now supports comprehensive environment configuration including: +- Custom startup commands per project +- Automatic port detection and allocation +- Multi-environment support with port offsetting +- Database and service port management + +## How It Works + +### 1. Project Configuration (`.launcher-config.yaml`) + +Each project can have a configuration file that defines: + +```yaml +project: + name: myproject + display_name: My Project + +setup: + command: ./go.sh # Command to start an environment + env_vars: # Vars to inject + - PROJECT_ROOT + - WORKTREE_PATH + - PORT_OFFSET # Auto-calculated offset + +ports: + allocation_strategy: offset # or: fixed + base_port: 8000 + offset: + min: 0 + max: 100 + step: 10 # Each env gets ports +10 from previous +``` + +### 2. 
Port Detection + +The launcher scans `.env.template` or `.env.example` to detect: +- Port variables (e.g., `BACKEND_PORT`, `WEBUI_PORT`) +- Database ports (e.g., `POSTGRES_PORT`, `REDIS_PORT`) +- Default values + +**Example `.env.template`:** +```bash +BACKEND_PORT=8000 +WEBUI_PORT=3000 +POSTGRES_PORT=5432 +REDIS_PORT=6379 +``` + +### 3. Port Allocation + +When creating environments, ports are automatically offset: + +**Environment 1 (offset=0):** +- BACKEND_PORT=8000 +- WEBUI_PORT=3000 +- POSTGRES_PORT=5432 + +**Environment 2 (offset=10):** +- BACKEND_PORT=8010 +- WEBUI_PORT=3010 +- POSTGRES_PORT=5442 + +**Environment 3 (offset=20):** +- BACKEND_PORT=8020 +- WEBUI_PORT=3020 +- POSTGRES_PORT=5452 + +### 4. Environment Startup + +When you create or start an environment: +1. Launcher calculates the port offset +2. Injects environment variables: + ```bash + PROJECT_ROOT=/Users/you/repos/myproject + WORKTREE_PATH=/Users/you/repos/worktrees/myproject/dev + PORT_OFFSET=10 + ``` +3. Runs the startup command (e.g., `./go.sh`) +4. Your startup script reads PORT_OFFSET and adjusts ports accordingly + +## Startup Script Pattern + +Your `go.sh` or startup script should use the PORT_OFFSET: + +```bash +#!/bin/bash + +# Get port offset from environment (default to 0) +OFFSET=${PORT_OFFSET:-0} + +# Calculate actual ports +export BACKEND_PORT=$((8000 + OFFSET)) +export WEBUI_PORT=$((3000 + OFFSET)) +export POSTGRES_PORT=$((5432 + OFFSET)) +export REDIS_PORT=$((6379 + OFFSET)) + +# Load base .env if it exists +if [ -f .env.template ]; then + source .env.template +fi + +# Override with offset ports +cat > .env.local < setAppMode('kanban')}> + + Kanban + + ``` + +3. **Rendering**: Kanban board renders when `appMode === 'kanban'` + +## Key Design Decisions + +### 1. Simple 1:1 Ticket-Tmux Mapping +Each ticket = exactly one tmux window. This keeps the mental model simple. 
+ +**Alternative considered**: One ticket = multiple windows (frontend, backend, tests) +**Why rejected**: Added complexity without clear benefit for most workflows + +### 2. Epic + Tag Based Context Sharing +Enables both structured (epic) and ad-hoc (tag) relationships. + +**Structured (Epic)**: "All auth tickets share same branch" +**Ad-hoc (Tags)**: "All tickets tagged 'api' can see each other" + +### 3. Shared Branches for Epic Tickets +Tickets in the same epic use one shared branch. + +**Alternative considered**: One branch per ticket with merging +**Why rejected**: Sharing context across tickets is the explicit goal + +### 4. Standalone Kanban (No External Dependency) +Built directly into launcher, no vibe-kanban required. + +**Alternative considered**: Two-way sync with vibe-kanban +**Why rejected**: Simpler architecture, fewer moving parts + +## Color Team System + +Color inheritance creates visual organization: + +``` +Epic: "Authentication" (Purple #8B5CF6) + β”œβ”€ Ticket 1: "JWT validation" (inherits purple) + β”œβ”€ Ticket 2: "Refresh tokens" (inherits purple) + └─ Ticket 3: "OAuth flow" (overrides with orange) + +Epic: "Database" (Green #10B981) + β”œβ”€ Ticket 4: "Add indexes" (inherits green) + └─ Ticket 5: "Migration" (inherits green) +``` + +Visual indicators: +- **Ticket card border**: 4px left border in team color +- **Epic badge**: Badge with epic color at 20% opacity +- **Generated colors**: Hash-based HSL when no color set + +## Next Steps / TODOs + +### Immediate +- [ ] Add "Create Ticket from Environment" button to EnvironmentsPanel + - **Decision needed**: Where to place button? 
(card action, context menu, or details panel) + - See `KANBAN_INTEGRATION.md` for options + +### Enhancements +- [ ] Drag-and-drop to change ticket status +- [ ] Ticket detail modal with full description + comments +- [ ] Assign tickets to users (already has `assigned_to` field) +- [ ] Epic progress visualization (% tickets complete) +- [ ] Timeline view (Gantt chart style) +- [ ] Sprint planning mode +- [ ] Ticket time tracking integration with tmux activity + +### Integration Opportunities +- [ ] Auto-create ticket when running `/commit` in tmux +- [ ] Show active ticket in launcher status bar +- [ ] Link tickets to PRs via GitHub integration +- [ ] Chronicle integration (link tickets to memories) +- [ ] Notification when ticket's tmux window becomes inactive + +## Testing + +### Backend API Testing +```bash +# Start backend +cd ushadow/backend +uv run main.py + +# Create epic +curl -X POST http://localhost:8000/api/kanban/epics \ + -H "Content-Type: application/json" \ + -d '{"title": "Test Epic", "color": "#3B82F6", "base_branch": "main"}' + +# Create ticket +curl -X POST http://localhost:8000/api/kanban/tickets \ + -H "Content-Type: application/json" \ + -d '{"title": "Test Ticket", "priority": "medium", "tags": ["test"]}' + +# List tickets +curl http://localhost:8000/api/kanban/tickets +``` + +### Frontend Testing +```bash +# Start launcher +cd ushadow/launcher +npm run dev + +# Navigate to Kanban tab +# Should see empty kanban board +# Click "New Epic" or "New Ticket" to create items +``` + +### Tmux Integration Testing +```bash +# From launcher, create ticket via UI +# Check tmux window created +tmux list-windows -t workmux + +# Should see: ushadow-{branch-name} +``` + +## Architecture Diagram + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Vibe Launcher (Tauri) β”‚ 
+β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ Navigation: [Install] [Infra] [Environments] [Kanban] β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ KanbanBoard Component β”‚ β”‚ +β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ +β”‚ β”‚ β”‚Back- β”‚ To β”‚ In β”‚ In β”‚ Done β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚log β”‚ Do β”‚Prog β”‚Reviewβ”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ β”œβ”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€ β”‚ β”‚ +β”‚ β”‚ β”‚[Card]β”‚[Card]β”‚[Card]β”‚[Card]β”‚[Card]β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚[Card]β”‚[Card]β”‚ β”‚ β”‚[Card]β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚[Card]β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ Epic Filter: [All Tickets β–Ό] β”‚ β”‚ +β”‚ β”‚ Actions: [New Epic] [New Ticket] β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β”‚ API Calls β”‚ +β”‚ β–Ό β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Backend 
(FastAPI + MongoDB) β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ Routers: β”‚ +β”‚ /api/kanban/tickets β”‚ +β”‚ /api/kanban/epics β”‚ +β”‚ /api/kanban/stats β”‚ +β”‚ β”‚ +β”‚ Models: β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Epic β”‚ β”‚ Ticket β”‚ β”‚ +β”‚ │─────────│ │──────────│ β”‚ +β”‚ β”‚ title β”‚1 βˆžβ”‚ title β”‚ β”‚ +β”‚ β”‚ color │◀───────│ epic_id β”‚ β”‚ +β”‚ β”‚ branch β”‚ β”‚ tags[] β”‚ β”‚ +β”‚ β”‚ base_br β”‚ β”‚ status β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ tmux_win β”‚ β”‚ +β”‚ β”‚ branch β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β”‚ Worktree Creation β”‚ +β”‚ β–Ό β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Tauri Commands (Rust) + Tmux β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ create_ticket_worktree() β”‚ +β”‚ β”œβ”€ git worktree add β”‚ +β”‚ β”œβ”€ tmux new-window -n ushadow-{branch} β”‚ +β”‚ └─ cd {worktree_path} β”‚ +β”‚ β”‚ +β”‚ attach_ticket_to_worktree() β”‚ +β”‚ └─ verify tmux window exists β”‚ +β”‚ β”‚ +β”‚ Tmux Session: "workmux" β”‚ +β”‚ β”œβ”€ Window: ushadow-epic-auth (3 tickets) β”‚ +β”‚ β”œβ”€ Window: ushadow-ticket-123 (1 ticket) β”‚ +β”‚ └─ Window: ushadow-database (2 tickets) β”‚ 
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +## Files Modified/Created + +### Backend +- βœ… `ushadow/backend/src/models/kanban.py` - Data models +- βœ… `ushadow/backend/src/routers/kanban.py` - API routes +- βœ… `ushadow/backend/main.py` - Router registration + Beanie init + +### Launcher Backend +- βœ… `ushadow/launcher/src-tauri/src/commands/kanban.rs` - Tmux integration commands +- βœ… `ushadow/launcher/src-tauri/src/commands/mod.rs` - Module exports +- βœ… `ushadow/launcher/src-tauri/src/main.rs` - Command registration + +### Frontend +- βœ… `ushadow/launcher/src/components/KanbanBoard.tsx` - Main board +- βœ… `ushadow/launcher/src/components/TicketCard.tsx` - Ticket cards +- βœ… `ushadow/launcher/src/components/CreateTicketDialog.tsx` - Ticket creation modal +- βœ… `ushadow/launcher/src/components/CreateEpicDialog.tsx` - Epic creation modal +- βœ… `ushadow/launcher/src/store/appStore.ts` - Added 'kanban' mode +- βœ… `ushadow/launcher/src/App.tsx` - Navigation + routing + +### Documentation +- βœ… `KANBAN_INTEGRATION.md` - This file! diff --git a/MULTI_PROJECT_GUIDE.md b/MULTI_PROJECT_GUIDE.md new file mode 100644 index 00000000..65b97ff2 --- /dev/null +++ b/MULTI_PROJECT_GUIDE.md @@ -0,0 +1,93 @@ +# Multi-Project Launcher Guide + +## Overview + +The launcher now supports managing multiple projects beyond ushadow. Each project can have its own configuration, prerequisites, and infrastructure. + +## Quick Start + +### 1. Enable Multi-Project Mode + +1. Open launcher settings (βš™οΈ icon) +2. Toggle "Multi-Project Mode" to ON +3. Navigate to "Setup & Installation" tab +4. You'll see the ProjectManager UI + +### 2. Add a Project + +1. Click "+ Add Project" in the ProjectManager +2. Select your project folder (e.g., `/Users/stu/repos/chronicle`) +3. 
The launcher will: + - Use the folder as the project root + - Create worktrees in `../worktrees/projectname` + - Look for `.launcher-config.yaml` in the project root + +### 3. Create Project Configuration + +Each project needs a `.launcher-config.yaml` in its root directory: + +```yaml +project: + name: chronicle + display_name: Chronicle + +prerequisites: + required: + - docker + - git + optional: + - python + +setup: + command: ./setup.sh + env_vars: + - PROJECT_ROOT + +infrastructure: + compose_file: docker-compose.yml + project_name: chronicle-infra + +containers: + naming_pattern: "{env_name}-{service}" + primary_service: backend + health_endpoint: /health + +ports: + allocation_strategy: offset + base_port: 8000 + offset: + min: 0 + max: 100 + step: 10 + +worktrees: + default_parent: ../worktrees/chronicle +``` + +See `.launcher-config.template.yaml` for full documentation. + +## Current Limitations (To Be Fixed) + +1. **Prerequisites per project**: Currently uses global prerequisites. Need to load from project config. +2. **Infrastructure per project**: Infrastructure panel doesn't yet read from project config. +3. **Environment commands**: setup.command not yet integrated into environment creation flow. + +## Workaround for Now + +For projects like Chronicle: + +1. **Add the project** to get it in the list +2. **Manually create worktrees** using git: + ```bash + cd /Users/stu/repos/chronicle + git worktree add ../worktrees/chronicle/dev + ``` +3. 
**Use discovery** - The launcher will discover and manage existing worktrees + +## Next Steps to Complete Integration + +- [ ] Load prerequisites from active project's config +- [ ] Load infrastructure services from project config +- [ ] Run setup.command when creating new environments +- [ ] Show project-specific status in UI +- [ ] Add config editor UI for projects without `.launcher-config.yaml` diff --git a/REFACTORING_PLAN.md b/REFACTORING_PLAN.md deleted file mode 100644 index a325ef70..00000000 --- a/REFACTORING_PLAN.md +++ /dev/null @@ -1,220 +0,0 @@ -# Refactoring Plan: Instance β†’ ServiceConfiguration - -## Goal -Rename "Instance" to "ServiceConfiguration" to better reflect that this represents a configured service (either cloud credentials or deployable service with config). - -## New Naming Convention - -| Current | New | Purpose | -|---------|-----|---------| -| `Template` | `Template` | Abstract service or provider definition (keep) | -| `Instance` | `ServiceConfiguration` | Template + Config + DeploymentTarget | -| `InstanceManager` | `ConfigurationManager` | Manages service configurations | -| `InstanceStatus` | `ConfigurationStatus` | Status enum | -| `InstanceConfig` | `ServiceConfig` | Configuration values | -| `InstanceOutputs` | `ConfigurationOutputs` | Runtime outputs | -| `InstanceCreate` | `ConfigurationCreate` | API request model | -| `InstanceUpdate` | `ConfigurationUpdate` | API request model | -| `InstanceSummary` | `ConfigurationSummary` | API response model | -| `instances.yaml` | `configurations.yaml` | YAML storage file | - -## Status Values Semantic - -### Cloud Providers -- `configured` - Has valid credentials, ready to use -- `unconfigured` - Missing required credentials - -### Deployable Services (ComposeService, local providers) -- `pending` - Created but not yet started -- `deploying` - Currently starting -- `running` - Running and accessible -- `stopped` - Stopped gracefully -- `error` - Failed to deploy or crashed - -## 
Files to Update - -### Backend Models (Priority 1) - -1. **`ushadow/backend/src/models/instance.py`** β†’ Rename to `configuration.py` - - `Instance` β†’ `ServiceConfiguration` - - `InstanceStatus` β†’ `ConfigurationStatus` - - `InstanceConfig` β†’ `ServiceConfig` - - `InstanceOutputs` β†’ `ConfigurationOutputs` - - `InstanceCreate` β†’ `ConfigurationCreate` - - `InstanceUpdate` β†’ `ConfigurationUpdate` - - `InstanceSummary` β†’ `ConfigurationSummary` - - `Wiring` stays the same - - Update all docstrings - -2. **`ushadow/backend/src/services/instance_manager.py`** β†’ Rename to `configuration_manager.py` - - `InstanceManager` β†’ `ConfigurationManager` - - `_instances` β†’ `_configurations` - - `instances.yaml` β†’ `configurations.yaml` - - All method names: `create_instance` β†’ `create_configuration`, etc. - - Update all docstrings - -### Backend Services (Priority 1) - -3. **`ushadow/backend/src/services/capability_resolver.py`** - - Update all references to `Instance` β†’ `ServiceConfiguration` - - `consumer_instance_id` β†’ `consumer_config_id` - - `provider_instance` β†’ `provider_config` - -4. **`ushadow/backend/src/services/deployment_manager.py`** - - `instance_id` parameter β†’ `config_id` - - Update docstrings - -5. **`ushadow/backend/src/services/service_orchestrator.py`** - - `instance_id` parameter β†’ `config_id` - -### API Routes (Priority 1) - -6. **`ushadow/backend/src/routers/instances.py`** β†’ Rename to `configurations.py` - - Update all endpoint paths: - - `/api/instances` β†’ `/api/configurations` - - `/api/instances/{id}` β†’ `/api/configurations/{id}` - - Add backwards-compatibility aliases (optional) - - Update all request/response models - - Update docstrings - -7. **`ushadow/backend/src/main.py`** - - Update router import and include - -### Frontend Types (Priority 2) - -8. 
**`ushadow/frontend/src/services/api.ts`** - - `Instance` β†’ `ServiceConfiguration` - - `InstanceSummary` β†’ `ConfigurationSummary` - - `InstanceCreateRequest` β†’ `ConfigurationCreateRequest` - - `instancesApi` β†’ `configurationsApi` (or keep as `configurationsApi` but map endpoints) - - Update endpoint URLs - -### Frontend Pages (Priority 2) - -9. **`ushadow/frontend/src/pages/InstancesPage.tsx`** β†’ Rename to `ConfigurationsPage.tsx` - - Update component name - - Update all variable names - - Update all API calls - - Update test IDs - -10. **`ushadow/frontend/src/App.tsx`** - - Update route import and component - -11. **`ushadow/frontend/src/components/wiring/WiringBoard.tsx`** - - Update all references to instances - -### Configuration Files (Priority 3) - -12. **`config/instances.yaml`** β†’ Rename to `configurations.yaml` - - Update key: `instances:` β†’ `configurations:` - - Migrate existing data - -13. **`config/wiring.yaml`** - - Update references if needed - -### Documentation (Priority 3) - -14. **Update all markdown files** - - `ARCHITECTURE_OVERVIEW.md` - - `README.md` - - Any other docs - -## Migration Strategy - -### Phase 1: Backend Models (Break nothing) -1. Create new `configuration.py` alongside `instance.py` -2. Copy all classes with new names -3. Add type aliases in `instance.py` for backwards compatibility: - ```python - # Backwards compatibility - Instance = ServiceConfiguration - InstanceManager = ConfigurationManager - ``` - -### Phase 2: Backend Services & Routes (Gradual migration) -1. Update internal services to use new names -2. Keep old API endpoints working with aliases -3. Add deprecation warnings to old endpoints - -### Phase 3: Frontend (Coordinated update) -1. Update API client first -2. Update pages and components -3. Test thoroughly - -### Phase 4: Cleanup (After testing) -1. Remove backwards compatibility aliases -2. Remove old files -3. 
Rename YAML files (with data migration) - -## Backwards Compatibility Considerations - -### Option 1: Hard Break (Fast, risky) -- Rename everything at once -- Update all references in one PR -- Requires coordination with frontend - -### Option 2: Soft Transition (Safer, slower) -- Keep old API endpoints working -- Add deprecation warnings -- Gradually migrate frontend -- Remove old code after 1-2 releases - -**Recommendation**: Option 2 for production, Option 1 for development branches - -## Testing Checklist - -- [ ] All backend tests pass -- [ ] All frontend tests pass -- [ ] API endpoints work with new names -- [ ] YAML config loads correctly -- [ ] Create new configuration works -- [ ] Deploy configuration works -- [ ] Stop configuration works -- [ ] Delete configuration works -- [ ] Wiring still works -- [ ] Frontend UI displays correctly -- [ ] No broken imports -- [ ] Documentation updated - -## Rollback Plan - -If issues arise: -1. Keep old model files as `instance.py` -2. Git revert specific commits -3. Use type aliases to minimize changes - -## Estimated Effort - -- Backend models: 1-2 hours -- Backend services: 2-3 hours -- API routes: 1-2 hours -- Frontend types: 1 hour -- Frontend pages: 2-3 hours -- Testing: 2-3 hours -- Documentation: 1 hour - -**Total: ~12-15 hours** - -## Next Steps - -1. Get approval on naming convention -2. Choose migration strategy (hard break vs soft transition) -3. Start with backend models (Phase 1) -4. Test each phase before proceeding -5. Update documentation as you go - ---- - -## Questions to Resolve - -1. **API endpoint naming**: Keep `/api/instances` with alias or change to `/api/configurations`? -2. **YAML filename**: Migrate `instances.yaml` β†’ `configurations.yaml` now or later? -3. **Variable names**: `config_id` or `configuration_id`? -4. **Backwards compatibility**: How long to keep old names? 
- -## Decision Log - -- [x] Use `ServiceConfiguration` instead of `Instance` -- [x] Keep unified model (not splitting cloud/local) -- [ ] API endpoint strategy: TBD -- [ ] Migration timeline: TBD diff --git a/claude.md b/claude.md index be67204b..edfbf7c1 100644 --- a/claude.md +++ b/claude.md @@ -2,6 +2,15 @@ - There may be multiple environments running simultaneously using different worktrees. To determine the corren environment, you can get port numbers and env name from the root .env file. - When refactoring module names, run `grep -r "old_module_name" .` before committing to catch all remaining references (especially entry points like `main.py`). Use `__init__.py` re-exports for backward compatibility. +## Doc Enforcement Plugin + +The `doc-enforcement` plugin (enabled in `.claude/settings.json`) enforces that AI agents read the quick reference documentation before modifying code: + +- **Backend files**: Must read `ushadow/backend/BACKEND_QUICK_REF.md` before editing Python code +- **Frontend files**: Must read `ushadow/frontend/AGENT_QUICK_REF.md` before editing React/TypeScript code + +The plugin uses PreToolUse hooks to validate documentation has been read. If not, it blocks the edit with a clear message directing to read the docs first. This prevents code duplication and ensures architectural patterns are followed. 
+ ## Backend Development Workflow **BEFORE writing ANY backend code, follow this workflow:** diff --git a/compose/backend.yml b/compose/backend.yml index ad34cf25..c5a86c98 100644 --- a/compose/backend.yml +++ b/compose/backend.yml @@ -31,7 +31,7 @@ services: # Config directory location - CONFIG_DIR=/config - MONGODB_DATABASE=${MONGODB_DATABASE:-ushadow} - - CORS_ORIGINS=${CORS_ORIGINS:-http://localhost:5173,http://localhost:3000,http://localhost:${WEBUI_PORT}} + - CORS_ORIGINS=${CORS_ORIGINS:-http://localhost:5173,http://localhost:3000,http://localhost:1421,http://localhost:${WEBUI_PORT:-3000}} # Rich console width for logging (prevents log wrapping) - COLUMNS=200 # Database configuration @@ -48,7 +48,7 @@ services: - KEYCLOAK_CLIENT_ID=${KEYCLOAK_CLIENT_ID:-ushadow-frontend} - KEYCLOAK_CLIENT_SECRET=${KEYCLOAK_CLIENT_SECRET:-} - KEYCLOAK_ADMIN=${KEYCLOAK_ADMIN:-admin} - - KEYCLOAK_ADMIN_PASSWORD=${KEYCLOAK_ADMIN_PASSWORD:-changeme} + - KEYCLOAK_ADMIN_PASSWORD=${KEYCLOAK_ADMIN_PASSWORD:-admin} volumes: - ../ushadow/backend:/app - ../config:/config # Mount config directory (read-write for feature flags) diff --git a/compose/docker-compose.infra.yml b/compose/docker-compose.infra.yml index 481cc43b..33698189 100644 --- a/compose/docker-compose.infra.yml +++ b/compose/docker-compose.infra.yml @@ -159,7 +159,6 @@ services: - KC_DB_USERNAME=${POSTGRES_USER:-ushadow} - KC_DB_PASSWORD=${POSTGRES_PASSWORD:-ushadow} - KC_HOSTNAME_STRICT=false - - KC_HOSTNAME_STRICT_HTTPS=false - KC_HTTP_ENABLED=true - KC_HEALTH_ENABLED=true volumes: diff --git a/compose/openmemory-compose.yaml b/compose/openmemory-compose.yaml index 0a4e5634..70beb997 100644 --- a/compose/openmemory-compose.yaml +++ b/compose/openmemory-compose.yaml @@ -52,7 +52,9 @@ services: volumes: - mem0_data:/app/data networks: - - ushadow-network + ushadow-network: + aliases: + - mem0 # Allow other containers to reach via http://mem0:8765 healthcheck: test: ["CMD", "python3", "-c", "import urllib.request; 
urllib.request.urlopen('http://localhost:8765/api/v1/config/'); exit(0)"] interval: 10s diff --git a/compose/ushadow-compose.yaml b/compose/ushadow-compose.yaml index 922fbbb9..8909d98d 100644 --- a/compose/ushadow-compose.yaml +++ b/compose/ushadow-compose.yaml @@ -72,7 +72,7 @@ services: # Service management is handled via kubectl to the K8s API networks: - - infra-network + - ushadow-network healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/health"] @@ -94,12 +94,12 @@ services: ports: - "3000:80" environment: - - VITE_BACKEND_URL=${VITE_BACKEND_URL:-http://ushadow-backend.ushadow.svc.cluster.local:8000} + # - VITE_BACKEND_URL=${VITE_BACKEND_URL:-http://ushadow-backend.ushadow.svc.cluster.local:8000} - VITE_ENV_NAME=${VITE_ENV_NAME:-k8s} # BACKEND_HOST for nginx proxy (K8s service name) - BACKEND_HOST=${BACKEND_HOST:-ushadow-backend} networks: - - infra-network + - ushadow-network depends_on: - ushadow-backend restart: unless-stopped @@ -116,5 +116,8 @@ volumes: networks: infra-network: - name: infra-network + name: ushadow-network + external: true + ushadow-network: + name: ushadow-network external: true diff --git a/config/config.defaults.yaml b/config/config.defaults.yaml index 5655b596..f6c95818 100644 --- a/config/config.defaults.yaml +++ b/config/config.defaults.yaml @@ -87,7 +87,7 @@ network: # Security Configuration security: # Merges CORS_ORIGINS env var with defaults (deduplicates) - cors_origins: ${merge_csv:${oc.env:CORS_ORIGINS},http://localhost:5173,http://localhost:3000,http://127.0.0.1:5173,http://127.0.0.1:3000} + cors_origins: ${merge_csv:${oc.env:CORS_ORIGINS},http://localhost:5173,http://localhost:3000,http://localhost:1421,http://127.0.0.1:5173,http://127.0.0.1:3000} # Infrastructure Services infrastructure: @@ -104,6 +104,9 @@ infrastructure: ollama_base_url: http://ollama:11434 openai_base_url: https://api.openai.com/v1 + # DEPRECATED: This default is for local dev only. 
The backend should dynamically + # set this to the internal proxy URL when starting Chronicle. + # TODO: Remove this once backend sets MEMORY_SERVER_URL dynamically memory_server_url: http://mem0:8765 # Miscellaneous Settings diff --git a/config/defaults.yml b/config/defaults.yml index 46ce632b..4fa1a3df 100644 --- a/config/defaults.yml +++ b/config/defaults.yml @@ -174,9 +174,9 @@ models: interim_type: Results final_type: Results extract: - text: results.channels[0].alternatives[0].transcript - words: results.channels[0].alternatives[0].words - segments: results.utterances + text: channel.alternatives[0].transcript + words: channel.alternatives[0].words + segments: channel.alternatives[0].paragraphs.paragraphs - name: stt-parakeet-stream description: Parakeet streaming transcription over WebSocket @@ -229,7 +229,7 @@ models: # Memory Configuration # =========================== memory: - provider: chronicle + provider: openmemory_mcp timeout_seconds: 1200 extraction: enabled: true @@ -240,7 +240,7 @@ memory: # OpenMemory MCP provider settings (used when provider: openmemory_mcp) openmemory_mcp: - server_url: http://localhost:8765 + server_url: ${oc.env:MEMORY_SERVER_URL,'http://localhost:8765'} client_name: chronicle user_id: default timeout: 30 diff --git a/config/keycloak/realm-export.json b/config/keycloak/realm-export.json index 0e158ee5..71fe59df 100644 --- a/config/keycloak/realm-export.json +++ b/config/keycloak/realm-export.json @@ -36,6 +36,14 @@ "actionTokenGeneratedByUserLifespan": 300, "oauth2DeviceCodeLifespan": 600, "oauth2DevicePollingInterval": 5, + "browserSecurityHeaders": { + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self' http: https: tauri:; object-src 'none';", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, "clientScopes": [ { "name": "openid", @@ -165,11 +173,13 @@ 
"fullScopeAllowed": true, "redirectUris": [ "http://localhost:3000/oauth/callback", - "http://localhost:*/oauth/callback" + "http://localhost:*/oauth/callback", + "tauri://oauth-callback" ], "webOrigins": [ "http://localhost:3000", - "http://localhost:*" + "http://localhost:*", + "tauri://localhost" ], "attributes": { "pkce.code.challenge.method": "S256", diff --git a/config/tailscale copy.yaml b/config/tailscale copy.yaml deleted file mode 100644 index ac0e6d60..00000000 --- a/config/tailscale copy.yaml +++ /dev/null @@ -1,12 +0,0 @@ -backend_port: 8000 -deployment_mode: - environment: dev - mode: single -environments: -- dev -- test -- prod -frontend_port: 3000 -hostname: gold.spangled-kettle.ts.net -https_enabled: true -use_caddy_proxy: false diff --git a/config/tailscale-serve.json b/config/tailscale-serve.json new file mode 100644 index 00000000..7463ba25 --- /dev/null +++ b/config/tailscale-serve.json @@ -0,0 +1,23 @@ +{ + "version": "alpha0", + "TCP": { + "443": { + "HTTPS": true + } + }, + "Web": { + "gold.spangled-kettle.ts.net:443": { + "Handlers": { + "/auth": { + "Proxy": "http://ushadow-gold-backend:8000/auth" + }, + "/api": { + "Proxy": "http://ushadow-gold-backend:8000/api" + }, + "/": { + "Proxy": "http://ushadow-gold-webui:5173" + } + } + } + } +} \ No newline at end of file diff --git a/ushadow/backend/src/config/keycloak_settings.py b/ushadow/backend/src/config/keycloak_settings.py index 26a98963..33966f3f 100644 --- a/ushadow/backend/src/config/keycloak_settings.py +++ b/ushadow/backend/src/config/keycloak_settings.py @@ -1,25 +1,19 @@ """Keycloak configuration settings. -This module provides configuration for Keycloak integration using python-keycloak library. +This module provides configuration for Keycloak integration using OmegaConf. All sensitive values (passwords, client secrets) are stored in secrets.yaml. 
- -Architecture: -- Uses KeycloakOpenIDConnection for centralized configuration -- Public URL is dynamically constructed from Tailscale hostname or config -- Provides singleton instances for KeycloakAdmin and KeycloakOpenID """ -from typing import Optional import logging +from typing import Optional -from keycloak import KeycloakOpenIDConnection, KeycloakAdmin, KeycloakOpenID -from keycloak.exceptions import KeycloakError +from keycloak import KeycloakAdmin, KeycloakOpenID, KeycloakOpenIDConnection from src.config import get_settings_store as get_settings logger = logging.getLogger(__name__) -# Singleton instances +# Global instances (initialized on first use) _keycloak_connection: Optional[KeycloakOpenIDConnection] = None _keycloak_admin: Optional[KeycloakAdmin] = None _keycloak_openid: Optional[KeycloakOpenID] = None @@ -28,58 +22,18 @@ def get_keycloak_public_url() -> str: """Get the Keycloak public URL. - Priority: - 1. KEYCLOAK_PUBLIC_URL environment variable (for explicit override) - 2. Query Tailscale to find the host's IP address (for development) - 3. Config setting (keycloak.public_url from config.defaults.yaml) - 4. Fallback to localhost + Returns the URL that browsers/frontends use to access Keycloak. + + Resolution handled by OmegaConf in config.defaults.yaml: + - keycloak.public_url: ${oc.env:KEYCLOAK_PUBLIC_URL,http://localhost:8081} + + This automatically checks KEYCLOAK_PUBLIC_URL env var and falls back to localhost:8081. 
Returns: - Public URL like "http://keycloak.root.svc.cluster.local:8080" or "http://localhost:8080" + Public URL like "http://localhost:8081" """ - import os - - # Check environment variable first (highest priority) - env_url = os.environ.get("KEYCLOAK_PUBLIC_URL") - if env_url: - logger.info(f"[KC-SETTINGS] Using KEYCLOAK_PUBLIC_URL from env: {env_url}") - return env_url - - # Try Tailscale discovery (for development environments) - host_hostname = os.environ.get("HOST_HOSTNAME") - if host_hostname: - try: - from src.services.tailscale_manager import get_tailscale_manager - - manager = get_tailscale_manager() - - # Check if Tailscale is running and authenticated - status = manager.get_container_status() - if status.running and status.authenticated: - # Query Tailscale peers for host's IP - host_ip = manager.get_peer_ip_by_hostname(host_hostname) - - if host_ip: - url = f"http://{host_ip}:8081" - logger.info(f"[KC-SETTINGS] Using Tailscale IP for Keycloak: {url}") - return url - else: - logger.warning(f"[KC-SETTINGS] Could not find host '{host_hostname}' in Tailscale peers") - else: - logger.debug("[KC-SETTINGS] Tailscale not running or not authenticated") - except Exception as e: - logger.warning(f"[KC-SETTINGS] Failed to query Tailscale: {e}") - - # Check config setting settings = get_settings() - config_url = settings.get_sync("keycloak.public_url") - if config_url: - logger.info(f"[KC-SETTINGS] Using keycloak.public_url from config: {config_url}") - return config_url - - # Fallback to localhost - logger.info("[KC-SETTINGS] Using localhost for Keycloak (fallback)") - return "http://localhost:8080" + return settings.get_sync("keycloak.public_url", "http://localhost:8081") def get_keycloak_connection() -> KeycloakOpenIDConnection: @@ -104,12 +58,8 @@ def get_keycloak_connection() -> KeycloakOpenIDConnection: settings = get_settings() # Backend uses internal URL for direct connection to Keycloak - # Priority: KEYCLOAK_URL env var > config setting > default - 
internal_url = ( - os.environ.get("KEYCLOAK_URL") or - settings.get_sync("keycloak.url") or - "http://keycloak:8080" - ) + # Resolved by OmegaConf: ${oc.env:KEYCLOAK_URL,http://keycloak:8080} + internal_url = settings.get_sync("keycloak.url", "http://keycloak:8080") # Admin user authenticates against master realm, not application realm # This allows cross-realm admin operations (managing ushadow realm) @@ -153,14 +103,11 @@ def get_keycloak_admin() -> KeycloakAdmin: settings = get_settings() # Get application realm to manage - app_realm = settings.get_sync("keycloak.realm", "ushadow") + app_realm = settings.get_sync("keycloak.realm", "master") # Internal URL for backend-to-Keycloak communication - internal_url = ( - os.environ.get("KEYCLOAK_URL") or - settings.get_sync("keycloak.url") or - "http://keycloak:8080" - ) + # Resolved by OmegaConf: ${oc.env:KEYCLOAK_URL,http://keycloak:8080} + internal_url = settings.get_sync("keycloak.url", "http://keycloak:8080") # Admin credentials from master realm admin_user = settings.get_sync("keycloak.admin_user", "admin") @@ -200,7 +147,10 @@ def get_keycloak_openid(client_id: Optional[str] = None) -> KeycloakOpenID: if _keycloak_openid is None: settings = get_settings() - connection = get_keycloak_connection() + + # Internal URL for backend-to-Keycloak communication + # Resolved by OmegaConf: ${oc.env:KEYCLOAK_URL,http://keycloak:8080} + internal_url = settings.get_sync("keycloak.url", "http://keycloak:8080") # Use provided client_id or default to frontend if client_id is None: @@ -214,7 +164,7 @@ def get_keycloak_openid(client_id: Optional[str] = None) -> KeycloakOpenID: logger.info(f"[KC-SETTINGS] Initializing KeycloakOpenID for client: {client_id}") _keycloak_openid = KeycloakOpenID( - server_url=connection.server_url, + server_url=internal_url, realm_name=app_realm, # Use application realm for token operations client_id=client_id, client_secret_key=client_secret, @@ -226,39 +176,46 @@ def get_keycloak_openid(client_id: 
Optional[str] = None) -> KeycloakOpenID: def get_keycloak_config() -> dict: """Get Keycloak configuration from OmegaConf settings. - Legacy compatibility function - provides dict interface for code - that hasn't been migrated to use connection objects directly. + Dynamically determines public_url based on Tailscale configuration: + - If tailscale.hostname exists: Use http://{hostname}:8081 + - Otherwise: Use localhost fallback Returns: dict with keys: - enabled: bool - url: str (internal Docker URL) - - public_url: str (external browser URL, dynamically constructed) + - public_url: str (external browser URL - dynamically determined) - realm: str - backend_client_id: str - backend_client_secret: str (from secrets.yaml) - frontend_client_id: str - - admin_keycloak_user: str - - admin_keycloak_password: str + - admin_keycloak_user: str (from secrets.yaml keycloak.admin_user) + - admin_keycloak_password: str (from secrets.yaml keycloak.admin_password) """ settings = get_settings() - connection = get_keycloak_connection() # Application realm (not master realm used for admin connection) app_realm = settings.get_sync("keycloak.realm", "ushadow") - return { + # Build config dict + config = { "enabled": settings.get_sync("keycloak.enabled", False), "url": settings.get_sync("keycloak.url", "http://keycloak:8080"), - "public_url": connection.server_url, # From connection (dynamic) + "public_url": get_keycloak_public_url(), # Dynamic public URL "realm": app_realm, # Application realm (ushadow), not master "backend_client_id": settings.get_sync("keycloak.backend_client_id", "ushadow-backend"), "frontend_client_id": settings.get_sync("keycloak.frontend_client_id", "ushadow-frontend"), - "backend_client_secret": settings.get_sync("keycloak.backend_client_secret"), - "admin_keycloak_user": connection.username, - "admin_keycloak_password": connection.password, } + # Secrets (from config/SECRETS/secrets.yaml) + config["backend_client_secret"] = 
settings.get_sync("keycloak.backend_client_secret") + + # Keycloak admin credentials (separate from Ushadow admin) + config["admin_keycloak_user"] = settings.get_sync("keycloak.admin_user", "admin") + config["admin_keycloak_password"] = settings.get_sync("keycloak.admin_password", "admin") + + return config + def is_keycloak_enabled() -> bool: """Check if Keycloak authentication is enabled. diff --git a/ushadow/backend/src/config/store.py b/ushadow/backend/src/config/store.py index 403041a8..dc914c67 100644 --- a/ushadow/backend/src/config/store.py +++ b/ushadow/backend/src/config/store.py @@ -104,8 +104,9 @@ def __init__(self, config_dir: Optional[Path] = None): self.config_dir = Path(config_dir) - # File paths (merge order: defaults β†’ secrets β†’ overrides β†’ instance_overrides) + # File paths (merge order: defaults β†’ tailscale β†’ secrets β†’ overrides β†’ instance_overrides) self.defaults_path = self.config_dir / "config.defaults.yaml" + self.tailscale_path = self.config_dir / "tailscale.yaml" self.secrets_path = self.config_dir / "SECRETS" / "secrets.yaml" self.overrides_path = self.config_dir / "config.overrides.yaml" self.instance_overrides_path = self.config_dir / "instance-overrides.yaml" @@ -137,9 +138,10 @@ async def load_config(self, use_cache: bool = True) -> DictConfig: Merge order (later overrides earlier): 1. config.defaults.yaml - All default values - 2. secrets.yaml - API keys, passwords (gitignored) - 3. config.overrides.yaml - Template-level overrides (gitignored) - 4. instance-overrides.yaml - Instance-level overrides (gitignored) + 2. tailscale.yaml - Tailscale configuration (hostname, etc.) + 3. secrets.yaml - API keys, passwords (gitignored) + 4. config.overrides.yaml - Template-level overrides (gitignored) + 5. 
instance-overrides.yaml - Instance-level overrides (gitignored) Returns: OmegaConf DictConfig with all values merged @@ -158,6 +160,10 @@ async def load_config(self, use_cache: bool = True) -> DictConfig: configs.append(cfg) logger.debug(f"Loaded defaults from {self.defaults_path}") + if cfg := self._load_yaml_if_exists(self.tailscale_path): + configs.append(cfg) + logger.debug(f"Loaded tailscale config from {self.tailscale_path}") + if cfg := self._load_yaml_if_exists(self.secrets_path): configs.append(cfg) logger.debug(f"Loaded secrets from {self.secrets_path}") @@ -191,6 +197,18 @@ async def get(self, key_path: str, default: Any = None) -> Any: Resolved value (interpolations are automatically resolved) Converts OmegaConf containers to regular Python dicts/lists """ + # Special handling for dynamic service_urls.{service_name} pattern + if key_path.startswith("service_urls."): + service_name = key_path[len("service_urls."):] + try: + from src.utils.service_urls import get_internal_proxy_url + internal_url = get_internal_proxy_url(service_name) + logger.debug(f"Dynamically resolved {key_path} -> {internal_url}") + return internal_url + except Exception as e: + logger.warning(f"Failed to resolve dynamic service URL for {service_name}: {e}") + # Fall through to normal config lookup + config = await self.load_config() value = OmegaConf.select(config, key_path, default=default) @@ -207,6 +225,18 @@ def get_sync(self, key_path: str, default: Any = None) -> Any: Use this when you need config values at import time (e.g., SECRET_KEY). For async contexts, prefer the async get() method. 
""" + # Special handling for dynamic service_urls.{service_name} pattern + if key_path.startswith("service_urls."): + service_name = key_path[len("service_urls."):] + try: + from src.utils.service_urls import get_internal_proxy_url + internal_url = get_internal_proxy_url(service_name) + logger.debug(f"Dynamically resolved {key_path} -> {internal_url}") + return internal_url + except Exception as e: + logger.warning(f"Failed to resolve dynamic service URL for {service_name}: {e}") + # Fall through to normal config lookup + if self._cache is None: # Force sync load - _load_yaml_if_exists is already sync configs = [] diff --git a/ushadow/backend/src/middleware/app_middleware.py b/ushadow/backend/src/middleware/app_middleware.py index 8eba3f61..13deb273 100644 --- a/ushadow/backend/src/middleware/app_middleware.py +++ b/ushadow/backend/src/middleware/app_middleware.py @@ -84,17 +84,32 @@ def setup_cors_middleware(app: FastAPI) -> None: allowed_origins.append(tailscale_origin) logger.info(f"Added Tailscale origin to CORS: {tailscale_origin}") - # Build Tailscale origin regex for any tailnet + # Build regex patterns for CORS + regex_patterns = [] + + # Tailscale origin regex for any tailnet tailscale_regex = _get_tailscale_origin_regex() if tailscale_regex: + regex_patterns.append(tailscale_regex) logger.info(f"Tailscale CORS regex: {tailscale_regex}") + # In development mode, allow any localhost port for launcher/dev tools + dev_mode = os.getenv("DEV_MODE", "false").lower() in ("true", "1", "yes") + env_mode = os.getenv("ENVIRONMENT_MODE", "") + if dev_mode or env_mode == "development": + localhost_regex = r"http://(localhost|127\.0\.0\.1):\d+" + regex_patterns.append(localhost_regex) + logger.info(f"Development mode: allowing all localhost ports via regex") + + # Combine regex patterns + combined_regex = "|".join(f"({pattern})" for pattern in regex_patterns) if regex_patterns else None + logger.info(f"CORS configured with origins: {allowed_origins}") 
app.add_middleware( CORSMiddleware, allow_origins=allowed_origins, - allow_origin_regex=tailscale_regex, + allow_origin_regex=combined_regex, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], diff --git a/ushadow/backend/src/models/kanban.py b/ushadow/backend/src/models/kanban.py new file mode 100644 index 00000000..bfa45255 --- /dev/null +++ b/ushadow/backend/src/models/kanban.py @@ -0,0 +1,267 @@ +"""Kanban ticket models for integrated task management with tmux. + +This module provides models for kanban boards, tickets, and epics that integrate +directly with the launcher's tmux and worktree management. + +Key Features: +- Tickets linked to tmux windows for context preservation +- Epic-based grouping for related tickets +- Tag-based context sharing for ad-hoc relationships +- Color teams for visual organization +- Shared branches for collaborative tickets +""" + +import logging +from datetime import datetime +from enum import Enum +from typing import Optional, List + +from beanie import Document, PydanticObjectId, Link +from pydantic import ConfigDict, Field, BaseModel + +logger = logging.getLogger(__name__) + + +class TicketStatus(str, Enum): + """Ticket workflow status.""" + BACKLOG = "backlog" + TODO = "todo" + IN_PROGRESS = "in_progress" + IN_REVIEW = "in_review" + DONE = "done" + ARCHIVED = "archived" + + +class TicketPriority(str, Enum): + """Ticket priority levels.""" + LOW = "low" + MEDIUM = "medium" + HIGH = "high" + URGENT = "urgent" + + +class Epic(Document): + """Epic for grouping related tickets with shared context. 
+ + Epics enable: + - Logical grouping of related tickets + - Shared branch across all tickets in the epic + - Unified color team for visual organization + - Context sharing (all tickets access same worktree) + """ + + model_config = ConfigDict( + from_attributes=True, + populate_by_name=True, + ) + + # Core fields + title: str = Field(..., min_length=1, max_length=200) + description: Optional[str] = None + + # Color team (hex color for UI) + color: str = Field(default="#3B82F6") # Default blue + + # Branch management + branch_name: Optional[str] = None # Shared branch for all tickets + base_branch: str = Field(default="main") # Branch to fork from + + # Project association + project_id: Optional[str] = None # Links to launcher project + + # Metadata + created_at: datetime = Field(default_factory=datetime.utcnow) + updated_at: datetime = Field(default_factory=datetime.utcnow) + created_by: Optional[PydanticObjectId] = None # User who created epic + + class Settings: + name = "epics" + + async def save(self, *args, **kwargs): + """Override save to update timestamp.""" + self.updated_at = datetime.utcnow() + return await super().save(*args, **kwargs) + + +class Ticket(Document): + """Kanban ticket with tmux and worktree integration. 
+ + Each ticket represents a unit of work that: + - Has exactly one tmux window (1:1 mapping) + - May belong to an epic (shared branch) + - Has tags for ad-hoc context sharing + - Uses color from epic or generates own color + """ + + model_config = ConfigDict( + from_attributes=True, + populate_by_name=True, + ) + + # Core fields + title: str = Field(..., min_length=1, max_length=200) + description: Optional[str] = None + status: TicketStatus = Field(default=TicketStatus.TODO) + priority: TicketPriority = Field(default=TicketPriority.MEDIUM) + + # Epic relationship (optional) + epic_id: Optional[PydanticObjectId] = None + epic: Optional[Link[Epic]] = None + + # Tags for context sharing + tags: List[str] = Field(default_factory=list) + + # Color team (inherited from epic or unique) + color: Optional[str] = None # If None, inherit from epic or generate + + # Tmux integration + tmux_window_name: Optional[str] = None # e.g., "ushadow-ticket-123" + tmux_session_name: Optional[str] = None # Usually project name + + # Worktree/branch integration + branch_name: Optional[str] = None # Own branch or epic's shared branch + worktree_path: Optional[str] = None # Path to worktree on filesystem + + # Environment association + environment_name: Optional[str] = None # Links to launcher environment + project_id: Optional[str] = None # Links to launcher project + + # Assignment + assigned_to: Optional[PydanticObjectId] = None # User assigned to ticket + + # Ordering + order: int = Field(default=0) # For custom ordering within status column + + # Metadata + created_at: datetime = Field(default_factory=datetime.utcnow) + updated_at: datetime = Field(default_factory=datetime.utcnow) + created_by: Optional[PydanticObjectId] = None + + class Settings: + name = "tickets" + indexes = [ + "status", + "epic_id", + "project_id", + "tags", + "assigned_to", + ] + + async def save(self, *args, **kwargs): + """Override save to update timestamp.""" + self.updated_at = datetime.utcnow() + return 
await super().save(*args, **kwargs) + + @property + def ticket_id_str(self) -> str: + """Return short ticket ID for display (last 6 chars).""" + return str(self.id)[-6:] + + async def get_effective_color(self) -> str: + """Get the color to use for this ticket (own or epic's).""" + if self.color: + return self.color + + if self.epic_id and self.epic: + epic = await self.epic.fetch() + return epic.color if epic else self._generate_color() + + return self._generate_color() + + def _generate_color(self) -> str: + """Generate a color based on ticket ID hash.""" + # Simple hash-based color generation + id_hash = hash(str(self.id)) + hue = id_hash % 360 + return f"hsl({hue}, 70%, 60%)" + + async def get_effective_branch(self) -> Optional[str]: + """Get the branch to use (own or epic's shared branch).""" + if self.branch_name: + return self.branch_name + + if self.epic_id and self.epic: + epic = await self.epic.fetch() + return epic.branch_name if epic else None + + return None + + +# Pydantic schemas for API requests/responses + +class EpicCreate(BaseModel): + """Schema for creating a new epic.""" + title: str + description: Optional[str] = None + color: Optional[str] = None + base_branch: str = "main" + project_id: Optional[str] = None + + +class EpicRead(BaseModel): + """Schema for reading epic data.""" + id: PydanticObjectId + title: str + description: Optional[str] + color: str + branch_name: Optional[str] + base_branch: str + project_id: Optional[str] + created_at: datetime + updated_at: datetime + + +class EpicUpdate(BaseModel): + """Schema for updating epic data.""" + title: Optional[str] = None + description: Optional[str] = None + color: Optional[str] = None + branch_name: Optional[str] = None + + +class TicketCreate(BaseModel): + """Schema for creating a new ticket.""" + title: str + description: Optional[str] = None + status: TicketStatus = TicketStatus.TODO + priority: TicketPriority = TicketPriority.MEDIUM + epic_id: Optional[str] = None + tags: List[str] = 
[] + color: Optional[str] = None + project_id: Optional[str] = None + assigned_to: Optional[str] = None + + +class TicketRead(BaseModel): + """Schema for reading ticket data.""" + id: PydanticObjectId + title: str + description: Optional[str] + status: TicketStatus + priority: TicketPriority + epic_id: Optional[PydanticObjectId] + tags: List[str] + color: Optional[str] + tmux_window_name: Optional[str] + tmux_session_name: Optional[str] + branch_name: Optional[str] + worktree_path: Optional[str] + environment_name: Optional[str] + project_id: Optional[str] + assigned_to: Optional[PydanticObjectId] + order: int + created_at: datetime + updated_at: datetime + + +class TicketUpdate(BaseModel): + """Schema for updating ticket data.""" + title: Optional[str] = None + description: Optional[str] = None + status: Optional[TicketStatus] = None + priority: Optional[TicketPriority] = None + epic_id: Optional[str] = None + tags: Optional[List[str]] = None + color: Optional[str] = None + assigned_to: Optional[str] = None + order: Optional[int] = None diff --git a/ushadow/backend/src/models/kubernetes.py b/ushadow/backend/src/models/kubernetes.py index a6fee80e..3b377501 100644 --- a/ushadow/backend/src/models/kubernetes.py +++ b/ushadow/backend/src/models/kubernetes.py @@ -26,6 +26,7 @@ class KubernetesCluster(BaseModel): version: Optional[str] = Field(None, description="Kubernetes version") node_count: Optional[int] = Field(None, description="Number of nodes in cluster") namespace: str = Field("default", description="Default namespace for deployments") + infra_namespace: Optional[str] = Field(None, description="Namespace where infrastructure services (mongo, redis, etc.) 
are located") # Infrastructure scan results (cached per namespace) infra_scans: Dict[str, Dict[str, Any]] = Field( @@ -153,6 +154,7 @@ class KubernetesClusterUpdate(BaseModel): name: Optional[str] = None namespace: Optional[str] = None + infra_namespace: Optional[str] = None labels: Optional[Dict[str, str]] = None ingress_domain: Optional[str] = None ingress_class: Optional[str] = None diff --git a/ushadow/backend/src/routers/audio_relay.py b/ushadow/backend/src/routers/audio_relay.py index 770af6f8..52bdf12f 100644 --- a/ushadow/backend/src/routers/audio_relay.py +++ b/ushadow/backend/src/routers/audio_relay.py @@ -39,7 +39,7 @@ async def connect(self): import websockets # Add token to URL (use & if URL already has query params) - separator = '&' if '?' in self.url else '?' + separator = "&" if "?" in self.url else "?" url_with_token = f"{self.url}{separator}token={self.token}" # Detect endpoint type for logging @@ -205,33 +205,26 @@ async def audio_relay_websocket( try: destinations_param = websocket.query_params.get("destinations") token = websocket.query_params.get("token") + codec = websocket.query_params.get("codec", "pcm") # Default to PCM if not specified if not destinations_param or not token: await websocket.close(code=1008, reason="Missing destinations or token parameter") return - # Bridge Keycloak token to service token for destinations - from src.services.token_bridge import bridge_to_service_token - service_token = await bridge_to_service_token( - token, - audiences=["ushadow", "chronicle", "mycelia"] - ) - - if not service_token: - logger.error("[AudioRelay] Token bridging failed") - await websocket.close(code=1008, reason="Authentication failed") - return - - logger.info("[AudioRelay] βœ“ Token bridged successfully") - # Use service token for downstream connections - token = service_token - destinations = json.loads(destinations_param) if not isinstance(destinations, list) or len(destinations) == 0: await websocket.close(code=1008, 
reason="destinations must be a non-empty array") return + # Add codec parameter to destination URLs if not already present + for dest in destinations: + if "codec=" not in dest['url']: + separator = "&" if "?" in dest['url'] else "?" + dest['url'] = f"{dest['url']}{separator}codec={codec}" + logger.info(f"[AudioRelay] Destinations: {[d['name'] for d in destinations]}") + logger.info(f"[AudioRelay] Using codec: {codec}") + # Log exact URLs received from client for debugging for dest in destinations: # Detect endpoint type (check for old formats first, then new) @@ -285,6 +278,17 @@ async def audio_relay_websocket( except WebSocketDisconnect: logger.info("[AudioRelay] Client disconnected") break + except RuntimeError as e: + # Handle "Cannot call receive once a disconnect message has been received" + if "disconnect" in str(e).lower(): + logger.info("[AudioRelay] Client disconnected (disconnect message received)") + break + raise + + # Check for disconnect message type + if message.get("type") == "websocket.disconnect": + logger.info("[AudioRelay] Client disconnected (disconnect message)") + break # Relay text messages (Wyoming protocol headers) if "text" in message: diff --git a/ushadow/backend/src/routers/auth.py b/ushadow/backend/src/routers/auth.py index 3eaa25c3..26e96fc3 100644 --- a/ushadow/backend/src/routers/auth.py +++ b/ushadow/backend/src/routers/auth.py @@ -526,3 +526,89 @@ async def refresh_access_token(request: TokenRefreshRequest): status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e) ) + + +# Dynamic Redirect URI Registration +class RedirectUriRequest(BaseModel): + """Request for registering a redirect URI with Keycloak.""" + redirect_uri: str = Field(..., description="OAuth redirect URI to register (e.g., http://localhost:3500/oauth/callback)") + post_logout_redirect_uri: Optional[str] = Field(None, description="Optional post-logout redirect URI") + + +class RedirectUriResponse(BaseModel): + """Response after registering redirect URI.""" + 
success: bool + redirect_uri: str + message: str + + +@router.post("/register-redirect-uri", response_model=RedirectUriResponse) +async def register_redirect_uri_endpoint(request: RedirectUriRequest): + """Register this environment's redirect URI with Keycloak. + + Called by frontend on startup to dynamically register its OAuth callback URL. + This allows multiple environments to run on different ports without pre-configuring + all possible redirect URIs in Keycloak. + + Uses the existing KeycloakAdminClient.register_redirect_uri() service method. + + Args: + request: Contains redirect URI to register + + Returns: + Success status and registered URI + + Raises: + 400: If redirect URI is invalid + 500: If Keycloak registration fails + """ + from src.services.keycloak_admin import get_keycloak_admin + + # Validate redirect URI format + # Allow http://, https://, tauri:// (desktop app), ushadow:// (mobile), exp:// (Expo) + allowed_schemes = ('http://', 'https://', 'tauri://', 'ushadow://', 'exp://') + if not request.redirect_uri.startswith(allowed_schemes): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"redirect_uri must start with one of: {', '.join(allowed_schemes)}" + ) + + try: + kc_admin = get_keycloak_admin() + + # Use existing service method to register redirect URI + success = await kc_admin.register_redirect_uri( + client_id="ushadow-frontend", + redirect_uri=request.redirect_uri + ) + + if not success: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to register redirect URI with Keycloak" + ) + + # Optionally register post-logout redirect URI + if request.post_logout_redirect_uri: + await kc_admin.update_post_logout_redirect_uris( + client_id="ushadow-frontend", + post_logout_redirect_uris=[request.post_logout_redirect_uri], + merge=True + ) + + logger.info(f"[REDIRECT-URI] βœ“ Registered redirect URI: {request.redirect_uri}") + + return RedirectUriResponse( + success=True, + 
redirect_uri=request.redirect_uri, + message=f"Redirect URI registered successfully" + ) + + except HTTPException: + raise + except Exception as e: + logger.error(f"[REDIRECT-URI] Failed to register: {e}", exc_info=True) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Failed to register redirect URI: {str(e)}" + ) diff --git a/ushadow/backend/src/routers/deployments.py b/ushadow/backend/src/routers/deployments.py index 8990cf94..b844d9a6 100644 --- a/ushadow/backend/src/routers/deployments.py +++ b/ushadow/backend/src/routers/deployments.py @@ -78,20 +78,22 @@ async def list_deployment_targets( logger.info(f" β†’ Adding K8s cluster: {cluster.name} (status: {cluster.status})") parsed = parse_deployment_target_id(cluster.deployment_target_id) - # Get infrastructure - try cluster's namespace first, then any available namespace + # Get infrastructure - skip target namespace as it contains deployed services, not infra infra = {} if cluster.infra_scans: - # Try cluster's configured namespace first - if cluster.namespace in cluster.infra_scans: - infra = cluster.infra_scans[cluster.namespace] - logger.info(f" βœ“ Using infrastructure from namespace '{cluster.namespace}'") + # Filter out scans of the target namespace + infra_scans_filtered = { + ns: scan for ns, scan in cluster.infra_scans.items() + if ns != cluster.namespace + } + + if not infra_scans_filtered: + logger.info(f" ⚠️ No infrastructure scans available (target namespace '{cluster.namespace}' excluded)") else: - # Use first available namespace with infrastructure - for ns, ns_infra in cluster.infra_scans.items(): - if ns_infra: # Non-empty infrastructure - infra = ns_infra - logger.info(f" βœ“ Using infrastructure from namespace '{ns}' (cluster namespace '{cluster.namespace}' not found)") - break + # Use the first available infrastructure scan + infra_ns = next(iter(infra_scans_filtered.keys())) + infra = infra_scans_filtered[infra_ns] + logger.info(f" βœ“ Using 
infrastructure from namespace '{infra_ns}'") if infra: logger.info(f" Infrastructure services: {list(infra.keys())}") diff --git a/ushadow/backend/src/routers/kanban.py b/ushadow/backend/src/routers/kanban.py new file mode 100644 index 00000000..5d3c09b4 --- /dev/null +++ b/ushadow/backend/src/routers/kanban.py @@ -0,0 +1,404 @@ +"""API routes for kanban ticket management. + +This router provides CRUD operations for tickets and epics, integrating with +the launcher's tmux and worktree systems for context-aware task management. +""" + +import logging +from typing import List, Optional, Dict, Any + +from fastapi import APIRouter, HTTPException, Depends, Query +from beanie import PydanticObjectId +from pydantic import BaseModel + +from src.models.kanban import ( + Ticket, + Epic, + TicketStatus, + TicketPriority, + TicketCreate, + TicketRead, + TicketUpdate, + EpicCreate, + EpicRead, + EpicUpdate, +) +from src.services.auth import get_current_user + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/api/kanban", tags=["kanban"]) + + +# ============================================================================= +# Epic Endpoints +# ============================================================================= + +@router.post("/epics", response_model=Dict[str, Any]) +async def create_epic( + epic_data: EpicCreate, + current_user: dict = Depends(get_current_user) +): + """Create a new epic for grouping related tickets.""" + try: + epic = Epic( + title=epic_data.title, + description=epic_data.description, + color=epic_data.color or "#3B82F6", + base_branch=epic_data.base_branch, + project_id=epic_data.project_id, + created_by=PydanticObjectId(current_user["id"]) + ) + await epic.save() + + logger.info(f"Created epic: {epic.title} (ID: {epic.id})") + return epic.model_dump() + except Exception as e: + logger.error(f"Failed to create epic: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.get("/epics", response_model=List[Dict[str, 
Any]]) +async def list_epics( + project_id: Optional[str] = Query(None), + current_user: dict = Depends(get_current_user) +): + """List all epics, optionally filtered by project.""" + try: + query = {} + if project_id: + query["project_id"] = project_id + + epics = await Epic.find(query).to_list() + return [epic.model_dump() for epic in epics] + except Exception as e: + logger.error(f"Failed to list epics: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.get("/epics/{epic_id}", response_model=Dict[str, Any]) +async def get_epic( + epic_id: str, + current_user: dict = Depends(get_current_user) +): + """Get a specific epic by ID.""" + try: + epic = await Epic.get(PydanticObjectId(epic_id)) + if not epic: + raise HTTPException(status_code=404, detail="Epic not found") + return epic.model_dump() + except HTTPException: + raise + except Exception as e: + logger.error(f"Failed to get epic {epic_id}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.patch("/epics/{epic_id}", response_model=Dict[str, Any]) +async def update_epic( + epic_id: str, + update_data: EpicUpdate, + current_user: dict = Depends(get_current_user) +): + """Update an epic.""" + try: + epic = await Epic.get(PydanticObjectId(epic_id)) + if not epic: + raise HTTPException(status_code=404, detail="Epic not found") + + # Update fields + if update_data.title is not None: + epic.title = update_data.title + if update_data.description is not None: + epic.description = update_data.description + if update_data.color is not None: + epic.color = update_data.color + if update_data.branch_name is not None: + epic.branch_name = update_data.branch_name + + await epic.save() + logger.info(f"Updated epic: {epic.title} (ID: {epic.id})") + return epic.model_dump() + except HTTPException: + raise + except Exception as e: + logger.error(f"Failed to update epic {epic_id}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.delete("/epics/{epic_id}") +async 
def delete_epic( + epic_id: str, + current_user: dict = Depends(get_current_user) +): + """Delete an epic. Tickets in the epic will have epic_id set to None.""" + try: + epic = await Epic.get(PydanticObjectId(epic_id)) + if not epic: + raise HTTPException(status_code=404, detail="Epic not found") + + # Unlink tickets from epic + tickets = await Ticket.find(Ticket.epic_id == epic.id).to_list() + for ticket in tickets: + ticket.epic_id = None + ticket.epic = None + await ticket.save() + + await epic.delete() + logger.info(f"Deleted epic: {epic.title} (ID: {epic.id})") + return {"status": "success", "deleted": str(epic.id)} + except HTTPException: + raise + except Exception as e: + logger.error(f"Failed to delete epic {epic_id}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +# ============================================================================= +# Ticket Endpoints +# ============================================================================= + +@router.post("/tickets", response_model=Dict[str, Any]) +async def create_ticket( + ticket_data: TicketCreate, + current_user: dict = Depends(get_current_user) +): + """Create a new ticket.""" + try: + # Validate epic exists if provided + epic_obj_id = None + if ticket_data.epic_id: + epic = await Epic.get(PydanticObjectId(ticket_data.epic_id)) + if not epic: + raise HTTPException(status_code=400, detail="Epic not found") + epic_obj_id = epic.id + + ticket = Ticket( + title=ticket_data.title, + description=ticket_data.description, + status=ticket_data.status, + priority=ticket_data.priority, + epic_id=epic_obj_id, + tags=ticket_data.tags, + color=ticket_data.color, + project_id=ticket_data.project_id, + assigned_to=PydanticObjectId(ticket_data.assigned_to) if ticket_data.assigned_to else None, + created_by=PydanticObjectId(current_user["id"]) + ) + await ticket.save() + + logger.info(f"Created ticket: {ticket.title} (ID: {ticket.id})") + return ticket.model_dump() + except HTTPException: + raise + 
except Exception as e: + logger.error(f"Failed to create ticket: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.get("/tickets", response_model=List[Dict[str, Any]]) +async def list_tickets( + project_id: Optional[str] = Query(None), + epic_id: Optional[str] = Query(None), + status: Optional[TicketStatus] = Query(None), + tags: Optional[str] = Query(None), # Comma-separated tags + assigned_to: Optional[str] = Query(None), + current_user: dict = Depends(get_current_user) +): + """List tickets with optional filters.""" + try: + query = {} + if project_id: + query["project_id"] = project_id + if epic_id: + query["epic_id"] = PydanticObjectId(epic_id) + if status: + query["status"] = status + if assigned_to: + query["assigned_to"] = PydanticObjectId(assigned_to) + + # Tag filtering (find tickets with ANY of the specified tags) + if tags: + tag_list = [t.strip() for t in tags.split(",")] + query["tags"] = {"$in": tag_list} + + tickets = await Ticket.find(query).sort("+order").to_list() + return [ticket.model_dump() for ticket in tickets] + except Exception as e: + logger.error(f"Failed to list tickets: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.get("/tickets/{ticket_id}", response_model=Dict[str, Any]) +async def get_ticket( + ticket_id: str, + current_user: dict = Depends(get_current_user) +): + """Get a specific ticket by ID.""" + try: + ticket = await Ticket.get(PydanticObjectId(ticket_id)) + if not ticket: + raise HTTPException(status_code=404, detail="Ticket not found") + return ticket.model_dump() + except HTTPException: + raise + except Exception as e: + logger.error(f"Failed to get ticket {ticket_id}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.patch("/tickets/{ticket_id}", response_model=Dict[str, Any]) +async def update_ticket( + ticket_id: str, + update_data: TicketUpdate, + current_user: dict = Depends(get_current_user) +): + """Update a ticket.""" + try: + ticket = await 
Ticket.get(PydanticObjectId(ticket_id)) + if not ticket: + raise HTTPException(status_code=404, detail="Ticket not found") + + # Update fields + if update_data.title is not None: + ticket.title = update_data.title + if update_data.description is not None: + ticket.description = update_data.description + if update_data.status is not None: + ticket.status = update_data.status + if update_data.priority is not None: + ticket.priority = update_data.priority + if update_data.epic_id is not None: + # Validate epic exists + epic = await Epic.get(PydanticObjectId(update_data.epic_id)) + if not epic: + raise HTTPException(status_code=400, detail="Epic not found") + ticket.epic_id = epic.id + if update_data.tags is not None: + ticket.tags = update_data.tags + if update_data.color is not None: + ticket.color = update_data.color + if update_data.assigned_to is not None: + ticket.assigned_to = PydanticObjectId(update_data.assigned_to) if update_data.assigned_to else None + if update_data.order is not None: + ticket.order = update_data.order + + await ticket.save() + logger.info(f"Updated ticket: {ticket.title} (ID: {ticket.id})") + return ticket.model_dump() + except HTTPException: + raise + except Exception as e: + logger.error(f"Failed to update ticket {ticket_id}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.delete("/tickets/{ticket_id}") +async def delete_ticket( + ticket_id: str, + current_user: dict = Depends(get_current_user) +): + """Delete a ticket.""" + try: + ticket = await Ticket.get(PydanticObjectId(ticket_id)) + if not ticket: + raise HTTPException(status_code=404, detail="Ticket not found") + + await ticket.delete() + logger.info(f"Deleted ticket: {ticket.title} (ID: {ticket.id})") + return {"status": "success", "deleted": str(ticket.id)} + except HTTPException: + raise + except Exception as e: + logger.error(f"Failed to delete ticket {ticket_id}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +# 
============================================================================= +# Context Sharing Endpoints +# ============================================================================= + +@router.get("/tickets/{ticket_id}/related", response_model=List[Dict[str, Any]]) +async def get_related_tickets( + ticket_id: str, + current_user: dict = Depends(get_current_user) +): + """Find tickets related to this one via epic or shared tags.""" + try: + ticket = await Ticket.get(PydanticObjectId(ticket_id)) + if not ticket: + raise HTTPException(status_code=404, detail="Ticket not found") + + related = [] + + # Find tickets in same epic + if ticket.epic_id: + epic_tickets = await Ticket.find( + Ticket.epic_id == ticket.epic_id, + Ticket.id != ticket.id + ).to_list() + related.extend(epic_tickets) + + # Find tickets with shared tags + if ticket.tags: + tag_tickets = await Ticket.find( + Ticket.tags == {"$in": ticket.tags}, + Ticket.id != ticket.id + ).to_list() + # Deduplicate + existing_ids = {t.id for t in related} + for t in tag_tickets: + if t.id not in existing_ids: + related.append(t) + + return [t.model_dump() for t in related] + except HTTPException: + raise + except Exception as e: + logger.error(f"Failed to get related tickets for {ticket_id}: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +# ============================================================================= +# Statistics Endpoints +# ============================================================================= + +@router.get("/stats", response_model=Dict[str, Any]) +async def get_kanban_stats( + project_id: Optional[str] = Query(None), + current_user: dict = Depends(get_current_user) +): + """Get kanban board statistics.""" + try: + query = {} + if project_id: + query["project_id"] = project_id + + tickets = await Ticket.find(query).to_list() + + stats = { + "total": len(tickets), + "by_status": {}, + "by_priority": {}, + "by_epic": {}, + "with_tmux": sum(1 for t in tickets if 
t.tmux_window_name), + } + + for status in TicketStatus: + stats["by_status"][status.value] = sum(1 for t in tickets if t.status == status) + + for priority in TicketPriority: + stats["by_priority"][priority.value] = sum(1 for t in tickets if t.priority == priority) + + # Count tickets per epic + epic_counts = {} + for ticket in tickets: + if ticket.epic_id: + epic_id_str = str(ticket.epic_id) + epic_counts[epic_id_str] = epic_counts.get(epic_id_str, 0) + 1 + stats["by_epic"] = epic_counts + + return stats + except Exception as e: + logger.error(f"Failed to get kanban stats: {e}") + raise HTTPException(status_code=500, detail=str(e)) diff --git a/ushadow/backend/src/routers/kubernetes.py b/ushadow/backend/src/routers/kubernetes.py index 6214bab7..f2e0e56b 100644 --- a/ushadow/backend/src/routers/kubernetes.py +++ b/ushadow/backend/src/routers/kubernetes.py @@ -238,6 +238,14 @@ async def scan_cluster_for_infra( if not cluster: raise HTTPException(status_code=404, detail="Cluster not found") + # Don't allow scanning the target namespace - it contains deployed services, not infrastructure + if request.namespace == cluster.namespace: + raise HTTPException( + status_code=400, + detail=f"Cannot scan target namespace '{cluster.namespace}' for infrastructure. " + f"This namespace contains deployed services. Scan a different namespace where infrastructure services are located." + ) + results = await k8s_manager.scan_cluster_for_infra_services( cluster_id, request.namespace @@ -257,6 +265,41 @@ async def scan_cluster_for_infra( } +@router.delete("/{cluster_id}/scan-infra/{namespace}") +async def delete_infra_scan( + cluster_id: str, + namespace: str, + current_user: User = Depends(get_current_user) +): + """ + Delete an infrastructure scan for a specific namespace. + + Useful for removing stale or incorrect scan data. 
+ """ + k8s_manager = await get_kubernetes_manager() + + # Verify cluster exists + cluster = await k8s_manager.get_cluster(cluster_id) + if not cluster: + raise HTTPException(status_code=404, detail="Cluster not found") + + # Check if scan exists + if not cluster.infra_scans or namespace not in cluster.infra_scans: + raise HTTPException( + status_code=404, + detail=f"No infrastructure scan found for namespace '{namespace}'" + ) + + # Remove the scan + await k8s_manager.delete_cluster_infra_scan(cluster_id, namespace) + + return { + "cluster_id": cluster_id, + "namespace": namespace, + "message": f"Infrastructure scan for namespace '{namespace}' deleted successfully" + } + + @router.post("/{cluster_id}/envmap") async def create_or_update_envmap( cluster_id: str, diff --git a/ushadow/backend/src/routers/settings.py b/ushadow/backend/src/routers/settings.py index 9dcfb236..e95d3a8b 100644 --- a/ushadow/backend/src/routers/settings.py +++ b/ushadow/backend/src/routers/settings.py @@ -43,11 +43,24 @@ async def get_settings_info(): @router.get("/config") async def get_config(): - """Get merged configuration with secrets masked.""" + """Get merged configuration with secrets masked. + + Dynamically injects keycloak.public_url based on Tailscale configuration. 
+ """ try: + from src.config.keycloak_settings import get_keycloak_config + settings = get_settings() all_config = await settings.get_all() + # Inject dynamic Keycloak config (public_url determined from tailscale.hostname) + keycloak_config = get_keycloak_config() + if "keycloak" not in all_config: + all_config["keycloak"] = {} + all_config["keycloak"]["public_url"] = keycloak_config["public_url"] + all_config["keycloak"]["realm"] = keycloak_config["realm"] + all_config["keycloak"]["frontend_client_id"] = keycloak_config["frontend_client_id"] + # Recursively mask all sensitive values masked_config = mask_dict_secrets(all_config) diff --git a/ushadow/backend/src/routers/tailscale.py b/ushadow/backend/src/routers/tailscale.py index 69de0e6f..87f2d361 100644 --- a/ushadow/backend/src/routers/tailscale.py +++ b/ushadow/backend/src/routers/tailscale.py @@ -90,6 +90,7 @@ class DeploymentMode(BaseModel): class TailscaleConfig(BaseModel): """Complete Tailscale configuration""" hostname: str = Field(..., description="Tailscale hostname (e.g., machine-name.tail12345.ts.net)") + ip_address: Optional[str] = Field(None, description="Tailscale IP address (e.g., 100.105.225.45)") deployment_mode: DeploymentMode https_enabled: bool = True use_caddy_proxy: bool = Field(..., description="True for multi-env, False for single-env") @@ -415,6 +416,7 @@ async def generate_serve_config(config: TailscaleConfig) -> Dict[str, str]: f"tailscale serve https / http://localhost:{frontend_port}", f"tailscale serve https /api http://localhost:{backend_port}", f"tailscale serve https /auth http://localhost:{backend_port}", + f"tailscale serve https /keycloak http://localhost:8081", "", "# To view current configuration:", "tailscale serve status", @@ -1388,6 +1390,14 @@ async def configure_tailscale_serve( try: manager = get_tailscale_manager() + # Get container status to capture IP address + container_status = manager.get_container_status() + if container_status.ip_address: + config.ip_address = 
container_status.ip_address + logger.info(f"Captured Tailscale IP: {container_status.ip_address}") + else: + logger.warning("Could not capture Tailscale IP address") + # Save configuration to disk first config_data = config.model_dump() with open(TAILSCALE_CONFIG_FILE, 'w') as f: diff --git a/ushadow/backend/src/services/keycloak_admin.py b/ushadow/backend/src/services/keycloak_admin.py index f5988b8e..ccb4297b 100644 --- a/ushadow/backend/src/services/keycloak_admin.py +++ b/ushadow/backend/src/services/keycloak_admin.py @@ -135,13 +135,13 @@ async def update_client_redirect_uris( logger.info(f"[KC-ADMIN] Extracted {len(final_origins)} webOrigins from redirect URIs") # Update client using official library method - self.admin.update_client( - client_uuid, - { - "redirectUris": final_uris, - "webOrigins": final_origins, - } - ) + # IMPORTANT: Must update the full client object, not partial update + # Partial updates cause Hibernate to try INSERT instead of REPLACE, + # leading to duplicate key violations on redirectUris + client["redirectUris"] = final_uris + client["webOrigins"] = final_origins + + self.admin.update_client(client_uuid, client) logger.info(f"[KC-ADMIN] βœ“ Updated redirect URIs for client '{client_id}'") for uri in final_uris: @@ -208,14 +208,13 @@ async def update_post_logout_redirect_uris( logger.info(f"[KC-ADMIN] Replacing post-logout redirect URIs with {len(final_uris)} URIs") # Post-logout redirect URIs are stored as a ## delimited string in attributes - attributes = client.get("attributes", {}) - attributes["post.logout.redirect.uris"] = "##".join(final_uris) + # Update full client object to avoid Hibernate collection merge issues + if "attributes" not in client: + client["attributes"] = {} + client["attributes"]["post.logout.redirect.uris"] = "##".join(final_uris) - # Update using official library - self.admin.update_client( - client_uuid, - {"attributes": attributes} - ) + # Update using official library with full client object + 
self.admin.update_client(client_uuid, client) logger.info(f"[KC-ADMIN] βœ“ Updated post-logout redirect URIs for client '{client_id}'") for uri in final_uris: @@ -226,6 +225,37 @@ async def update_post_logout_redirect_uris( logger.error(f"[KC-ADMIN] Failed to update post-logout redirect URIs: {e}") return False + def update_realm_browser_security_headers(self, headers: dict) -> None: + """ + Update realm's browser security headers (CSP, X-Frame-Options, etc.). + + Args: + headers: Dictionary of browser security headers to update + """ + from ..config.keycloak_settings import get_keycloak_config + + try: + # Get realm from config + config = get_keycloak_config() + realm = config["realm"] + + # Get current realm configuration + realm_config = self.admin.get_realm(realm) + + # Update browserSecurityHeaders + realm_config["browserSecurityHeaders"] = headers + + # Update realm + self.admin.update_realm(realm, realm_config) + + logger.info(f"[KC-ADMIN] βœ“ Updated realm browser security headers for realm: {realm}") + for key, value in headers.items(): + logger.info(f"[KC-ADMIN] {key}: {value[:50]}...") # Truncate long values + + except KeycloakError as e: + logger.error(f"[KC-ADMIN] Failed to update realm: {e}") + raise + async def register_current_environment_redirect_uri() -> bool: """ diff --git a/ushadow/backend/src/services/keycloak_auth.py b/ushadow/backend/src/services/keycloak_auth.py index 69df7498..b8363ae2 100644 --- a/ushadow/backend/src/services/keycloak_auth.py +++ b/ushadow/backend/src/services/keycloak_auth.py @@ -33,21 +33,21 @@ def get_jwks_client() -> PyJWKClient: from src.config import get_settings_store settings = get_settings_store() + app_realm = settings.get_sync("keycloak.realm", "ushadow") - # Get Keycloak internal URL + # IMPORTANT: Backend must use internal URL for JWKS, never external/proxy URLs + # Priority: KEYCLOAK_URL env var > config setting > Docker default internal_url = ( os.environ.get("KEYCLOAK_URL") or 
settings.get_sync("keycloak.url") or "http://keycloak:8080" ) - # Get application realm (where tokens are issued) - app_realm = settings.get_sync("keycloak.realm", "ushadow") - - # Construct JWKS URL from application realm (not master) + # Construct JWKS URL from internal Keycloak URL + # IMPORTANT: Use application realm (ushadow), not admin realm (master) jwks_url = f"{internal_url}/realms/{app_realm}/protocol/openid-connect/certs" _jwks_client = PyJWKClient(jwks_url) - logger.info(f"[KC-AUTH] Initialized JWKS client: {jwks_url}") + logger.info(f"[KC-AUTH] Initialized JWKS client for realm '{app_realm}': {jwks_url}") return _jwks_client diff --git a/ushadow/backend/src/services/keycloak_startup.py b/ushadow/backend/src/services/keycloak_startup.py index a95a2778..544e0589 100644 --- a/ushadow/backend/src/services/keycloak_startup.py +++ b/ushadow/backend/src/services/keycloak_startup.py @@ -152,6 +152,7 @@ def get_web_origins() -> List[str]: if cors_origins and cors_origins.strip(): # Split comma-separated origins and strip whitespace origins = [origin.strip() for origin in cors_origins.split(",") if origin.strip()] + logger.info(f"[KC_STARTUP] CORS: {cors_origins}") logger.info(f"[KC-STARTUP] Using {len(origins)} web origins from settings") return origins except Exception as e: @@ -222,6 +223,23 @@ async def register_current_environment(): else: logger.warning("[KC-STARTUP] ⚠️ Failed to register post-logout redirect URIs") + # Update realm CSP to allow embedding from any origin (Tauri, Tailscale, etc.) 
+ try: + logger.info("[KC-STARTUP] πŸ”’ Updating realm CSP to allow embedding...") + headers = { + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self' http: https: tauri:; object-src 'none';", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "", # Remove X-Frame-Options (conflicts with CSP frame-ancestors) + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + } + admin_client.update_realm_browser_security_headers(headers) + logger.info("[KC-STARTUP] βœ… Realm CSP updated successfully") + except Exception as csp_error: + logger.warning(f"[KC-STARTUP] ⚠️ Failed to update realm CSP: {csp_error}") + logger.warning("[KC-STARTUP] You may need to manually configure CSP in Keycloak admin console") + except Exception as e: logger.warning(f"[KC-STARTUP] ⚠️ Failed to auto-register Keycloak URIs: {e}") logger.warning("[KC-STARTUP] This is non-critical - you can manually configure URIs in Keycloak admin console") diff --git a/ushadow/backend/src/services/kubernetes_manager.py b/ushadow/backend/src/services/kubernetes_manager.py index 148c69b4..e97dca94 100644 --- a/ushadow/backend/src/services/kubernetes_manager.py +++ b/ushadow/backend/src/services/kubernetes_manager.py @@ -339,6 +339,31 @@ async def update_cluster_infra_scan( logger.error(f"Error updating cluster infra scan: {e}") return False + async def delete_cluster_infra_scan( + self, + cluster_id: str, + namespace: str + ) -> bool: + """ + Delete cached infrastructure scan for a specific namespace. 
+ + Args: + cluster_id: The cluster ID + namespace: The namespace scan to delete + + Returns: + True if deletion was successful + """ + try: + result = await self.clusters_collection.update_one( + {"cluster_id": cluster_id}, + {"$unset": {f"infra_scans.{namespace}": ""}} + ) + return result.modified_count > 0 + except Exception as e: + logger.error(f"Error deleting cluster infra scan: {e}") + return False + async def update_cluster( self, cluster_id: str, diff --git a/ushadow/backend/src/services/tailscale_manager.py b/ushadow/backend/src/services/tailscale_manager.py index bc201de5..c85bce48 100644 --- a/ushadow/backend/src/services/tailscale_manager.py +++ b/ushadow/backend/src/services/tailscale_manager.py @@ -707,6 +707,7 @@ def configure_base_routes(self, Sets up: - /api/* β†’ backend (REST APIs through generic proxy) - /auth/* β†’ backend (authentication) + - /keycloak/* β†’ keycloak (OIDC authentication) - /* β†’ frontend (SPA catch-all) Note: Chronicle and other deployed services are accessed via their own ports, @@ -745,6 +746,11 @@ def configure_base_routes(self, if not self.add_serve_route(route, target): success = False + # Keycloak authentication service + keycloak_target = "http://keycloak:8080" + if not self.add_serve_route("/keycloak", keycloak_target): + success = False + # Chronicle WebSocket routes removed - Chronicle is now a deployed service # accessed via its own port (e.g., http://localhost:8090) diff --git a/ushadow/frontend/src/App.tsx b/ushadow/frontend/src/App.tsx index c9b83d95..8342ac2f 100644 --- a/ushadow/frontend/src/App.tsx +++ b/ushadow/frontend/src/App.tsx @@ -1,4 +1,5 @@ import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom' +import { useEffect } from 'react' import { ErrorBoundary } from './components/ErrorBoundary' import { ThemeProvider } from './contexts/ThemeContext' import { AuthProvider, useAuth } from './contexts/AuthContext' @@ -72,6 +73,9 @@ function AppContent() { const { backendError, 
checkSetupStatus, isLoading, token } = useAuth() + // Note: Redirect URI registration moved to login flow (KeycloakAuthContext) + // to avoid unnecessary calls on every app mount + // Show error page if backend has configuration errors if (backendError) { return @@ -126,7 +130,7 @@ function AppContent() { } /> } /> } /> - } /> + } /> } /> } /> } /> diff --git a/ushadow/frontend/src/auth/ServiceTokenManager.ts b/ushadow/frontend/src/auth/ServiceTokenManager.ts index 11bc00c8..c789b8b2 100644 --- a/ushadow/frontend/src/auth/ServiceTokenManager.ts +++ b/ushadow/frontend/src/auth/ServiceTokenManager.ts @@ -44,13 +44,13 @@ export async function getServiceToken( /** * Get a Chronicle-compatible token for the current user. - * Automatically retrieves the Keycloak token from session storage. - * + * Automatically retrieves the Keycloak token from local storage. + * * @returns Service token ready to use with Chronicle WebSocket */ export async function getChronicleToken(): Promise { - const keycloakToken = sessionStorage.getItem('kc_access_token') - + const keycloakToken = localStorage.getItem('kc_access_token') + if (!keycloakToken) { throw new Error('No Keycloak token found. Please log in first.') } diff --git a/ushadow/frontend/src/auth/TokenManager.ts b/ushadow/frontend/src/auth/TokenManager.ts index 9dd36083..462cfa14 100644 --- a/ushadow/frontend/src/auth/TokenManager.ts +++ b/ushadow/frontend/src/auth/TokenManager.ts @@ -2,7 +2,7 @@ * Token Manager * * Handles OIDC token storage, retrieval, and validation. - * Uses sessionStorage for security (tokens cleared when tab closes). + * Uses localStorage for persistence across browser sessions. */ import { jwtDecode } from 'jwt-decode' @@ -50,73 +50,146 @@ interface DecodedToken { export class TokenManager { /** - * Store tokens in sessionStorage with expiry times + * Check if running inside launcher iframe + * + * Simple check: if we're in an iframe, assume it's the launcher. 
+ * Will attempt to request tokens via postMessage (worst case: 5s timeout if wrong). + */ + private static isInLauncher(): boolean { + return window.parent !== window + } + + /** + * Store tokens in localStorage with expiry times */ static storeTokens(tokens: TokenResponse): void { const now = Math.floor(Date.now() / 1000) if (tokens.access_token) { - sessionStorage.setItem(TOKEN_KEY, tokens.access_token) + localStorage.setItem(TOKEN_KEY, tokens.access_token) } if (tokens.refresh_token) { - sessionStorage.setItem(REFRESH_TOKEN_KEY, tokens.refresh_token) + localStorage.setItem(REFRESH_TOKEN_KEY, tokens.refresh_token) } if (tokens.id_token) { - sessionStorage.setItem(ID_TOKEN_KEY, tokens.id_token) + localStorage.setItem(ID_TOKEN_KEY, tokens.id_token) } // Store expiry times (OAuth2 standard: use expires_in from token response) if (tokens.expires_in) { const expiresAt = now + tokens.expires_in - sessionStorage.setItem(EXPIRES_AT_KEY, expiresAt.toString()) + localStorage.setItem(EXPIRES_AT_KEY, expiresAt.toString()) console.log('[TokenManager] Access token expires in:', tokens.expires_in, 'seconds') } // Store refresh token expiry if provided if (tokens.refresh_expires_in) { const refreshExpiresAt = now + tokens.refresh_expires_in - sessionStorage.setItem(REFRESH_EXPIRES_AT_KEY, refreshExpiresAt.toString()) + localStorage.setItem(REFRESH_EXPIRES_AT_KEY, refreshExpiresAt.toString()) console.log('[TokenManager] Refresh token expires in:', tokens.refresh_expires_in, 'seconds') } } /** - * Get access token from storage + * Get access token from storage (or from launcher if in iframe) + */ + static async getAccessToken(): Promise { + // If in launcher iframe, request token from parent + if (this.isInLauncher()) { + return this.getTokenFromLauncher() + } + + // Otherwise use localStorage + return localStorage.getItem(TOKEN_KEY) + } + + /** + * Get access token synchronously (for backwards compatibility) + */ + static getAccessTokenSync(): string | null { + return 
localStorage.getItem(TOKEN_KEY) + } + + /** + * Request token from launcher via postMessage + * Caches tokens in localStorage for synchronous access */ - static getAccessToken(): string | null { - return sessionStorage.getItem(TOKEN_KEY) + private static async getTokenFromLauncher(): Promise { + return new Promise((resolve) => { + console.log('[TokenManager] Requesting token from launcher...') + + // Send request to launcher + window.parent.postMessage({ type: 'GET_KC_TOKEN' }, '*') + + // Listen for response + const handler = (event: MessageEvent) => { + if (event.data.type === 'KC_TOKEN_RESPONSE') { + window.removeEventListener('message', handler) + + const tokens = event.data.tokens + console.log('[TokenManager] Received tokens from launcher:', { + hasToken: !!tokens.token, + hasRefresh: !!tokens.refreshToken, + hasId: !!tokens.idToken + }) + + // Cache tokens in iframe localStorage for synchronous access + if (tokens.token) { + localStorage.setItem(TOKEN_KEY, tokens.token) + } + if (tokens.refreshToken) { + localStorage.setItem(REFRESH_TOKEN_KEY, tokens.refreshToken) + } + if (tokens.idToken) { + localStorage.setItem(ID_TOKEN_KEY, tokens.idToken) + } + + console.log('[TokenManager] βœ“ Tokens cached in iframe localStorage') + resolve(tokens.token) + } + } + + window.addEventListener('message', handler) + + // Timeout after 5 seconds + setTimeout(() => { + window.removeEventListener('message', handler) + console.warn('[TokenManager] ⚠️ Timeout requesting token from launcher') + resolve(null) + }, 5000) + }) } /** * Get refresh token from storage */ static getRefreshToken(): string | null { - return sessionStorage.getItem(REFRESH_TOKEN_KEY) + return localStorage.getItem(REFRESH_TOKEN_KEY) } /** * Get ID token from storage */ static getIdToken(): string | null { - return sessionStorage.getItem(ID_TOKEN_KEY) + return localStorage.getItem(ID_TOKEN_KEY) } /** * Clear all tokens from storage */ static clearTokens(): void { - sessionStorage.removeItem(TOKEN_KEY) - 
sessionStorage.removeItem(REFRESH_TOKEN_KEY) - sessionStorage.removeItem(ID_TOKEN_KEY) - sessionStorage.removeItem(EXPIRES_AT_KEY) - sessionStorage.removeItem(REFRESH_EXPIRES_AT_KEY) + localStorage.removeItem(TOKEN_KEY) + localStorage.removeItem(REFRESH_TOKEN_KEY) + localStorage.removeItem(ID_TOKEN_KEY) + localStorage.removeItem(EXPIRES_AT_KEY) + localStorage.removeItem(REFRESH_EXPIRES_AT_KEY) } /** * Get access token expiry info from storage (OAuth2 standard) */ static getTokenExpiry(): { expiresAt: number; expiresIn: number } | null { - const expiresAtStr = sessionStorage.getItem(EXPIRES_AT_KEY) + const expiresAtStr = localStorage.getItem(EXPIRES_AT_KEY) if (!expiresAtStr) return null const expiresAt = parseInt(expiresAtStr, 10) @@ -130,7 +203,7 @@ export class TokenManager { * Get refresh token expiry info from storage (OAuth2 standard) */ static getRefreshTokenExpiry(): { expiresAt: number; expiresIn: number } | null { - const expiresAtStr = sessionStorage.getItem(REFRESH_EXPIRES_AT_KEY) + const expiresAtStr = localStorage.getItem(REFRESH_EXPIRES_AT_KEY) if (!expiresAtStr) return null const expiresAt = parseInt(expiresAtStr, 10) @@ -142,11 +215,77 @@ export class TokenManager { /** * Check if user is authenticated (has valid token) + * + * If running in launcher, attempts to get token from parent first. + * This is an async operation that will resolve quickly (cached or from launcher). 
+ */ + static async isAuthenticatedAsync(): Promise { + // If in launcher, request token from parent first + if (this.isInLauncher()) { + const token = await this.getTokenFromLauncher() + if (!token) { + console.log('[TokenManager] No token from launcher') + return false + } + // Token is now cached in localStorage, continue with validation below + } + + // Check for Keycloak token (localStorage) + let token = localStorage.getItem(TOKEN_KEY) + + // Check for native token (localStorage - persists) + if (!token) { + token = localStorage.getItem('ushadow_access_token') + } + + if (!token) { + console.log('[TokenManager] No access token found in localStorage') + return false + } + + try { + const decoded = jwtDecode(token) + const now = Math.floor(Date.now() / 1000) + const isValid = decoded.exp > now + const expiresIn = decoded.exp - now + + console.log('[TokenManager] Token check:', { + isValid, + expiresIn: `${Math.floor(expiresIn / 60)}m ${expiresIn % 60}s`, + expiresAt: new Date(decoded.exp * 1000).toISOString(), + now: new Date(now * 1000).toISOString() + }) + + if (!isValid) { + console.warn('[TokenManager] ⚠️ Token EXPIRED!', { + expiredAgo: `${Math.floor(Math.abs(expiresIn) / 60)}m ${Math.abs(expiresIn) % 60}s ago` + }) + } + + return isValid + } catch (error) { + console.error('[TokenManager] Invalid token:', error) + return false + } + } + + /** + * Check if user is authenticated (synchronous version) + * + * Note: This only checks localStorage and won't request from launcher. + * Use isAuthenticatedAsync() for launcher-aware check. 
*/ static isAuthenticated(): boolean { - const token = this.getAccessToken() + // Check for Keycloak token first (localStorage) + let token = localStorage.getItem(TOKEN_KEY) + + // Check for native token (localStorage - persists) + if (!token) { + token = localStorage.getItem('ushadow_access_token') + } + if (!token) { - console.log('[TokenManager] No access token found') + console.log('[TokenManager] No access token found in localStorage') return false } @@ -177,10 +316,10 @@ export class TokenManager { } /** - * Get user info from decoded token + * Get user info from decoded token (synchronous - uses localStorage) */ static getUserInfo(): any | null { - const token = this.getAccessToken() + const token = localStorage.getItem(TOKEN_KEY) if (!token) return null try { diff --git a/ushadow/frontend/src/auth/config.ts b/ushadow/frontend/src/auth/config.ts index 53e815f5..a2ba3dad 100644 --- a/ushadow/frontend/src/auth/config.ts +++ b/ushadow/frontend/src/auth/config.ts @@ -25,67 +25,22 @@ function getBackendUrl(): string { return import.meta.env.VITE_BACKEND_URL || 'http://localhost:8000' } -/** - * Get Keycloak URL for frontend browser access. 
- * - * Frontend always uses localhost:8081 because: - * - When accessing locally, Keycloak is on localhost:8081 - * - When accessing via Tailscale, Tailscale routes to the same machine where localhost:8081 works - * - Backend uses a different URL (internal Docker network) for server-to-server communication - */ -function getKeycloakUrl(): string { - return 'http://localhost:8081' -} - // Backend config is static (based on origin) export const backendConfig = { url: getBackendUrl(), } -// Internal state for Keycloak config (can be updated from backend settings) -let _keycloakRealm = 'ushadow' -let _keycloakClientId = 'ushadow-frontend' - -// Keycloak config - URL is always dynamic based on current origin -// Use Object.defineProperty to create getters that recalculate on each access -export const keycloakConfig: { - readonly url: string - realm: string - clientId: string -} = Object.defineProperties({}, { - url: { - get() { - return getKeycloakUrl() // Recalculates every time it's accessed - }, - enumerable: true - }, - realm: { - get() { - return _keycloakRealm - }, - set(value: string) { - _keycloakRealm = value - }, - enumerable: true - }, - clientId: { - get() { - return _keycloakClientId - }, - set(value: string) { - _keycloakClientId = value - }, - enumerable: true - } -}) as any +// Keycloak config will be populated from backend settings +// Default to localhost for initial load, then update from backend +export let keycloakConfig = { + url: 'http://localhost:8081', + realm: 'ushadow', + clientId: 'ushadow-frontend', +} /** * Update Keycloak config from backend settings. * Should be called on app initialization and after settings changes. - * - * Note: The URL is always determined dynamically based on the current origin, - * not from settings. This allows seamless switching between localhost and Tailscale. - * Settings are only used for realm and clientId configuration. 
*/ export function updateKeycloakConfig(settings: { keycloak?: { @@ -95,16 +50,51 @@ export function updateKeycloakConfig(settings: { } }) { if (settings.keycloak) { - if (settings.keycloak.realm) { - _keycloakRealm = settings.keycloak.realm + keycloakConfig = { + url: settings.keycloak.public_url || keycloakConfig.url, + realm: settings.keycloak.realm || keycloakConfig.realm, + clientId: settings.keycloak.frontend_client_id || keycloakConfig.clientId, } - if (settings.keycloak.frontend_client_id) { - _keycloakClientId = settings.keycloak.frontend_client_id - } - console.log('[Config] Updated Keycloak config:', { - url: keycloakConfig.url, - realm: keycloakConfig.realm, - clientId: keycloakConfig.clientId + console.log('[Config] Updated Keycloak config from backend:', keycloakConfig) + } +} + +/** + * Register this environment's OAuth redirect URI with Keycloak. + * Called on app initialization to enable dynamic redirect URI registration. + * + * This allows multiple environments running on different ports to register + * their callback URLs without pre-configuring them in Keycloak. 
+ */ +export async function registerRedirectUri(): Promise { + // Build redirect URI for this environment + const redirectUri = `${window.location.origin}/oauth/callback` + const postLogoutRedirectUri = `${window.location.origin}/` + + try { + console.log('[Auth] Registering redirect URI with Keycloak:', redirectUri) + + const response = await fetch(`${backendConfig.url}/api/auth/register-redirect-uri`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + redirect_uri: redirectUri, + post_logout_redirect_uri: postLogoutRedirectUri, + }), }) + + if (!response.ok) { + const error = await response.text() + console.warn('[Auth] Failed to register redirect URI:', error) + return + } + + const result = await response.json() + console.log('[Auth] βœ“ Redirect URI registered:', result.redirect_uri) + } catch (error) { + // Non-critical error - OAuth will fail if not registered, but app can still load + console.warn('[Auth] Error registering redirect URI:', error) } } diff --git a/ushadow/frontend/src/contexts/KeycloakAuthContext.tsx b/ushadow/frontend/src/contexts/KeycloakAuthContext.tsx index 577067ee..8e6e8c18 100644 --- a/ushadow/frontend/src/contexts/KeycloakAuthContext.tsx +++ b/ushadow/frontend/src/contexts/KeycloakAuthContext.tsx @@ -21,7 +21,7 @@ interface KeycloakAuthContextType { login: (redirectUri?: string) => void register: (redirectUri?: string) => void logout: (redirectUri?: string) => void - getAccessToken: () => string | null + getAccessToken: () => Promise handleCallback: (code: string, state: string) => Promise } @@ -65,7 +65,7 @@ export function KeycloakAuthProvider({ children }: { children: ReactNode }) { setRefreshTimeoutId(null) } - const token = TokenManager.getAccessToken() + const token = TokenManager.getAccessTokenSync() const refreshToken = TokenManager.getRefreshToken() if (!token || !refreshToken) { @@ -170,32 +170,53 @@ export function KeycloakAuthProvider({ children }: { children: ReactNode 
}) { } useEffect(() => { - // Re-check auth state on mount (in case token expired between initial check and mount) - const authenticated = TokenManager.isAuthenticated() - if (authenticated !== isAuthenticated) { - setIsAuthenticated(authenticated) - if (authenticated) { - const info = TokenManager.getUserInfo() - setUserInfo(info) - // Fetch MongoDB user data + // Check auth state with launcher support (async) + const checkAuth = async () => { + console.log('[KC-AUTH] Checking authentication (launcher-aware)...') + const authenticated = await TokenManager.isAuthenticatedAsync() + console.log('[KC-AUTH] Authentication result:', authenticated) + + if (authenticated !== isAuthenticated) { + setIsAuthenticated(authenticated) + if (authenticated) { + const info = TokenManager.getUserInfo() + setUserInfo(info) + // Fetch MongoDB user data + fetchUserData() + // Set up token refresh + setupTokenRefresh() + } else { + setUserInfo(null) + setUser(null) + setIsLoading(false) + } + } else if (authenticated && !user) { + // If already authenticated but no user data, fetch it fetchUserData() - // Set up token refresh - setupTokenRefresh() - } else { - setUserInfo(null) - setUser(null) + // Set up token refresh if not already set + if (!refreshTimeoutId) { + setupTokenRefresh() + } + } else if (!authenticated) { + setIsLoading(false) } - } else if (authenticated && !user) { - // If already authenticated but no user data, fetch it - fetchUserData() - // Set up token refresh if not already set - if (!refreshTimeoutId) { - setupTokenRefresh() + } + + checkAuth() + + // Listen for token updates from launcher + const handleMessage = (event: MessageEvent) => { + if (event.data.type === 'KC_TOKENS_UPDATED') { + console.log('[KC-AUTH] Received token update notification from launcher, re-checking auth...') + checkAuth() } } + window.addEventListener('message', handleMessage) + // Clean up on unmount return () => { + window.removeEventListener('message', handleMessage) if 
(refreshTimeoutId) { console.log('[KC-AUTH] Cleaning up token refresh timeout on unmount') clearTimeout(refreshTimeoutId) @@ -288,10 +309,10 @@ export function KeycloakAuthProvider({ children }: { children: ReactNode }) { // Store tokens TokenManager.storeTokens(tokens) - console.log('[KC-AUTH] Tokens stored in sessionStorage') + console.log('[KC-AUTH] Tokens stored in localStorage') // Verify storage worked - const storedToken = sessionStorage.getItem('kc_access_token') + const storedToken = localStorage.getItem('kc_access_token') console.log('[KC-AUTH] Verified storage:', { hasStoredToken: !!storedToken, storedTokenPreview: storedToken?.substring(0, 30) + '...' @@ -312,8 +333,8 @@ export function KeycloakAuthProvider({ children }: { children: ReactNode }) { sessionStorage.removeItem('oauth_state') } - const getAccessToken = () => { - return TokenManager.getAccessToken() + const getAccessToken = async () => { + return await TokenManager.getAccessToken() } return ( diff --git a/ushadow/frontend/src/hooks/useWebRecording.ts b/ushadow/frontend/src/hooks/useWebRecording.ts index 662b31fb..31454292 100644 --- a/ushadow/frontend/src/hooks/useWebRecording.ts +++ b/ushadow/frontend/src/hooks/useWebRecording.ts @@ -303,7 +303,7 @@ export const useWebRecording = (): WebRecordingReturn => { // Get auth token - prefer Keycloak token, fallback to legacy token // This matches the pattern used in api.ts request interceptor - const kcToken = sessionStorage.getItem('kc_access_token') + const kcToken = localStorage.getItem('kc_access_token') const legacyToken = localStorage.getItem(getStorageKey('token')) const token = kcToken || legacyToken diff --git a/ushadow/frontend/src/pages/KubernetesClustersPage.tsx b/ushadow/frontend/src/pages/KubernetesClustersPage.tsx index 1b6f2f00..008cf391 100644 --- a/ushadow/frontend/src/pages/KubernetesClustersPage.tsx +++ b/ushadow/frontend/src/pages/KubernetesClustersPage.tsx @@ -90,6 +90,11 @@ export default function KubernetesClustersPage() { 
const [namespace, setNamespace] = useState('default') const [error, setError] = useState(null) + // Ingress configuration editing + const [editingCluster, setEditingCluster] = useState(null) + const [ingressDomain, setIngressDomain] = useState('') + const [ingressEnabledByDefault, setIngressEnabledByDefault] = useState(false) + useEffect(() => { loadClusters() }, []) @@ -196,6 +201,29 @@ export default function KubernetesClustersPage() { } } + const handleDeleteInfraScan = async (clusterId: string, namespace: string) => { + if (!confirm(`Delete infrastructure scan for namespace "${namespace}"?`)) { + return + } + + try { + await kubernetesApi.deleteInfraScan(clusterId, namespace) + + // Remove from local state + setScanResults(prev => { + const updated = { ...prev } + delete updated[`${clusterId}-${namespace}`] + return updated + }) + + // Refresh clusters to update infra_scans + await loadClusters() + } catch (err: any) { + console.error('Error deleting infrastructure scan:', err) + alert(`Failed to delete scan: ${err.response?.data?.detail || err.message}`) + } + } + const handleOpenNamespaceSelector = (clusterId: string) => { const cluster = clusters.find(c => c.cluster_id === clusterId) setScanNamespace(cluster?.namespace || 'ushadow') @@ -454,13 +482,23 @@ export default function KubernetesClustersPage() { {foundInfra} in {namespace} - +
+ + +
) @@ -482,6 +520,107 @@ export default function KubernetesClustersPage() { )} + {/* Ingress Configuration */} +
+
+

+ Ingress Configuration +

+ {editingCluster !== cluster.cluster_id && ( + + )} +
+ + {editingCluster === cluster.cluster_id ? ( +
+
+ + { + const value = e.target.value.toLowerCase() + if (/^[a-z0-9.-]*$/.test(value)) { + setIngressDomain(value) + } + }} + placeholder="shadow" + className="w-full px-3 py-2 text-sm rounded border border-neutral-300 dark:border-neutral-600 bg-white dark:bg-neutral-800 text-neutral-900 dark:text-neutral-100" + data-testid={`ingress-domain-input-${cluster.cluster_id}`} + /> +
+ + + +
+ + +
+
+ ) : ( +
+ {cluster.ingress_domain ? ( +
+
+ Domain: .{cluster.ingress_domain} +
+
+ Auto-enable: {cluster.ingress_enabled_by_default ? 'βœ“ Yes' : 'βœ— No'} +
+
+ ) : ( +
+ Not configured +
+ )} +
+ )} +
+ {/* Actions */}
@@ -614,36 +753,54 @@ export default function KubernetesClustersPage() { {showScanResults && renderInfraScanResults(showScanResults)} {/* Deploy to K8s Modal */} - {showDeployModal && selectedClusterForDeploy && ( - { - setShowDeployModal(false) - setSelectedClusterForDeploy(null) - }} - target={{ - id: selectedClusterForDeploy.deployment_target_id, - type: 'k8s', - name: selectedClusterForDeploy.name, - identifier: selectedClusterForDeploy.cluster_id, - environment: selectedClusterForDeploy.environment || 'unknown', - status: selectedClusterForDeploy.status || 'unknown', - namespace: selectedClusterForDeploy.namespace, - infrastructure: Object.keys(scanResults).find(key => key.startsWith(selectedClusterForDeploy.cluster_id)) - ? scanResults[Object.keys(scanResults).find(key => key.startsWith(selectedClusterForDeploy.cluster_id))!].infra_services - : undefined, - provider: selectedClusterForDeploy.labels?.provider, - region: selectedClusterForDeploy.labels?.region, - is_leader: undefined, - raw_metadata: selectedClusterForDeploy - }} - infraServices={ - Object.keys(scanResults).find(key => key.startsWith(selectedClusterForDeploy.cluster_id)) - ? 
scanResults[Object.keys(scanResults).find(key => key.startsWith(selectedClusterForDeploy.cluster_id))!].infra_services - : undefined + {showDeployModal && selectedClusterForDeploy && (() => { + // Get infrastructure services - exclude target namespace as it contains deployed services + let infraServices: any = undefined + if (selectedClusterForDeploy.infra_scans) { + const targetNs = selectedClusterForDeploy.namespace || 'ushadow' + + // Filter out the target namespace from infra scans + const infraScanKeys = Object.keys(selectedClusterForDeploy.infra_scans).filter( + ns => ns !== targetNs + ) + + console.log(`πŸ” [K8sPage] Target namespace: ${targetNs}, available infra scans:`, infraScanKeys) + + // Use first available infrastructure scan (not the target namespace) + if (infraScanKeys.length > 0) { + const infraNs = infraScanKeys[0] + infraServices = selectedClusterForDeploy.infra_scans[infraNs] + console.log(`πŸ” [K8sPage] Using infrastructure from '${infraNs}':`, infraServices?.mongo) + } else { + console.log(`⚠️ [K8sPage] No infrastructure scans available (target namespace excluded)`) } - /> - )} + } + + return ( + { + setShowDeployModal(false) + setSelectedClusterForDeploy(null) + }} + target={{ + id: selectedClusterForDeploy.deployment_target_id, + type: 'k8s', + name: selectedClusterForDeploy.name, + identifier: selectedClusterForDeploy.cluster_id, + environment: selectedClusterForDeploy.environment || 'unknown', + status: selectedClusterForDeploy.status || 'unknown', + namespace: selectedClusterForDeploy.namespace, + infrastructure: infraServices, + provider: selectedClusterForDeploy.labels?.provider, + region: selectedClusterForDeploy.labels?.region, + is_leader: undefined, + raw_metadata: selectedClusterForDeploy + }} + infraServices={infraServices} + /> + ) + })()} {/* Add Cluster Modal */} {showAddModal && createPortal( diff --git a/ushadow/frontend/src/pages/LoginPage.tsx b/ushadow/frontend/src/pages/LoginPage.tsx index b0a91154..7542a23b 100644 --- 
a/ushadow/frontend/src/pages/LoginPage.tsx +++ b/ushadow/frontend/src/pages/LoginPage.tsx @@ -2,35 +2,72 @@ import React from 'react' import { useNavigate, useLocation } from 'react-router-dom' import { useKeycloakAuth } from '../contexts/KeycloakAuthContext' import AuthHeader from '../components/auth/AuthHeader' -import { LogIn } from 'lucide-react' +import { LogIn, ExternalLink, UserPlus } from 'lucide-react' export default function LoginPage() { const navigate = useNavigate() const location = useLocation() const { isAuthenticated, isLoading, login, register } = useKeycloakAuth() - // Get the intended destination from router state (set by ProtectedRoute) - // or from query param (used by share pages and other public routes) + // Parse query parameters once const searchParams = new URLSearchParams(location.search) + const isLauncherMode = searchParams.get('launcher') === 'true' const returnTo = searchParams.get('returnTo') - const from = (location.state as { from?: string })?.from || returnTo || '/' + + // Get the intended destination from router state (set by ProtectedRoute) or from query param + // Default to /cluster instead of / to avoid redirect loop + const from = (location.state as { from?: string })?.from || returnTo || '/cluster' // After successful login, redirect to intended destination // Note: Don't redirect if we're on the callback page - that's handled by OAuthCallback component React.useEffect(() => { if (isAuthenticated && location.pathname !== '/oauth/callback') { + console.log('[LoginPage] Already authenticated, redirecting to:', from) navigate(from, { replace: true, state: { fromAuth: true } }) } }, [isAuthenticated, navigate, from, location.pathname]) - const handleLogin = () => { + const handleLogin = async () => { + console.log('[LoginPage] Login button clicked') + + // If in launcher mode, open in external browser + if (isLauncherMode) { + console.log('[LoginPage] Launcher mode detected, opening in browser') + const url = new 
URL(window.location.href) + url.searchParams.delete('launcher') + window.open(url.toString(), '_blank') + return + } + // Redirect to Keycloak login page - login(from) + console.log('[LoginPage] Starting Keycloak SSO login, redirect target:', from) + try { + await login(from) + } catch (error) { + console.error('[LoginPage] Login failed:', error) + } } - const handleRegister = () => { + const handleRegister = async () => { + console.log('[LoginPage] Register button clicked') + + // If in launcher mode, open in external browser + if (isLauncherMode) { + console.log('[LoginPage] Launcher mode detected, opening in browser') + const url = new URL(window.location.href) + url.searchParams.delete('launcher') + url.searchParams.set('register', 'true') + window.open(url.toString(), '_blank') + return + } + // Redirect to Keycloak registration page - register(from) + console.log('[LoginPage] Starting Keycloak SSO registration, redirect target:', from) + try { + await register(from) + } catch (error) { + console.error('[LoginPage] Registration failed:', error) + } } // Show loading while checking authentication @@ -95,6 +132,26 @@ export default function LoginPage() { border: '1px solid #27272a', }} > + {isLauncherMode && ( +
+
+ +
+

Authentication Required

+

+ Authentication must be completed in your browser. Click below to continue. +

+
+
+
+ )} +

Welcome to Ushadow @@ -105,50 +162,40 @@ export default function LoginPage() {

{/* Sign in with Keycloak Button */} - - -
-

- You'll be redirected to Keycloak for secure authentication -

-
+
+ - {/* Divider */} -
-
-
-
-
- - Or - -
-
+ -
-

- Don't have an account?{' '} - -

+
+

+ You'll be redirected to Keycloak for authentication +

+
diff --git a/ushadow/frontend/src/services/api.ts b/ushadow/frontend/src/services/api.ts index 725ad882..6555cc19 100644 --- a/ushadow/frontend/src/services/api.ts +++ b/ushadow/frontend/src/services/api.ts @@ -62,14 +62,17 @@ export const api = axios.create({ // Add request interceptor to include auth token api.interceptors.request.use((config) => { - // Check for Keycloak token first (in sessionStorage) - const kcToken = sessionStorage.getItem('kc_access_token') + // Check for Keycloak token first (in localStorage) + const kcToken = localStorage.getItem('kc_access_token') + + // Check for native login token (in localStorage - persists) + const nativeToken = localStorage.getItem('ushadow_access_token') // Fallback to legacy JWT token (in localStorage) const legacyToken = localStorage.getItem(getStorageKey('token')) - // Prefer Keycloak token if both are present - const token = kcToken || legacyToken + // Priority: Keycloak > Native > Legacy (all in localStorage now) + const token = kcToken || nativeToken || legacyToken if (token) { config.headers.Authorization = `Bearer ${token}` @@ -94,18 +97,13 @@ api.interceptors.response.use( // Let the component handle the service-specific auth error } else { // Token expired or invalid on core ushadow endpoints, redirect to login - console.warn('πŸ” API: 401 Unauthorized on ushadow endpoint - clearing all tokens and redirecting to login') - - // Clear legacy token + console.warn('πŸ” API: 401 Unauthorized on ushadow endpoint - clearing token and redirecting to login') localStorage.removeItem(getStorageKey('token')) - - // Clear Keycloak tokens (IMPORTANT: prevents infinite loop with invalid tokens) - sessionStorage.removeItem('kc_access_token') - sessionStorage.removeItem('kc_refresh_token') - sessionStorage.removeItem('kc_id_token') - sessionStorage.removeItem('kc_expires_at') - sessionStorage.removeItem('kc_refresh_expires_at') - + localStorage.removeItem('ushadow_access_token') + localStorage.removeItem('ushadow_user') 
+ localStorage.removeItem('kc_access_token') + localStorage.removeItem('kc_refresh_token') + localStorage.removeItem('kc_id_token') window.location.href = '/login' } } else if (error.code === 'ECONNABORTED') { @@ -660,7 +658,16 @@ export const kubernetesApi = { api.get(`/api/kubernetes/${clusterId}`), removeCluster: (clusterId: string) => api.delete(`/api/kubernetes/${clusterId}`), - updateCluster: (clusterId: string, updates: Partial>) => + updateCluster: (clusterId: string, updates: { + name?: string + namespace?: string + infra_namespace?: string + labels?: Record + ingress_domain?: string + ingress_class?: string + ingress_enabled_by_default?: boolean + tailscale_magicdns_enabled?: boolean + }) => api.patch(`/api/kubernetes/${clusterId}`, updates), // Service management @@ -675,6 +682,10 @@ export const kubernetesApi = { `/api/kubernetes/${clusterId}/scan-infra`, { namespace } ), + deleteInfraScan: (clusterId: string, namespace: string) => + api.delete<{ cluster_id: string; namespace: string; message: string }>( + `/api/kubernetes/${clusterId}/scan-infra/${namespace}` + ), createEnvmap: (clusterId: string, data: { service_name: string; namespace?: string; env_vars: Record }) => api.post<{ success: boolean; configmap: string | null; secret: string | null; namespace: string }>( `/api/kubernetes/${clusterId}/envmap`, @@ -1620,14 +1631,7 @@ export const tailscaleApi = { provisionCertInContainer: (hostname: string) => api.post('/api/tailscale/container/provision-cert', null, { params: { hostname } }), configureServe: (config: TailscaleConfig) => - api.post<{ - status: string; - message: string; - routes?: string; - hostname?: string; - keycloak_registered?: boolean; - keycloak_message?: string; - }>('/api/tailscale/configure-serve', config), + api.post<{ status: string; message: string; routes?: string; hostname?: string }>('/api/tailscale/configure-serve', config), getServeStatus: () => api.get<{ status: string; routes: string | null; error?: string 
}>('/api/tailscale/serve-status'), updateCorsOrigins: (hostname: string) => @@ -2085,10 +2089,7 @@ export const githubImportApi = { }), } -// ============================================================================= -// Dashboard API - Chronicle activity monitoring -// ============================================================================= - +// Dashboard API types export enum ActivityType { CONVERSATION = 'conversation', MEMORY = 'memory', @@ -2116,13 +2117,13 @@ export interface DashboardData { last_updated: string } +// Dashboard API endpoints export const dashboardApi = { - /** Get complete dashboard data (stats + recent conversations & memories) */ - getDashboardData: (conversationLimit?: number, memoryLimit?: number) => - api.get('/api/dashboard/', { + getDashboardData: (conversationLimit: number = 10, memoryLimit: number = 10) => + api.get('/api/dashboard', { params: { conversation_limit: conversationLimit, - memory_limit: memoryLimit + memory_limit: memoryLimit, }, }), } diff --git a/ushadow/frontend/src/services/chronicleApi.ts b/ushadow/frontend/src/services/chronicleApi.ts index cff9a332..a62648a5 100644 --- a/ushadow/frontend/src/services/chronicleApi.ts +++ b/ushadow/frontend/src/services/chronicleApi.ts @@ -402,7 +402,7 @@ export async function getChronicleAudioUrl(conversationId: string, cropped: bool const proxyUrl = await getChronicleProxyUrl() // Get auth token - prefer Keycloak token, fallback to legacy token - const kcToken = sessionStorage.getItem('kc_access_token') + const kcToken = localStorage.getItem('kc_access_token') const legacyToken = localStorage.getItem(getStorageKey('token')) const token = kcToken || legacyToken || '' diff --git a/ushadow/launcher/.claude/hooks/idle-notification.sh b/ushadow/launcher/.claude/hooks/idle-notification.sh new file mode 100755 index 00000000..9d3ce076 --- /dev/null +++ b/ushadow/launcher/.claude/hooks/idle-notification.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Claude Code Notification hook - fires on 
idle_prompt +# Move ticket to in_review when agent is waiting for user input + +# Log for debugging +echo "[$(date)] idle-notification hook fired" >> /tmp/claude-kanban-hooks.log + +BRANCH=$(git branch --show-current 2>/dev/null) + +if [ -z "$BRANCH" ]; then + exit 0 +fi + +if command -v kanban-cli >/dev/null 2>&1; then + kanban-cli move-to-review "$BRANCH" 2>/dev/null + echo "[$(date)] Moved $BRANCH to review" >> /tmp/claude-kanban-hooks.log +fi + +exit 0 diff --git a/ushadow/launcher/.claude/hooks/kanban-status.sh b/ushadow/launcher/.claude/hooks/kanban-status.sh new file mode 100755 index 00000000..59cfb98e --- /dev/null +++ b/ushadow/launcher/.claude/hooks/kanban-status.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Claude Code hook for automatic Kanban status updates +# +# This script is called by Claude Code hooks to automatically update +# ticket status based on agent activity. + +# Get current branch name +BRANCH=$(git branch --show-current 2>/dev/null) + +if [ -z "$BRANCH" ]; then + # Not in a git repository or no branch, skip + exit 0 +fi + +# Function to update status via kanban-cli +update_status() { + local status="$1" + local command="$2" + + if command -v kanban-cli >/dev/null 2>&1; then + kanban-cli "$command" "$BRANCH" 2>/dev/null + fi +} + +# Determine which hook triggered this script +case "$CLAUDE_HOOK_NAME" in + "SessionStart") + # Agent session started - move to in_progress + update_status "in_progress" "move-to-progress" + ;; + "UserPromptSubmit") + # User just submitted a response - agent resuming work + update_status "in_progress" "move-to-progress" + ;; + "AssistantWaitingForUser") + # Agent is waiting for user input - move to in_review + update_status "in_review" "move-to-review" + ;; + "SessionEnd") + # Agent session ended - move to in_review + update_status "in_review" "move-to-review" + ;; +esac + +exit 0 diff --git a/ushadow/launcher/.claude/hooks/session-end.sh b/ushadow/launcher/.claude/hooks/session-end.sh new file mode 100755 index 
00000000..1289a999 --- /dev/null +++ b/ushadow/launcher/.claude/hooks/session-end.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Claude Code SessionEnd hook - agent session ending +# Move ticket to in_review (waiting for human to review/respond) + +BRANCH=$(git branch --show-current 2>/dev/null) + +if [ -z "$BRANCH" ]; then + exit 0 +fi + +if command -v kanban-cli >/dev/null 2>&1; then + kanban-cli move-to-review "$BRANCH" 2>/dev/null +fi + +exit 0 diff --git a/ushadow/launcher/.claude/hooks/session-start.sh b/ushadow/launcher/.claude/hooks/session-start.sh new file mode 100755 index 00000000..d23e8c00 --- /dev/null +++ b/ushadow/launcher/.claude/hooks/session-start.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Claude Code SessionStart hook - agent session just started +# Move ticket to in_progress + +BRANCH=$(git branch --show-current 2>/dev/null) + +if [ -z "$BRANCH" ]; then + exit 0 +fi + +if command -v kanban-cli >/dev/null 2>&1; then + kanban-cli move-to-progress "$BRANCH" 2>/dev/null +fi + +exit 0 diff --git a/ushadow/launcher/.claude/hooks/user-prompt-submit.sh b/ushadow/launcher/.claude/hooks/user-prompt-submit.sh new file mode 100755 index 00000000..652975b9 --- /dev/null +++ b/ushadow/launcher/.claude/hooks/user-prompt-submit.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Claude Code UserPromptSubmit hook - user just submitted a prompt +# Move ticket to in_progress (agent resuming work after waiting) + +BRANCH=$(git branch --show-current 2>/dev/null) + +if [ -z "$BRANCH" ]; then + exit 0 +fi + +if command -v kanban-cli >/dev/null 2>&1; then + kanban-cli move-to-progress "$BRANCH" 2>/dev/null +fi + +exit 0 diff --git a/ushadow/launcher/.claude/settings.local.json b/ushadow/launcher/.claude/settings.local.json new file mode 100644 index 00000000..5abaafbe --- /dev/null +++ b/ushadow/launcher/.claude/settings.local.json @@ -0,0 +1,10 @@ +{ + "permissions": { + "allow": [ + "Bash(git rebase:*)", + "Bash(git merge:*)", + "Bash(git checkout:*)", + "Bash(git add:*)" + ] + } +} diff --git 
a/ushadow/launcher/AGENT_SELF_REPORTING.md new file mode 100644 index 00000000..3e94c83e --- /dev/null +++ b/ushadow/launcher/AGENT_SELF_REPORTING.md @@ -0,0 +1,277 @@ +# Agent Self-Reporting - Kanban Status Updates + +This document explains how AI agents (like Claude Code) should automatically update Kanban ticket status as they work. + +## The Workflow + +### 1. Agent Starts Working (Human Provides Input) +```bash +kanban-cli move-to-progress "$BRANCH_NAME" +``` +**Status:** `in_progress` 🤖 + +**When:** Agent receives a user response and resumes work + +**Example:** +```bash +# User responds to agent's question +# Agent runs: +kanban-cli move-to-progress "generalLamcher" +# → All tickets for this branch move to "in_progress" +``` + +### 2. Agent Waits for Human Input +```bash +kanban-cli move-to-review "$BRANCH_NAME" +``` +**Status:** `in_review` 💬 + +**When:** Agent has a question or needs human input + +**Example:** +```bash +# Agent asks: "Should I use TypeScript or JavaScript?" +# Before showing the prompt, agent runs: +kanban-cli move-to-review "generalLamcher" +# → Tickets move to "in_review" (waiting for human) +``` + +### 3. Work is Merged (Automatic via Hook) +```bash +# This happens automatically when you run: +workmux merge +``` +**Status:** `done` ✅ + +**When:** Human runs `workmux merge` to merge the branch + +**Hook runs:** `kanban-cli move-to-done "$WM_BRANCH_NAME"` + +## Complete Example Flow + +```bash +# 1. User asks agent to implement a feature +# Agent starts working +kanban-cli move-to-progress "feature-auth" +# Status: in_progress 🤖 + +# 2. Agent encounters a decision point +# Agent asks: "Which database? PostgreSQL or MongoDB?" +kanban-cli move-to-review "feature-auth" +# Status: in_review 💬 + +# 3. User responds: "PostgreSQL" +# Agent resumes work +kanban-cli move-to-progress "feature-auth" +# Status: in_progress 🤖 + +# 4. Agent finishes, asks: "Ready to merge?" 
+kanban-cli move-to-review "feature-auth" +# Status: in_review 💬 + +# 5. User confirms and merges +workmux merge feature-auth +# Hook automatically runs: kanban-cli move-to-done "feature-auth" +# Status: done ✅ +``` + +## Implementation in Claude Code + +### Option 1: Manual Agent Commands + +The agent explicitly calls these commands at appropriate times: + +```bash +# In agent's workflow: +echo "[AGENT] Starting work on ticket..." +kanban-cli move-to-progress "$(git branch --show-current)" + +# ... do work ... + +# When needing input: +echo "[AGENT] Waiting for human response..." +kanban-cli move-to-review "$(git branch --show-current)" +``` + +### Option 2: Helper Scripts + +Source the helper script in your shell: + +```bash +# In ~/.zshrc or ~/.bashrc +source /path/to/launcher/kanban-status-helpers.sh + +# Then the agent can use: +kb-start # Start working (move to in_progress) +kb-waiting # Wait for human (move to in_review) +kb-status # Check current status +``` + +### Option 3: Agent Integration (Future) + +Ideally, the agent framework itself should call these: + +```python +# Pseudo-code for agent framework +class ClaudeAgent: + def on_user_input(self, message): + self.update_kanban_status("in_progress") + # ... process input ... + + def ask_user(self, question): + self.update_kanban_status("in_review") + # ... wait for response ... +``` + +## Environment Variables + +The agent should know which ticket it's working on. 
Set these: + +```bash +# When creating a worktree for a ticket +export TICKET_ID="ticket-abc-123" +export BRANCH_NAME="feature-auth" + +# Then the agent can use: +kanban-cli move-to-progress "$BRANCH_NAME" +``` + +## Detection Strategies + +### How Agent Knows When to Call + +**Starting Work (move to in_progress):** +- After receiving user's response to a question +- When user provides a new task/instruction +- When resuming from paused state + +**Waiting for Human (move to in_review):** +- Before calling `input()` or equivalent +- When presenting options/choices +- When asking for clarification +- When work is complete and awaiting approval + +**Example Patterns to Detect:** + +```python +# Python agent example +def ask_user(question): + # BEFORE asking: + run_command("kanban-cli move-to-review $(git branch --show-current)") + + # Now ask: + response = input(question) + + # AFTER receiving response: + run_command("kanban-cli move-to-progress $(git branch --show-current)") + + return response +``` + +## Workmux Hook Configuration + +The merge hook is already configured in `~/.config/workmux/config.yaml`: + +```yaml +pre_merge: + - kanban-cli move-to-done "$WM_BRANCH_NAME" +``` + +This runs automatically when you execute `workmux merge`. + +## Testing + +Test the full workflow: + +```bash +# 1. Start working +kanban-cli move-to-progress "generalLamcher" +kanban-cli find-by-branch "generalLamcher" +# Should show: in_progress + +# 2. Wait for human +kanban-cli move-to-review "generalLamcher" +kanban-cli find-by-branch "generalLamcher" +# Should show: in_review + +# 3. Resume work +kanban-cli move-to-progress "generalLamcher" +kanban-cli find-by-branch "generalLamcher" +# Should show: in_progress + +# 4. 
Merge (when ready) +workmux merge generalLamcher +# Should show: done +``` + +## Debugging + +### Check Current Status +```bash +kanban-cli find-by-branch "$(git branch --show-current)" +``` + +### View Database +```bash +sqlite3 ~/Library/Application\ Support/com.ushadow.launcher/kanban.db \ + "SELECT id, title, status, branch_name FROM tickets" +``` + +### Test Hook +```bash +# See what workmux will run +cat ~/.config/workmux/config.yaml + +# Test the command manually +export WM_BRANCH_NAME="test-branch" +kanban-cli move-to-done "$WM_BRANCH_NAME" +``` + +## Best Practices + +1. **Always use branch name as identifier** - Most reliable +2. **Call move-to-progress when resuming** - Keep status accurate +3. **Call move-to-review before every prompt** - Signal waiting state +4. **Let workmux handle "done"** - Don't manually mark as done +5. **Check status with find-by-branch** - Verify updates worked + +## Future Enhancements + +- [ ] Auto-detect agent activity (no manual calls needed) +- [ ] Integration with agent frameworks (LangChain, etc.) 
+- [ ] Tmux pane monitoring for automatic detection +- [ ] Web API for external integrations +- [ ] Slack/Discord notifications on status changes + +## Troubleshooting + +**Commands not found:** +```bash +# Make sure kanban-cli is in PATH +which kanban-cli +# Should output: /Users/username/.local/bin/kanban-cli +``` + +**No tickets found:** +```bash +# Check if tickets are linked to this branch +kanban-cli find-by-branch "$(git branch --show-current)" + +# Verify database exists +ls ~/Library/Application\ Support/com.ushadow.launcher/kanban.db +``` + +**Status not updating:** +```bash +# Run with full path +~/.local/bin/kanban-cli move-to-progress "branch-name" + +# Check stderr for errors +kanban-cli move-to-progress "branch-name" 2>&1 +``` + +## See Also + +- [KANBAN_HOOKS.md](./KANBAN_HOOKS.md) - Technical reference +- [KANBAN_HOOKS_EXAMPLE.md](./KANBAN_HOOKS_EXAMPLE.md) - Complete walkthrough +- [README.md](./README.md) - Launcher overview diff --git a/ushadow/launcher/KANBAN_AUTO_STATUS.md b/ushadow/launcher/KANBAN_AUTO_STATUS.md new file mode 100644 index 00000000..9bdc7baf --- /dev/null +++ b/ushadow/launcher/KANBAN_AUTO_STATUS.md @@ -0,0 +1,422 @@ +# Automatic Kanban Status Updates - Complete Guide + +This document explains how ticket status automatically updates based on agent activity, combining multiple layers of automation. + +## Architecture Overview + +The system uses **three layers** of automatic status updates: + +1. **Launcher Integration** - Updates status when starting agents +2. **Claude Code Hooks** - Updates status based on session events +3. **Workmux Hooks** - Updates status when merging branches + +This approach is inspired by vibe-kanban's backend service integration but adapted to work with the ushadow launcher's architecture. 
+ +## How It Works + +### Status Flow + +``` +User creates ticket + ↓ +Launcher starts agent for ticket β†’ status: in_progress + ↓ +Agent working actively β†’ status: in_progress + ↓ +Agent finishes, session ends β†’ status: in_review + ↓ +User responds to agent β†’ status: in_progress + ↓ +(repeat as needed) + ↓ +User merges branch β†’ status: done +``` + +## Layer 1: Launcher Integration + +**File**: `src-tauri/src/commands/kanban.rs` + +When you start an agent for a ticket using the launcher, the `start_coding_agent_for_ticket` function automatically moves the ticket to `in_progress`. + +**Code** (lines 784-806): +```rust +// Automatically move ticket to in_progress when starting agent +eprintln!("[start_coding_agent_for_ticket] Moving ticket to in_progress..."); +if let Some(branch_name) = &ticket.branch_name { + let status_update = shell_command(&format!("kanban-cli move-to-progress \"{}\"", branch_name)) + .output(); + // ... error handling ... +} +``` + +**When it triggers**: Immediately when starting an agent for a ticket + +**What it does**: Moves ticket from `backlog` or `todo` to `in_progress` + +## Layer 2: Claude Code Hooks + +**Files**: `.claude/hooks/*.sh`, `.claude/settings.local.json` + +Claude Code hooks automatically update status based on session lifecycle events. + +### Hook Scripts + +1. **session-start.sh** - Runs when Claude Code starts + - Moves ticket to `in_progress` + - Indicates agent is ready to work + +2. **user-prompt-submit.sh** - Runs when user submits a prompt + - Moves ticket to `in_progress` + - Indicates agent resuming work after user responds + +3. **idle-notification.sh** - Runs when Claude Code becomes idle (waiting for input) + - Moves ticket to `in_review` + - Indicates agent finished responding and is waiting for user + - **This is the key hook** - fires after each agent response! + +4. 
**session-end.sh** - Runs when Claude Code exits + - Moves ticket to `in_review` + - Indicates session ended, waiting for human review + +### Configuration + +Hooks are configured in `.claude/settings.local.json`: + +```json +{ + "hooks": { + "SessionStart": [{ + "hooks": [{ + "type": "command", + "command": ".claude/hooks/session-start.sh", + "async": true + }] + }], + "UserPromptSubmit": [{ + "hooks": [{ + "type": "command", + "command": ".claude/hooks/user-prompt-submit.sh", + "async": true + }] + }], + "Notification": [{ + "matcher": "idle_prompt", + "hooks": [{ + "type": "command", + "command": ".claude/hooks/idle-notification.sh", + "async": true + }] + }], + "SessionEnd": [{ + "hooks": [{ + "type": "command", + "command": ".claude/hooks/session-end.sh", + "async": true + }] + }] + } +} +``` + +**Key features**: +- Hooks run asynchronously (don't block agent) +- Automatically detect current branch +- Silently skip if not in a git repo +- Require kanban-cli to be in PATH +- **`idle_prompt` notification** detects when agent finishes each response (not just session end!) + - This gives per-turn status updates within a long-running session + - Similar to vibe-kanban's approach for ACP-based agents + +## Layer 3: Workmux Integration + +**File**: `~/.config/workmux/config.yaml` + +When you merge a branch using `workmux merge`, it automatically moves tickets to `done`. + +**Configuration**: +```yaml +pre_merge: + - kanban-cli move-to-done "$WM_BRANCH_NAME" +``` + +**When it triggers**: Before `workmux merge` completes + +**What it does**: Moves all tickets for the branch to `done` + +## Installation & Setup + +### Prerequisites + +1. **kanban-cli must be installed**: + ```bash + cd ushadow/launcher/src-tauri + cargo build --release --bin kanban-cli + cp target/release/kanban-cli ~/.local/bin/ + ``` + +2. **Verify kanban-cli is in PATH**: + ```bash + which kanban-cli + # Should output: /Users/username/.local/bin/kanban-cli + ``` + +3. 
**Workmux hook configured** (should already be set): + ```bash + cat ~/.config/workmux/config.yaml + # Should contain: kanban-cli move-to-done "$WM_BRANCH_NAME" + ``` + +### Automatic Hook Setup + +The Claude Code hooks are already configured in this repository: + +βœ… `.claude/hooks/session-start.sh` - Executable +βœ… `.claude/hooks/user-prompt-submit.sh` - Executable +βœ… `.claude/hooks/idle-notification.sh` - Executable (NEW!) +βœ… `.claude/hooks/session-end.sh` - Executable +βœ… `.claude/settings.local.json` - Hooks configured with idle_prompt matcher + +**No additional setup needed** - hooks will run automatically when you use Claude Code in this project. + +### Manual Setup (for other projects) + +To add automatic status updates to another project: + +1. Create `.claude/hooks/` directory +2. Copy hook scripts from this project +3. Make scripts executable: `chmod +x .claude/hooks/*.sh` +4. Add hooks configuration to `.claude/settings.local.json` + +## Testing + +### Test Layer 1: Launcher Integration + +```bash +# Create a test ticket in the launcher UI +# Start agent for the ticket +# Check status: +kanban-cli find-by-branch "ticket-branch-name" +# Should show: in_progress +``` + +### Test Layer 2: Claude Code Hooks + +```bash +# Start Claude Code in a ticket branch +claude + +# Check status during session: +kanban-cli find-by-branch "$(git branch --show-current)" +# Should show: in_progress + +# Exit Claude Code (Ctrl+C or Ctrl+D) +# Check status after exit: +kanban-cli find-by-branch "$(git branch --show-current)" +# Should show: in_review + +# Start Claude Code again +# Respond to a prompt +# Should move back to: in_progress +``` + +### Test Layer 3: Workmux Integration + +```bash +# After completing work on a ticket +workmux merge ticket-branch-name + +# Check status: +kanban-cli find-by-branch "ticket-branch-name" +# Should show: done +``` + +### Debug Hooks + +To see if hooks are running: + +```bash +# Check Claude Code debug logs +tail -f 
~/.claude/debug/*.log | grep -i kanban + +# Check workmux hook execution +# Should see output when running: workmux merge +``` + +## Comparison with Vibe-Kanban + +### Vibe-Kanban Approach + +**Architecture**: Backend service with execution process management + +**Key insights from code analysis**: +- `crates/services/src/services/container.rs` + - `start_execution` (line 974-992): Updates to `InProgress` + - `spawn_exit_monitor` (line 342-540): Waits for process exit or exit signal + - `finalize_task` (line 166-213): Updates to `InReview` + +**How it detects completion**: +- **For ACP-based agents** (Gemini, Qwen): + - Uses Agent Client Protocol with awaitable `prompt()` method + - Sends exit signal when turn completes (container.rs:486-487) + - Status updates after each response + +- **For Claude Code**: + - Spawns **new process for each prompt**! (container.rs:363 - exit_signal is None) + - Process exits when response complete + - No long-running session + +### Ushadow Launcher Approach + +**Architecture**: Hook-based with CLI integration + idle detection + +**Key components**: +- Rust CLI tool (`kanban-cli`) +- Claude Code hooks (shell scripts) +- **`idle_prompt` notification** - detects when agent waits for input +- Launcher integration (Rust code) +- Workmux hooks (YAML config) + +**How it detects completion**: +- **Long-running Claude Code session** +- `Notification(idle_prompt)` hook fires when agent finishes responding +- Status updates after each response (just like vibe-kanban's ACP agents!) 
+- Single process for entire conversation + +**Advantages over vibe-kanban's Claude Code approach**: +- βœ… **Keeps conversation context** - single long-running session +- βœ… **Per-response status updates** - via idle_prompt notification +- βœ… Works with any CLI tool (not just Claude Code) +- βœ… No backend service required +- βœ… Easy to debug (just check CLI calls) +- βœ… No process spawn overhead per prompt + +**Key difference**: +- Vibe-kanban: Short-lived processes (one per prompt) +- Ushadow launcher: Long-lived session + idle detection hooks + +## Troubleshooting + +### Hooks Not Running + +**Check if kanban-cli is in PATH**: +```bash +which kanban-cli +# If not found, install it (see Installation section) +``` + +**Check hook permissions**: +```bash +ls -la .claude/hooks/ +# All .sh files should be executable (rwxr-xr-x) +chmod +x .claude/hooks/*.sh +``` + +**Check Claude Code hook configuration**: +```bash +cat .claude/settings.local.json +# Should contain hooks configuration +``` + +**Enable Claude Code debug logging**: +```bash +CLAUDE_DEBUG=1 claude +tail -f ~/.claude/debug/*.log +``` + +### Status Not Updating + +**Verify ticket is linked to branch**: +```bash +kanban-cli find-by-branch "$(git branch --show-current)" +# Should return ticket(s) +``` + +**Manually test CLI command**: +```bash +kanban-cli move-to-progress "$(git branch --show-current)" +# Should succeed and update status +``` + +**Check database**: +```bash +sqlite3 ~/Library/Application\ Support/com.ushadow.launcher/kanban.db \ + "SELECT id, title, status, branch_name FROM tickets WHERE branch_name = 'your-branch'" +``` + +### Wrong Status After Merge + +**Check workmux hook**: +```bash +cat ~/.config/workmux/config.yaml +# Should contain: kanban-cli move-to-done "$WM_BRANCH_NAME" +``` + +**Test hook manually**: +```bash +export WM_BRANCH_NAME="test-branch" +kanban-cli move-to-done "$WM_BRANCH_NAME" +``` + +## Future Enhancements + +### Potential Improvements + +1. 
**Mid-session detection**: Detect when agent is waiting for user input during a session + - Could use tmux pane monitoring + - Could integrate with Claude Code's message flow + - Would enable more granular status updates + +2. **Activity monitoring**: Detect if agent is actively working vs idle + - Monitor tmux pane activity + - Track time since last command + - Auto-move to in_review after inactivity + +3. **Web API**: Expose status updates via HTTP API + - Allow external tools to update status + - Enable integrations with other systems + - Support webhooks for status changes + +4. **Notifications**: Alert on status changes + - Slack/Discord notifications + - Desktop notifications + - Email alerts + +5. **Analytics**: Track ticket lifecycle metrics + - Time in each status + - Agent productivity metrics + - Bottleneck identification + +## See Also + +- [AGENT_SELF_REPORTING.md](./AGENT_SELF_REPORTING.md) - Agent-side status reporting +- [KANBAN_HOOKS.md](./KANBAN_HOOKS.md) - Technical hook reference +- [KANBAN_HOOKS_EXAMPLE.md](./KANBAN_HOOKS_EXAMPLE.md) - Complete walkthrough +- [README.md](./README.md) - Launcher overview + +## Implementation Notes + +### Why Three Layers? + +Each layer covers a different lifecycle event: + +1. **Launcher**: Knows when agent starts (one-time event) +2. **Claude Code Hooks**: Knows session boundaries (start/end/resume) +3. **Workmux**: Knows when work is merged (completion event) + +No single layer can cover all cases, so we use all three together. + +### Why Async Hooks? + +Hooks run with `"async": true` to avoid blocking: +- Agent can start immediately while status updates in background +- Prevents delays if database is slow +- Failures don't break agent workflow + +### Why CLI Tool? 
+ +Using `kanban-cli` instead of direct database access: +- Consistent error handling +- Easier to debug (run manually) +- Portable (works from shell, scripts, hooks) +- Single source of truth for status logic +- Can be called from any environment diff --git a/ushadow/launcher/KANBAN_HOOKS.md b/ushadow/launcher/KANBAN_HOOKS.md new file mode 100644 index 00000000..36a46e5e --- /dev/null +++ b/ushadow/launcher/KANBAN_HOOKS.md @@ -0,0 +1,289 @@ +# Kanban Hooks Integration + +This document explains how to automatically update Kanban ticket status based on workmux/tmux events. + +## Overview + +The launcher includes a CLI tool (`kanban-cli`) that can be called from workmux hooks to automatically update ticket status. The most common use case is moving tickets to "In Review" when an agent stops working and the branch is ready for merge. + +## Installation + +### 1. Build the CLI Tool + +```bash +cd ushadow/launcher/src-tauri +cargo build --release --bin kanban-cli + +# The binary will be at: target/release/kanban-cli +``` + +### 2. 
Install the CLI Tool + +Copy the binary to a location in your PATH: + +```bash +# Option 1: System-wide installation +sudo cp target/release/kanban-cli /usr/local/bin/ + +# Option 2: User installation +mkdir -p ~/.local/bin +cp target/release/kanban-cli ~/.local/bin/ +# Make sure ~/.local/bin is in your PATH + +# Verify installation +kanban-cli --help +``` + +## CLI Usage + +The `kanban-cli` tool provides several commands: + +### Set Ticket Status + +```bash +kanban-cli set-status +``` + +Statuses: `backlog`, `todo`, `in_progress`, `in_review`, `done`, `archived` + +Example: +```bash +kanban-cli set-status ticket-abc123 in_review +``` + +### Find Tickets + +```bash +# Find by worktree path +kanban-cli find-by-path /path/to/worktree + +# Find by branch name +kanban-cli find-by-branch feature-branch + +# Find by tmux window name +kanban-cli find-by-window ushadow-feature-branch +``` + +### Move to Review (Most Useful) + +The `move-to-review` command is designed for use in hooks. It accepts a flexible identifier and automatically finds matching tickets: + +```bash +kanban-cli move-to-review +``` + +The identifier can be: +- Worktree path +- Branch name +- Tmux window name + +It will: +- Find all tickets matching the identifier +- Skip tickets already in "in_review" or "done" status +- Move remaining tickets to "in_review" +- Exit cleanly even if no tickets are found (not all worktrees have tickets) + +## Workmux Hook Configuration + +### Option 1: Global Configuration (Recommended) + +Edit `~/.config/workmux/config.yaml`: + +```yaml +# Commands to run before merging +pre_merge: + # Move associated tickets to "in_review" status + - kanban-cli move-to-review "$WM_BRANCH_NAME" + + # Optional: Run tests before merge + - pytest + - cargo test +``` + +This applies to **all** workmux projects. The hook will: +1. Find tickets associated with the branch being merged +2. Move them to "in_review" status +3. 
Continue with the merge process + +### Option 2: Project-Specific Configuration + +Edit `.workmux.yaml` in your project root: + +```yaml +pre_merge: + - "" # Inherit global hooks + - kanban-cli move-to-review "$WM_WORKTREE_PATH" + # Or use branch name: + # - kanban-cli move-to-review "$WM_BRANCH_NAME" +``` + +### Available Environment Variables in Hooks + +Workmux provides these variables in hook scripts: + +- `$WM_BRANCH_NAME`: The branch being merged (e.g., "feature-login") +- `$WM_TARGET_BRANCH`: The target branch (e.g., "main") +- `$WM_WORKTREE_PATH`: Absolute path to the worktree +- `$WM_PROJECT_ROOT`: Absolute path to the main project +- `$WM_HANDLE`: The worktree handle/window name + +## Other Hook Opportunities + +### Post-Create Hook + +Move tickets to "in_progress" when a worktree is created: + +```yaml +post_create: + - kanban-cli move-to-review "$WM_WORKTREE_PATH" + # Note: You'd need to add a "move-to-progress" command for this +``` + +### Pre-Remove Hook + +Archive tickets when a worktree is removed: + +```yaml +pre_remove: + - kanban-cli set-status archived +``` + +## Workflow Example + +Here's a typical workflow with automatic status updates: + +1. **Create Ticket in Kanban Board** + - Status: Backlog + - Create worktree for the ticket (links ticket to branch) + +2. **Start Working** + - Manually move ticket to "In Progress" in UI + - Or add a post-create hook to do this automatically + +3. **Finish Work** + - Run `workmux merge` to merge the branch + - **pre_merge hook automatically moves ticket to "In Review"** + - Merge completes + +4. **Review & Complete** + - Reviewer checks the code + - Manually move ticket to "Done" after approval + +## Troubleshooting + +### CLI Not Found + +```bash +# Check if it's in your PATH +which kanban-cli + +# If not, add ~/.local/bin to PATH in ~/.zshrc or ~/.bashrc: +export PATH="$HOME/.local/bin:$PATH" +``` + +### No Tickets Found + +This is normal! Not all worktrees have associated Kanban tickets. 
The `move-to-review` command exits successfully even when no tickets are found. + +To debug, manually check for tickets: + +```bash +# See what workmux sees +echo "Branch: $WM_BRANCH_NAME" +echo "Path: $WM_WORKTREE_PATH" + +# Check for tickets +kanban-cli find-by-branch "$WM_BRANCH_NAME" +``` + +### Database Not Found + +The CLI looks for the Kanban database at: +- macOS: `~/Library/Application Support/com.ushadow.launcher/kanban.db` +- Linux: `~/.local/share/com.ushadow.launcher/kanban.db` +- Windows: `%APPDATA%\com.ushadow.launcher\kanban.db` + +If the database doesn't exist, you need to: +1. Run the Ushadow Launcher at least once +2. Create at least one ticket (this initializes the database) + +### Hook Not Running + +Verify your workmux configuration: + +```bash +# Check global config +cat ~/.config/workmux/config.yaml + +# Check project config +cat .workmux.yaml + +# Test the command manually +kanban-cli move-to-review "your-branch-name" +``` + +## Advanced: Custom Status Transitions + +You can create custom scripts for different status transitions: + +### Script: `move-to-progress.sh` + +```bash +#!/bin/bash +kanban-cli set-status "$1" in_progress +``` + +### Script: `complete-ticket.sh` + +```bash +#!/bin/bash +# Move to done and close tmux window +kanban-cli set-status "$1" done +workmux close "$WM_HANDLE" +``` + +Make them executable and add to your PATH: + +```bash +chmod +x move-to-progress.sh complete-ticket.sh +mv *.sh ~/.local/bin/ +``` + +## Integration with Other Tools + +### Git Hooks + +You can also use `kanban-cli` in git hooks: + +```bash +# .git/hooks/pre-push +#!/bin/bash +BRANCH=$(git rev-parse --abbrev-ref HEAD) +kanban-cli move-to-review "$BRANCH" +``` + +### CI/CD + +Update ticket status from CI pipelines: + +```bash +# In your CI script +kanban-cli set-status "$TICKET_ID" done +``` + +## Future Enhancements + +Potential improvements to this system: + +- [ ] Auto-detect ticket ID from branch name (e.g., `ticket-123-feature`) +- [ ] Support 
for custom status workflows +- [ ] Slack/Discord notifications on status change +- [ ] Integration with GitHub/GitLab issues +- [ ] Web API for external integrations +- [ ] Rollback command for accidental status changes + +## See Also + +- [Workmux Documentation](https://github.com/joshka/workmux) +- [Launcher README](./README.md) +- [Kanban Board Usage](./README.md#managing-work-with-kanban-board) diff --git a/ushadow/launcher/README.md b/ushadow/launcher/README.md index 407c618d..6b87917b 100644 --- a/ushadow/launcher/README.md +++ b/ushadow/launcher/README.md @@ -1,6 +1,17 @@ # Ushadow Desktop Launcher -A Tauri-based desktop application for orchestrating parallel development environments with git worktrees, tmux sessions, and Docker containers. +A Tauri-based desktop application for orchestrating parallel development environments with git worktrees, tmux sessions, and Docker containers. Includes integrated Kanban board for ticket management, making it a complete development workflow tool that bridges task tracking and environment management. + +## What Can It Do? 
+ +- πŸš€ **One-Click Launch** - Install prerequisites and start Ushadow automatically +- 🌲 **Git Worktrees** - Work on multiple branches simultaneously in isolated environments +- πŸ’» **Tmux Integration** - Persistent terminal sessions that survive app restarts +- 🐳 **Docker Orchestration** - Start/stop containers per environment with visual status +- πŸ“‹ **Kanban Board** - Integrated ticket management with epics and environment linking +- βš™οΈ **Smart Setup** - Auto-configure credentials for new worktrees +- πŸ”„ **One-Click Merge** - Rebase and merge worktrees back to main with cleanup +- πŸ“Š **Multi-Project** - Manage multiple repositories with independent configurations ## Features @@ -10,6 +21,7 @@ A Tauri-based desktop application for orchestrating parallel development environ - **Container Orchestration**: Start/stop Docker containers per environment - **Environment Discovery**: Auto-detect and manage multiple environments - **Fast Status Checks**: Cached Tailscale/Docker polling for instant feedback +- **Kanban Board**: Integrated ticket management system for tracking work and epics ### Developer Experience - **One-Click Terminal Access**: Open Terminal.app directly into environment's tmux session @@ -17,14 +29,18 @@ A Tauri-based desktop application for orchestrating parallel development environ - **Real-time Status Badges**: Visual indicators for tmux activity (Working/Waiting/Done/Error) - **Quick Environment Switching**: Manage multiple parallel tasks/features simultaneously - **Merge & Cleanup**: Rebase and merge worktrees back to main with one click +- **Ticket Management**: Create, track, and organize tickets with epics, descriptions, and environments ### Infrastructure - **Prerequisite Checking**: Verifies Docker, Tailscale, Git, and Tmux - **System Tray**: Runs in background with quick access menu - **Cross-Platform**: Builds for macOS (DMG), Windows (EXE), and Linux (DEB/AppImage) +- **Default Credentials**: Configure default admin 
credentials for new worktrees ## Quick Start +### For Developers (Running from Source) + ```bash # Install dependencies npm install @@ -38,15 +54,35 @@ npm run tauri:dev # 3. Show all environments with real-time status ``` +### For Users (Installing the App) + +1. Download the appropriate installer for your platform: + - **macOS**: `Ushadow-{version}.dmg` + - **Windows**: `Ushadow-{version}.exe` or `.msi` + - **Linux**: `ushadow_{version}_amd64.deb` or `.AppImage` + +2. Run the installer and launch the Ushadow Launcher + +3. Follow the first-time setup wizard + ### First-Time Usage 1. **Set Project Root**: Click the folder icon to point to your Ushadow repo 2. **Check Prerequisites**: Verify Docker, Tailscale, Git, Tmux are installed 3. **Start Infrastructure**: Start required containers (postgres, redis, etc.) 4. **Create Environment**: Click "New Environment" and choose: - - **Clone** - Create new git clone (traditional) + - **Link** - Link to an existing directory - **Worktree** - Create git worktree (recommended for parallel dev) +### Multi-Project Mode + +The launcher supports managing multiple projects with independent configurations: + +- Switch between projects from the Install tab +- Each project maintains its own worktrees directory +- Independent infrastructure and environment settings per project +- Useful for working on multiple repositories or client projects + ### Using Tmux Sessions - **Purple Terminal Icon** on environment cards - Click to open Terminal and attach to tmux @@ -55,6 +91,63 @@ npm run tauri:dev **Note**: Terminal opening currently works on **macOS only** (via Terminal.app). Linux/Windows support is planned. See [CROSS_PLATFORM_TERMINAL.md](./CROSS_PLATFORM_TERMINAL.md) for details. +### Configuring Default Credentials + +The **Credentials** button in the header allows you to configure default admin credentials that will be automatically written to new worktrees: + +1. Click **Credentials** button in the header +2. 
Enter default admin email, password, and name +3. Save settings +4. All newly created worktrees will automatically have these credentials configured in `secrets.yaml` + +This eliminates the need to manually configure credentials for each new environment, streamlining the development workflow. + +### Managing Work with Kanban Board + +The launcher includes an integrated Kanban board for ticket and epic management: + +- **Kanban Tab** in the header - View all tickets organized by status (Backlog, To Do, In Progress, Done) +- **Create Tickets** - Add new tickets with title, description, and link them to epics +- **Create Epics** - Organize related tickets into epics for better project structure +- **Environment Linking** - Associate tickets with specific development environments +- **Drag & Drop** - Move tickets between columns to update their status +- **Ticket Details** - View full ticket information including descriptions and metadata + +The Kanban board integrates with your backend API, allowing you to track work directly from the launcher while managing development environments. + +### Automatic Ticket Status Updates + +The launcher automatically updates ticket status as you work, using a three-layer system: + +**1. Launcher Integration**: When starting an agent β†’ status: `in_progress` +**2. Claude Code Hooks**: Session lifecycle events β†’ status: `in_progress` / `in_review` +**3. Workmux Integration**: When merging β†’ status: `done` + +**Quick Setup:** + +```bash +# Install kanban-cli (required) +cd src-tauri +cargo build --release --bin kanban-cli +cp target/release/kanban-cli ~/.local/bin/ +``` + +**That's it!** Claude Code hooks are pre-configured in `.claude/` and workmux hooks are already set up. 
+ +**How it works:** +- βœ… Start agent for ticket β†’ Automatically moves to `in_progress` +- βœ… Agent finishes/waits β†’ Automatically moves to `in_review` +- βœ… You respond β†’ Automatically moves back to `in_progress` +- βœ… Merge branch β†’ Automatically moves to `done` + +See **[KANBAN_AUTO_STATUS.md](./KANBAN_AUTO_STATUS.md)** for complete documentation: +- Architecture overview and how each layer works +- Comparison with vibe-kanban's approach +- Testing and troubleshooting +- Future enhancements + +For manual CLI usage and advanced integration, see **[KANBAN_HOOKS.md](./KANBAN_HOOKS.md)**. + ## Documentation - **[TMUX_INTEGRATION.md](./TMUX_INTEGRATION.md)** - Complete guide to tmux integration features (Phase 1) @@ -170,23 +263,52 @@ This generates all required icon sizes for each platform. launcher/ β”œβ”€β”€ dist/ # Bootstrap UI (shown before containers start) β”‚ └── index.html +β”œβ”€β”€ src/ # React frontend +β”‚ β”œβ”€β”€ components/ # UI components +β”‚ β”‚ β”œβ”€β”€ KanbanBoard.tsx +β”‚ β”‚ β”œβ”€β”€ EnvironmentsPanel.tsx +β”‚ β”‚ β”œβ”€β”€ TmuxManagerDialog.tsx +β”‚ β”‚ └── ... +β”‚ β”œβ”€β”€ hooks/ # React hooks (useTauri, useTmuxMonitoring) +β”‚ β”œβ”€β”€ store/ # Zustand state management +β”‚ └── App.tsx # Main application β”œβ”€β”€ src-tauri/ β”‚ β”œβ”€β”€ Cargo.toml # Rust dependencies β”‚ β”œβ”€β”€ tauri.conf.json # Tauri configuration β”‚ β”œβ”€β”€ icons/ # App icons β”‚ └── src/ -β”‚ └── main.rs # Rust backend (Docker management) +β”‚ β”œβ”€β”€ main.rs # Rust backend entry point +β”‚ β”œβ”€β”€ commands/ # Tauri command implementations +β”‚ β”‚ β”œβ”€β”€ kanban.rs # Kanban board operations +β”‚ β”‚ β”œβ”€β”€ settings.rs # Settings management +β”‚ β”‚ └── ... +β”‚ └── models.rs # Data structures └── package.json # Node scripts for Tauri CLI ``` ## How It Works -1. **On Launch**: Shows bootstrap UI with prerequisite checks -2. **Start Services**: Runs `docker compose up` for infrastructure and app -3. 
**Health Check**: Polls backend until healthy -4. **Open App**: Navigates webview to `http://localhost:3000` -5. **System Tray**: Minimizes to tray, stays running in background -6. **On Quit**: Optionally stops containers (configurable) +### Application Lifecycle + +1. **On Launch**: Shows Install page with one-click quick launch +2. **Prerequisite Check**: Verifies Docker, Git, Python, Tmux are installed +3. **Infrastructure Setup**: Starts shared services (Postgres, Redis, etc.) +4. **Environment Discovery**: Auto-detects existing worktrees and containers +5. **Tmux Integration**: Auto-starts tmux server and monitors sessions +6. **System Tray**: Minimizes to tray, stays running in background +7. **On Quit**: Optionally stops containers (configurable) + +### Development Workflow + +1. **Navigate to Kanban tab** - View and manage tickets +2. **Create a ticket** - Define the work to be done +3. **Navigate to Environments tab** - View all development environments +4. **Create worktree** - Click "New Environment" β†’ Choose branch name +5. **Auto-setup** - Launcher creates worktree, tmux window, and starts containers +6. **Open terminal** - Click purple terminal icon to attach tmux session +7. **Develop** - Code in VS Code, run commands in tmux +8. **Track progress** - Update ticket status in Kanban board +9. 
**Merge & Cleanup** - When done, merge worktree back to main with one click ## Configuration @@ -204,17 +326,97 @@ The app uses Tauri's security features: - **CSP**: Restricts content sources to localhost - **Shell Scope**: Only allows specific Docker/Tailscale commands - **No Node.js**: Runs native Rust, not Node (unlike Electron) +- **Credential Storage**: Settings stored locally, never transmitted +- **Tauri Permissions**: Minimal permission model (no file system access beyond project paths) + +## Tips & Tricks + +### Productivity Tips + +**Use Worktrees for Parallel Development** +- Create a worktree for each feature/bug you're working on +- Switch between worktrees instantly without git stash +- Each worktree has its own containers and ports + +**Organize with Epics** +- Group related tickets into epics in the Kanban board +- Track progress across multiple related features +- Link tickets to environments for better context + +**Leverage Tmux Sessions** +- Keep long-running commands in tmux (tests, servers, logs) +- Sessions persist even if you close the launcher +- Use tmux windows to organize different tasks + +**Set Default Credentials** +- Configure default admin credentials once +- All new worktrees automatically get these credentials +- No more manual secrets.yaml editing + +### Keyboard Shortcuts + +- **Cmd/Ctrl + R**: Refresh all status +- **Native clipboard shortcuts work**: Cmd/Ctrl+C, V, X, Z, etc. + +### Best Practices + +1. **Name worktrees clearly**: Use descriptive names like `fix-login-bug` or `add-auth-feature` +2. **Clean up regularly**: Merge and delete completed worktrees to save disk space +3. **Use the Log Panel**: Expand it when troubleshooting to see detailed output +4. **Keep main up-to-date**: Regularly pull latest changes to avoid merge conflicts +5. 
**Link tickets to environments**: Use the Kanban board to track which ticket is in which environment ## Troubleshooting -### "Docker not found" -Ensure Docker Desktop is installed and the `docker` CLI is in your PATH. +### Environment Issues + +**"Docker not found"** +- Ensure Docker Desktop is installed and the `docker` CLI is in your PATH +- On macOS: `which docker` should show `/usr/local/bin/docker` +- Try restarting the launcher after installing Docker + +**"Tailscale not found"** +- Install Tailscale from https://tailscale.com/download +- Tailscale is optional but recommended for remote access + +**Environment won't start** +- Check that Docker is running (`docker ps` should work) +- Verify ports aren't already in use (default: 8000, 3000) +- Check logs in the Log Panel at the bottom of the launcher +- Try stopping and restarting the environment + +**Tmux window not created** +- Ensure tmux is installed: `tmux -V` +- Check if tmux server is running: `tmux list-sessions` +- Restart the launcher to auto-start tmux server + +### Kanban Board Issues + +**Tickets not loading** +- Ensure at least one environment is running (backend API needed) +- Check backend URL in Kanban tab matches running environment +- Verify backend is healthy at `http://localhost:8000/health` + +**Can't create tickets** +- Verify you have a running backend environment +- Check browser console for API errors +- Ensure credentials are configured (Settings button) + +### Build Issues + +**Build fails on Linux** +- Install all webkit/gtk dependencies listed in Prerequisites section +- Run: `sudo apt install libwebkit2gtk-4.0-dev build-essential` -### "Tailscale not found" -Install Tailscale from https://tailscale.com/download +**Windows build fails** +- Ensure WebView2 runtime is installed +- Install Visual Studio Build Tools +- Restart terminal after installing dependencies -### Build fails on Linux -Install all webkit/gtk dependencies listed in Prerequisites. 
+### General Tips -### Windows build fails -Ensure WebView2 runtime is installed and Visual Studio Build Tools are set up. +- **Check the Log Panel** - Most errors appear in the bottom log panel +- **Refresh Status** - Click the refresh button to update environment status +- **Restart the Launcher** - Many issues resolve after a fresh start +- **Check Disk Space** - Worktrees and containers can use significant space +- **Review Configuration** - Verify project root and worktrees directory paths diff --git a/ushadow/launcher/claude-with-kanban b/ushadow/launcher/claude-with-kanban new file mode 100755 index 00000000..badbbe52 --- /dev/null +++ b/ushadow/launcher/claude-with-kanban @@ -0,0 +1,43 @@ +#!/bin/bash +# Wrapper for Claude Code that automatically updates Kanban status +# +# Usage: claude-with-kanban [claude args...] +# +# What it does: +# 1. Moves tickets to "in_progress" when agent starts +# 2. Runs claude with all your arguments +# 3. Moves tickets to "in_review" when agent finishes +# +# Install: +# chmod +x claude-with-kanban +# cp claude-with-kanban ~/.local/bin/ +# alias claude='claude-with-kanban' + +# Get the current branch +BRANCH=$(git branch --show-current 2>/dev/null) + +if [ -z "$BRANCH" ]; then + echo "⚠ Not in a git repository, skipping kanban updates" + # Run claude without status updates + exec claude "$@" +fi + +echo "πŸ€– Starting agent for branch: $BRANCH" +echo " Moving tickets to 'in_progress'..." + +# Move to in_progress +kanban-cli move-to-progress "$BRANCH" 2>/dev/null + +# Run the actual claude command +# NOTE: intentionally NOT exec'd (unlike the no-git branch above) so we can +# capture the exit code and run the move-to-review hook after claude exits +claude "$@" +EXIT_CODE=$? + +# After claude finishes +echo "" +echo "πŸ’¬ Agent finished, moving tickets to 'in_review'..." 
+kanban-cli move-to-review "$BRANCH" 2>/dev/null + +echo "βœ“ Tickets updated and ready for review" + +exit $EXIT_CODE diff --git a/ushadow/launcher/install-kanban-hooks.sh b/ushadow/launcher/install-kanban-hooks.sh new file mode 100755 index 00000000..f1ca9a98 --- /dev/null +++ b/ushadow/launcher/install-kanban-hooks.sh @@ -0,0 +1,147 @@ +#!/bin/bash + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo -e "${BLUE} Ushadow Launcher - Kanban Hooks Setup${NC}" +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo + +# Check if we're in the right directory +if [ ! -f "src-tauri/Cargo.toml" ]; then + echo -e "${RED}Error: Must run from launcher directory (ushadow/launcher)${NC}" + exit 1 +fi + +# Step 1: Build the CLI tool +echo -e "${YELLOW}Step 1: Building kanban-cli...${NC}" +cd src-tauri +if cargo build --release --bin kanban-cli; then + echo -e "${GREEN}βœ“ Built successfully${NC}" +else + echo -e "${RED}βœ— Build failed${NC}" + exit 1 +fi +cd .. 
+echo + +# Step 2: Install the CLI tool +echo -e "${YELLOW}Step 2: Installing kanban-cli...${NC}" +INSTALL_DIR="$HOME/.local/bin" +CLI_SOURCE="src-tauri/target/release/kanban-cli" + +# Create installation directory if it doesn't exist +mkdir -p "$INSTALL_DIR" + +# Copy the binary +cp "$CLI_SOURCE" "$INSTALL_DIR/" +chmod +x "$INSTALL_DIR/kanban-cli" + +echo -e "${GREEN}βœ“ Installed to: $INSTALL_DIR/kanban-cli${NC}" + +# Check if directory is in PATH +if [[ ":$PATH:" != *":$INSTALL_DIR:"* ]]; then + echo -e "${YELLOW}⚠ $INSTALL_DIR is not in your PATH${NC}" + echo + echo "Add this to your ~/.zshrc or ~/.bashrc:" + echo -e "${BLUE}export PATH=\"\$HOME/.local/bin:\$PATH\"${NC}" + echo +fi +echo + +# Step 3: Verify installation +echo -e "${YELLOW}Step 3: Verifying installation...${NC}" +if command -v kanban-cli &> /dev/null; then + echo -e "${GREEN}βœ“ kanban-cli is available in PATH${NC}" + kanban-cli --help | head -5 +else + echo -e "${RED}βœ— kanban-cli not found in PATH${NC}" + echo "You may need to restart your shell or run:" + echo -e "${BLUE}export PATH=\"\$HOME/.local/bin:\$PATH\"${NC}" +fi +echo + +# Step 4: Configure workmux hooks +echo -e "${YELLOW}Step 4: Configuring workmux hooks...${NC}" + +WORKMUX_CONFIG="$HOME/.config/workmux/config.yaml" +WORKMUX_DIR="$HOME/.config/workmux" + +# Create workmux config directory if it doesn't exist +if [ ! -d "$WORKMUX_DIR" ]; then + echo "Creating workmux config directory..." 
+ mkdir -p "$WORKMUX_DIR" +fi + +# Check if config exists +if [ -f "$WORKMUX_CONFIG" ]; then + echo -e "${YELLOW}⚠ Workmux config already exists${NC}" + echo "Location: $WORKMUX_CONFIG" + echo + + # Check if hook is already configured + if grep -q "kanban-cli move-to-review" "$WORKMUX_CONFIG" 2>/dev/null; then + echo -e "${GREEN}βœ“ Kanban hook already configured${NC}" + else + echo "To enable automatic status updates, add this to your pre_merge hooks:" + echo + echo -e "${BLUE}pre_merge:" + echo " - kanban-cli move-to-review \"\$WM_BRANCH_NAME\"${NC}" + echo + fi +else + echo "Creating workmux config with kanban hooks..." + cat > "$WORKMUX_CONFIG" << 'EOF' +# Workmux global configuration +# See: workmux init for all options + +#------------------------------------------------------------------------------- +# Hooks +#------------------------------------------------------------------------------- + +# Commands to run before merging (e.g., linting, tests). +# Aborts the merge if any command fails. +pre_merge: + # Automatically move tickets to "in_review" status + - kanban-cli move-to-review "$WM_BRANCH_NAME" + + # Uncomment to run tests before merge: + # - npm test + # - cargo test + # - pytest + +EOF + echo -e "${GREEN}βœ“ Created workmux config with kanban hooks${NC}" + echo "Location: $WORKMUX_CONFIG" +fi +echo + +# Summary +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo -e "${GREEN}Setup Complete!${NC}" +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo +echo "What's next?" +echo +echo "1. ${GREEN}Test the CLI:${NC}" +echo " kanban-cli --help" +echo +echo "2. ${GREEN}Create a ticket in the Kanban board${NC}" +echo " - Link it to a worktree/branch" +echo +echo "3. ${GREEN}Test the hook:${NC}" +echo " - Make changes in the worktree" +echo " - Run: workmux merge" +echo " - The ticket should automatically move to 'In Review'" +echo +echo "4. 
${GREEN}View documentation:${NC}" +echo " cat KANBAN_HOOKS.md" +echo +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" diff --git a/ushadow/launcher/kanban-status-helpers.sh b/ushadow/launcher/kanban-status-helpers.sh new file mode 100644 index 00000000..c608fb74 --- /dev/null +++ b/ushadow/launcher/kanban-status-helpers.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# Kanban Status Helper Functions +# Source this file in your shell: source kanban-status-helpers.sh + +# Get the current ticket ID from branch name or environment +get_current_ticket_id() { + # Check if TICKET_ID is set + if [ -n "$TICKET_ID" ]; then + echo "$TICKET_ID" + return 0 + fi + + # Try to get from branch name + local branch=$(git branch --show-current 2>/dev/null) + if [ -n "$branch" ]; then + echo "$branch" + return 0 + fi + + # Fallback to worktree path + echo "$(pwd)" +} + +# Agent starts working (after receiving human input) +kanban-start-working() { + local identifier=$(get_current_ticket_id) + echo "πŸ“ Moving ticket to 'in_progress'..." + kanban-cli find-by-branch "$identifier" | grep -o 'ticket-[a-f0-9-]*' | while read ticket_id; do + kanban-cli set-status "$ticket_id" in_progress + done +} + +# Agent stops and waits for human (needs input) +kanban-waiting-for-human() { + local identifier=$(get_current_ticket_id) + echo "πŸ’¬ Moving ticket to 'in_review' (waiting for human)..." + kanban-cli find-by-branch "$identifier" | grep -o 'ticket-[a-f0-9-]*' | while read ticket_id; do + kanban-cli set-status "$ticket_id" in_review + done +} + +# Mark ticket as done (usually via workmux merge hook) +kanban-mark-done() { + local identifier=$(get_current_ticket_id) + echo "βœ… Moving ticket to 'done'..." 
+ kanban-cli find-by-branch "$identifier" | grep -o 'ticket-[a-f0-9-]*' | while read ticket_id; do + kanban-cli set-status "$ticket_id" done + done +} + +# Quick status check +kanban-status() { + local identifier=$(get_current_ticket_id) + echo "Current identifier: $identifier" + echo "" + kanban-cli find-by-branch "$identifier" +} + +# Aliases for convenience +alias kb-start='kanban-start-working' +alias kb-waiting='kanban-waiting-for-human' +alias kb-done='kanban-mark-done' +alias kb-status='kanban-status' + +echo "βœ“ Kanban status helpers loaded" +echo "" +echo "Commands available:" +echo " kanban-start-working (or: kb-start) - Move to 'in_progress'" +echo " kanban-waiting-for-human (or: kb-waiting) - Move to 'in_review'" +echo " kanban-mark-done (or: kb-done) - Move to 'done'" +echo " kanban-status (or: kb-status) - Check current status" diff --git a/ushadow/launcher/package.json b/ushadow/launcher/package.json index 5e9a979b..3fe4d0db 100644 --- a/ushadow/launcher/package.json +++ b/ushadow/launcher/package.json @@ -1,6 +1,6 @@ { "name": "ushadow-launcher", - "version": "0.7.15", + "version": "0.8.0", "description": "Ushadow Desktop Launcher", "private": true, "type": "module", diff --git a/ushadow/launcher/public/oauth-callback.html b/ushadow/launcher/public/oauth-callback.html new file mode 100644 index 00000000..af04e9a9 --- /dev/null +++ b/ushadow/launcher/public/oauth-callback.html @@ -0,0 +1,165 @@ + + + + + Completing Login... + + + +
+
+
Completing authentication...
+
Please wait while we process your login
+
+ + + + diff --git a/ushadow/launcher/src-tauri/Cargo.lock b/ushadow/launcher/src-tauri/Cargo.lock index a1a8fb15..af48cd0a 100644 --- a/ushadow/launcher/src-tauri/Cargo.lock +++ b/ushadow/launcher/src-tauri/Cargo.lock @@ -8,6 +8,18 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.4" @@ -719,6 +731,12 @@ dependencies = [ "syn 2.0.113", ] +[[package]] +name = "data-encoding" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" + [[package]] name = "deranged" version = "0.5.5" @@ -953,6 +971,18 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fastrand" version = "2.3.0" @@ -1105,6 +1135,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", + "futures-sink", ] [[package]] @@ -1175,6 +1206,7 @@ dependencies = [ "futures-core", "futures-io", "futures-macro", + "futures-sink", "futures-task", "memchr", "pin-project-lite", @@ -1515,7 +1547,7 @@ dependencies = [ "futures-core", 
"futures-sink", "futures-util", - "http", + "http 0.2.12", "indexmap 2.12.1", "slab", "tokio", @@ -1540,6 +1572,15 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + [[package]] name = "hashbrown" version = "0.15.5" @@ -1555,6 +1596,39 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "headers" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 0.2.12", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http 0.2.12", +] + [[package]] name = "heck" version = "0.3.3" @@ -1613,6 +1687,16 @@ dependencies = [ "itoa 1.0.17", ] +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa 1.0.17", +] + [[package]] name = "http-body" version = "0.4.6" @@ -1620,7 +1704,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.12", "pin-project-lite", ] @@ -1653,7 +1737,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http", + "http 0.2.12", "http-body", "httparse", "httpdate", @@ -2096,6 +2180,17 @@ dependencies = [ "redox_syscall 0.7.0", ] +[[package]] +name = "libsqlite3-sys" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.11.0" @@ -2233,6 +2328,16 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -2264,6 +2369,24 @@ dependencies = [ "pxfm", ] +[[package]] +name = "multer" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http 0.2.12", + "httparse", + "log", + "memchr", + "mime", + "spin", + "version_check", +] + [[package]] name = "native-tls" version = "0.2.14" @@ -2850,6 +2973,26 @@ dependencies = [ "siphasher 1.0.1", ] +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = 
"pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -3266,7 +3409,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http", + "http 0.2.12", "http-body", "hyper", "hyper-tls", @@ -3318,6 +3461,20 @@ dependencies = [ "windows 0.37.0", ] +[[package]] +name = "rusqlite" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" +dependencies = [ + "bitflags 2.10.0", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + [[package]] name = "rustc_version" version = "0.4.1" @@ -3662,6 +3819,17 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha2" version = "0.10.9" @@ -3824,6 +3992,12 @@ dependencies = [ "system-deps 5.0.0", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -4058,7 +4232,7 @@ dependencies = [ "glob", "gtk", "heck 0.5.0", - "http", + "http 0.2.12", "ignore", "log", "nix 0.26.4", @@ -4161,7 +4335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8066855882f00172935e3fa7d945126580c34dcbabab43f5d4f0c2398a67d47b" dependencies = [ "gtk", - "http", + "http 0.2.12", "http-range", "rand 0.8.5", "raw-window-handle", @@ -4428,6 
+4602,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite", +] + [[package]] name = "tokio-util" version = "0.7.18" @@ -4558,6 +4744,7 @@ version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -4630,6 +4817,25 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.4.0", + "httparse", + "log", + "rand 0.8.5", + "sha1", + "thiserror 1.0.69", + "url", + "utf-8", +] + [[package]] name = "typenum" version = "1.19.0" @@ -4647,6 +4853,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "unicase" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" + [[package]] name = "unicode-ident" version = "1.0.22" @@ -4679,13 +4891,14 @@ dependencies = [ [[package]] name = "ushadow-launcher" -version = "0.7.14" +version = "0.8.0" dependencies = [ "chrono", "dirs", "open 5.3.3", "portable-pty", "reqwest", + "rusqlite", "serde", "serde_json", "serde_yaml", @@ -4693,6 +4906,7 @@ dependencies = [ "tauri-build", "tokio", "uuid", + "warp", ] [[package]] @@ -4788,6 +5002,35 @@ dependencies = [ "try-lock", ] +[[package]] +name = "warp" +version = "0.3.7" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4378d202ff965b011c64817db11d5829506d3404edeadb61f190d111da3f231c" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "headers", + "http 0.2.12", + "hyper", + "log", + "mime", + "mime_guess", + "multer", + "percent-encoding", + "pin-project", + "scoped-tls", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-tungstenite", + "tokio-util", + "tower-service", + "tracing", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" @@ -5727,7 +5970,7 @@ dependencies = [ "glib", "gtk", "html5ever", - "http", + "http 0.2.12", "kuchikiki", "libc", "log", diff --git a/ushadow/launcher/src-tauri/Cargo.toml b/ushadow/launcher/src-tauri/Cargo.toml index 766cb06b..bc542963 100644 --- a/ushadow/launcher/src-tauri/Cargo.toml +++ b/ushadow/launcher/src-tauri/Cargo.toml @@ -1,27 +1,34 @@ [package] name = "ushadow-launcher" -version = "0.7.15" +version = "0.8.0" description = "Ushadow Desktop Launcher" authors = ["Ushadow"] license = "MIT" repository = "" edition = "2021" +# Additional binaries +[[bin]] +name = "kanban-cli" +path = "src/bin/kanban-cli.rs" + [build-dependencies] tauri-build = { version = "1", features = [] } [dependencies] -tauri = { version = "1", features = [ "clipboard-all", "path-all", "process-exit", "shell-execute", "process-relaunch", "shell-open", "process-command-api", "dialog-all", "notification-all", "system-tray"] } +tauri = { version = "1", features = [ "window-set-focus", "window-close", "window-set-size", "window-hide", "window-create", "window-show", "window-center", "window-set-position", "window-set-always-on-top", "window-set-title", "window-set-resizable", "clipboard-all", "path-all", "process-exit", "shell-execute", "process-relaunch", "shell-open", "process-command-api", "dialog-all", "notification-all", "system-tray"] } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_yaml = "0.9" tokio = { 
version = "1", features = ["process", "time", "macros", "rt-multi-thread", "io-util", "sync"] } reqwest = { version = "0.11", features = ["blocking"] } +warp = "0.3" open = "5" portable-pty = "0.8" uuid = { version = "1.6", features = ["v4", "serde"] } dirs = "5" chrono = "0.4" +rusqlite = { version = "0.31", features = ["bundled"] } [features] default = ["custom-protocol"] diff --git a/ushadow/launcher/src-tauri/src/bin/kanban-cli.rs b/ushadow/launcher/src-tauri/src/bin/kanban-cli.rs new file mode 100644 index 00000000..3689416c --- /dev/null +++ b/ushadow/launcher/src-tauri/src/bin/kanban-cli.rs @@ -0,0 +1,474 @@ +use rusqlite::{Connection, params}; +use std::path::PathBuf; +use std::env; + +#[derive(Debug)] +struct Ticket { + id: String, + title: String, + status: String, + worktree_path: Option, + branch_name: Option, + tmux_window_name: Option, +} + +/// Get the path to the SQLite database +fn get_db_path() -> Result { + let data_dir = dirs::data_dir().ok_or("Failed to get data directory")?; + let launcher_dir = data_dir.join("com.ushadow.launcher"); + + if !launcher_dir.exists() { + return Err(format!("Launcher data directory does not exist: {:?}", launcher_dir)); + } + + Ok(launcher_dir.join("kanban.db")) +} + +/// Get a database connection +fn get_db_connection() -> Result { + let db_path = get_db_path()?; + Connection::open(&db_path) + .map_err(|e| format!("Failed to open database: {}", e)) +} + +/// Find tickets by worktree path +fn find_tickets_by_worktree(worktree_path: &str) -> Result, String> { + let conn = get_db_connection()?; + + let mut stmt = conn.prepare( + "SELECT id, title, status, worktree_path, branch_name, tmux_window_name + FROM tickets + WHERE worktree_path = ?" 
+ ).map_err(|e| format!("Failed to prepare statement: {}", e))?; + + let tickets = stmt.query_map([worktree_path], |row| { + Ok(Ticket { + id: row.get(0)?, + title: row.get(1)?, + status: row.get(2)?, + worktree_path: row.get(3)?, + branch_name: row.get(4)?, + tmux_window_name: row.get(5)?, + }) + }) + .map_err(|e| format!("Failed to query tickets: {}", e))? + .filter_map(|r| r.ok()) + .collect(); + + Ok(tickets) +} + +/// Find tickets by branch name +fn find_tickets_by_branch(branch_name: &str) -> Result, String> { + let conn = get_db_connection()?; + + let mut stmt = conn.prepare( + "SELECT id, title, status, worktree_path, branch_name, tmux_window_name + FROM tickets + WHERE branch_name = ?" + ).map_err(|e| format!("Failed to prepare statement: {}", e))?; + + let tickets = stmt.query_map([branch_name], |row| { + Ok(Ticket { + id: row.get(0)?, + title: row.get(1)?, + status: row.get(2)?, + worktree_path: row.get(3)?, + branch_name: row.get(4)?, + tmux_window_name: row.get(5)?, + }) + }) + .map_err(|e| format!("Failed to query tickets: {}", e))? + .filter_map(|r| r.ok()) + .collect(); + + Ok(tickets) +} + +/// Find tickets by tmux window name +fn find_tickets_by_tmux_window(window_name: &str) -> Result, String> { + let conn = get_db_connection()?; + + let mut stmt = conn.prepare( + "SELECT id, title, status, worktree_path, branch_name, tmux_window_name + FROM tickets + WHERE tmux_window_name = ?" + ).map_err(|e| format!("Failed to prepare statement: {}", e))?; + + let tickets = stmt.query_map([window_name], |row| { + Ok(Ticket { + id: row.get(0)?, + title: row.get(1)?, + status: row.get(2)?, + worktree_path: row.get(3)?, + branch_name: row.get(4)?, + tmux_window_name: row.get(5)?, + }) + }) + .map_err(|e| format!("Failed to query tickets: {}", e))? 
+ .filter_map(|r| r.ok()) + .collect(); + + Ok(tickets) +} + +/// Update ticket status +fn update_ticket_status(ticket_id: &str, new_status: &str) -> Result<(), String> { + // Validate status + let valid_statuses = ["backlog", "todo", "in_progress", "in_review", "done", "archived"]; + if !valid_statuses.contains(&new_status) { + return Err(format!( + "Invalid status '{}'. Must be one of: {}", + new_status, + valid_statuses.join(", ") + )); + } + + let conn = get_db_connection()?; + let now = chrono::Utc::now().to_rfc3339(); + + let rows_affected = conn.execute( + "UPDATE tickets SET status = ?, updated_at = ? WHERE id = ?", + params![new_status, &now, ticket_id], + ).map_err(|e| format!("Failed to update ticket: {}", e))?; + + if rows_affected == 0 { + return Err(format!("Ticket not found: {}", ticket_id)); + } + + Ok(()) +} + +fn print_usage() { + eprintln!("Usage: kanban-cli [options]"); + eprintln!(); + eprintln!("Commands:"); + eprintln!(" set-status Update ticket status"); + eprintln!(" find-by-path Find tickets by worktree path"); + eprintln!(" find-by-branch Find tickets by branch name"); + eprintln!(" find-by-window Find tickets by tmux window name"); + eprintln!(" move-to-review Move ticket(s) to 'in_review' status"); + eprintln!(" move-to-progress Move ticket(s) to 'in_progress' status"); + eprintln!(" move-to-done Move ticket(s) to 'done' status"); + eprintln!(" (identifier can be path, branch, or window)"); + eprintln!(); + eprintln!("Statuses:"); + eprintln!(" backlog, todo, in_progress, in_review, done, archived"); + eprintln!(); + eprintln!("Examples:"); + eprintln!(" # Update specific ticket"); + eprintln!(" kanban-cli set-status ticket-123 in_review"); + eprintln!(); + eprintln!(" # Find tickets by worktree path"); + eprintln!(" kanban-cli find-by-path /path/to/worktree"); + eprintln!(); + eprintln!(" # Agent self-reporting workflow"); + eprintln!(" kanban-cli move-to-progress $BRANCH_NAME # Agent starts working"); + eprintln!(" kanban-cli 
move-to-review $BRANCH_NAME # Agent waits for human"); + eprintln!(" kanban-cli move-to-done $BRANCH_NAME # Work merged"); +} + +fn main() { + let args: Vec = env::args().collect(); + + if args.len() < 2 { + print_usage(); + std::process::exit(1); + } + + let command = &args[1]; + + let result = match command.as_str() { + "set-status" => { + if args.len() < 4 { + eprintln!("Error: set-status requires ticket ID and status"); + print_usage(); + std::process::exit(1); + } + let ticket_id = &args[2]; + let status = &args[3]; + + match update_ticket_status(ticket_id, status) { + Ok(_) => { + println!("βœ“ Updated ticket {} to status: {}", ticket_id, status); + Ok(()) + } + Err(e) => Err(e), + } + } + "find-by-path" => { + if args.len() < 3 { + eprintln!("Error: find-by-path requires worktree path"); + print_usage(); + std::process::exit(1); + } + let path = &args[2]; + + match find_tickets_by_worktree(path) { + Ok(tickets) => { + if tickets.is_empty() { + println!("No tickets found for path: {}", path); + } else { + println!("Found {} ticket(s):", tickets.len()); + for ticket in tickets { + println!(" {} - {} ({})", ticket.id, ticket.title, ticket.status); + } + } + Ok(()) + } + Err(e) => Err(e), + } + } + "find-by-branch" => { + if args.len() < 3 { + eprintln!("Error: find-by-branch requires branch name"); + print_usage(); + std::process::exit(1); + } + let branch = &args[2]; + + match find_tickets_by_branch(branch) { + Ok(tickets) => { + if tickets.is_empty() { + println!("No tickets found for branch: {}", branch); + } else { + println!("Found {} ticket(s):", tickets.len()); + for ticket in tickets { + println!(" {} - {} ({})", ticket.id, ticket.title, ticket.status); + } + } + Ok(()) + } + Err(e) => Err(e), + } + } + "find-by-window" => { + if args.len() < 3 { + eprintln!("Error: find-by-window requires tmux window name"); + print_usage(); + std::process::exit(1); + } + let window = &args[2]; + + match find_tickets_by_tmux_window(window) { + Ok(tickets) => { + if 
tickets.is_empty() { + println!("No tickets found for tmux window: {}", window); + } else { + println!("Found {} ticket(s):", tickets.len()); + for ticket in tickets { + println!(" {} - {} ({})", ticket.id, ticket.title, ticket.status); + } + } + Ok(()) + } + Err(e) => Err(e), + } + } + "move-to-review" => { + if args.len() < 3 { + eprintln!("Error: move-to-review requires identifier (path, branch, or window)"); + print_usage(); + std::process::exit(1); + } + let identifier = &args[2]; + + // Try to find tickets by different methods - try all methods until we find some tickets + let mut tickets = Vec::new(); + + // Try worktree path + if let Ok(found) = find_tickets_by_worktree(identifier) { + if !found.is_empty() { + tickets = found; + } + } + + // If no tickets found, try branch name + if tickets.is_empty() { + if let Ok(found) = find_tickets_by_branch(identifier) { + if !found.is_empty() { + tickets = found; + } + } + } + + // If still no tickets, try tmux window + if tickets.is_empty() { + if let Ok(found) = find_tickets_by_tmux_window(identifier) { + tickets = found; + } + } + + if tickets.is_empty() { + eprintln!("⚠ No tickets found for identifier: {}", identifier); + eprintln!(" This is OK - not all worktrees have associated tickets"); + Ok(()) + } else { + let mut errors = Vec::new(); + let mut updated = 0; + + for ticket in &tickets { + // Only update if not already in review or done + if ticket.status != "in_review" && ticket.status != "done" { + match update_ticket_status(&ticket.id, "in_review") { + Ok(_) => { + println!("βœ“ Moved ticket to review: {} - {}", ticket.id, ticket.title); + updated += 1; + } + Err(e) => { + errors.push(format!("Failed to update {}: {}", ticket.id, e)); + } + } + } else { + println!(" Skipped {} - already in status: {}", ticket.id, ticket.status); + } + } + + if !errors.is_empty() { + Err(errors.join("\n")) + } else { + if updated > 0 { + println!("βœ“ Moved {} ticket(s) to review", updated); + } + Ok(()) + } + } + } + 
"move-to-progress" => { + if args.len() < 3 { + eprintln!("Error: move-to-progress requires identifier (path, branch, or window)"); + print_usage(); + std::process::exit(1); + } + let identifier = &args[2]; + + // Try to find tickets by different methods + let mut tickets = Vec::new(); + + if let Ok(found) = find_tickets_by_worktree(identifier) { + if !found.is_empty() { + tickets = found; + } + } + + if tickets.is_empty() { + if let Ok(found) = find_tickets_by_branch(identifier) { + if !found.is_empty() { + tickets = found; + } + } + } + + if tickets.is_empty() { + if let Ok(found) = find_tickets_by_tmux_window(identifier) { + tickets = found; + } + } + + if tickets.is_empty() { + eprintln!("⚠ No tickets found for identifier: {}", identifier); + eprintln!(" This is OK - not all worktrees have associated tickets"); + Ok(()) + } else { + let mut errors = Vec::new(); + let mut updated = 0; + + for ticket in &tickets { + match update_ticket_status(&ticket.id, "in_progress") { + Ok(_) => { + println!("βœ“ Moved ticket to in_progress: {} - {}", ticket.id, ticket.title); + updated += 1; + } + Err(e) => { + errors.push(format!("Failed to update {}: {}", ticket.id, e)); + } + } + } + + if !errors.is_empty() { + Err(errors.join("\n")) + } else { + if updated > 0 { + println!("βœ“ Moved {} ticket(s) to in_progress", updated); + } + Ok(()) + } + } + } + "move-to-done" => { + if args.len() < 3 { + eprintln!("Error: move-to-done requires identifier (path, branch, or window)"); + print_usage(); + std::process::exit(1); + } + let identifier = &args[2]; + + // Try to find tickets by different methods + let mut tickets = Vec::new(); + + if let Ok(found) = find_tickets_by_worktree(identifier) { + if !found.is_empty() { + tickets = found; + } + } + + if tickets.is_empty() { + if let Ok(found) = find_tickets_by_branch(identifier) { + if !found.is_empty() { + tickets = found; + } + } + } + + if tickets.is_empty() { + if let Ok(found) = find_tickets_by_tmux_window(identifier) { + 
tickets = found; + } + } + + if tickets.is_empty() { + eprintln!("⚠ No tickets found for identifier: {}", identifier); + eprintln!(" This is OK - not all worktrees have associated tickets"); + Ok(()) + } else { + let mut errors = Vec::new(); + let mut updated = 0; + + for ticket in &tickets { + match update_ticket_status(&ticket.id, "done") { + Ok(_) => { + println!("βœ“ Moved ticket to done: {} - {}", ticket.id, ticket.title); + updated += 1; + } + Err(e) => { + errors.push(format!("Failed to update {}: {}", ticket.id, e)); + } + } + } + + if !errors.is_empty() { + Err(errors.join("\n")) + } else { + if updated > 0 { + println!("βœ“ Moved {} ticket(s) to done", updated); + } + Ok(()) + } + } + } + "--help" | "-h" => { + print_usage(); + Ok(()) + } + _ => { + eprintln!("Error: Unknown command '{}'", command); + print_usage(); + std::process::exit(1); + } + }; + + if let Err(e) = result { + eprintln!("Error: {}", e); + std::process::exit(1); + } +} diff --git a/ushadow/launcher/src-tauri/src/commands/config_commands.rs b/ushadow/launcher/src-tauri/src/commands/config_commands.rs new file mode 100644 index 00000000..a563cb94 --- /dev/null +++ b/ushadow/launcher/src-tauri/src/commands/config_commands.rs @@ -0,0 +1,48 @@ +use crate::config::LauncherConfig; +use std::path::PathBuf; +use tauri::State; + +use super::docker::AppState; + +/// Load project configuration from .launcher-config.yaml +#[tauri::command] +pub async fn load_project_config( + project_root: String, + state: State<'_, AppState>, +) -> Result { + let config = LauncherConfig::load(&PathBuf::from(&project_root))?; + + // Store the loaded config in application state + let mut config_lock = state.config.lock().map_err(|e| e.to_string())?; + *config_lock = Some(config.clone()); + + Ok(config) +} + +/// Get the currently loaded configuration +#[tauri::command] +pub async fn get_current_config( + state: State<'_, AppState>, +) -> Result, String> { + let config_lock = state.config.lock().map_err(|e| 
e.to_string())?; + Ok(config_lock.clone()) +} + +/// Check if a launcher config file exists in the given directory +#[tauri::command] +pub async fn check_launcher_config_exists(project_root: String) -> Result { + let config_path = PathBuf::from(&project_root).join(".launcher-config.yaml"); + Ok(config_path.exists()) +} + +/// Validate a config file without loading it into state +#[tauri::command] +pub async fn validate_config_file(project_root: String) -> Result { + match LauncherConfig::load(&PathBuf::from(&project_root)) { + Ok(config) => Ok(format!( + "Configuration is valid for project '{}'", + config.project.display_name + )), + Err(e) => Err(e), + } +} diff --git a/ushadow/launcher/src-tauri/src/commands/container_discovery.rs b/ushadow/launcher/src-tauri/src/commands/container_discovery.rs new file mode 100644 index 00000000..647b2813 --- /dev/null +++ b/ushadow/launcher/src-tauri/src/commands/container_discovery.rs @@ -0,0 +1,307 @@ +use crate::config::LauncherConfig; +use crate::models::{EnvironmentStatus, InfraService, UshadowEnvironment}; +use serde_json::Value; +use std::process::Command; + +/// Information about a discovered container +#[derive(Debug, Clone)] +pub struct ContainerInfo { + pub name: String, + pub service_name: String, + pub status: String, + pub ports: Vec, + pub compose_project: String, +} + +/// Port mapping from container to host +#[derive(Debug, Clone)] +pub struct PortMapping { + pub host_port: u16, + pub container_port: u16, + pub protocol: String, +} + +/// Discover all containers for a specific environment using Docker Compose labels +pub fn discover_environment_containers( + config: &LauncherConfig, + env_name: &str, +) -> Result, String> { + // Determine the compose project name for this environment + // For ushadow: "ushadow-orange", "ushadow-blue", or "ushadow" for default + let compose_project = if env_name == "default" || env_name.is_empty() { + config.project.name.clone() + } else { + format!("{}-{}", config.project.name, 
env_name) + }; + + // Query Docker for containers with this compose project label + let output = Command::new("docker") + .args([ + "ps", + "-a", + "--filter", + &format!("label=com.docker.compose.project={}", compose_project), + "--format", + "{{.Names}}", + ]) + .output() + .map_err(|e| format!("Failed to query Docker: {}", e))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(format!("Docker command failed: {}", stderr)); + } + + let container_names = String::from_utf8_lossy(&output.stdout); + let mut containers = Vec::new(); + + for container_name in container_names.lines() { + if container_name.trim().is_empty() { + continue; + } + + // Inspect each container to get detailed information + if let Ok(info) = inspect_container(container_name) { + containers.push(info); + } + } + + Ok(containers) +} + +/// Inspect a single container to extract service name, status, and ports +fn inspect_container(container_name: &str) -> Result { + let output = Command::new("docker") + .args(["inspect", container_name]) + .output() + .map_err(|e| format!("Failed to inspect container: {}", e))?; + + if !output.status.success() { + return Err("Docker inspect failed".to_string()); + } + + let json_str = String::from_utf8_lossy(&output.stdout); + let json: Vec = serde_json::from_str(&json_str) + .map_err(|e| format!("Failed to parse Docker inspect JSON: {}", e))?; + + let container = json + .first() + .ok_or("No container info returned".to_string())?; + + // Extract labels + let labels = container["Config"]["Labels"] + .as_object() + .ok_or("No labels found")?; + + let service_name = labels + .get("com.docker.compose.service") + .and_then(|v| v.as_str()) + .unwrap_or("unknown") + .to_string(); + + let compose_project = labels + .get("com.docker.compose.project") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + + // Extract status + let status = container["State"]["Status"] + .as_str() + .unwrap_or("unknown") + 
.to_string(); + + // Extract port mappings + let ports = extract_port_mappings(container)?; + + Ok(ContainerInfo { + name: container_name.to_string(), + service_name, + status, + ports, + compose_project, + }) +} + +/// Extract port mappings from Docker inspect JSON +fn extract_port_mappings(container: &Value) -> Result, String> { + let mut mappings = Vec::new(); + + let ports_obj = match container["NetworkSettings"]["Ports"].as_object() { + Some(obj) => obj, + None => return Ok(mappings), // No ports exposed + }; + + for (container_port_proto, host_bindings) in ports_obj { + // container_port_proto format: "8000/tcp" + let parts: Vec<&str> = container_port_proto.split('/').collect(); + if parts.len() != 2 { + continue; + } + + let container_port = parts[0].parse::().unwrap_or(0); + let protocol = parts[1].to_string(); + + // host_bindings is an array of {"HostIp": "0.0.0.0", "HostPort": "8240"} + if let Some(bindings) = host_bindings.as_array() { + for binding in bindings { + if let Some(host_port_str) = binding["HostPort"].as_str() { + if let Ok(host_port) = host_port_str.parse::() { + mappings.push(PortMapping { + host_port, + container_port, + protocol: protocol.clone(), + }); + } + } + } + } + } + + Ok(mappings) +} + +/// Discover infrastructure containers using compose project label +pub fn discover_infrastructure_containers( + config: &LauncherConfig, +) -> Result, String> { + let output = Command::new("docker") + .args([ + "ps", + "-a", + "--filter", + &format!( + "label=com.docker.compose.project={}", + config.infrastructure.project_name + ), + "--format", + "{{.Names}}", + ]) + .output() + .map_err(|e| format!("Failed to query Docker: {}", e))?; + + if !output.status.success() { + return Ok(Vec::new()); // Infrastructure not running + } + + let container_names = String::from_utf8_lossy(&output.stdout); + let mut services = Vec::new(); + + for container_name in container_names.lines() { + if container_name.trim().is_empty() { + continue; + } + + if let 
Ok(info) = inspect_container(container_name) { + // Format ports string for display + let ports_str = if info.ports.is_empty() { + None + } else { + Some( + info.ports + .iter() + .map(|p| format!("{}:{}", p.host_port, p.container_port)) + .collect::>() + .join(", "), + ) + }; + + services.push(InfraService { + name: info.service_name.clone(), + display_name: capitalize(&info.service_name), + running: info.status == "running", + ports: ports_str, + }); + } + } + + Ok(services) +} + +/// Capitalize first letter of a string +fn capitalize(s: &str) -> String { + let mut chars = s.chars(); + match chars.next() { + None => String::new(), + Some(first) => first.to_uppercase().chain(chars).collect(), + } +} + +/// Determine environment status from container list +pub fn determine_environment_status(containers: &[ContainerInfo]) -> EnvironmentStatus { + if containers.is_empty() { + return EnvironmentStatus::Available; + } + + let running_count = containers.iter().filter(|c| c.status == "running").count(); + + if running_count == containers.len() { + EnvironmentStatus::Running + } else if running_count > 0 { + EnvironmentStatus::Partial + } else { + EnvironmentStatus::Stopped + } +} + +/// Find the primary service port from container list +pub fn get_primary_service_port( + containers: &[ContainerInfo], + primary_service_name: &str, +) -> Option { + containers + .iter() + .find(|c| c.service_name == primary_service_name) + .and_then(|c| c.ports.first()) + .map(|p| p.host_port) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_capitalize() { + assert_eq!(capitalize("mongo"), "Mongo"); + assert_eq!(capitalize("redis"), "Redis"); + assert_eq!(capitalize(""), ""); + } + + #[test] + fn test_determine_status() { + let running_containers = vec![ + ContainerInfo { + name: "test-backend".to_string(), + service_name: "backend".to_string(), + status: "running".to_string(), + ports: vec![], + compose_project: "test".to_string(), + }, + ContainerInfo { + name: 
"test-webui".to_string(), + service_name: "webui".to_string(), + status: "running".to_string(), + ports: vec![], + compose_project: "test".to_string(), + }, + ]; + + assert_eq!( + determine_environment_status(&running_containers), + EnvironmentStatus::Running + ); + + let mut partial_containers = running_containers.clone(); + partial_containers[1].status = "exited".to_string(); + + assert_eq!( + determine_environment_status(&partial_containers), + EnvironmentStatus::Partial + ); + + assert_eq!( + determine_environment_status(&[]), + EnvironmentStatus::Available + ); + } +} diff --git a/ushadow/launcher/src-tauri/src/commands/discovery.rs b/ushadow/launcher/src-tauri/src/commands/discovery.rs index 363c9c00..5ae5fa1c 100644 --- a/ushadow/launcher/src-tauri/src/commands/discovery.rs +++ b/ushadow/launcher/src-tauri/src/commands/discovery.rs @@ -9,6 +9,7 @@ use super::worktree::{list_worktrees, get_colors_for_name}; /// Infrastructure service patterns const INFRA_PATTERNS: &[(&str, &str)] = &[ ("mongo", "MongoDB"), + ("postgres", "PostgreSQL"), ("redis", "Redis"), ("neo4j", "Neo4j"), ("qdrant", "Qdrant"), @@ -169,7 +170,18 @@ pub async fn discover_environments_with_config( // Check infrastructure services for (pattern, display_name) in INFRA_PATTERNS { - if name == *pattern || name.ends_with(&format!("-{}", pattern)) || name.ends_with(&format!("-{}-1", pattern)) { + // Match various container name patterns: + // - exact: "postgres" + // - hyphen suffix: "infra-postgres", "infra-postgres-1" + // - underscore suffix: "hash_postgres", "d5904eb91d56_postgres" + // - contains: any container with the service name in it + let is_match = name == *pattern + || name.ends_with(&format!("-{}", pattern)) + || name.ends_with(&format!("-{}-1", pattern)) + || name.ends_with(&format!("_{}", pattern)) + || name.contains(&format!("_{}", pattern)); + + if is_match { if !found_infra.contains(*pattern) { found_infra.insert(pattern.to_string()); infrastructure.push(InfraService { @@ -178,6 
+190,11 @@ pub async fn discover_environments_with_config( running: is_running, ports: ports.clone(), }); + } else if is_running { + // Update existing entry to running if we find a running instance + if let Some(service) = infrastructure.iter_mut().find(|s| s.name == *pattern) { + service.running = true; + } } } } diff --git a/ushadow/launcher/src-tauri/src/commands/discovery_v2.rs b/ushadow/launcher/src-tauri/src/commands/discovery_v2.rs new file mode 100644 index 00000000..573d18f1 --- /dev/null +++ b/ushadow/launcher/src-tauri/src/commands/discovery_v2.rs @@ -0,0 +1,137 @@ +use crate::config::LauncherConfig; +use crate::models::{DiscoveryResult, EnvironmentStatus, UshadowEnvironment, WorktreeInfo}; +use super::container_discovery::{ + discover_environment_containers, discover_infrastructure_containers, + determine_environment_status, get_primary_service_port, +}; +use super::prerequisites::{check_docker, check_tailscale}; +use super::worktree::{list_worktrees, get_colors_for_name}; +use std::collections::HashMap; + +/// Discover environments using config-based Docker Compose labels +/// Note: The project_root parameter is required to load the config +/// The frontend should provide this from the user's project selection +#[tauri::command] +pub async fn discover_environments_v2( + project_root: String, + main_repo: Option, +) -> Result { + // Check prerequisites + let (docker_installed, docker_running, _) = check_docker(); + let (tailscale_installed, tailscale_connected, _) = check_tailscale(); + + let docker_ok = docker_installed && docker_running; + let tailscale_ok = tailscale_installed && tailscale_connected; + + // Load config from project root + let config = LauncherConfig::load(&std::path::PathBuf::from(&project_root))?; + + // Use provided main_repo or default to project_root + let main_repo = main_repo.unwrap_or_else(|| project_root.clone()); + + // Get worktrees (source of truth for environments) + let worktrees = match 
list_worktrees(main_repo.clone()).await { + Ok(wt) => { + eprintln!("[discovery_v2] Found {} worktrees from {}", wt.len(), main_repo); + wt + } + Err(e) => { + eprintln!("[discovery_v2] Failed to list worktrees from {}: {}", main_repo, e); + Vec::new() + } + }; + + // Build worktree map + let mut worktree_map: HashMap = HashMap::new(); + for wt in worktrees { + worktree_map.insert(wt.name.clone(), wt); + } + + // Discover infrastructure + let infrastructure = if docker_ok { + discover_infrastructure_containers(&config).unwrap_or_else(|e| { + eprintln!("[discovery_v2] Infrastructure discovery error: {}", e); + Vec::new() + }) + } else { + Vec::new() + }; + + // Discover environments + let mut environments = Vec::new(); + + for (env_name, wt) in &worktree_map { + let (primary_color, _) = get_colors_for_name(env_name); + + // Discover containers for this environment using Docker Compose labels + let containers = if docker_ok { + discover_environment_containers(&config, env_name).unwrap_or_else(|e| { + eprintln!("[discovery_v2] Container discovery error for {}: {}", env_name, e); + Vec::new() + }) + } else { + Vec::new() + }; + + // Determine status from containers + let status = determine_environment_status(&containers); + + // Get primary service port + let backend_port = get_primary_service_port(&containers, &config.containers.primary_service); + + // Find webui port from containers (look for webui service) + // Falls back to backend - 5000 if webui service not found + let webui_port = containers + .iter() + .find(|c| c.service_name == "webui" || c.service_name == "frontend") + .and_then(|c| c.ports.first()) + .map(|p| p.host_port) + .or_else(|| backend_port.and_then(|p| if p >= 5000 { Some(p - 5000) } else { None })); + + // Build localhost URL (prefer webui port, fallback to backend) + let localhost_url = if status == EnvironmentStatus::Running { + webui_port.or(backend_port).map(|p| format!("http://localhost:{}", p)) + } else { + None + }; + + // Generate 
Tailscale URL using the host's tailnet + let tailscale_url = super::port_utils::generate_tailscale_url( + env_name, + config.containers.tailscale_project_prefix.as_deref(), + ) + .unwrap_or(None); + + let tailscale_active = tailscale_url.is_some() && status == EnvironmentStatus::Running; + + // Container names for display + let container_names: Vec = containers.iter().map(|c| c.name.clone()).collect(); + + let running = status == EnvironmentStatus::Running || status == EnvironmentStatus::Partial; + + environments.push(UshadowEnvironment { + name: env_name.clone(), + color: primary_color, + path: Some(wt.path.clone()), + branch: Some(wt.branch.clone()), + status, + running, + localhost_url, + tailscale_url, + backend_port, + webui_port, + tailscale_active, + containers: container_names, + is_worktree: true, + created_at: None, // TODO: Get actual creation timestamp from git worktree + base_branch: None, // TODO: Determine base branch (main/dev) from worktree + }); + } + + Ok(DiscoveryResult { + infrastructure, + environments, + docker_ok, + tailscale_ok, + }) +} diff --git a/ushadow/launcher/src-tauri/src/commands/docker.rs b/ushadow/launcher/src-tauri/src/commands/docker.rs index 21a73f0a..3c0a2ba9 100644 --- a/ushadow/launcher/src-tauri/src/commands/docker.rs +++ b/ushadow/launcher/src-tauri/src/commands/docker.rs @@ -3,10 +3,11 @@ use std::sync::Mutex; use std::collections::HashMap; use std::path::Path; use tauri::State; -use crate::models::{ContainerStatus, ServiceInfo}; +use crate::models::{ContainerStatus, ServiceInfo, InfraService}; use super::utils::{silent_command, shell_command, quote_path_buf}; use super::platform::{Platform, PlatformOps}; use super::bundled; +use serde_yaml::Value; /// Recursively copy a directory and all its contents fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> { @@ -122,12 +123,16 @@ fn find_available_ports(default_backend: u16, default_webui: u16) -> (u16, u16) /// Application state pub struct AppState { pub 
project_root: Mutex>, + pub containers_running: Mutex, + pub config: Mutex>, } impl AppState { pub fn new() -> Self { Self { project_root: Mutex::new(None), + containers_running: Mutex::new(false), + config: Mutex::new(None), } } } @@ -154,7 +159,7 @@ pub async fn start_infrastructure(state: State<'_, AppState>) -> Result, name: String, mode: Ok(format!("Environment '{}' started{}", name, port_info)) } +/// Parse docker-compose.infra.yml to get list of available services +#[tauri::command] +pub fn get_infra_services_from_compose(state: State) -> Result, String> { + let root = state.project_root.lock().map_err(|e| e.to_string())?; + let project_root = root.clone().ok_or("Project root not set")?; + drop(root); + + // Try to find compose file (bundled or working directory) + let bundled_compose_file = bundled::get_compose_file(&project_root, "docker-compose.infra.yml"); + + // Also check working directory + let working_compose_file = std::path::Path::new(&project_root) + .join("compose") + .join("docker-compose.infra.yml"); + + let compose_file = if working_compose_file.exists() { + working_compose_file + } else { + bundled_compose_file + }; + + if !compose_file.exists() { + return Err(format!("docker-compose.infra.yml not found at {:?}", compose_file)); + } + + // Read and parse YAML + let contents = std::fs::read_to_string(&compose_file) + .map_err(|e| format!("Failed to read compose file: {}", e))?; + + let yaml: Value = serde_yaml::from_str(&contents) + .map_err(|e| format!("Failed to parse compose YAML: {}", e))?; + + // Extract services + let services = yaml.get("services") + .ok_or("No 'services' section found in compose file")? 
+ .as_mapping() + .ok_or("'services' is not a mapping")?; + + let mut result = Vec::new(); + + // Map of service IDs to display names + let display_names: HashMap<&str, &str> = [ + ("postgres", "PostgreSQL"), + ("mongodb", "MongoDB"), + ("mongo", "MongoDB"), + ("redis", "Redis"), + ("mysql", "MySQL"), + ("elasticsearch", "Elasticsearch"), + ("rabbitmq", "RabbitMQ"), + ("kafka", "Kafka"), + ("qdrant", "Qdrant"), + ("neo4j", "Neo4j"), + ].iter().copied().collect(); + + for (service_id, service_config) in services { + let service_name = service_id.as_str() + .ok_or("Service name is not a string")? + .to_string(); + + // Get display name (capitalize if not in map) + let display_name = display_names.get(service_name.as_str()) + .copied() + .unwrap_or_else(|| &service_name) + .to_string(); + + // Extract default port from exposed ports + let default_port = service_config.get("ports") + .and_then(|ports| ports.as_sequence()) + .and_then(|seq| seq.first()) + .and_then(|port_mapping| port_mapping.as_str()) + .and_then(|mapping| { + // Parse port mapping like "5432:5432" or "5432" + let parts: Vec<&str> = mapping.split(':').collect(); + parts.first().and_then(|p| p.parse::().ok()) + }); + + // Extract profiles + let profiles = service_config.get("profiles") + .and_then(|p| p.as_sequence()) + .map(|seq| { + seq.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }) + .unwrap_or_else(Vec::new); + + // Check if this service is actually running + let running = check_service_running(&service_name); + + // Format ports string + let ports_str = default_port.map(|p| p.to_string()); + + result.push(InfraService { + name: service_name, + display_name, + running, + ports: ports_str, + }); + } + + Ok(result) +} + +/// Check if a service container is running +fn check_service_running(service_name: &str) -> bool { + use std::process::Command; + + // Check if container exists and is running + let output = Command::new("docker") + .args([ + "ps", + "--filter", + 
&format!("name={}", service_name), + "--filter", + "status=running", + "--format", + "{{.Names}}", + ]) + .output(); + + match output { + Ok(output) if output.status.success() => { + let stdout = String::from_utf8_lossy(&output.stdout); + stdout.lines().any(|line| line.contains(service_name)) + } + _ => false, + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/ushadow/launcher/src-tauri/src/commands/env_scanner.rs b/ushadow/launcher/src-tauri/src/commands/env_scanner.rs new file mode 100644 index 00000000..a15f2e5b --- /dev/null +++ b/ushadow/launcher/src-tauri/src/commands/env_scanner.rs @@ -0,0 +1,236 @@ +use std::path::Path; +use std::fs; +use serde::{Serialize, Deserialize}; + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct DetectedPort { + pub name: String, + pub default_value: Option, + pub base_port: Option, + pub is_database: bool, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct DetectedEnvVar { + pub name: String, + pub default_value: Option, + pub is_port: bool, + pub is_database_port: bool, + pub should_append_env_name: bool, // For DB names, user names, etc. 
+} + +/// Scan .env.template, .env.example, or .env for port-related variables +#[tauri::command] +pub fn scan_env_file(project_root: String) -> Result, String> { + let project_path = Path::new(&project_root); + + // Try different env file names in order of preference + let env_files = vec![ + ".env.template", + ".env.example", + ".env.sample", + ".env", + ]; + + let mut found_file = None; + for file_name in &env_files { + let file_path = project_path.join(file_name); + if file_path.exists() { + found_file = Some(file_path); + break; + } + } + + let env_file = found_file.ok_or("No .env file found in project root")?; + + eprintln!("[scan_env_file] Scanning: {:?}", env_file); + + let content = fs::read_to_string(&env_file) + .map_err(|e| format!("Failed to read env file: {}", e))?; + + let mut detected_ports = Vec::new(); + + for line in content.lines() { + let line = line.trim(); + + // Skip comments and empty lines + if line.starts_with('#') || line.is_empty() { + continue; + } + + // Parse VAR=value format + if let Some((key, value)) = line.split_once('=') { + let key = key.trim(); + let value = value.trim().trim_matches('"').trim_matches('\''); + + // Check if this looks like a port variable + if is_port_variable(key) { + let base_port = value.parse::().ok(); + let is_database = is_database_port(key); + + detected_ports.push(DetectedPort { + name: key.to_string(), + default_value: Some(value.to_string()), + base_port, + is_database, + }); + } + } + } + + eprintln!("[scan_env_file] Detected {} port variables", detected_ports.len()); + + Ok(detected_ports) +} + +/// Scan all environment variables from .env files +#[tauri::command] +pub fn scan_all_env_vars(project_root: String) -> Result, String> { + let project_path = Path::new(&project_root); + + // Try different env file names in order of preference + let env_files = vec![ + ".env.template", + ".env.example", + ".env.sample", + ".env", + ]; + + let mut found_file = None; + for file_name in &env_files { + let 
file_path = project_path.join(file_name); + if file_path.exists() { + found_file = Some(file_path); + break; + } + } + + let env_file = found_file.ok_or("No .env file found in project root")?; + + eprintln!("[scan_all_env_vars] Scanning: {:?}", env_file); + + let content = fs::read_to_string(&env_file) + .map_err(|e| format!("Failed to read env file: {}", e))?; + + let mut detected_vars = Vec::new(); + + for line in content.lines() { + let line = line.trim(); + + // Skip comments and empty lines + if line.starts_with('#') || line.is_empty() { + continue; + } + + // Parse VAR=value format + if let Some((key, value)) = line.split_once('=') { + let key = key.trim(); + let value = value.trim().trim_matches('"').trim_matches('\''); + + let is_port = is_port_variable(key); + let is_database_port = is_database_port(key); + let should_append_env_name = should_append_env_name(key); + + detected_vars.push(DetectedEnvVar { + name: key.to_string(), + default_value: Some(value.to_string()), + is_port, + is_database_port, + should_append_env_name, + }); + } + } + + eprintln!("[scan_all_env_vars] Detected {} variables", detected_vars.len()); + + Ok(detected_vars) +} + +/// Check if a variable name looks like a port variable +fn is_port_variable(key: &str) -> bool { + let key_upper = key.to_uppercase(); + + // Explicit port variables + if key_upper.contains("PORT") { + return true; + } + + // Common database/service port patterns + let patterns = [ + "POSTGRES", "MYSQL", "MONGODB", "MONGO", "REDIS", "MEMCACHED", + "ELASTICSEARCH", "RABBITMQ", "KAFKA", "CASSANDRA", + "BACKEND", "API", "WEBUI", "FRONTEND", "WEB", + ]; + + for pattern in &patterns { + if key_upper.contains(pattern) && key_upper.contains("PORT") { + return true; + } + } + + false +} + +/// Check if a port variable is for a database +fn is_database_port(key: &str) -> bool { + let key_upper = key.to_uppercase(); + + let db_keywords = [ + "POSTGRES", "MYSQL", "MONGODB", "MONGO", "REDIS", "MEMCACHED", + "ELASTICSEARCH", 
"CASSANDRA", "MARIADB", "MSSQL", "ORACLE", + "DB", "DATABASE", + ]; + + db_keywords.iter().any(|kw| key_upper.contains(kw)) +} + +/// Check if a variable should have the environment name appended +/// (e.g., database names, user names, bucket names) +fn should_append_env_name(key: &str) -> bool { + let key_upper = key.to_uppercase(); + + // Variables that typically need env-specific values + let patterns = [ + "DB_NAME", "DATABASE_NAME", "POSTGRES_DB", "MYSQL_DATABASE", "MONGO_DATABASE", + "DB_USER", "DATABASE_USER", "POSTGRES_USER", "MYSQL_USER", + "BUCKET_NAME", "QUEUE_NAME", "TOPIC_NAME", "STREAM_NAME", + "SCHEMA_NAME", "TENANT_", "NAMESPACE", + ]; + + patterns.iter().any(|pattern| key_upper.contains(pattern)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_is_port_variable() { + assert!(is_port_variable("BACKEND_PORT")); + assert!(is_port_variable("postgres_port")); + assert!(is_port_variable("REDIS_PORT")); + assert!(is_port_variable("API_PORT")); + assert!(!is_port_variable("API_KEY")); + assert!(!is_port_variable("DATABASE_URL")); + } + + #[test] + fn test_is_database_port() { + assert!(is_database_port("POSTGRES_PORT")); + assert!(is_database_port("REDIS_PORT")); + assert!(is_database_port("MONGODB_PORT")); + assert!(!is_database_port("BACKEND_PORT")); + assert!(!is_database_port("WEBUI_PORT")); + } + + #[test] + fn test_should_append_env_name() { + assert!(should_append_env_name("POSTGRES_DB")); + assert!(should_append_env_name("DB_NAME")); + assert!(should_append_env_name("DATABASE_NAME")); + assert!(should_append_env_name("BUCKET_NAME")); + assert!(should_append_env_name("POSTGRES_USER")); + assert!(!should_append_env_name("POSTGRES_PASSWORD")); + assert!(!should_append_env_name("API_KEY")); + } +} diff --git a/ushadow/launcher/src-tauri/src/commands/http_client.rs b/ushadow/launcher/src-tauri/src/commands/http_client.rs new file mode 100644 index 00000000..2fac6d8b --- /dev/null +++ 
b/ushadow/launcher/src-tauri/src/commands/http_client.rs @@ -0,0 +1,81 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[derive(Debug, Serialize, Deserialize)] +pub struct HttpResponse { + pub status: u16, + pub body: String, + pub headers: HashMap, +} + +/// Make an HTTP request from Rust (bypasses CORS) +#[tauri::command] +pub async fn http_request( + url: String, + method: String, + headers: Option>, + body: Option, +) -> Result { + eprintln!("[HTTP] Making {} request to: {}", method, url); + + let client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build() + .map_err(|e| format!("Failed to create HTTP client: {}", e))?; + + let mut request: reqwest::RequestBuilder = match method.to_uppercase().as_str() { + "GET" => client.get(&url), + "POST" => client.post(&url), + "PUT" => client.put(&url), + "DELETE" => client.delete(&url), + "PATCH" => client.patch(&url), + _ => return Err(format!("Unsupported HTTP method: {}", method)), + }; + + // Add headers + if let Some(headers) = headers { + for (key, value) in headers { + request = request.header(key, value); + } + } + + // Add body for POST/PUT/PATCH + if let Some(body_content) = body { + eprintln!("[HTTP] Request body: {}", body_content); + request = request.body(body_content); + } + + // Send request + eprintln!("[HTTP] Sending request..."); + let response = request + .send() + .await + .map_err(|e| { + eprintln!("[HTTP] Request failed: {}", e); + format!("HTTP request failed: {}", e) + })?; + + eprintln!("[HTTP] Response status: {}", response.status()); + + let status = response.status().as_u16(); + + // Extract headers + let mut response_headers = HashMap::new(); + for (key, value) in response.headers() { + if let Ok(value_str) = value.to_str() { + response_headers.insert(key.to_string(), value_str.to_string()); + } + } + + // Get body + let body = response + .text() + .await + .map_err(|e| format!("Failed to read response body: {}", e))?; + + 
Ok(HttpResponse { + status, + body, + headers: response_headers, + }) +} diff --git a/ushadow/launcher/src-tauri/src/commands/kanban.rs b/ushadow/launcher/src-tauri/src/commands/kanban.rs new file mode 100644 index 00000000..55cfb13c --- /dev/null +++ b/ushadow/launcher/src-tauri/src/commands/kanban.rs @@ -0,0 +1,999 @@ +use crate::models::{Epic, Ticket, TicketPriority, TicketStatus}; +use super::worktree::create_worktree_with_workmux; +use super::utils::shell_command; +use std::path::PathBuf; +use std::fs; +use serde::{Deserialize, Serialize}; +use tauri::api::path::data_dir; + +/// Request to create a ticket with worktree and tmux +#[derive(Debug, Deserialize)] +pub struct CreateTicketWorktreeRequest { + pub ticket_id: String, + pub ticket_title: String, + pub project_root: String, + pub branch_name: Option, // If None, will be generated from ticket_id + pub base_branch: Option, // Default to "main" + pub epic_branch: Option, // If part of epic with shared branch +} + +/// Result of creating a ticket worktree +#[derive(Debug, Serialize)] +pub struct CreateTicketWorktreeResult { + pub worktree_path: String, + pub branch_name: String, + pub tmux_window_name: String, + pub tmux_session_name: String, +} + +/// Create a worktree and tmux window for a kanban ticket +/// +/// This command handles two scenarios: +/// 1. Ticket has its own branch (epic_branch is None) +/// 2. 
/// Ticket shares a branch with epic (epic_branch is Some)
#[tauri::command]
pub async fn create_ticket_worktree(
    request: CreateTicketWorktreeRequest,
) -> Result {
    eprintln!("[create_ticket_worktree] Creating worktree for ticket: {}", request.ticket_title);

    // Determine branch to use: epic branch > explicit branch > generated name.
    let branch_name = if let Some(epic_branch) = request.epic_branch {
        // Use epic's shared branch
        eprintln!("[create_ticket_worktree] Using epic's shared branch: {}", epic_branch);
        epic_branch
    } else if let Some(branch_name) = request.branch_name {
        // Use provided branch name
        branch_name
    } else {
        // Generate branch name from ticket ID
        format!("ticket-{}", request.ticket_id)
    };

    let base_branch = request.base_branch.unwrap_or_else(|| "main".to_string());

    // Create worktree with tmux integration
    // The worktree name will be the branch name
    let worktree_info = create_worktree_with_workmux(
        request.project_root.clone(),
        branch_name.clone(),
        Some(base_branch),
        Some(false), // Not background
    ).await?;

    // Create a unique window name for this ticket (include ticket ID to ensure uniqueness)
    // NOTE(review): this is a byte slice; if ticket_id ever contains multi-byte
    // UTF-8 near the cut point, indexing can panic — confirm IDs are ASCII.
    let ticket_id_short = &request.ticket_id[request.ticket_id.len().saturating_sub(6)..]; // Last 6 chars
    let tmux_window_name = format!("ushadow-{}-{}", branch_name, ticket_id_short);
    let tmux_session_name = "workmux".to_string(); // Default session

    eprintln!("[create_ticket_worktree] βœ“ Worktree created at: {}", worktree_info.path);
    eprintln!("[create_ticket_worktree] βœ“ Tmux window: {}", tmux_window_name);

    Ok(CreateTicketWorktreeResult {
        worktree_path: worktree_info.path,
        branch_name,
        tmux_window_name,
        tmux_session_name,
    })
}

/// Attach an existing ticket to an existing worktree (for epic-shared branches)
///
/// Ensures the tmux server, the "workmux" session, and a per-ticket window
/// exist; reuses the window if it is already present.
#[tauri::command]
pub async fn attach_ticket_to_worktree(
    ticket_id: String,
    worktree_path: String,
    branch_name: String,
) -> Result {
    eprintln!("[attach_ticket_to_worktree] Attaching ticket {} to existing worktree: {}", ticket_id, worktree_path);

    // Verify worktree exists
    let path_buf = PathBuf::from(&worktree_path);
    if !path_buf.exists() {
        eprintln!("[attach_ticket_to_worktree] ERROR: Worktree path does not exist: {}", worktree_path);
        return Err(format!("Worktree path does not exist: {}", worktree_path));
    }

    // Create a unique window name for this ticket (include ticket ID to ensure uniqueness)
    // NOTE(review): same byte-slice char-boundary caveat as create_ticket_worktree.
    let ticket_id_short = &ticket_id[ticket_id.len().saturating_sub(6)..]; // Last 6 chars
    let tmux_window_name = format!("ushadow-{}-{}", branch_name, ticket_id_short);
    let tmux_session_name = "workmux".to_string();

    // Ensure tmux server is running
    shell_command("tmux start-server")
        .output()
        .map_err(|e| format!("Failed to start tmux server: {}", e))?;

    // Check if the workmux session exists
    let check_session = shell_command("tmux has-session -t workmux")
        .output();

    if check_session.is_err() || !check_session.unwrap().status.success() {
        eprintln!("[attach_ticket_to_worktree] Creating workmux session...");
        shell_command("tmux new-session -d -s workmux")
            .output()
            .map_err(|e| format!("Failed to create workmux session: {}", e))?;
    }

    // Check if tmux window exists (exact line match against window names).
    let check_window = shell_command(&format!(
        "tmux list-windows -t {} -F '#W'",
        tmux_session_name
    ))
    .output()
    .map_err(|e| format!("Failed to check tmux windows: {}", e))?;

    let stdout = String::from_utf8_lossy(&check_window.stdout);
    let window_exists = stdout.lines().any(|line| line == tmux_window_name);

    if window_exists {
        eprintln!("[attach_ticket_to_worktree] βœ“ Found existing tmux window: {}", tmux_window_name);
    } else {
        eprintln!("[attach_ticket_to_worktree] Creating tmux window: {}", tmux_window_name);

        // Create the tmux window
        // NOTE(review): worktree_path is single-quoted into a shell string —
        // a path containing a single quote breaks the command; verify inputs.
        let create_result = shell_command(&format!(
            "tmux new-window -t {} -n {} -c '{}'",
            tmux_session_name, tmux_window_name, worktree_path
        ))
        .output()
        .map_err(|e| format!("Failed to create tmux window: {}", e))?;

        if !create_result.status.success() {
            let stderr = String::from_utf8_lossy(&create_result.stderr);
            return Err(format!("Failed to create tmux window: {}", stderr));
        }

        eprintln!("[attach_ticket_to_worktree] βœ“ Created tmux window: {}", tmux_window_name);
    }

    eprintln!("[attach_ticket_to_worktree] βœ“ Ticket attached to worktree with tmux window: {}", tmux_window_name);

    Ok(CreateTicketWorktreeResult {
        worktree_path,
        branch_name,
        tmux_window_name,
        tmux_session_name,
    })
}

/// List all tickets associated with a specific tmux window
/// Returns ticket IDs that are using this tmux window
#[tauri::command]
pub async fn get_tickets_for_tmux_window(
    window_name: String,
) -> Result, String> {
    // This will need to query the backend API
    // For now, return empty list as placeholder
    eprintln!("[get_tickets_for_tmux_window] Getting tickets for window: {}", window_name);
    Ok(vec![])
}

/// Get tmux window information for a ticket
#[tauri::command]
pub async fn get_ticket_tmux_info(
    ticket_id: String,
) -> Result, String> {
    // This will need to query the backend API to get ticket's tmux details
    // For now, return None as placeholder
    eprintln!("[get_ticket_tmux_info] Getting tmux info for ticket: {}", ticket_id);
    Ok(None)
}

// ============================================================================
// Local Ticket & Epic Storage (SQLite-based)
// ============================================================================

use rusqlite::{Connection, params};

/// Get the path to the SQLite database
///
/// Resolves to <platform data dir>/com.ushadow.launcher/kanban.db, creating
/// the directory on first use.
fn get_db_path() -> Result {
    let data_dir = data_dir().ok_or("Failed to get data directory")?;
    let launcher_dir = data_dir.join("com.ushadow.launcher");

    // Create directory if it doesn't exist
    if !launcher_dir.exists() {
        fs::create_dir_all(&launcher_dir)
            .map_err(|e| format!("Failed to create launcher data directory: {}", e))?;
    }

    Ok(launcher_dir.join("kanban.db"))
}

/// Get a database connection and ensure schema is initialized
///
/// Opens (or creates) the kanban.db SQLite file and idempotently creates
/// the epics/tickets tables and their indexes. Called by every command, so
/// schema setup cost is paid on each call.
fn get_db_connection() -> Result {
    let db_path = get_db_path()?;
    let conn = Connection::open(&db_path)
        .map_err(|e| format!("Failed to open database: {}", e))?;

    // Create tables if they don't exist
    conn.execute(
        "CREATE TABLE IF NOT EXISTS epics (
            id TEXT PRIMARY KEY,
            title TEXT NOT NULL,
            description TEXT,
            color TEXT NOT NULL,
            branch_name TEXT,
            base_branch TEXT NOT NULL,
            project_id TEXT,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL
        )",
        [],
    ).map_err(|e| format!("Failed to create epics table: {}", e))?;

    // "order" is quoted because it is an SQL keyword.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS tickets (
            id TEXT PRIMARY KEY,
            title TEXT NOT NULL,
            description TEXT,
            status TEXT NOT NULL,
            priority TEXT NOT NULL,
            epic_id TEXT,
            tags TEXT NOT NULL,
            color TEXT,
            tmux_window_name TEXT,
            tmux_session_name TEXT,
            branch_name TEXT,
            worktree_path TEXT,
            environment_name TEXT,
            project_id TEXT,
            assigned_to TEXT,
            \"order\" INTEGER NOT NULL,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL,
            FOREIGN KEY (epic_id) REFERENCES epics (id) ON DELETE SET NULL
        )",
        [],
    ).map_err(|e| format!("Failed to create tickets table: {}", e))?;

    // Create indexes for common queries
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_tickets_status ON tickets(status)",
        [],
    ).map_err(|e| format!("Failed to create index: {}", e))?;

    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_tickets_project ON tickets(project_id)",
        [],
    ).map_err(|e| format!("Failed to create index: {}", e))?;

    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_epics_project ON epics(project_id)",
        [],
    ).map_err(|e| format!("Failed to create index: {}", e))?;

    Ok(conn)
}

/// Get all tickets, optionally filtered by project
///
/// NOTE(review): `SELECT *` couples the row-mapping below to the exact
/// column order of the CREATE TABLE statement; an added column would
/// silently shift indices.
#[tauri::command]
pub async fn get_tickets(project_id: Option) -> Result, String> {
    let conn = get_db_connection()?;

    // Build query based on filter
    let query = if project_id.is_some() {
        "SELECT * FROM tickets WHERE project_id = ? ORDER BY \"order\""
    } else {
        "SELECT * FROM tickets ORDER BY \"order\""
    };

    let mut stmt = conn.prepare(query)
        .map_err(|e| format!("Failed to prepare statement: {}", e))?;

    // Helper function to map row to Ticket.
    // Unknown status/priority strings fall back to Backlog/Medium.
    let map_row = |row: &rusqlite::Row| -> Result {
        Ok(Ticket {
            id: row.get(0)?,
            title: row.get(1)?,
            description: row.get(2)?,
            status: match row.get::<_, String>(3)?.as_str() {
                "backlog" => TicketStatus::Backlog,
                "todo" => TicketStatus::Todo,
                "in_progress" => TicketStatus::InProgress,
                "in_review" => TicketStatus::InReview,
                "done" => TicketStatus::Done,
                "archived" => TicketStatus::Archived,
                _ => TicketStatus::Backlog,
            },
            priority: match row.get::<_, String>(4)?.as_str() {
                "low" => TicketPriority::Low,
                "medium" => TicketPriority::Medium,
                "high" => TicketPriority::High,
                "urgent" => TicketPriority::Urgent,
                _ => TicketPriority::Medium,
            },
            epic_id: row.get(5)?,
            tags: serde_json::from_str(&row.get::<_, String>(6)?).unwrap_or_default(),
            color: row.get(7)?,
            tmux_window_name: row.get(8)?,
            tmux_session_name: row.get(9)?,
            branch_name: row.get(10)?,
            worktree_path: row.get(11)?,
            environment_name: row.get(12)?,
            project_id: row.get(13)?,
            assigned_to: row.get(14)?,
            order: row.get(15)?,
            created_at: row.get(16)?,
            updated_at: row.get(17)?,
        })
    };

    // Execute query with or without parameter.
    // NOTE(review): rows that fail to deserialize are silently dropped by
    // filter_map(.ok()) — corrupted rows vanish without any log line.
    let tickets: Vec = if let Some(pid) = project_id {
        stmt.query_map([pid], map_row)
            .map_err(|e| format!("Failed to query tickets: {}", e))?
            .filter_map(|r| r.ok())
            .collect()
    } else {
        stmt.query_map([], map_row)
            .map_err(|e| format!("Failed to query tickets: {}", e))?
            .filter_map(|r| r.ok())
            .collect()
    };

    Ok(tickets)
}

/// Get all epics, optionally filtered by project
#[tauri::command]
pub async fn get_epics(project_id: Option) -> Result, String> {
    let conn = get_db_connection()?;

    // Build query based on filter
    let query = if project_id.is_some() {
        "SELECT * FROM epics WHERE project_id = ? ORDER BY created_at DESC"
    } else {
        "SELECT * FROM epics ORDER BY created_at DESC"
    };

    let mut stmt = conn.prepare(query)
        .map_err(|e| format!("Failed to prepare statement: {}", e))?;

    // Helper function to map row to Epic
    let map_row = |row: &rusqlite::Row| -> Result {
        Ok(Epic {
            id: row.get(0)?,
            title: row.get(1)?,
            description: row.get(2)?,
            color: row.get(3)?,
            branch_name: row.get(4)?,
            base_branch: row.get(5)?,
            project_id: row.get(6)?,
            created_at: row.get(7)?,
            updated_at: row.get(8)?,
        })
    };

    // Execute query with or without parameter (same silent-drop caveat as tickets).
    let epics: Vec = if let Some(pid) = project_id {
        stmt.query_map([pid], map_row)
            .map_err(|e| format!("Failed to query epics: {}", e))?
            .filter_map(|r| r.ok())
            .collect()
    } else {
        stmt.query_map([], map_row)
            .map_err(|e| format!("Failed to query epics: {}", e))?
            .filter_map(|r| r.ok())
            .collect()
    };

    Ok(epics)
}

/// Create a new ticket
///
/// New tickets always start in "backlog" with the next ticket number for
/// the "ush" prefix and the next display order within backlog.
#[tauri::command]
pub async fn create_ticket(
    title: String,
    description: Option,
    priority: String,
    epic_id: Option,
    tags: Vec,
    environment_name: Option,
    project_id: Option,
) -> Result {
    let conn = get_db_connection()?;

    // Parse priority (unknown strings fall back to Medium)
    let priority_enum = match priority.as_str() {
        "low" => TicketPriority::Low,
        "medium" => TicketPriority::Medium,
        "high" => TicketPriority::High,
        "urgent" => TicketPriority::Urgent,
        _ => TicketPriority::Medium,
    };

    // Generate sequential ticket ID (e.g., ush-1, ush-2, etc.)
    let prefix = "ush";
    let next_number = get_next_ticket_number(&conn, prefix)?;
    let id = format!("{}-{}", prefix, next_number);

    // Get current timestamp
    let now = chrono::Utc::now().to_rfc3339();

    // Calculate order (highest + 1 for backlog status)
    // NOTE(review): not atomic with the INSERT below — two concurrent
    // create_ticket calls can pick the same order value.
    let max_order: i32 = conn.query_row(
        "SELECT COALESCE(MAX(\"order\"), -1) FROM tickets WHERE status = 'backlog'",
        [],
        |row| row.get(0),
    ).unwrap_or(-1);

    let order = max_order + 1;

    // Serialize tags to JSON
    let tags_json = serde_json::to_string(&tags)
        .map_err(|e| format!("Failed to serialize tags: {}", e))?;

    // Insert ticket into database
    conn.execute(
        "INSERT INTO tickets (id, title, description, status, priority, epic_id, tags, color, tmux_window_name, tmux_session_name, branch_name, worktree_path, environment_name, project_id, assigned_to, \"order\", created_at, updated_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18)",
        params![
            &id,
            &title,
            &description,
            "backlog",
            &priority,
            &epic_id,
            &tags_json,
            None::, // color
            None::, // tmux_window_name
            None::, // tmux_session_name
            None::, // branch_name
            None::, // worktree_path
            &environment_name,
            &project_id,
            None::, // assigned_to
            order,
            &now,
            &now,
        ],
    ).map_err(|e| format!("Failed to insert ticket: {}", e))?;

    // Return the in-memory ticket mirroring what was just inserted.
    Ok(Ticket {
        id,
        title,
        description,
        status: TicketStatus::Backlog,
        priority: priority_enum,
        epic_id,
        tags,
        color: None,
        tmux_window_name: None,
        tmux_session_name: None,
        branch_name: None,
        worktree_path: None,
        environment_name,
        project_id,
        assigned_to: None,
        order,
        created_at: now.clone(),
        updated_at: now,
    })
}

/// Update a ticket
///
/// Read-modify-write: loads the row, overlays the provided fields, and
/// writes everything back.
/// NOTE(review): Option params mean "absent = keep current"; there is no
/// way to clear a nullable field (e.g. detach epic_id) back to NULL.
/// NOTE(review): not transactional — concurrent updates can lose writes.
#[tauri::command]
pub async fn update_ticket(
    id: String,
    title: Option,
    description: Option,
    status: Option,
    priority: Option,
    epic_id: Option,
    tags: Option>,
    order: Option,
    worktree_path: Option,
    branch_name: Option,
    tmux_window_name: Option,
    tmux_session_name: Option,
    environment_name: Option,
) -> Result {
    let conn = get_db_connection()?;

    // First, get the current ticket to return updated version
    let mut stmt = conn.prepare("SELECT * FROM tickets WHERE id = ?")
        .map_err(|e| format!("Failed to prepare statement: {}", e))?;

    let mut ticket = stmt.query_row([&id], |row| {
        Ok(Ticket {
            id: row.get(0)?,
            title: row.get(1)?,
            description: row.get(2)?,
            status: match row.get::<_, String>(3)?.as_str() {
                "backlog" => TicketStatus::Backlog,
                "todo" => TicketStatus::Todo,
                "in_progress" => TicketStatus::InProgress,
                "in_review" => TicketStatus::InReview,
                "done" => TicketStatus::Done,
                "archived" => TicketStatus::Archived,
                _ => TicketStatus::Backlog,
            },
            priority: match row.get::<_, String>(4)?.as_str() {
                "low" => TicketPriority::Low,
                "medium" => TicketPriority::Medium,
                "high" => TicketPriority::High,
                "urgent" => TicketPriority::Urgent,
                _ => TicketPriority::Medium,
            },
            epic_id: row.get(5)?,
            tags: serde_json::from_str(&row.get::<_, String>(6)?).unwrap_or_default(),
            color: row.get(7)?,
            tmux_window_name: row.get(8)?,
            tmux_session_name: row.get(9)?,
            branch_name: row.get(10)?,
            worktree_path: row.get(11)?,
            environment_name: row.get(12)?,
            project_id: row.get(13)?,
            assigned_to: row.get(14)?,
            order: row.get(15)?,
            created_at: row.get(16)?,
            updated_at: row.get(17)?,
        })
    }).map_err(|e| format!("Ticket not found: {}", e))?;

    // Update fields in memory (unknown status/priority strings keep the old value)
    if let Some(t) = title {
        ticket.title = t;
    }
    if let Some(d) = description {
        ticket.description = Some(d);
    }
    if let Some(s) = status {
        ticket.status = match s.as_str() {
            "backlog" => TicketStatus::Backlog,
            "todo" => TicketStatus::Todo,
            "in_progress" => TicketStatus::InProgress,
            "in_review" => TicketStatus::InReview,
            "done" => TicketStatus::Done,
            "archived" => TicketStatus::Archived,
            _ => ticket.status,
        };
    }
    if let Some(p) = priority {
        ticket.priority = match p.as_str() {
            "low" => TicketPriority::Low,
            "medium" => TicketPriority::Medium,
            "high" => TicketPriority::High,
            "urgent" => TicketPriority::Urgent,
            _ => ticket.priority,
        };
    }
    if let Some(e) = epic_id {
        ticket.epic_id = Some(e);
    }
    if let Some(t) = tags {
        ticket.tags = t;
    }
    if let Some(o) = order {
        ticket.order = o;
    }
    if let Some(wp) = worktree_path {
        ticket.worktree_path = Some(wp);
    }
    if let Some(bn) = branch_name {
        ticket.branch_name = Some(bn);
    }
    if let Some(twn) = tmux_window_name {
        ticket.tmux_window_name = Some(twn);
    }
    if let Some(tsn) = tmux_session_name {
        ticket.tmux_session_name = Some(tsn);
    }
    if let Some(en) = environment_name {
        ticket.environment_name = Some(en);
    }

    ticket.updated_at = chrono::Utc::now().to_rfc3339();

    // Serialize tags to JSON
    let tags_json = serde_json::to_string(&ticket.tags)
        .map_err(|e| format!("Failed to serialize tags: {}", e))?;

    // Convert status and priority to strings
    let status_str = match ticket.status {
        TicketStatus::Backlog => "backlog",
        TicketStatus::Todo => "todo",
        TicketStatus::InProgress => "in_progress",
        TicketStatus::InReview => "in_review",
        TicketStatus::Done => "done",
        TicketStatus::Archived => "archived",
    };

    let priority_str = match ticket.priority {
        TicketPriority::Low => "low",
        TicketPriority::Medium => "medium",
        TicketPriority::High => "high",
        TicketPriority::Urgent => "urgent",
    };

    // Update in database
    conn.execute(
        "UPDATE tickets SET title = ?1, description = ?2, status = ?3, priority = ?4, epic_id = ?5, tags = ?6, \"order\" = ?7, worktree_path = ?8, branch_name = ?9, tmux_window_name = ?10, tmux_session_name = ?11, environment_name = ?12, updated_at = ?13 WHERE id = ?14",
        params![
            &ticket.title,
            &ticket.description,
            status_str,
            priority_str,
            &ticket.epic_id,
            &tags_json,
            ticket.order,
            &ticket.worktree_path,
            &ticket.branch_name,
            &ticket.tmux_window_name,
            &ticket.tmux_session_name,
            &ticket.environment_name,
            &ticket.updated_at,
            &id,
        ],
    ).map_err(|e| format!("Failed to update ticket: {}", e))?;

    Ok(ticket)
}

/// Delete a ticket
#[tauri::command]
pub async fn delete_ticket(id: String) -> Result<(), String> {
    let conn = get_db_connection()?;

    conn.execute("DELETE FROM tickets WHERE id = ?", params![&id])
        .map_err(|e| format!("Failed to delete ticket: {}", e))?;

    Ok(())
}

/// Create a new epic
///
/// Epics get a random UUID-based id (unlike tickets' sequential ids).
#[tauri::command]
pub async fn create_epic(
    title: String,
    description: Option,
    color: String,
    base_branch: String,
    branch_name: Option,
    project_id: Option,
) -> Result {
    let conn = get_db_connection()?;

    let id = format!("epic-{}", uuid::Uuid::new_v4());
    let now = chrono::Utc::now().to_rfc3339();

    // Insert epic into database
    conn.execute(
        "INSERT INTO epics (id, title, description, color, branch_name, base_branch, project_id, created_at, updated_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
        params![
            &id,
            &title,
            &description,
            &color,
            &branch_name,
            &base_branch,
            &project_id,
            &now,
            &now,
        ],
    ).map_err(|e| format!("Failed to insert epic: {}", e))?;

    Ok(Epic {
        id,
        title,
        description,
        color,
        branch_name,
        base_branch,
        project_id,
        created_at: now.clone(),
        updated_at: now,
    })
}

/// Update an epic
///
/// Same read-modify-write pattern (and caveats) as update_ticket.
#[tauri::command]
pub async fn update_epic(
    id: String,
    title: Option,
    description: Option,
    color: Option,
    branch_name: Option,
) -> Result {
    let conn = get_db_connection()?;

    // First, get the current epic to return updated version
    let mut stmt = conn.prepare("SELECT * FROM epics WHERE id = ?")
        .map_err(|e| format!("Failed to prepare statement: {}", e))?;

    let mut epic = stmt.query_row([&id], |row| {
        Ok(Epic {
            id: row.get(0)?,
            title: row.get(1)?,
            description: row.get(2)?,
            color: row.get(3)?,
            branch_name: row.get(4)?,
            base_branch: row.get(5)?,
            project_id: row.get(6)?,
            created_at:
            row.get(7)?,
            updated_at: row.get(8)?,
        })
    }).map_err(|e| format!("Epic not found: {}", e))?;

    // Update fields in memory
    if let Some(t) = title {
        epic.title = t;
    }
    if let Some(d) = description {
        epic.description = Some(d);
    }
    if let Some(c) = color {
        epic.color = c;
    }
    if let Some(b) = branch_name {
        epic.branch_name = Some(b);
    }

    epic.updated_at = chrono::Utc::now().to_rfc3339();

    // Update in database
    conn.execute(
        "UPDATE epics SET title = ?1, description = ?2, color = ?3, branch_name = ?4, updated_at = ?5 WHERE id = ?6",
        params![
            &epic.title,
            &epic.description,
            &epic.color,
            &epic.branch_name,
            &epic.updated_at,
            &id,
        ],
    ).map_err(|e| format!("Failed to update epic: {}", e))?;

    Ok(epic)
}

/// Delete an epic
/// (tickets referencing it get epic_id = NULL via ON DELETE SET NULL)
#[tauri::command]
pub async fn delete_epic(id: String) -> Result<(), String> {
    let conn = get_db_connection()?;

    conn.execute("DELETE FROM epics WHERE id = ?", params![&id])
        .map_err(|e| format!("Failed to delete epic: {}", e))?;

    Ok(())
}

/// Start a coding agent in the tmux window for a ticket
///
/// Drives the agent via a scripted sequence of `tmux send-keys` calls:
/// echo test -> cd worktree -> pwd -> launch agent -> paste ticket prompt.
/// NOTE(review): uses std::thread::sleep inside an async fn, which blocks
/// the executor thread for up to several seconds — consider
/// tokio::time::sleep(...).await instead.
/// NOTE(review): worktree_path and the prompt are interpolated into shell
/// command strings; the escaping below covers \ " $ ` but not every shell
/// metacharacter — treat inputs as trusted or harden the quoting.
#[tauri::command]
pub async fn start_coding_agent_for_ticket(
    ticket_id: String,
    tmux_window_name: String,
    tmux_session_name: String,
    worktree_path: String,
) -> Result<(), String> {
    use super::settings::load_launcher_settings;

    eprintln!("[start_coding_agent_for_ticket] Starting agent for ticket: {}", ticket_id);
    eprintln!("[start_coding_agent_for_ticket] Tmux window: {}, session: {}", tmux_window_name, tmux_session_name);
    eprintln!("[start_coding_agent_for_ticket] Worktree path: {}", worktree_path);

    // Load settings to get coding agent configuration
    let settings = load_launcher_settings().await?;

    if !settings.coding_agent.auto_start {
        eprintln!("[start_coding_agent_for_ticket] Auto-start is disabled, skipping");
        return Ok(());
    }

    // Get ticket details
    let ticket = get_ticket_by_id(&ticket_id)?;

    // Automatically move ticket to in_progress when starting agent
    // (best-effort: failures are logged, not propagated)
    eprintln!("[start_coding_agent_for_ticket] Moving ticket to in_progress...");
    if let Some(branch_name) = &ticket.branch_name {
        // Use kanban-cli to move to in_progress
        let status_update = shell_command(&format!("kanban-cli move-to-progress \"{}\"", branch_name))
            .output();

        match status_update {
            Ok(output) if output.status.success() => {
                eprintln!("[start_coding_agent_for_ticket] βœ“ Ticket moved to in_progress");
            }
            Ok(output) => {
                let stderr = String::from_utf8_lossy(&output.stderr);
                eprintln!("[start_coding_agent_for_ticket] Warning: Failed to update status: {}", stderr);
            }
            Err(e) => {
                eprintln!("[start_coding_agent_for_ticket] Warning: Failed to run kanban-cli: {}", e);
            }
        }
    }

    eprintln!("[start_coding_agent_for_ticket] Found ticket: {}", ticket.title);

    // Build the agent prompt with ticket context
    let prompt = format!(
        "You are working on the following ticket:\n\nTitle: {}\n\nDescription: {}\n\nPlease help implement this feature.",
        ticket.title,
        ticket.description.as_ref().unwrap_or(&"No description".to_string())
    );

    // Build the command to send to tmux
    // Format: tmux send-keys -t session:window "command" Enter
    let agent_command = if settings.coding_agent.args.is_empty() {
        settings.coding_agent.command.clone()
    } else {
        format!("{} {}", settings.coding_agent.command, settings.coding_agent.args.join(" "))
    };

    eprintln!("[start_coding_agent_for_ticket] Running agent command: {}", agent_command);

    // First verify the tmux window exists
    let check_window = shell_command(&format!(
        "tmux list-windows -t {} -F '#{{window_name}}'",
        tmux_session_name
    ))
    .output()
    .map_err(|e| format!("Failed to check tmux windows: {}", e))?;

    let windows_output = String::from_utf8_lossy(&check_window.stdout);
    eprintln!("[start_coding_agent_for_ticket] Available windows in session {}:", tmux_session_name);
    eprintln!("{}", windows_output);

    // NOTE(review): substring containment — a window named "foo" would match
    // "foo-bar"; a per-line exact comparison would be stricter.
    if !windows_output.contains(&tmux_window_name) {
        return Err(format!("Tmux window '{}' not found in session '{}'", tmux_window_name, tmux_session_name));
    }

    // Send a test echo command first to verify tmux communication works
    let test_cmd = format!("tmux send-keys -t {}:{} 'echo \"[LAUNCHER] Starting coding agent...\"' Enter", tmux_session_name, tmux_window_name);
    eprintln!("[start_coding_agent_for_ticket] Test command: {}", test_cmd);
    let test_result = shell_command(&test_cmd)
        .output()
        .map_err(|e| format!("Failed to send test command: {}", e))?;

    if !test_result.status.success() {
        let stderr = String::from_utf8_lossy(&test_result.stderr);
        return Err(format!("Test command failed: {}", stderr));
    }

    std::thread::sleep(std::time::Duration::from_millis(300));

    // CD to worktree directory
    let cd_cmd = format!("tmux send-keys -t {}:{} 'cd \"{}\"' Enter", tmux_session_name, tmux_window_name, worktree_path);
    eprintln!("[start_coding_agent_for_ticket] CD command: {}", cd_cmd);
    let cd_result = shell_command(&cd_cmd)
        .output()
        .map_err(|e| format!("Failed to send cd command: {}", e))?;

    if !cd_result.status.success() {
        let stderr = String::from_utf8_lossy(&cd_result.stderr);
        return Err(format!("CD command failed: {}", stderr));
    }

    std::thread::sleep(std::time::Duration::from_millis(300));

    // Send PWD to verify we're in the right directory
    let pwd_cmd = format!("tmux send-keys -t {}:{} 'pwd' Enter", tmux_session_name, tmux_window_name);
    eprintln!("[start_coding_agent_for_ticket] PWD command: {}", pwd_cmd);
    shell_command(&pwd_cmd)
        .output()
        .map_err(|e| format!("Failed to send pwd command: {}", e))?;

    std::thread::sleep(std::time::Duration::from_millis(500));

    // Finally, start the coding agent
    let agent_cmd = format!("tmux send-keys -t {}:{} '{}' Enter", tmux_session_name, tmux_window_name, agent_command);
    eprintln!("[start_coding_agent_for_ticket] Agent command: {}", agent_cmd);
    let start_agent = shell_command(&agent_cmd)
        .output()
        .map_err(|e| format!("Failed to send agent command: {}", e))?;

    if !start_agent.status.success() {
        let stderr = String::from_utf8_lossy(&start_agent.stderr);
        return Err(format!("Failed to start coding agent: {}", stderr));
    }

    // Wait for agent to start up (fixed 3s delay — no readiness handshake)
    eprintln!("[start_coding_agent_for_ticket] Waiting for agent to start...");
    std::thread::sleep(std::time::Duration::from_secs(3));

    // Send the ticket context as a prompt
    // We need to escape the prompt for shell safety
    let escaped_prompt = prompt
        .replace("\\", "\\\\")
        .replace("\"", "\\\"")
        .replace("$", "\\$")
        .replace("`", "\\`");

    let prompt_cmd = format!("tmux send-keys -t {}:{} \"{}\"", tmux_session_name, tmux_window_name, escaped_prompt);
    eprintln!("[start_coding_agent_for_ticket] Sending ticket prompt to agent...");
    let send_prompt = shell_command(&prompt_cmd)
        .output()
        .map_err(|e| format!("Failed to send prompt: {}", e))?;

    if !send_prompt.status.success() {
        let stderr = String::from_utf8_lossy(&send_prompt.stderr);
        eprintln!("[start_coding_agent_for_ticket] Warning: Failed to send prompt: {}", stderr);
        // Don't fail the whole operation if prompt sending fails
    }

    // Send Enter to submit the prompt
    std::thread::sleep(std::time::Duration::from_millis(500));
    let enter_cmd = format!("tmux send-keys -t {}:{} Enter", tmux_session_name, tmux_window_name);
    shell_command(&enter_cmd)
        .output()
        .map_err(|e| format!("Failed to send Enter: {}", e))?;

    eprintln!("[start_coding_agent_for_ticket] βœ“ All commands sent successfully");

    Ok(())
}

/// Get the next ticket number for a given prefix
///
/// Scans all ids matching "<prefix>-…" and returns max numeric suffix + 1.
/// NOTE(review): "%%" in a LIKE pattern is two wildcards (format! does not
/// need % escaping); a single "%" was almost certainly intended — the
/// behavior is the same, but the intent is misleading.
fn get_next_ticket_number(conn: &rusqlite::Connection, prefix: &str) -> Result {
    // Query all ticket IDs that match the prefix pattern
    let pattern = format!("{}-%%", prefix);
    let mut stmt = conn.prepare("SELECT id FROM tickets WHERE id LIKE ?")
        .map_err(|e| format!("Failed to prepare statement: {}", e))?;

    let ticket_ids = stmt.query_map([&pattern], |row| {
        row.get::<_, String>(0)
    }).map_err(|e| format!("Failed to query tickets: {}", e))?;

    // Find the highest number (ids with non-numeric suffixes are skipped)
    let mut max_number = 0;
    for id_result in ticket_ids {
        if let Ok(id) = id_result {
            // Extract number from "ush-123" format
            if let Some(number_str) = id.strip_prefix(&format!("{}-", prefix)) {
                if let Ok(number) = number_str.parse::() {
                    if number > max_number {
                        max_number = number;
                    }
                }
            }
        }
    }

    Ok(max_number + 1)
}

/// Helper to get a ticket by ID (internal use)
///
/// Same SELECT * / positional-index mapping as get_tickets; keep the two
/// in sync if the schema changes.
fn get_ticket_by_id(id: &str) -> Result {
    let conn = get_db_connection()?;

    let mut stmt = conn.prepare("SELECT * FROM tickets WHERE id = ?")
        .map_err(|e| format!("Failed to prepare statement: {}", e))?;

    stmt.query_row([id], |row| {
        Ok(Ticket {
            id: row.get(0)?,
            title: row.get(1)?,
            description: row.get(2)?,
            status: match row.get::<_, String>(3)?.as_str() {
                "backlog" => TicketStatus::Backlog,
                "todo" => TicketStatus::Todo,
                "in_progress" => TicketStatus::InProgress,
                "in_review" => TicketStatus::InReview,
                "done" => TicketStatus::Done,
                "archived" => TicketStatus::Archived,
                _ => TicketStatus::Backlog,
            },
            priority: match row.get::<_, String>(4)?.as_str() {
                "low" => TicketPriority::Low,
                "medium" => TicketPriority::Medium,
                "high" => TicketPriority::High,
                "urgent" => TicketPriority::Urgent,
                _ => TicketPriority::Medium,
            },
            epic_id: row.get(5)?,
            tags: serde_json::from_str(&row.get::<_, String>(6)?).unwrap_or_default(),
            color: row.get(7)?,
            tmux_window_name: row.get(8)?,
            tmux_session_name: row.get(9)?,
            branch_name: row.get(10)?,
            worktree_path: row.get(11)?,
            environment_name: row.get(12)?,
            project_id: row.get(13)?,
            assigned_to: row.get(14)?,
            order: row.get(15)?,
            created_at: row.get(16)?,
            updated_at: row.get(17)?,
        })
    }).map_err(|e| format!("Ticket not found: {}", e))
}
diff --git a/ushadow/launcher/src-tauri/src/commands/mod.rs
b/ushadow/launcher/src-tauri/src/commands/mod.rs index 6c49cd67..cdd97928 100644 --- a/ushadow/launcher/src-tauri/src/commands/mod.rs +++ b/ushadow/launcher/src-tauri/src/commands/mod.rs @@ -1,5 +1,6 @@ mod docker; mod discovery; +mod discovery_v2; mod prerequisites; mod prerequisites_config; mod repository; // Repository and Git operations @@ -10,11 +11,19 @@ mod settings; mod bundled; // Bundled resources locator pub mod worktree; pub mod platform; // Platform abstraction layer +mod kanban; // Kanban ticket integration +mod oauth_server; // OAuth callback server for desktop auth +mod http_client; // HTTP client for CORS-free requests // Embedded terminal module (PTY-based) - DEPRECATED in favor of native terminal integration (iTerm2/Terminal.app/gnome-terminal) // pub mod terminal; +mod config_commands; +mod container_discovery; +mod port_utils; +mod env_scanner; pub use docker::*; pub use discovery::*; +pub use discovery_v2::*; pub use prerequisites::*; pub use prerequisites_config::*; pub use repository::*; // Export repository management functions @@ -22,4 +31,11 @@ pub use generic_installer::*; // Export generic installer functions pub use permissions::*; pub use settings::*; pub use worktree::*; +pub use kanban::*; // Export kanban ticket functions +pub use oauth_server::*; // Export OAuth server functions +pub use http_client::*; // Export HTTP client functions // pub use terminal::*; +pub use config_commands::*; +pub use container_discovery::*; +pub use port_utils::*; +pub use env_scanner::*; diff --git a/ushadow/launcher/src-tauri/src/commands/oauth_server.rs b/ushadow/launcher/src-tauri/src/commands/oauth_server.rs new file mode 100644 index 00000000..27229e85 --- /dev/null +++ b/ushadow/launcher/src-tauri/src/commands/oauth_server.rs @@ -0,0 +1,191 @@ +/// OAuth callback server for desktop authentication +/// +/// Implements the standard OAuth flow for desktop apps: +/// 1. Start temporary HTTP server on random port +/// 2. 
Register http://localhost:PORT/callback with Keycloak
/// 3. Open system browser for login
/// 4. Catch redirect, exchange code for tokens
/// 5. Shut down server

use std::sync::{Arc, Mutex};
use tauri::State;
use tokio::sync::oneshot;
use warp::{Filter, Reply};
use serde::{Deserialize, Serialize};

/// Query parameters the identity provider appends to the redirect URI.
#[derive(Debug, Serialize, Deserialize)]
pub struct OAuthCallbackParams {
    pub code: String,
    pub state: String,
}

/// Outcome of the OAuth callback, serialized back to the frontend.
#[derive(Debug, Clone, Serialize)]
pub struct OAuthResult {
    pub success: bool,
    /// Authorization code (present on success).
    pub code: Option<String>,
    /// CSRF `state` echoed by the provider (present on success).
    pub state: Option<String>,
    /// Human-readable failure reason (present on failure/timeout).
    pub error: Option<String>,
}

/// Start OAuth callback server and return the port and callback URL.
///
/// Binds an ephemeral port (port 0), records the assigned number, and
/// immediately releases it; the actual warp server is bound later by
/// `wait_for_oauth_callback`.
///
/// NOTE(review): releasing the listener before `wait_for_oauth_callback`
/// re-binds it is a small TOCTOU window — another local process could take
/// the port in between. Likely acceptable for a desktop flow, but confirm.
#[tauri::command]
pub async fn start_oauth_server() -> Result<(u16, String), String> {
    // Ask the OS for any free port by binding to port 0.
    let listener = std::net::TcpListener::bind("127.0.0.1:0")
        .map_err(|e| format!("Failed to bind to port: {}", e))?;
    let port = listener
        .local_addr()
        .map_err(|e| format!("Failed to get local address: {}", e))?
        .port();
    // Release the port so warp can bind it in wait_for_oauth_callback.
    drop(listener);

    let callback_url = format!("http://localhost:{}/callback", port);

    println!("[OAuth] Started callback server on port {}", port);
    println!("[OAuth] Callback URL: {}", callback_url);

    Ok((port, callback_url))
}

/// Wait for OAuth callback
///
/// This blocks until the callback is received or times out (5 minutes).
/// Returns the authorization code and state from the callback.
+#[tauri::command] +pub async fn wait_for_oauth_callback(port: u16) -> Result { + use std::time::Duration; + use tokio::time::timeout; + + let result = Arc::new(Mutex::new(None)); + let result_clone = result.clone(); + + // Create shutdown signal + let (tx, rx) = oneshot::channel::<()>(); + let tx = Arc::new(Mutex::new(Some(tx))); + + // Callback route handler + let callback_route = warp::path("callback") + .and(warp::query::()) + .map(move |params: OAuthCallbackParams| { + println!("[OAuth] Callback received: code={}, state={}", + params.code.chars().take(10).collect::(), + params.state.chars().take(10).collect::() + ); + + // Store result + { + let mut result = result_clone.lock().unwrap(); + *result = Some(OAuthResult { + success: true, + code: Some(params.code.clone()), + state: Some(params.state.clone()), + error: None, + }); + } + + // Trigger shutdown + if let Some(tx) = tx.lock().unwrap().take() { + let _ = tx.send(()); + } + + // Return success page + warp::reply::html( + r#" + + + + Login Successful + + + +
+
βœ“
+

Login Successful!

+

You can close this window and return to the Ushadow Launcher.

+
+ + + + "# + ) + }); + + // Start server + let server = warp::serve(callback_route) + .bind_with_graceful_shutdown(([127, 0, 0, 1], port), async { + rx.await.ok(); + }); + + // Run server with timeout + let server_task = tokio::spawn(server.1); + + // Wait for callback or timeout (5 minutes) + match timeout(Duration::from_secs(300), async { + loop { + if result.lock().unwrap().is_some() { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }).await { + Ok(_) => { + // Got callback + let result = result.lock().unwrap().take().unwrap(); + println!("[OAuth] Callback processed successfully"); + + // Shut down server + server_task.abort(); + + Ok(result) + } + Err(_) => { + // Timeout + println!("[OAuth] Callback timeout (5 minutes)"); + server_task.abort(); + + Ok(OAuthResult { + success: false, + code: None, + state: None, + error: Some("Timeout waiting for login".to_string()), + }) + } + } +} diff --git a/ushadow/launcher/src-tauri/src/commands/port_utils.rs b/ushadow/launcher/src-tauri/src/commands/port_utils.rs new file mode 100644 index 00000000..2b2c47f1 --- /dev/null +++ b/ushadow/launcher/src-tauri/src/commands/port_utils.rs @@ -0,0 +1,147 @@ +use std::process::Command; + +/// Port pair for backend and frontend (webui) +#[derive(Debug, Clone)] +pub struct PortPair { + pub backend: u16, + pub frontend: u16, +} + +/// Find available ports by calling Python's validate_ports from setup_utils.py +/// This uses the existing port validation logic and maintains the 5000 port separation +pub fn find_available_ports( + project_root: &str, + preferred_backend_port: u16, +) -> Result { + // Frontend port is always backend - 5000 + let preferred_frontend_port = if preferred_backend_port >= 5000 { + preferred_backend_port - 5000 + } else { + return Err(format!( + "Backend port {} too low (must be >= 5000 to maintain frontend separation)", + preferred_backend_port + )); + }; + + // Call Python to check if these ports are available + // Using the same 
logic as setup/run.py + if are_ports_available(project_root, preferred_backend_port, preferred_frontend_port)? { + return Ok(PortPair { + backend: preferred_backend_port, + frontend: preferred_frontend_port, + }); + } + + // Ports not available, find alternatives by incrementing offset + // This mirrors the logic in setup/run.py:145-160 + let base_backend = 8000; + let base_frontend = 3000; + let initial_offset = preferred_backend_port - base_backend; + + for attempt in 1..=100 { + let new_offset = initial_offset + (attempt * 10); + let backend = base_backend + new_offset; + let frontend = base_frontend + new_offset; + + if are_ports_available(project_root, backend, frontend)? { + return Ok(PortPair { backend, frontend }); + } + } + + Err("Could not find available ports after 100 attempts".to_string()) +} + +/// Check if both backend and frontend ports are available +/// Uses native Rust implementation (faster than calling Python subprocess) +/// This mirrors the logic from setup/setup_utils.py::check_port_in_use +fn are_ports_available( + _project_root: &str, + backend_port: u16, + frontend_port: u16, +) -> Result { + Ok(is_port_available(backend_port) && is_port_available(frontend_port)) +} + +/// Check if a single port is available by attempting to bind to it +fn is_port_available(port: u16) -> bool { + use std::net::TcpListener; + + match TcpListener::bind(("127.0.0.1", port)) { + Ok(_) => true, // Port is available + Err(_) => false, // Port is in use + } +} + +/// Get the Tailscale tailnet name from the host machine +/// Returns the tailnet domain (e.g., "thestumonkey.github") +pub fn get_tailnet_name() -> Result { + let output = Command::new("tailscale") + .args(["status", "--json"]) + .output() + .map_err(|e| format!("Failed to run tailscale command: {}", e))?; + + if !output.status.success() { + return Err("Tailscale not running or not available".to_string()); + } + + let json_str = String::from_utf8_lossy(&output.stdout); + let json: serde_json::Value = 
serde_json::from_str(&json_str) + .map_err(|e| format!("Failed to parse tailscale JSON: {}", e))?; + + let tailnet = json["CurrentTailnet"]["Name"] + .as_str() + .ok_or("Could not find tailnet name in status")?; + + Ok(tailnet.to_string()) +} + +/// Generate Tailscale URL for an environment +/// Format: https://{env_name}.{tailnet} or https://{project}-{env_name}.{tailnet} +/// +/// # Arguments +/// * `env_name` - Environment name (e.g., "orange", "blue") +/// * `project_prefix` - Optional project prefix for multi-project setups (e.g., Some("ushadow")) +pub fn generate_tailscale_url( + env_name: &str, + project_prefix: Option<&str>, +) -> Result, String> { + // Get the tailnet name from the host + let tailnet = match get_tailnet_name() { + Ok(t) => t, + Err(_) => return Ok(None), // Tailscale not available, return None instead of error + }; + + // Build hostname: either "envname" or "project-envname" + let hostname = if let Some(prefix) = project_prefix { + format!("{}-{}", prefix, env_name) + } else { + env_name.to_string() + }; + + let url = format!("https://{}.{}", hostname, tailnet); + + Ok(Some(url)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_port_availability() { + // Test that port checking works + // Note: This test might be flaky if ports are actually in use + let available = is_port_available(65432); // Use high port unlikely to be used + assert!(available, "High port should be available"); + } + + #[test] + fn test_port_pair_separation() { + let pair = PortPair { + backend: 8000, + frontend: 3000, + }; + + assert_eq!(pair.backend - pair.frontend, 5000); + } +} diff --git a/ushadow/launcher/src-tauri/src/commands/settings.rs b/ushadow/launcher/src-tauri/src/commands/settings.rs index ceb3f7c1..8cbe47e2 100644 --- a/ushadow/launcher/src-tauri/src/commands/settings.rs +++ b/ushadow/launcher/src-tauri/src/commands/settings.rs @@ -2,11 +2,36 @@ use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CodingAgentConfig { + /// Name/type of the coding agent (e.g., "claude", "aider", "cursor") + pub agent_type: String, + /// Command to run the agent (e.g., "claude", "aider") + pub command: String, + /// Additional arguments to pass to the agent + pub args: Vec, + /// Whether to auto-start the agent when a ticket is assigned + pub auto_start: bool, +} + +impl Default for CodingAgentConfig { + fn default() -> Self { + Self { + agent_type: "claude".to_string(), + command: "claude".to_string(), + args: vec!["--dangerously-skip-permissions".to_string()], + auto_start: true, + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct LauncherSettings { pub default_admin_email: Option, pub default_admin_password: Option, pub default_admin_name: Option, + #[serde(default)] + pub coding_agent: CodingAgentConfig, } impl Default for LauncherSettings { @@ -15,6 +40,7 @@ impl Default for LauncherSettings { default_admin_email: None, default_admin_password: None, default_admin_name: Some("Administrator".to_string()), + coding_agent: CodingAgentConfig::default(), } } } diff --git a/ushadow/launcher/src-tauri/src/commands/worktree.rs b/ushadow/launcher/src-tauri/src/commands/worktree.rs index 17697af5..b5f43b6e 100644 --- a/ushadow/launcher/src-tauri/src/commands/worktree.rs +++ b/ushadow/launcher/src-tauri/src/commands/worktree.rs @@ -1,4 +1,4 @@ -use crate::models::{WorktreeInfo, TmuxSessionInfo, TmuxWindowInfo, ClaudeStatus}; +use crate::models::{WorktreeInfo, TmuxSessionInfo, TmuxWindowInfo, ClaudeStatus, EnvironmentConflict}; use std::collections::HashMap; use std::path::PathBuf; use std::process::Command; @@ -32,6 +32,35 @@ pub fn get_colors_for_name(name: &str) -> (String, String) { (name.to_string(), name.to_string()) } +/// Delete a git branch (best effort - won't fail if branch doesn't exist) +fn delete_branch(main_repo: &str, branch_name: &str) { + eprintln!("[delete_branch] Attempting to delete 
branch '{}'", branch_name); + + // Try to delete the branch with -D (force delete) + let output = silent_command("git") + .args(["branch", "-D", branch_name]) + .current_dir(main_repo) + .output(); + + match output { + Ok(result) if result.status.success() => { + eprintln!("[delete_branch] βœ“ Successfully deleted branch '{}'", branch_name); + } + Ok(result) => { + let stderr = String::from_utf8_lossy(&result.stderr); + // Don't error if branch doesn't exist + if !stderr.contains("not found") && !stderr.contains("does not exist") { + eprintln!("[delete_branch] Warning: Failed to delete branch '{}': {}", branch_name, stderr); + } else { + eprintln!("[delete_branch] Branch '{}' already deleted or doesn't exist", branch_name); + } + } + Err(e) => { + eprintln!("[delete_branch] Warning: Failed to run git branch -D: {}", e); + } + } +} + /// Check if a worktree exists for a given branch #[tauri::command] pub async fn check_worktree_exists(main_repo: String, branch: String) -> Result, String> { @@ -105,6 +134,32 @@ pub async fn check_worktree_exists(main_repo: String, branch: String) -> Result< Ok(None) } +/// Check if an environment with this name already exists and return conflict info +#[tauri::command] +pub async fn check_environment_conflict( + main_repo: String, + env_name: String, +) -> Result, String> { + let env_name = env_name.to_lowercase(); + + // Check if a worktree with this name exists + let worktrees = list_worktrees(main_repo.clone()).await?; + + if let Some(worktree) = worktrees.iter().find(|wt| wt.name == env_name) { + // Worktree exists - return conflict info + // Note: is_running will be set to false here, but the frontend can check + // the actual running status from its discovery data + return Ok(Some(EnvironmentConflict { + name: env_name, + current_branch: worktree.branch.clone(), + path: worktree.path.clone(), + is_running: false, // Frontend will populate this from discovery + })); + } + + Ok(None) +} + /// List all git worktrees in a 
repository #[tauri::command] pub async fn list_worktrees(main_repo: String) -> Result, String> { @@ -348,6 +403,48 @@ pub async fn create_worktree( let branch_exists = check_output.status.success(); + // Check for branch naming conflicts (e.g., can't create test/foo if test exists, or vice versa) + if !branch_exists { + // Check if any part of the branch path conflicts with existing branches + let all_branches_output = silent_command("git") + .args(["for-each-ref", "--format=%(refname:short)", "refs/heads/"]) + .current_dir(&main_repo) + .output() + .map_err(|e| format!("Failed to list branches: {}", e))?; + + let all_branches = String::from_utf8_lossy(&all_branches_output.stdout); + + for existing_branch in all_branches.lines() { + // Check if desired_branch would conflict with existing_branch + // Conflict cases: + // 1. Want to create "test/foo" but "test" exists + // 2. Want to create "test" but "test/foo" exists + if desired_branch.starts_with(&format!("{}/", existing_branch)) { + return Err(format!( + "Cannot create branch '{}' because branch '{}' already exists. Git doesn't allow 'foo' and 'foo/bar' to both exist as branches.", + desired_branch, existing_branch + )); + } + if existing_branch.starts_with(&format!("{}/", desired_branch)) { + return Err(format!( + "Cannot create branch '{}' because branch '{}' already exists. 
Git doesn't allow 'foo' and 'foo/bar' to both exist as branches.", + desired_branch, existing_branch + )); + } + } + } + + // Before creating, clean up any locked/missing worktrees at this path + eprintln!("[create_worktree] Checking for locked/missing worktrees..."); + let _ = silent_command("git") + .args(["worktree", "unlock", worktree_path.to_str().unwrap()]) + .current_dir(&main_repo) + .output(); + let _ = silent_command("git") + .args(["worktree", "prune"]) + .current_dir(&main_repo) + .output(); + let (output, final_branch) = if branch_exists { // Branch exists - checkout directly into worktree let output = silent_command("git") @@ -644,7 +741,12 @@ pub async fn remove_worktree(main_repo: String, name: String) -> Result<(), Stri .find(|wt| wt.name == name) .ok_or_else(|| format!("Worktree '{}' not found", name))?; - // Remove the worktree + eprintln!("[remove_worktree] Removing worktree at: {}", worktree.path); + + // Store branch name for deletion after worktree removal + let branch_name = worktree.branch.clone(); + + // Try to remove the worktree let output = silent_command("git") .args(["worktree", "remove", &worktree.path]) .current_dir(&main_repo) @@ -653,9 +755,64 @@ pub async fn remove_worktree(main_repo: String, name: String) -> Result<(), Stri if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr); + + // If it contains modified/untracked files, use --force + if stderr.contains("modified or untracked files") || stderr.contains("use --force") { + eprintln!("[remove_worktree] Worktree has uncommitted changes, forcing removal..."); + + let force_output = silent_command("git") + .args(["worktree", "remove", "--force", &worktree.path]) + .current_dir(&main_repo) + .output() + .map_err(|e| format!("Failed to force remove worktree: {}", e))?; + + if force_output.status.success() { + eprintln!("[remove_worktree] βœ“ Successfully force-removed worktree"); + // Delete the associated branch + delete_branch(&main_repo, 
&branch_name); + return Ok(()); + } else { + let force_stderr = String::from_utf8_lossy(&force_output.stderr); + return Err(format!("Failed to force remove worktree: {}", force_stderr)); + } + } + + // If it's locked or missing, try to unlock and prune + if stderr.contains("locked") || stderr.contains("missing") { + eprintln!("[remove_worktree] Worktree is locked/missing, attempting to unlock and prune..."); + + // Try to unlock + let _ = silent_command("git") + .args(["worktree", "unlock", &worktree.path]) + .current_dir(&main_repo) + .output(); + + // Try to prune + let prune_output = silent_command("git") + .args(["worktree", "prune"]) + .current_dir(&main_repo) + .output() + .map_err(|e| format!("Failed to prune worktrees: {}", e))?; + + if prune_output.status.success() { + eprintln!("[remove_worktree] βœ“ Successfully pruned locked/missing worktree"); + // Delete the associated branch + delete_branch(&main_repo, &branch_name); + return Ok(()); + } else { + let prune_stderr = String::from_utf8_lossy(&prune_output.stderr); + return Err(format!("Failed to prune worktree: {}", prune_stderr)); + } + } + return Err(format!("Git command failed: {}", stderr)); } + eprintln!("[remove_worktree] βœ“ Worktree removed successfully"); + + // Delete the associated branch + delete_branch(&main_repo, &branch_name); + Ok(()) } @@ -761,11 +918,16 @@ pub async fn create_worktree_with_workmux( // Use the launcher's own worktree creation logic instead of workmux // This ensures consistent directory structure let main_repo_path = PathBuf::from(&main_repo); + + // Calculate worktrees directory: ../worktrees (sibling to project root) let worktrees_dir = main_repo_path.parent() - .ok_or("Could not determine worktrees directory")? + .ok_or("Could not determine parent directory")? 
+ .join("worktrees") .to_string_lossy() .to_string(); + eprintln!("[create_worktree_with_workmux] Worktrees directory: {}", worktrees_dir); + // Create the worktree directly let worktree = create_worktree(main_repo.clone(), worktrees_dir, name.clone(), base_branch).await?; @@ -1244,12 +1406,9 @@ pub async fn open_tmux_in_terminal(window_name: String, worktree_path: String) - let temp_script = format!("/tmp/ushadow_iterm_{}.sh", window_name.replace("/", "_")); let script_content = format!( - "#!/bin/bash\nprintf '\\033]0;{}\\007\\033]6;1;bg;red;brightness;{}\\007\\033]6;1;bg;green;brightness;{}\\007\\033]6;1;bg;blue;brightness;{}\\007'\n# Create dedicated session for this environment if it doesn't exist\ntmux has-session -t {} 2>/dev/null || tmux new-session -d -s {} -c '{}'\n# Attach to this environment's dedicated session\nexec tmux attach-session -t {}\n", + "#!/bin/bash\nprintf '\\033]0;{}\\007\\033]6;1;bg;red;brightness;{}\\007\\033]6;1;bg;green;brightness;{}\\007\\033]6;1;bg;blue;brightness;{}\\007'\n# Attach to the workmux session and select the specific window\nexec tmux attach-session -t workmux:{}\n", display_name, r, g, b, - window_name, - window_name, - worktree_path, window_name ); fs::write(&temp_script, script_content) @@ -1259,16 +1418,38 @@ pub async fn open_tmux_in_terminal(window_name: String, worktree_path: String) - .output() .map_err(|e| format!("Failed to chmod: {}", e))?; - // Simple iTerm2 AppleScript that executes the script + // iTerm2 AppleScript that reuses existing windows with matching name let applescript = format!( r#"tell application "iTerm" activate - set newWindow to (create window with default profile) - tell current session of newWindow - set name to "{}" - write text "{} && exit" - end tell + + -- Try to find existing window with this name + set foundWindow to false + repeat with aWindow in windows + repeat with aTab in tabs of aWindow + repeat with aSession in sessions of aTab + if name of aSession is "{}" then + -- Found 
existing window, select it + select aSession + set foundWindow to true + exit repeat + end if + end repeat + if foundWindow then exit repeat + end repeat + if foundWindow then exit repeat + end repeat + + -- If no existing window found, create new one + if not foundWindow then + set newWindow to (create window with default profile) + tell current session of newWindow + set name to "{}" + write text "{} && exit" + end tell + end if end tell"#, + display_name, display_name, temp_script ); diff --git a/ushadow/launcher/src-tauri/src/config.rs b/ushadow/launcher/src-tauri/src/config.rs new file mode 100644 index 00000000..8a466751 --- /dev/null +++ b/ushadow/launcher/src-tauri/src/config.rs @@ -0,0 +1,356 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::path::PathBuf; + +/// Main launcher configuration loaded from .launcher-config.yaml +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct LauncherConfig { + pub project: ProjectConfig, + pub prerequisites: PrerequisitesConfig, + pub setup: SetupConfig, + pub infrastructure: InfrastructureConfig, + pub containers: ContainersConfig, + pub ports: PortsConfig, + pub worktrees: WorktreesConfig, +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct ProjectConfig { + pub name: String, + pub display_name: String, +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct PrerequisitesConfig { + pub required: Vec, + #[serde(default)] + pub optional: Vec, +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct SetupConfig { + pub command: String, + #[serde(default)] + pub env_vars: Vec, +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct InfrastructureConfig { + pub compose_file: String, + pub project_name: String, + pub profile: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct ContainersConfig { + pub naming_pattern: String, + pub primary_service: String, + pub health_endpoint: String, + /// Optional project prefix for 
Tailscale hostnames (for multi-project setups) + /// If set, Tailscale URLs will be: https://{prefix}-{env}.{tailnet} + /// If not set, Tailscale URLs will be: https://{env}.{tailnet} + #[serde(default)] + pub tailscale_project_prefix: Option, +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct PortsConfig { + #[serde(default = "default_allocation_strategy")] + pub allocation_strategy: String, // "hash", "sequential", "random" + pub base_port: u16, + pub offset: PortOffset, +} + +fn default_allocation_strategy() -> String { + "hash".to_string() +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct PortOffset { + pub min: u16, + pub max: u16, + pub step: u16, +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct WorktreesConfig { + pub default_parent: String, + #[serde(default)] + pub branch_prefix: String, +} + +impl LauncherConfig { + /// Load configuration from .launcher-config.yaml in the project root + pub fn load(project_root: &PathBuf) -> Result { + let config_path = project_root.join(".launcher-config.yaml"); + + if !config_path.exists() { + return Err(format!( + "Configuration file not found: {}\n\n\ + This repository is not configured for the launcher.\n\ + Please create a .launcher-config.yaml file in the repository root.", + config_path.display() + )); + } + + let contents = std::fs::read_to_string(&config_path) + .map_err(|e| format!("Failed to read config file: {}", e))?; + + let config: LauncherConfig = serde_yaml::from_str(&contents) + .map_err(|e| format!("Failed to parse config YAML: {}", e))?; + + // Validate the configuration + config.validate()?; + + Ok(config) + } + + /// Validate the configuration structure and constraints + fn validate(&self) -> Result<(), String> { + // Validate project basics + if self.project.name.is_empty() { + return Err("project.name cannot be empty".to_string()); + } + + // Validate setup command + if self.setup.command.is_empty() { + return Err("setup.command cannot be 
empty".to_string()); + } + + // Validate port ranges + if self.ports.base_port == 0 { + return Err("ports.base_port must be greater than 0".to_string()); + } + + if self.ports.offset.max > 60000 { + return Err(format!( + "ports.offset.max ({}) too large - max allowed is 60000 to prevent exceeding port 65535", + self.ports.offset.max + )); + } + + // Validate container naming pattern contains required variables + if !self.containers.naming_pattern.contains("{project_name}") { + return Err("containers.naming_pattern must contain {project_name}".to_string()); + } + + if !self.containers.naming_pattern.contains("{service_name}") { + return Err("containers.naming_pattern must contain {service_name}".to_string()); + } + + // Validate infrastructure config + if self.infrastructure.compose_file.is_empty() { + return Err("infrastructure.compose_file cannot be empty".to_string()); + } + + Ok(()) + } + + /// Expand variables in a string template using provided context + pub fn expand_variables(&self, template: &str, vars: &HashMap) -> String { + let mut result = template.to_string(); + for (key, value) in vars { + result = result.replace(&format!("{{{}}}", key), value); + } + result + } + + /// Generate container name from pattern + pub fn generate_container_name(&self, env_name: &str, service_name: &str) -> String { + let env_suffix = if env_name == "default" || env_name.is_empty() { + String::new() + } else { + format!("-{}", env_name) + }; + + self.containers + .naming_pattern + .replace("{project_name}", &self.project.name) + .replace("{env_name}", &env_suffix) + .replace("{service_name}", service_name) + .replace("--", "-") // Clean up double dashes + } + + /// Calculate port for an environment given the base port and env name + pub fn calculate_port(&self, env_name: &str) -> u16 { + if env_name == "default" || env_name == "main" || env_name.is_empty() { + return self.ports.base_port; + } + + match self.ports.allocation_strategy.as_str() { + "hash" => { + let hash: u32 = 
env_name.bytes().map(|b| b as u32).sum(); + let offset_steps = (self.ports.offset.max - self.ports.offset.min) / self.ports.offset.step; + let offset = ((hash % offset_steps as u32) * self.ports.offset.step as u32) as u16; + self.ports.base_port + offset + } + "sequential" => { + // For sequential, would need to track allocated ports in state + // For now, fall back to hash + let hash: u32 = env_name.bytes().map(|b| b as u32).sum(); + let offset_steps = (self.ports.offset.max - self.ports.offset.min) / self.ports.offset.step; + let offset = ((hash % offset_steps as u32) * self.ports.offset.step as u32) as u16; + self.ports.base_port + offset + } + _ => self.ports.base_port, // Default/random strategy + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_container_naming() { + let config = LauncherConfig { + project: ProjectConfig { + name: "myapp".to_string(), + display_name: "My App".to_string(), + }, + containers: ContainersConfig { + naming_pattern: "{project_name}{env_name}-{service_name}".to_string(), + primary_service: "backend".to_string(), + health_endpoint: "/health".to_string(), + tailscale_project_prefix: None, + }, + prerequisites: PrerequisitesConfig { + required: vec![], + optional: vec![], + }, + setup: SetupConfig { + command: "setup.sh".to_string(), + env_vars: vec![], + }, + infrastructure: InfrastructureConfig { + compose_file: "docker-compose.yml".to_string(), + project_name: "infra".to_string(), + profile: None, + }, + ports: PortsConfig { + allocation_strategy: "hash".to_string(), + base_port: 8000, + offset: PortOffset { + min: 0, + max: 500, + step: 10, + }, + }, + worktrees: WorktreesConfig { + default_parent: "~/repos".to_string(), + branch_prefix: "".to_string(), + }, + }; + + assert_eq!( + config.generate_container_name("default", "backend"), + "myapp-backend" + ); + assert_eq!( + config.generate_container_name("staging", "backend"), + "myapp-staging-backend" + ); + } + + #[test] + fn test_variable_expansion() { + 
let config = LauncherConfig { + project: ProjectConfig { + name: "test".to_string(), + display_name: "Test".to_string(), + }, + prerequisites: PrerequisitesConfig { + required: vec![], + optional: vec![], + }, + setup: SetupConfig { + command: "setup.sh {ENV_NAME} {PORT}".to_string(), + env_vars: vec![], + }, + containers: ContainersConfig { + naming_pattern: "test-{service_name}".to_string(), + primary_service: "backend".to_string(), + health_endpoint: "/health".to_string(), + tailscale_project_prefix: None, + }, + infrastructure: InfrastructureConfig { + compose_file: "docker-compose.yml".to_string(), + project_name: "infra".to_string(), + profile: None, + }, + ports: PortsConfig { + allocation_strategy: "hash".to_string(), + base_port: 8000, + offset: PortOffset { + min: 0, + max: 500, + step: 10, + }, + }, + worktrees: WorktreesConfig { + default_parent: "~/repos".to_string(), + branch_prefix: "".to_string(), + }, + }; + + let mut vars = HashMap::new(); + vars.insert("ENV_NAME".to_string(), "staging".to_string()); + vars.insert("PORT".to_string(), "8080".to_string()); + + let expanded = config.expand_variables(&config.setup.command, &vars); + assert_eq!(expanded, "setup.sh staging 8080"); + } + + #[test] + fn test_port_calculation() { + let config = LauncherConfig { + project: ProjectConfig { + name: "test".to_string(), + display_name: "Test".to_string(), + }, + prerequisites: PrerequisitesConfig { + required: vec![], + optional: vec![], + }, + setup: SetupConfig { + command: "setup.sh".to_string(), + env_vars: vec![], + }, + containers: ContainersConfig { + naming_pattern: "test-{service_name}".to_string(), + primary_service: "backend".to_string(), + health_endpoint: "/health".to_string(), + tailscale_project_prefix: None, + }, + infrastructure: InfrastructureConfig { + compose_file: "docker-compose.yml".to_string(), + project_name: "infra".to_string(), + profile: None, + }, + ports: PortsConfig { + allocation_strategy: "hash".to_string(), + base_port: 8000, + 
offset: PortOffset { + min: 0, + max: 500, + step: 10, + }, + }, + worktrees: WorktreesConfig { + default_parent: "~/repos".to_string(), + branch_prefix: "".to_string(), + }, + }; + + // Default environment gets base port + assert_eq!(config.calculate_port("default"), 8000); + assert_eq!(config.calculate_port("main"), 8000); + + // Other environments get offset ports (deterministic hash) + let staging_port = config.calculate_port("staging"); + assert!(staging_port >= 8000 && staging_port <= 8500); + + // Same env name should always give same port + assert_eq!(staging_port, config.calculate_port("staging")); + } +} diff --git a/ushadow/launcher/src-tauri/src/main.rs b/ushadow/launcher/src-tauri/src/main.rs index f4dfa294..25a8f177 100644 --- a/ushadow/launcher/src-tauri/src/main.rs +++ b/ushadow/launcher/src-tauri/src/main.rs @@ -4,31 +4,47 @@ )] mod commands; +mod config; mod models; use commands::{AppState, check_prerequisites, discover_environments, get_os_type, - discover_environments_with_config, + discover_environments_with_config, discover_environments_v2, start_containers, stop_containers, get_container_status, start_infrastructure, stop_infrastructure, restart_infrastructure, start_environment, stop_environment, check_ports, check_backend_health, check_webui_health, open_browser, focus_window, set_project_root, create_environment, + // OAuth server commands + start_oauth_server, wait_for_oauth_callback, + // HTTP client + http_request, // Project/repo management (from repository.rs) get_default_project_dir, check_project_dir, clone_ushadow_repo, update_ushadow_repo, get_current_branch, checkout_branch, get_base_branch, // Worktree commands - list_worktrees, list_git_branches, check_worktree_exists, create_worktree, create_worktree_with_workmux, + list_worktrees, list_git_branches, check_worktree_exists, check_environment_conflict, create_worktree, create_worktree_with_workmux, merge_worktree_with_rebase, list_tmux_sessions, get_tmux_window_status, 
get_environment_tmux_status, get_tmux_info, ensure_tmux_running, attach_tmux_to_worktree, open_in_vscode, open_in_vscode_with_tmux, remove_worktree, delete_environment, get_tmux_sessions, kill_tmux_window, kill_tmux_server, open_tmux_in_terminal, capture_tmux_pane, get_claude_status, + // Kanban ticket commands + create_ticket_worktree, attach_ticket_to_worktree, get_tickets_for_tmux_window, get_ticket_tmux_info, + start_coding_agent_for_ticket, + // Kanban ticket/epic CRUD (local storage) + get_tickets, get_epics, create_ticket, update_ticket, delete_ticket, create_epic, update_epic, delete_epic, // Settings load_launcher_settings, save_launcher_settings, write_credentials_to_worktree, // Prerequisites config (from prerequisites_config.rs) get_prerequisites_config, get_platform_prerequisites_config, // Generic installer (from generic_installer.rs) - replaces all platform-specific installers install_prerequisite, start_prerequisite, + // Config commands (from 4bdc-ushadow-launchge) + load_project_config, get_current_config, check_launcher_config_exists, validate_config_file, + // Environment scanning + scan_env_file, scan_all_env_vars, + // Infrastructure discovery + get_infra_services_from_compose, // Permissions check_install_path}; use tauri::{ @@ -51,15 +67,54 @@ fn create_tray_menu() -> SystemTrayMenu { fn create_app_menu() -> Menu { let launcher = CustomMenuItem::new("show_launcher", "Show Launcher"); + // App menu (File on Windows/Linux, App name on macOS) let app_menu = Submenu::new( "Ushadow", Menu::new() .add_item(launcher) .add_native_item(MenuItem::Separator) + .add_native_item(MenuItem::Hide) + .add_native_item(MenuItem::HideOthers) + .add_native_item(MenuItem::ShowAll) + .add_native_item(MenuItem::Separator) .add_native_item(MenuItem::Quit), ); - Menu::new().add_submenu(app_menu) + // Edit menu with all standard shortcuts + let edit_menu = Submenu::new( + "Edit", + Menu::new() + .add_native_item(MenuItem::Undo) + .add_native_item(MenuItem::Redo) + 
.add_native_item(MenuItem::Separator) + .add_native_item(MenuItem::Cut) + .add_native_item(MenuItem::Copy) + .add_native_item(MenuItem::Paste) + .add_native_item(MenuItem::SelectAll), + ); + + // View menu + let view_menu = Submenu::new( + "View", + Menu::new() + .add_native_item(MenuItem::EnterFullScreen), + ); + + // Window menu + let window_menu = Submenu::new( + "Window", + Menu::new() + .add_native_item(MenuItem::Minimize) + .add_native_item(MenuItem::Zoom) + .add_native_item(MenuItem::Separator) + .add_native_item(MenuItem::CloseWindow), + ); + + Menu::new() + .add_submenu(app_menu) + .add_submenu(edit_menu) + .add_submenu(view_menu) + .add_submenu(window_menu) } fn main() { @@ -138,9 +193,11 @@ fn main() { get_base_branch, // Worktree management discover_environments_with_config, + discover_environments_v2, list_worktrees, list_git_branches, check_worktree_exists, + check_environment_conflict, create_worktree, create_worktree_with_workmux, merge_worktree_with_rebase, @@ -160,6 +217,21 @@ fn main() { open_tmux_in_terminal, capture_tmux_pane, get_claude_status, + // Kanban ticket integration + create_ticket_worktree, + attach_ticket_to_worktree, + get_tickets_for_tmux_window, + get_ticket_tmux_info, + start_coding_agent_for_ticket, + // Kanban ticket/epic CRUD (local storage) + get_tickets, + get_epics, + create_ticket, + update_ticket, + delete_ticket, + create_epic, + update_epic, + delete_epic, // Settings load_launcher_settings, save_launcher_settings, @@ -170,6 +242,21 @@ fn main() { // Generic installer install_prerequisite, start_prerequisite, + // Config management (from 4bdc-ushadow-launchge) + load_project_config, + get_current_config, + check_launcher_config_exists, + validate_config_file, + // Environment scanning + scan_env_file, + scan_all_env_vars, + // Infrastructure discovery + get_infra_services_from_compose, + // OAuth server + start_oauth_server, + wait_for_oauth_callback, + // HTTP client + http_request, ]) .setup(|app| { let window = 
app.get_window("main").unwrap(); diff --git a/ushadow/launcher/src-tauri/src/models.rs b/ushadow/launcher/src-tauri/src/models.rs index 1a4f89b8..3500d17f 100644 --- a/ushadow/launcher/src-tauri/src/models.rs +++ b/ushadow/launcher/src-tauri/src/models.rs @@ -143,3 +143,87 @@ pub struct ClaudeStatus { pub current_task: Option, pub last_output: Option, } + +/// Environment conflict info - when creating environment that already exists +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct EnvironmentConflict { + pub name: String, + pub current_branch: String, + pub path: String, + pub is_running: bool, +} + +/// Compose service definition from docker-compose.yml +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct ComposeServiceDefinition { + pub id: String, // Service name from compose (e.g., "postgres", "redis") + pub display_name: String, // Human-readable name (e.g., "PostgreSQL", "Redis") + pub default_port: Option, // Primary exposed port + pub profiles: Vec, // Profiles this service belongs to +} + +/// Kanban ticket status +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum TicketStatus { + Backlog, + Todo, + InProgress, + InReview, + Done, + Archived, +} + +/// Kanban ticket priority +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum TicketPriority { + Low, + Medium, + High, + Urgent, +} + +/// Epic (collection of related tickets) +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct Epic { + pub id: String, + pub title: String, + pub description: Option, + pub color: String, + pub branch_name: Option, + pub base_branch: String, + pub project_id: Option, + pub created_at: String, + pub updated_at: String, +} + +/// Kanban ticket +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct Ticket { + pub id: String, + pub title: String, + pub description: Option, + pub status: TicketStatus, + pub priority: TicketPriority, + pub 
epic_id: Option, + pub tags: Vec, + pub color: Option, + pub tmux_window_name: Option, + pub tmux_session_name: Option, + pub branch_name: Option, + pub worktree_path: Option, + pub environment_name: Option, + pub project_id: Option, + pub assigned_to: Option, + pub order: i32, + pub created_at: String, + pub updated_at: String, +} + +/// Kanban data storage structure +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct KanbanData { + pub tickets: Vec, + pub epics: Vec, +} diff --git a/ushadow/launcher/src-tauri/tauri.conf.json b/ushadow/launcher/src-tauri/tauri.conf.json index ba821f18..5b01c193 100644 --- a/ushadow/launcher/src-tauri/tauri.conf.json +++ b/ushadow/launcher/src-tauri/tauri.conf.json @@ -8,7 +8,7 @@ }, "package": { "productName": "Ushadow", - "version": "0.7.15" + "version": "0.8.0" }, "tauri": { "allowlist": { @@ -59,6 +59,20 @@ }, "notification": { "all": true + }, + "window": { + "all": false, + "create": true, + "center": true, + "close": true, + "hide": true, + "show": true, + "setFocus": true, + "setTitle": true, + "setSize": true, + "setPosition": true, + "setResizable": true, + "setAlwaysOnTop": true } }, "bundle": { @@ -95,7 +109,14 @@ } }, "security": { - "csp": "default-src 'self'; script-src 'self' 'unsafe-inline'; connect-src 'self' http://localhost:* https://localhost:* ws://localhost:* wss://localhost:*; img-src 'self' data: http://localhost:* https://localhost:*; style-src 'self' 'unsafe-inline'; frame-src http://localhost:* https://localhost:*" + "csp": "default-src 'self'; script-src 'self' 'unsafe-inline'; connect-src 'self' http://localhost:* https://localhost:* ws://localhost:* wss://localhost:*; img-src 'self' data: http://localhost:* https://localhost:*; style-src 'self' 'unsafe-inline'; frame-src http://localhost:* https://localhost:*", + "dangerousRemoteDomainIpcAccess": [ + { + "domain": "localhost", + "windows": ["main", "oauth-window"], + "enableTauriAPI": false + } + ] }, "systemTray": { "iconPath": 
"icons/icon.png", diff --git a/ushadow/launcher/src/App.tsx b/ushadow/launcher/src/App.tsx index b8eb562d..0c0d6692 100644 --- a/ushadow/launcher/src/App.tsx +++ b/ushadow/launcher/src/App.tsx @@ -1,21 +1,25 @@ import { useState, useEffect, useCallback, useRef } from 'react' -import { tauri, type Prerequisites, type Discovery, type UshadowEnvironment, type PlatformPrerequisitesConfig } from './hooks/useTauri' +import { tauri, type Prerequisites, type Discovery, type UshadowEnvironment, type PlatformPrerequisitesConfig, type EnvironmentConflict } from './hooks/useTauri' import { useAppStore, type BranchType } from './store/appStore' import { useWindowFocus } from './hooks/useWindowFocus' import { useTmuxMonitoring } from './hooks/useTmuxMonitoring' -import { writeText, readText } from '@tauri-apps/api/clipboard' import { DevToolsPanel } from './components/DevToolsPanel' import { PrerequisitesPanel } from './components/PrerequisitesPanel' import { InfrastructurePanel } from './components/InfrastructurePanel' +import { InfraConfigPanel } from './components/InfraConfigPanel' import { EnvironmentsPanel } from './components/EnvironmentsPanel' import { LogPanel, type LogEntry, type LogLevel } from './components/LogPanel' import { ProjectSetupDialog } from './components/ProjectSetupDialog' import { NewEnvironmentDialog } from './components/NewEnvironmentDialog' +import { EnvironmentConflictDialog } from './components/EnvironmentConflictDialog' import { TmuxManagerDialog } from './components/TmuxManagerDialog' import { SettingsDialog } from './components/SettingsDialog' import { EmbeddedView } from './components/EmbeddedView' -import { RefreshCw, Settings, Zap, Loader2, FolderOpen, Pencil, Terminal, Sliders, Package, FolderGit2 } from 'lucide-react' +import { ProjectManager } from './components/ProjectManager' +import { AuthButton } from './components/AuthButton' +import { RefreshCw, Settings, Zap, Loader2, FolderOpen, Pencil, Terminal, Sliders, Package, FolderGit2, Trello 
} from 'lucide-react' import { getColors } from './utils/colors' +import { KanbanBoard } from './components/KanbanBoard' function App() { // Store @@ -33,8 +37,19 @@ function App() { setProjectRoot, worktreesDir, setWorktreesDir, + multiProjectMode, + kanbanEnabled, + projects, + activeProjectId, } = useAppStore() + // Get active project in multi-project mode, or use legacy projectRoot + const activeProject = multiProjectMode && activeProjectId + ? projects.find(p => p.id === activeProjectId) + : null + const effectiveProjectRoot = activeProject?.rootPath || projectRoot + const effectiveWorktreesDir = activeProject?.worktreesPath || worktreesDir + // State const [platform, setPlatform] = useState('') const [prerequisites, setPrerequisites] = useState(null) @@ -45,7 +60,7 @@ function App() { const [installingItem, setInstallingItem] = useState(null) const [isLaunching, setIsLaunching] = useState(false) const [loadingInfra, setLoadingInfra] = useState(false) - const [loadingEnv, setLoadingEnv] = useState(null) + const [loadingEnv, setLoadingEnv] = useState<{ name: string; action: 'starting' | 'stopping' | 'deleting' | 'merging' } | null>(null) const [showProjectDialog, setShowProjectDialog] = useState(false) const [showNewEnvDialog, setShowNewEnvDialog] = useState(false) const [showTmuxManager, setShowTmuxManager] = useState(false) @@ -55,6 +70,42 @@ function App() { const [shouldAutoLaunch, setShouldAutoLaunch] = useState(false) const [leftColumnWidth, setLeftColumnWidth] = useState(350) // pixels const [isResizing, setIsResizing] = useState(false) + const [environmentConflict, setEnvironmentConflict] = useState(null) + const [pendingEnvCreation, setPendingEnvCreation] = useState<{ name: string; branch: string } | null>(null) + const [selectedEnvironment, setSelectedEnvironment] = useState(null) + + // Auto-select environment matching current directory's ENV_NAME, or first running + useEffect(() => { + if (!selectedEnvironment && discovery?.environments) { + // Try 
to find environment matching the current project root + const currentEnv = discovery.environments.find(e => + e.running && e.path === projectRoot + ) + // Fallback to first running environment + const envToSelect = currentEnv || discovery.environments.find(e => e.running) + if (envToSelect) { + console.log('[App] Auto-selecting environment:', { + name: envToSelect.name, + backend_port: envToSelect.backend_port, + path: envToSelect.path, + projectRoot, + matched: !!currentEnv + }) + setSelectedEnvironment(envToSelect) + } + } + }, [discovery?.environments, selectedEnvironment, projectRoot]) + + // Debug: expose selectedEnvironment to console + useEffect(() => { + if (selectedEnvironment) { + (window as any).selectedEnv = selectedEnvironment + console.log('[App] Selected environment updated:', { + name: selectedEnvironment.name, + backend_port: selectedEnvironment.backend_port + }) + } + }, [selectedEnvironment]) // Window focus detection for smart polling const isWindowFocused = useWindowFocus() @@ -63,6 +114,40 @@ function App() { const environmentNames = discovery?.environments.map(e => e.name) ?? 
[] const tmuxStatuses = useTmuxMonitoring(environmentNames, isWindowFocused && environmentNames.length > 0) + // Infrastructure service selection + const [selectedInfraServices, setSelectedInfraServices] = useState([]) + + // Auto-select running infrastructure services + useEffect(() => { + if (discovery?.infrastructure) { + const runningServiceIds = discovery.infrastructure + .filter(service => service.running) + .map(service => service.name) + + // Only update if the running services have changed + setSelectedInfraServices(prev => { + const prevSet = new Set(prev) + const newSet = new Set(runningServiceIds) + + // Check if sets are different + if (prevSet.size !== newSet.size) return runningServiceIds + for (const id of runningServiceIds) { + if (!prevSet.has(id)) return runningServiceIds + } + + return prev // No change needed + }) + } + }, [discovery?.infrastructure]) + + const handleToggleInfraService = (serviceId: string) => { + setSelectedInfraServices(prev => + prev.includes(serviceId) + ? prev.filter(id => id !== serviceId) + : [...prev, serviceId] + ) + } + const logIdRef = useRef(0) const lastStateRef = useRef('') @@ -118,85 +203,54 @@ function App() { } }, [isResizing, handleMouseMove, handleMouseUp]) - // Enable keyboard shortcuts for copy/paste - useEffect(() => { - const handleKeyDown = async (e: KeyboardEvent) => { - const isMac = navigator.platform.toUpperCase().indexOf('MAC') >= 0 - const modifier = isMac ? 
e.metaKey : e.ctrlKey - - // Only handle copy/paste/cut if modifier key is pressed - if (!modifier) return - - // Get the active element - const target = e.target as HTMLElement - const isInputField = target.tagName === 'INPUT' || target.tagName === 'TEXTAREA' || target.isContentEditable + // Enable native clipboard operations (undo/redo/copy/paste/cut/select-all) + // Tauri webview supports standard browser clipboard API + // All native keyboard shortcuts work by default: Cmd/Ctrl+Z (undo), Cmd/Ctrl+Shift+Z (redo), Cmd/Ctrl+A (select all), etc. - try { - if (e.key.toLowerCase() === 'c') { - // Copy: get selection from input field or window selection - let textToCopy = '' - if (isInputField && (target instanceof HTMLInputElement || target instanceof HTMLTextAreaElement)) { - const start = target.selectionStart || 0 - const end = target.selectionEnd || 0 - textToCopy = target.value.substring(start, end) - } else { - const selection = window.getSelection() - textToCopy = selection?.toString() || '' - } + // Token sharing API: Listen for token requests from environment iframes + useEffect(() => { + const handleMessage = (event: MessageEvent) => { + // Security: Only respond to messages from embedded environments + // TODO: Add origin validation if needed + + if (event.data.type === 'GET_KC_TOKEN') { + // Get tokens from localStorage + const token = localStorage.getItem('kc_access_token') + const refreshToken = localStorage.getItem('kc_refresh_token') + const idToken = localStorage.getItem('kc_id_token') + + // Send tokens back to requesting iframe + event.source?.postMessage( + { + type: 'KC_TOKEN_RESPONSE', + tokens: { token, refreshToken, idToken }, + }, + '*' // TODO: Restrict to specific origins in production + ) + } - if (textToCopy) { - await writeText(textToCopy) - e.preventDefault() - } - } else if (e.key.toLowerCase() === 'v') { - // Paste: handle input fields specially, but allow pasting anywhere - const text = await readText() - if (!text) return - - if 
(target instanceof HTMLInputElement || target instanceof HTMLTextAreaElement) { - // Paste into input/textarea - const start = target.selectionStart || 0 - const end = target.selectionEnd || 0 - const currentValue = target.value - target.value = currentValue.substring(0, start) + text + currentValue.substring(end) - target.selectionStart = target.selectionEnd = start + text.length - - // Trigger input event so React state updates - const event = new Event('input', { bubbles: true }) - target.dispatchEvent(event) - e.preventDefault() - } else if (target.isContentEditable) { - // Paste into contenteditable - document.execCommand('insertText', false, text) - e.preventDefault() - } - } else if (e.key.toLowerCase() === 'x') { - // Cut: only from input fields - if (target instanceof HTMLInputElement || target instanceof HTMLTextAreaElement) { - const start = target.selectionStart || 0 - const end = target.selectionEnd || 0 - const selectedText = target.value.substring(start, end) - if (selectedText) { - await writeText(selectedText) - const currentValue = target.value - target.value = currentValue.substring(0, start) + currentValue.substring(end) - target.selectionStart = target.selectionEnd = start - - // Trigger input event so React state updates - const event = new Event('input', { bubbles: true }) - target.dispatchEvent(event) - e.preventDefault() - } - } - } - } catch (err) { - // Silently fail if clipboard access is denied - console.warn('Clipboard access failed:', err) + if (event.data.type === 'REFRESH_KC_TOKEN') { + // TODO: Implement token refresh logic + // For now, just return current tokens + const token = localStorage.getItem('kc_access_token') + const refreshToken = localStorage.getItem('kc_refresh_token') + const idToken = localStorage.getItem('kc_id_token') + + event.source?.postMessage( + { + type: 'KC_TOKEN_REFRESHED', + tokens: { token, refreshToken, idToken }, + }, + '*' + ) } } - document.addEventListener('keydown', handleKeyDown, true) // Use 
capture phase - return () => document.removeEventListener('keydown', handleKeyDown, true) + window.addEventListener('message', handleMessage) + + return () => { + window.removeEventListener('message', handleMessage) + } }, []) // Apply spoofed values to prerequisites @@ -295,22 +349,16 @@ function App() { const init = async () => { try { log('Initializing...', 'step') - console.log('[Init] Starting initialization...') const os = await tauri.getOsType() - console.log('[Init] OS:', os) setPlatform(os) log(`Platform: ${os}`) // Load prerequisites configuration for this platform - console.log('[Init] Loading prerequisites config...') const config = await tauri.getPlatformPrerequisitesConfig(os) - console.log('[Init] Prerequisites config:', config) setPrerequisitesConfig(config) - console.log('[Init] Getting default project dir...') const defaultDir = await tauri.getDefaultProjectDir() - console.log('[Init] Default dir:', defaultDir) // Track if this is first time setup (showing project dialog) let isFirstTimeSetup = false @@ -323,22 +371,18 @@ function App() { log('Please configure your repository location', 'step') } else { // Sync existing project root to Rust backend - console.log('[Init] Setting project root:', projectRoot) await tauri.setProjectRoot(projectRoot) } // Check prerequisites immediately (system-wide, no project needed) - console.log('[Init] Checking prerequisites...') await refreshPrerequisites() // Only run discovery if we have a valid project root if (!isFirstTimeSetup) { - console.log('[Init] Running discovery...') await refreshDiscovery() } log('Ready', 'success') - console.log('[Init] Initialization complete') } catch (err) { console.error('[Init] Initialization error:', err) log(`Initialization failed: ${err}`, 'error') @@ -378,6 +422,15 @@ function App() { return () => clearInterval(interval) }, [refreshPrerequisites, refreshDiscovery, isWindowFocused, projectRoot]) + // Sync active project root to Rust backend when it changes (multi-project 
mode) + useEffect(() => { + if (effectiveProjectRoot && effectiveProjectRoot !== projectRoot) { + tauri.setProjectRoot(effectiveProjectRoot).catch(err => { + console.error('Failed to sync project root to backend:', err) + }) + } + }, [effectiveProjectRoot, projectRoot]) + // Install handlers const handleInstall = async (item: string) => { setIsInstalling(true) @@ -560,8 +613,7 @@ function App() { // Environment handlers const handleStartEnv = async (envName: string, explicitPath?: string) => { - console.log(`DEBUG: handleStartEnv called for ${envName}`) - setLoadingEnv(envName) + setLoadingEnv({ name: envName, action: 'starting' }) log(`Starting ${envName}...`, 'step') // Use explicit path if provided, otherwise look up the environment @@ -636,9 +688,7 @@ function App() { } }) } else { - console.log(`DEBUG: Calling tauri.startEnvironment(${envName}, ${envPath})`) const result = await tauri.startEnvironment(envName, envPath) - console.log(`DEBUG: tauri.startEnvironment returned: ${result}`) // Only log summary to activity log (full detail is in console/detail pane) log(`βœ“ Environment ${envName} started successfully`, 'success') @@ -673,7 +723,7 @@ function App() { } const handleStopEnv = async (envName: string) => { - setLoadingEnv(envName) + setLoadingEnv({ name: envName, action: 'stopping' }) log(`Stopping ${envName}...`, 'step') try { @@ -728,7 +778,7 @@ function App() { if (!confirmed) return - setLoadingEnv(envName) + setLoadingEnv({ name: envName, action: 'merging' }) log(`Merging worktree "${envName}" to main...`, 'step') try { @@ -776,24 +826,24 @@ function App() { ? 
`Delete environment "${envName}"?\n\n` + `This will:\n` + `β€’ Stop all containers\n` + - `β€’ Remove the worktree\n` + + `β€’ Remove the worktree (including any uncommitted changes)\n` + `β€’ Close the tmux session\n\n` + - `This action cannot be undone!` + `⚠️ This action cannot be undone!` : `Delete environment "${envName}"?\n\n` + `This will:\n` + `β€’ Stop all containers\n` + `β€’ Close the tmux session\n\n` + - `This action cannot be undone!` + `⚠️ This action cannot be undone!` const confirmed = window.confirm(message) if (!confirmed) return - setLoadingEnv(envName) + setLoadingEnv({ name: envName, action: 'deleting' }) log(`Deleting environment "${envName}"...`, 'step') try { - const result = await tauri.deleteEnvironment(projectRoot, envName) + const result = await tauri.deleteEnvironment(effectiveProjectRoot, envName) log(result, 'success') log(`βœ“ Environment "${envName}" deleted`, 'success') @@ -832,7 +882,7 @@ function App() { // Force lowercase to avoid Docker Compose naming issues name = name.toLowerCase() - const envPath = `${projectRoot}/../${name}` // Expected clone location + const envPath = `${effectiveProjectRoot}/../${name}` // Expected clone location const modeLabel = serverMode === 'dev' ? 
'hot reload' : 'production' // Check port availability in dev mode (non-quick launch) @@ -889,17 +939,17 @@ function App() { name = name.toLowerCase() branch = branch.toLowerCase() - if (!worktreesDir) { + if (!effectiveWorktreesDir) { log('Worktrees directory not configured', 'error') throw new Error('Worktrees directory not configured') } log(`Creating worktree "${name}" from branch "${branch}"...`, 'step') - log(`Project root: ${projectRoot}`, 'info') - log(`Worktrees dir: ${worktreesDir}`, 'info') + log(`Project root: ${effectiveProjectRoot}`, 'info') + log(`Worktrees dir: ${effectiveWorktreesDir}`, 'info') try { - const worktree = await tauri.createWorktreeWithWorkmux(projectRoot, name, branch, true) + const worktree = await tauri.createWorktreeWithWorkmux(effectiveProjectRoot, name, branch, true) log(`βœ“ Worktree created successfully`, 'success') log(`Path: ${worktree.path}`, 'info') log(`Branch: ${worktree.branch}`, 'info') @@ -937,12 +987,30 @@ function App() { name = name.toLowerCase() branch = branch.toLowerCase() - if (!worktreesDir) { + if (!effectiveWorktreesDir) { log('Worktrees directory not configured', 'error') return } - const envPath = `${worktreesDir}/${name}` + // Check for conflicts first + try { + const conflict = await tauri.checkEnvironmentConflict(effectiveProjectRoot, name) + if (conflict) { + // Check if the environment is actually running (from discovery data) + const env = discovery?.environments.find(e => e.name === name) + conflict.is_running = env?.running || false + + // Show conflict dialog + setEnvironmentConflict(conflict) + setPendingEnvCreation({ name, branch }) + return + } + } catch (err) { + log(`Failed to check for conflicts: ${err}`, 'warning') + // Continue anyway + } + + const envPath = `${effectiveWorktreesDir}/${name}` // Add to creating environments list setCreatingEnvs(prev => [...prev, { name, status: 'cloning', path: envPath }]) @@ -957,7 +1025,7 @@ function App() { } else { // Step 1: Create the git worktree 
with workmux (includes tmux integration) log(`Creating git worktree at ${envPath}...`, 'info') - const worktree = await tauri.createWorktreeWithWorkmux(projectRoot, name, branch || undefined, true) + const worktree = await tauri.createWorktreeWithWorkmux(effectiveProjectRoot, name, branch || undefined, true) log(`βœ“ Worktree created at ${worktree.path}`, 'success') // Step 1.5: Write default admin credentials if configured @@ -1015,6 +1083,120 @@ function App() { } } + // Conflict resolution handlers + const handleConflictStartExisting = async () => { + if (!environmentConflict) return + + setEnvironmentConflict(null) + setPendingEnvCreation(null) + log(`Starting existing environment "${environmentConflict.name}"...`, 'step') + + // Start the existing environment + await handleStartEnv(environmentConflict.name, environmentConflict.path) + } + + const handleConflictSwitchBranch = async () => { + if (!environmentConflict || !pendingEnvCreation) return + + const { name, branch } = pendingEnvCreation + setEnvironmentConflict(null) + setPendingEnvCreation(null) + + log(`Switching "${name}" to branch "${branch}"...`, 'step') + + try { + // Stop if running + if (environmentConflict.is_running) { + log('Stopping environment before switching branch...', 'info') + await tauri.stopEnvironment(name) + } + + // Checkout the new branch + log(`Checking out branch ${branch}...`, 'info') + await tauri.checkoutBranch(environmentConflict.path, branch) + log(`βœ“ Switched to ${branch}`, 'success') + + // Start the environment + await handleStartEnv(name, environmentConflict.path) + } catch (err) { + log(`Failed to switch branch: ${err}`, 'error') + } + } + + const handleConflictDeleteAndRecreate = async () => { + if (!environmentConflict || !pendingEnvCreation) return + + const { name, branch } = pendingEnvCreation + setEnvironmentConflict(null) + setPendingEnvCreation(null) + + log(`Deleting and recreating "${name}"...`, 'step') + + try { + // Delete the old environment (stops 
containers, removes worktree, closes tmux) + await tauri.deleteEnvironment(effectiveProjectRoot, name) + log(`βœ“ Old environment deleted`, 'success') + + // Wait a moment for cleanup + await new Promise(r => setTimeout(r, 1000)) + + // Now create the new environment (reuse existing logic from handleNewEnvWorktree) + const envPath = `${effectiveWorktreesDir}/${name}` + setCreatingEnvs(prev => [...prev, { name, status: 'cloning', path: envPath }]) + log(`Creating worktree "${name}" from branch "${branch}"...`, 'step') + + if (dryRunMode) { + log(`[DRY RUN] Would create worktree "${name}" for branch "${branch}"`, 'warning') + setCreatingEnvs(prev => prev.map(e => e.name === name ? { ...e, status: 'starting' } : e)) + await new Promise(r => setTimeout(r, 2000)) + log(`[DRY RUN] Worktree environment "${name}" created`, 'success') + } else { + log(`Creating git worktree at ${envPath}...`, 'info') + const worktree = await tauri.createWorktreeWithWorkmux(effectiveProjectRoot, name, branch || undefined, true) + log(`βœ“ Worktree created at ${worktree.path}`, 'success') + + // Write credentials if configured + try { + const settings = await tauri.loadLauncherSettings() + if (settings.default_admin_email && settings.default_admin_password) { + log(`Writing admin credentials to secrets.yaml...`, 'info') + await tauri.writeCredentialsToWorktree( + worktree.path, + settings.default_admin_email, + settings.default_admin_password, + settings.default_admin_name || undefined + ) + log(`βœ“ Admin credentials configured`, 'success') + } + } catch (err) { + log(`Could not write credentials: ${err}`, 'warning') + } + + setCreatingEnvs(prev => prev.map(e => e.name === name ? 
{ ...e, status: 'starting', path: worktree.path } : e)) + + // Start the environment + log(`Starting environment "${name}"...`, 'step') + await handleStartEnv(name, worktree.path) + + log(`βœ“ Worktree environment "${name}" created and started!`, 'success') + } + + setTimeout(() => { + setCreatingEnvs(prev => prev.filter(e => e.name !== name)) + }, 15000) + + await refreshDiscovery() + } catch (err) { + log(`Failed to delete and recreate: ${err}`, 'error') + setCreatingEnvs(prev => prev.map(e => e.name === name ? { ...e, status: 'error', error: String(err) } : e)) + } + } + + const handleConflictCancel = () => { + setEnvironmentConflict(null) + setPendingEnvCreation(null) + } + // Project setup handler - saves paths, doesn't clone yet const handleProjectSetup = async (path: string, worktreesPath: string) => { setShowProjectDialog(false) @@ -1053,12 +1235,8 @@ function App() { const handleClone = async (path: string, branch?: string) => { try { - console.log('DEBUG handleClone: Starting clone for path:', path, 'branch:', branch) - console.log('DEBUG handleClone: dryRunMode =', dryRunMode) - // Check if repo already exists at this location const status = await tauri.checkProjectDir(path) - console.log('DEBUG handleClone: checkProjectDir status =', status) if (status.exists && status.is_valid_repo) { // Repo exists - pull latest instead of cloning @@ -1083,9 +1261,7 @@ function App() { await new Promise(r => setTimeout(r, 2000)) log('[DRY RUN] Clone simulated', 'success') } else { - console.log('DEBUG handleClone: Calling tauri.cloneUshadowRepo with path:', path, 'branch:', branch) const result = await tauri.cloneUshadowRepo(path, branch) - console.log('DEBUG handleClone: Clone result from Rust:', result) log(result, 'success') } } @@ -1120,7 +1296,6 @@ function App() { // Quick launch (for quick mode) const handleQuickLaunch = async () => { - console.log('DEBUG: handleQuickLaunch started') setIsLaunching(true) setLogExpanded(true) log('πŸš€ Starting Ushadow quick 
launch...', 'step') @@ -1347,7 +1522,7 @@ function App() { + {kanbanEnabled && ( + + )}
{/* Tmux Manager */} @@ -1386,15 +1573,21 @@ function App() { - {/* Settings / Credentials Button */} + {/* Auth Button */} + + + {/* Settings Button */} {/* Refresh */} @@ -1412,53 +1605,71 @@ function App() { {/* Main Content */}
{appMode === 'install' ? ( - /* Install Page - One-Click Launch (Landing Page) */ -
-
-

One-Click Launch

-

- Automatically install prerequisites and start Ushadow -

-
+ /* Install Page - Project Configuration */ +
+ {multiProjectMode ? ( + /* Multi-Project Mode - Project Manager */ + <> +
+

Project Management

+

+ Manage multiple projects with independent configurations +

+
+
+ +
+ + ) : ( + /* Single-Project Mode - One-Click Launch */ +
+
+

One-Click Launch

+

+ Automatically install prerequisites and start Ushadow +

+
- {/* Project Folder Display */} -
- - Project folder: - - {projectRoot || 'Not set'} - - -
+ {/* Project Folder Display */} +
+ + Project folder: + + {projectRoot || 'Not set'} + + +
- + +
+ )}
) : appMode === 'infra' ? ( /* Infra Page - Prerequisites & Infrastructure Setup */ @@ -1466,7 +1677,7 @@ function App() {

Setup & Installation

- Install prerequisites and configure your single environment + Install prerequisites and configure shared infrastructure

@@ -1493,70 +1704,23 @@ function App() { onStop={handleStopInfra} onRestart={handleRestartInfra} isLoading={loadingInfra} + selectedServices={selectedInfraServices} + onToggleService={handleToggleInfraService} />
- {/* Single Environment Section for Consumers */} -
-

Your Environment

- {!discovery || discovery.environments.length === 0 ? ( -
-

No environment created yet

- -
- ) : ( -
- {discovery.environments.map(env => ( -
-
-
- {env.name} - - {env.status} - -
-
- {env.running ? ( - <> - - - - ) : ( - - )} -
-
- ))} -
- )} -
+ {/* Infrastructure Configuration */} + {effectiveProjectRoot && ( + { + // TODO: Save to backend + }} + /> + )}
- ) : ( + ) : appMode === 'environments' ? ( /* Environments Page - Worktree Management */
setCreatingEnvs(prev => prev.filter(e => e.name !== name))} loadingEnv={loadingEnv} tmuxStatuses={tmuxStatuses} + selectedEnvironment={selectedEnvironment} + onSelectEnvironment={(env) => { + console.log('[App] onSelectEnvironment called with:', env?.name) + setSelectedEnvironment(env) + }} />
- )} + ) : appMode === 'kanban' && kanbanEnabled ? ( + /* Kanban Page - Ticket Management */ + (() => { + // Use the first available backend (running or not) + const backendUrl = discovery?.environments.find(e => e.running)?.localhost_url + || discovery?.environments[0]?.localhost_url + || 'http://localhost:8000' + + return ( + + ) + })() + ) : null}
{/* Log Panel - Bottom */} @@ -1601,7 +1786,7 @@ function App() { {/* New Environment Dialog */} setShowNewEnvDialog(false)} onLink={handleNewEnvLink} onWorktree={handleNewEnvWorktree} @@ -1619,6 +1804,16 @@ function App() { isOpen={showSettingsDialog} onClose={() => setShowSettingsDialog(false)} /> + + {/* Environment Conflict Dialog */} +
) } diff --git a/ushadow/launcher/src/components/AuthButton.tsx b/ushadow/launcher/src/components/AuthButton.tsx new file mode 100644 index 00000000..dc833cf1 --- /dev/null +++ b/ushadow/launcher/src/components/AuthButton.tsx @@ -0,0 +1,294 @@ +import { useState, useEffect } from 'react' +import { LogIn, LogOut, User, Loader2 } from 'lucide-react' +import { tauri, type UshadowEnvironment } from '../hooks/useTauri' +import { TokenManager } from '../services/tokenManager' +import { generateCodeVerifier, generateCodeChallenge, generateState } from '../utils/pkce' + +interface AuthButtonProps { + // Optional: Pass specific environment to auth against + // If not provided, will use first running environment + environment?: UshadowEnvironment | null + // Show as large button in center of page (for login prompt) + variant?: 'header' | 'centered' +} + +export function AuthButton({ environment, variant = 'header' }: AuthButtonProps) { + const [isAuthenticated, setIsAuthenticated] = useState(false) + const [username, setUsername] = useState(null) + const [isLoading, setIsLoading] = useState(true) + + // Check auth status when environment changes + useEffect(() => { + if (!environment) { + setIsLoading(false) + return + } + + checkAuthStatus() + + // Periodically check for token expiration (every 30 seconds) + const intervalId = setInterval(() => { + checkAuthStatus() + }, 30000) + + return () => clearInterval(intervalId) + }, [environment]) + + const checkAuthStatus = () => { + if (TokenManager.isAuthenticated()) { + const userInfo = TokenManager.getUserInfo() + setIsAuthenticated(true) + setUsername(userInfo?.preferred_username || userInfo?.email || 'User') + } else { + setIsAuthenticated(false) + } + setIsLoading(false) + } + + const handleLogin = async () => { + if (!environment) { + alert('No environment selected. 
Please start an environment first.') + return + } + + setIsLoading(true) + + try { + console.log('[AuthButton] Starting OAuth flow with HTTP callback server...') + + // Get backend URL from environment + const backendUrl = `http://localhost:${environment.backend_port}` + console.log('[AuthButton] Backend URL:', backendUrl) + + // Declare variables at function scope + let keycloakUrl: string + let port: number + let callbackUrl: string + + // Fetch Keycloak config from backend (using Tauri HTTP client to bypass CORS) + console.log('[AuthButton] Fetching Keycloak config from backend...') + const configResponse = await tauri.httpRequest(`${backendUrl}/api/settings/config`, 'GET') + console.log('[AuthButton] Config response status:', configResponse.status) + if (configResponse.status !== 200) { + throw new Error(`Failed to fetch config from backend: ${configResponse.status} - ${configResponse.body}`) + } + const config = JSON.parse(configResponse.body) + keycloakUrl = config.keycloak?.public_url || 'http://localhost:8081' + console.log('[AuthButton] Using Keycloak URL:', keycloakUrl) + + // Start OAuth callback server + console.log('[AuthButton] Starting OAuth callback server...') + ;[port, callbackUrl] = await tauri.startOAuthServer() + console.log('[AuthButton] βœ“ Callback server running on port:', port) + console.log('[AuthButton] Callback URL:', callbackUrl) + + // Register callback URL with Keycloak (using Tauri HTTP client to bypass CORS) + console.log('[AuthButton] Registering callback URL with Keycloak...') + const registerResponse = await tauri.httpRequest( + `${backendUrl}/api/auth/register-redirect-uri`, + 'POST', + { 'Content-Type': 'application/json' }, + JSON.stringify({ redirect_uri: callbackUrl }) + ) + + console.log('[AuthButton] Register response status:', registerResponse.status) + if (registerResponse.status !== 200) { + throw new Error(`Failed to register callback URL: ${registerResponse.status} - ${registerResponse.body}`) + } + 
console.log('[AuthButton] βœ“ Callback URL registered') + + // Generate PKCE parameters + const codeVerifier = generateCodeVerifier() + const codeChallenge = await generateCodeChallenge(codeVerifier) + const state = generateState() + + // Store for callback validation + localStorage.setItem('pkce_code_verifier', codeVerifier) + localStorage.setItem('oauth_state', state) + localStorage.setItem('oauth_backend_url', backendUrl) + + // Build Keycloak login URL + const authUrl = new URL(`${keycloakUrl}/realms/ushadow/protocol/openid-connect/auth`) + authUrl.searchParams.set('client_id', 'ushadow-frontend') + authUrl.searchParams.set('redirect_uri', callbackUrl) + authUrl.searchParams.set('response_type', 'code') + authUrl.searchParams.set('scope', 'openid profile email') + authUrl.searchParams.set('state', state) + authUrl.searchParams.set('code_challenge', codeChallenge) + authUrl.searchParams.set('code_challenge_method', 'S256') + + // Open system browser + console.log('[AuthButton] Opening system browser...') + await tauri.openBrowser(authUrl.toString()) + console.log('[AuthButton] βœ“ Browser opened, waiting for callback...') + + // Wait for OAuth callback (this will block until callback or timeout) + const result = await tauri.waitForOAuthCallback(port) + + if (!result.success || !result.code || !result.state) { + throw new Error(result.error || 'Login failed or cancelled') + } + + console.log('[AuthButton] βœ“ Callback received') + + // Validate state (CSRF protection) + const savedState = localStorage.getItem('oauth_state') + if (result.state !== savedState) { + throw new Error('Invalid state parameter - possible CSRF attack') + } + + // Exchange code for tokens + const savedCodeVerifier = localStorage.getItem('pkce_code_verifier') + if (!savedCodeVerifier) { + throw new Error('Missing PKCE code verifier') + } + + console.log('[AuthButton] Exchanging code for tokens...') + const tokenResponse = await tauri.httpRequest( + `${backendUrl}/api/auth/token`, + 'POST', 
+ { 'Content-Type': 'application/json' }, + JSON.stringify({ + code: result.code, + code_verifier: savedCodeVerifier, + redirect_uri: callbackUrl, + }) + ) + + if (tokenResponse.status !== 200) { + throw new Error(`Token exchange failed: ${tokenResponse.body}`) + } + + const tokens = JSON.parse(tokenResponse.body) + + // Store tokens + TokenManager.storeTokens(tokens) + console.log('[AuthButton] βœ“ Login successful') + + // Clean up + localStorage.removeItem('oauth_state') + localStorage.removeItem('pkce_code_verifier') + localStorage.removeItem('oauth_backend_url') + + // Notify embedded environments that tokens are now available + const iframe = document.getElementById('embedded-iframe') as HTMLIFrameElement + if (iframe && iframe.contentWindow) { + console.log('[AuthButton] Notifying embedded environment to refresh authentication') + iframe.contentWindow.postMessage( + { type: 'KC_TOKENS_UPDATED' }, + '*' // Send to iframe regardless of origin + ) + } + + // Update UI + checkAuthStatus() + } catch (error) { + console.error('[AuthButton] Login error:', error) + alert(`Login failed: ${error}`) + setIsLoading(false) + } + } + + const handleLogout = async () => { + TokenManager.clearTokens() + setIsAuthenticated(false) + setUsername(null) + + // Optionally open Keycloak logout page + if (environment) { + try { + const backendUrl = `http://localhost:${environment.backend_port}` + const configResponse = await tauri.httpRequest(`${backendUrl}/api/settings/config`, 'GET') + if (configResponse.status === 200) { + const config = JSON.parse(configResponse.body) + const keycloakUrl = config.keycloak?.public_url || 'http://localhost:8081' + const logoutUrl = `${keycloakUrl}/realms/ushadow/protocol/openid-connect/logout` + await tauri.openBrowser(logoutUrl) + } + } catch (error) { + console.error('[AuthButton] Logout error:', error) + } + } + } + + // Don't show button if no environment + if (!environment) { + return null + } + + if (isLoading) { + return ( +
+ +
+ ) + } + + if (isAuthenticated) { + if (variant === 'centered') { + return null // Don't show anything when authenticated in centered mode + } + + return ( +
+
+ + {username} +
+ +
+ ) + } + + // Centered variant - large button in middle of page + if (variant === 'centered') { + return ( +
+
+

Authentication Required

+

+ You need to log in to access Ushadow environments. +

+

+ Click below to open the login page in your browser. +

+
+ + + + {environment && ( +

+ Environment: {environment.name} β€’ + Port: {environment.webui_port} +

+ )} +
+ ) + } + + // Header variant - compact button + return ( + + ) +} diff --git a/ushadow/launcher/src/components/CreateEpicDialog.tsx b/ushadow/launcher/src/components/CreateEpicDialog.tsx new file mode 100644 index 00000000..d43c7a89 --- /dev/null +++ b/ushadow/launcher/src/components/CreateEpicDialog.tsx @@ -0,0 +1,180 @@ +import { useState } from 'react' + +interface CreateEpicDialogProps { + isOpen: boolean + onClose: () => void + onCreated: () => void + projectId?: string + backendUrl: string +} + +const PRESET_COLORS = [ + '#3B82F6', // blue + '#8B5CF6', // purple + '#EC4899', // pink + '#F59E0B', // amber + '#10B981', // green + '#06B6D4', // cyan + '#F97316', // orange + '#EF4444', // red +] + +export function CreateEpicDialog({ + isOpen, + onClose, + onCreated, + projectId, + backendUrl, +}: CreateEpicDialogProps) { + const [title, setTitle] = useState('') + const [description, setDescription] = useState('') + const [color, setColor] = useState(PRESET_COLORS[0]) + const [baseBranch, setBaseBranch] = useState('main') + const [creating, setCreating] = useState(false) + const [error, setError] = useState(null) + + if (!isOpen) return null + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault() + setError(null) + setCreating(true) + + try { + const { tauri } = await import('../hooks/useTauri') + + await tauri.createEpic( + title, + description || null, + color, + baseBranch, + null, // branch_name (not set during creation) + projectId || null + ) + + onCreated() + // Reset form + setTitle('') + setDescription('') + setColor(PRESET_COLORS[0]) + setBaseBranch('main') + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to create epic') + } finally { + setCreating(false) + } + } + + return ( +
+
e.stopPropagation()} + > +

Create New Epic

+ +
+ {/* Title */} +
+ + setTitle(e.target.value)} + className="w-full bg-gray-900 border border-gray-700 rounded px-3 py-2 text-white" + placeholder="Epic title" + required + data-testid="create-epic-title" + /> +
+ + {/* Description */} +
+ +