diff --git a/.opencode/bin/flowctl.py b/.opencode/bin/flowctl.py index 9a83fb9..f1dc081 100755 --- a/.opencode/bin/flowctl.py +++ b/.opencode/bin/flowctl.py @@ -17,9 +17,28 @@ import shutil import sys import tempfile +from abc import ABC, abstractmethod +from contextlib import contextmanager from datetime import datetime from pathlib import Path -from typing import Any, Optional +from typing import Any, ContextManager, Optional + +# Platform-specific file locking (fcntl on Unix, no-op on Windows) +try: + import fcntl + + def _flock(f, lock_type): + fcntl.flock(f, lock_type) + + LOCK_EX = fcntl.LOCK_EX + LOCK_UN = fcntl.LOCK_UN +except ImportError: + # Windows: fcntl not available, use no-op (acceptable for single-machine use) + def _flock(f, lock_type): + pass + + LOCK_EX = 0 + LOCK_UN = 0 # --- Constants --- @@ -44,6 +63,17 @@ "## Evidence", ] +# Runtime fields stored in state-dir (not tracked in git) +RUNTIME_FIELDS = { + "status", + "updated_at", + "claimed_at", + "assignee", + "claim_note", + "evidence", + "blocked_reason", +} + # --- Helpers --- @@ -73,6 +103,186 @@ def ensure_flow_exists() -> bool: return get_flow_dir().exists() +def get_state_dir() -> Path: + """Get state directory for runtime task state. + + Resolution order: + 1. FLOW_STATE_DIR env var (explicit override for orchestrators) + 2. git common-dir (shared across all worktrees automatically) + 3. Fallback to .flow/state for non-git repos + """ + # 1. Explicit override + if state_dir := os.environ.get("FLOW_STATE_DIR"): + return Path(state_dir).resolve() + + # 2. Git common-dir (shared across worktrees) + try: + result = subprocess.run( + ["git", "rev-parse", "--git-common-dir", "--path-format=absolute"], + capture_output=True, + text=True, + check=True, + ) + common = result.stdout.strip() + return Path(common) / "flow-state" + except subprocess.CalledProcessError: + pass + + # 3. 
Fallback for non-git repos + return get_flow_dir() / "state" + + +# --- StateStore (runtime task state) --- + + +class StateStore(ABC): + """Abstract interface for runtime task state storage.""" + + @abstractmethod + def load_runtime(self, task_id: str) -> Optional[dict]: + """Load runtime state for a task. Returns None if no state file.""" + ... + + @abstractmethod + def save_runtime(self, task_id: str, data: dict) -> None: + """Save runtime state for a task.""" + ... + + @abstractmethod + def lock_task(self, task_id: str) -> ContextManager: + """Context manager for exclusive task lock.""" + ... + + @abstractmethod + def list_runtime_files(self) -> list[str]: + """List all task IDs that have runtime state files.""" + ... + + +class LocalFileStateStore(StateStore): + """File-based state store with fcntl locking.""" + + def __init__(self, state_dir: Path): + self.state_dir = state_dir + self.tasks_dir = state_dir / "tasks" + self.locks_dir = state_dir / "locks" + + def _state_path(self, task_id: str) -> Path: + return self.tasks_dir / f"{task_id}.state.json" + + def _lock_path(self, task_id: str) -> Path: + return self.locks_dir / f"{task_id}.lock" + + def load_runtime(self, task_id: str) -> Optional[dict]: + state_path = self._state_path(task_id) + if not state_path.exists(): + return None + try: + with open(state_path, encoding="utf-8") as f: + return json.load(f) + except (json.JSONDecodeError, IOError): + return None + + def save_runtime(self, task_id: str, data: dict) -> None: + self.tasks_dir.mkdir(parents=True, exist_ok=True) + state_path = self._state_path(task_id) + content = json.dumps(data, indent=2, sort_keys=True) + "\n" + atomic_write(state_path, content) + + @contextmanager + def lock_task(self, task_id: str): + """Acquire exclusive lock for task operations.""" + self.locks_dir.mkdir(parents=True, exist_ok=True) + lock_path = self._lock_path(task_id) + with open(lock_path, "w") as f: + try: + _flock(f, LOCK_EX) + yield + finally: + _flock(f, LOCK_UN) 
+ + def list_runtime_files(self) -> list[str]: + if not self.tasks_dir.exists(): + return [] + return [ + f.stem.replace(".state", "") + for f in self.tasks_dir.glob("*.state.json") + ] + + +def get_state_store() -> LocalFileStateStore: + """Get the state store instance.""" + return LocalFileStateStore(get_state_dir()) + + +# --- Task Loading with State Merge --- + + +def load_task_definition(task_id: str, use_json: bool = True) -> dict: + """Load task definition from tracked file (no runtime state).""" + flow_dir = get_flow_dir() + def_path = flow_dir / TASKS_DIR / f"{task_id}.json" + return load_json_or_exit(def_path, f"Task {task_id}", use_json=use_json) + + +def load_task_with_state(task_id: str, use_json: bool = True) -> dict: + """Load task definition merged with runtime state. + + Backward compatible: if no state file exists, reads legacy runtime + fields from definition file. + """ + definition = load_task_definition(task_id, use_json=use_json) + + # Load runtime state + store = get_state_store() + runtime = store.load_runtime(task_id) + + if runtime is None: + # Backward compat: extract runtime fields from definition + runtime = {k: definition[k] for k in RUNTIME_FIELDS if k in definition} + if not runtime: + runtime = {"status": "todo"} + + # Merge: runtime overwrites definition for runtime fields + merged = {**definition, **runtime} + return normalize_task(merged) + + +def save_task_runtime(task_id: str, updates: dict) -> None: + """Write runtime state only (merge with existing). Never touch definition file.""" + store = get_state_store() + with store.lock_task(task_id): + current = store.load_runtime(task_id) or {"status": "todo"} + merged = {**current, **updates, "updated_at": now_iso()} + store.save_runtime(task_id, merged) + + +def reset_task_runtime(task_id: str) -> None: + """Reset runtime state to baseline (overwrite, not merge). 
Used by task reset.""" + store = get_state_store() + with store.lock_task(task_id): + # Overwrite with clean baseline state + store.save_runtime(task_id, {"status": "todo", "updated_at": now_iso()}) + + +def delete_task_runtime(task_id: str) -> None: + """Delete runtime state file entirely. Used by checkpoint restore when no runtime.""" + store = get_state_store() + with store.lock_task(task_id): + state_path = store._state_path(task_id) + if state_path.exists(): + state_path.unlink() + + +def save_task_definition(task_id: str, definition: dict) -> None: + """Write definition to tracked file (filters out runtime fields).""" + flow_dir = get_flow_dir() + def_path = flow_dir / TASKS_DIR / f"{task_id}.json" + # Filter out runtime fields + clean_def = {k: v for k, v in definition.items() if k not in RUNTIME_FIELDS} + atomic_write_json(def_path, clean_def) + + def get_default_config() -> dict: """Return default config structure.""" return { @@ -399,9 +609,12 @@ def normalize_epic(epic_data: dict) -> dict: def normalize_task(task_data: dict) -> dict: - """Apply defaults for optional task fields.""" + """Apply defaults for optional task fields and migrate legacy keys.""" if "priority" not in task_data: task_data["priority"] = None + # Migrate legacy 'deps' key to 'depends_on' + if "depends_on" not in task_data: + task_data["depends_on"] = task_data.get("deps", []) return task_data @@ -900,6 +1113,7 @@ def build_opencode_impl_prompt( def build_opencode_plan_prompt( plan_summary: str, plan_spec: str, + task_specs: str, epic_id: str, focus: Optional[str], ) -> str: @@ -911,6 +1125,9 @@ def build_opencode_plan_prompt( Plan spec: {plan_spec} + +Task specs: +{task_specs} {focus_block} Review Criteria: 1. Completeness @@ -920,6 +1137,7 @@ def build_opencode_plan_prompt( 5. Risks (incl. security) 6. Scope 7. Testability +8. 
Consistency (epic vs task specs) Output Format: - Group issues by severity (Blocker/Major/Minor) @@ -935,10 +1153,12 @@ def build_review_prompt( spec_content: str, context_hints: str, diff_summary: str = "", + task_specs: str = "", ) -> str: """Build XML-structured review prompt for codex. review_type: 'impl' or 'plan' + task_specs: Combined task spec content (plan reviews only) Uses same Carmack-level criteria as RepoPrompt workflow to ensure parity. """ @@ -1030,6 +1250,18 @@ def build_review_prompt( context_preamble + """Conduct a John Carmack-level review of this plan. +## Review Scope + +You are reviewing: +1. **Epic spec** in `` - The high-level plan +2. **Task specs** in `` - Individual task breakdowns (if provided) + +**CRITICAL**: Check for consistency between epic and tasks. Flag if: +- Task specs contradict or miss epic requirements +- Task acceptance criteria don't align with epic acceptance criteria +- Task approaches would need to change based on epic design decisions +- Epic mentions states/enums/types that tasks don't account for + ## Review Criteria 1. **Completeness** - All requirements covered? Missing edge cases? @@ -1039,6 +1271,7 @@ def build_review_prompt( 5. **Risks** - Blockers identified? Security gaps? Mitigation? 6. **Scope** - Right-sized? Over/under-engineering? 7. **Testability** - How will we verify this works? +8. **Consistency** - Do task specs align with epic spec? 
## Verdict Scope @@ -1046,6 +1279,7 @@ def build_review_prompt( - Issues **within this plan** that block implementation - Feasibility problems given the **current codebase state** - Missing requirements that are **part of the stated goal** +- Inconsistencies between epic and task specs Do NOT mark NEEDS_WORK for: - Pre-existing codebase issues unrelated to this plan @@ -1081,6 +1315,9 @@ def build_review_prompt( parts.append(f"\n{diff_summary}\n") parts.append(f"\n{spec_content}\n") + + if task_specs: + parts.append(f"\n{task_specs}\n") parts.append(f"\n{instruction}\n") return "\n\n".join(parts) @@ -1096,6 +1333,29 @@ def build_rereview_preamble(changed_files: list[str], review_type: str) -> str: if len(changed_files) > 30: files_list += f"\n- ... and {len(changed_files) - 30} more files" + task_sync_note = "" + if review_type == "plan": + task_sync_note = """ + +## Task Spec Sync Required + +If you modified the epic spec in ways that affect task specs, you MUST also update +the affected task specs before requesting re-review. Use: + +```bash +flowctl task set-spec --file - <<'EOF' + +EOF +``` + +Task specs need updating when epic changes affect: +- State/enum values referenced in tasks +- Acceptance criteria that tasks implement +- Approach/design decisions tasks depend on +- Lock/retry/error handling semantics +- API signatures or type definitions +""" + return f"""## IMPORTANT: Re-review After Fixes This is a RE-REVIEW. Code has been modified since your last review. @@ -1105,7 +1365,7 @@ def build_rereview_preamble(changed_files: list[str], review_type: str) -> str: Use your file reading tools to get the CURRENT content of these files. Do NOT rely on what you saw in the previous review - the code has changed. - +{task_sync_note} After re-reading, conduct a fresh {review_type} review on the updated code. 
--- @@ -1569,10 +1829,12 @@ def cmd_status(args: argparse.Namespace) -> None: if tasks_dir.exists(): for task_file in tasks_dir.glob("fn-*.json"): # Skip non-task files (must have . before .json) - if "." not in task_file.stem: + task_id = task_file.stem + if "." not in task_id: continue try: - task_data = load_json(task_file) + # Use merged state for accurate status counts + task_data = load_task_with_state(task_id, use_json=True) status = task_data.get("status", "todo") if status in task_counts: task_counts[status] += 1 @@ -2246,6 +2508,10 @@ def cmd_dep_add(args: argparse.Namespace) -> None: task_data = load_json_or_exit(task_path, f"Task {args.task}", use_json=args.json) + # Migrate old 'deps' key to 'depends_on' if needed + if "depends_on" not in task_data: + task_data["depends_on"] = task_data.pop("deps", []) + if args.depends_on not in task_data["depends_on"]: task_data["depends_on"].append(args.depends_on) task_data["updated_at"] = now_iso() @@ -2278,16 +2544,15 @@ def cmd_show(args: argparse.Namespace) -> None: load_json_or_exit(epic_path, f"Epic {args.id}", use_json=args.json) ) - # Get tasks for this epic + # Get tasks for this epic (with merged runtime state) tasks = [] tasks_dir = flow_dir / TASKS_DIR if tasks_dir.exists(): for task_file in sorted(tasks_dir.glob(f"{args.id}.*.json")): - task_data = normalize_task( - load_json_or_exit( - task_file, f"Task {task_file.stem}", use_json=args.json - ) - ) + task_id = task_file.stem + if "." 
not in task_id: + continue # Skip non-task files + task_data = load_task_with_state(task_id, use_json=args.json) if "id" not in task_data: continue # Skip artifact files (GH-21) tasks.append( @@ -2296,7 +2561,9 @@ def cmd_show(args: argparse.Namespace) -> None: "title": task_data["title"], "status": task_data["status"], "priority": task_data.get("priority"), - "depends_on": task_data["depends_on"], + "depends_on": task_data.get( + "depends_on", task_data.get("deps", []) + ), } ) @@ -2324,10 +2591,8 @@ def task_sort_key(t): print(f" [{t['status']}] {t['id']}: {t['title']}{deps}") elif is_task_id(args.id): - task_path = flow_dir / TASKS_DIR / f"{args.id}.json" - task_data = normalize_task( - load_json_or_exit(task_path, f"Task {args.id}", use_json=args.json) - ) + # Load task with merged runtime state + task_data = load_task_with_state(args.id, use_json=args.json) if args.json: json_output(task_data) @@ -2364,15 +2629,16 @@ def cmd_epics(args: argparse.Namespace) -> None: epic_file, f"Epic {epic_file.stem}", use_json=args.json ) ) - # Count tasks + # Count tasks (with merged runtime state) tasks_dir = flow_dir / TASKS_DIR task_count = 0 done_count = 0 if tasks_dir.exists(): for task_file in tasks_dir.glob(f"{epic_data['id']}.*.json"): - task_data = load_json_or_exit( - task_file, f"Task {task_file.stem}", use_json=args.json - ) + task_id = task_file.stem + if "." not in task_id: + continue + task_data = load_task_with_state(task_id, use_json=args.json) task_count += 1 if task_data.get("status") == "done": done_count += 1 @@ -2423,12 +2689,10 @@ def cmd_tasks(args: argparse.Namespace) -> None: pattern = f"{args.epic}.*.json" if args.epic else "fn-*.json" for task_file in sorted(tasks_dir.glob(pattern)): # Skip if it's not a task file (must have . in the name before .json) - stem = task_file.stem - if "." not in stem: + task_id = task_file.stem + if "." 
not in task_id: continue - task_data = normalize_task( - load_json_or_exit(task_file, f"Task {stem}", use_json=args.json) - ) + task_data = load_task_with_state(task_id, use_json=args.json) if "id" not in task_data: continue # Skip artifact files (GH-21) # Filter by status if requested @@ -2501,17 +2765,15 @@ def epic_sort_key(e): epics.sort(key=epic_sort_key) - # Load all tasks grouped by epic + # Load all tasks grouped by epic (with merged runtime state) tasks_by_epic = {} all_tasks = [] if tasks_dir.exists(): for task_file in sorted(tasks_dir.glob("fn-*.json")): - stem = task_file.stem - if "." not in stem: + task_id = task_file.stem + if "." not in task_id: continue - task_data = normalize_task( - load_json_or_exit(task_file, f"Task {stem}", use_json=args.json) - ) + task_data = load_task_with_state(task_id, use_json=args.json) if "id" not in task_data: continue # Skip artifact files (GH-21) epic_id = task_data["epic"] @@ -2859,10 +3121,10 @@ def cmd_task_set_acceptance(args: argparse.Namespace) -> None: def cmd_task_set_spec(args: argparse.Namespace) -> None: - """Set task description and/or acceptance in one call. + """Set task spec - full replacement (--file) or section patches. - Reduces tool calls: instead of separate set-description + set-acceptance, - both can be set atomically with a single JSON timestamp update. + Full replacement mode: --file replaces entire spec content (like epic set-plan). + Section patch mode: --description and/or --acceptance update specific sections. 
""" if not ensure_flow_exists(): error_exit( @@ -2876,10 +3138,11 @@ def cmd_task_set_spec(args: argparse.Namespace) -> None: use_json=args.json, ) - # Need at least one of description or acceptance - if not args.description and not args.acceptance: + # Need at least one of file, description, or acceptance + has_file = hasattr(args, "file") and args.file + if not has_file and not args.description and not args.acceptance: error_exit( - "At least one of --description or --acceptance required", + "Requires --file, --description, or --acceptance", use_json=args.json, ) @@ -2894,6 +3157,20 @@ def cmd_task_set_spec(args: argparse.Namespace) -> None: # Load task JSON first (fail early) task_data = load_json_or_exit(task_json_path, f"Task {task_id}", use_json=args.json) + # Full file replacement mode (like epic set-plan) + if has_file: + content = read_file_or_stdin(args.file, "Spec file", use_json=args.json) + atomic_write(task_spec_path, content) + task_data["updated_at"] = now_iso() + atomic_write_json(task_json_path, task_data) + + if args.json: + json_output({"id": task_id, "message": f"Task {task_id} spec replaced"}) + else: + print(f"Task {task_id} spec replaced") + return + + # Section patch mode (existing behavior) # Read current spec current_spec = read_text_or_exit( task_spec_path, f"Task {task_id} spec", use_json=args.json @@ -2904,16 +3181,22 @@ def cmd_task_set_spec(args: argparse.Namespace) -> None: # Apply description if provided if args.description: - desc_content = read_file_or_stdin(args.description, "Description file", use_json=args.json) + desc_content = read_file_or_stdin( + args.description, "Description file", use_json=args.json + ) try: - updated_spec = patch_task_section(updated_spec, "## Description", desc_content) + updated_spec = patch_task_section( + updated_spec, "## Description", desc_content + ) sections_updated.append("## Description") except ValueError as e: error_exit(str(e), use_json=args.json) # Apply acceptance if provided if 
args.acceptance: - acc_content = read_file_or_stdin(args.acceptance, "Acceptance file", use_json=args.json) + acc_content = read_file_or_stdin( + args.acceptance, "Acceptance file", use_json=args.json + ) try: updated_spec = patch_task_section(updated_spec, "## Acceptance", acc_content) sections_updated.append("## Acceptance") @@ -2957,7 +3240,8 @@ def cmd_task_reset(args: argparse.Namespace) -> None: if not task_json_path.exists(): error_exit(f"Task {task_id} not found", use_json=args.json) - task_data = load_json_or_exit(task_json_path, f"Task {task_id}", use_json=args.json) + # Load task with merged runtime state + task_data = load_task_with_state(task_id, use_json=args.json) # Load epic to check if closed epic_id = epic_id_from_task(task_id) @@ -2969,7 +3253,7 @@ def cmd_task_reset(args: argparse.Namespace) -> None: f"Cannot reset task in closed epic {epic_id}", use_json=args.json ) - # Check status validations + # Check status validations (use merged state) current_status = task_data.get("status", "todo") if current_status == "in_progress": error_exit( @@ -2986,23 +3270,20 @@ def cmd_task_reset(args: argparse.Namespace) -> None: print(f"{task_id} already todo") return - # Reset task - task_data["status"] = "todo" - task_data["updated_at"] = now_iso() - - # Clear optional fields - task_data.pop("blocked_reason", None) - task_data.pop("completed_at", None) - - # Clear claim fields (MU-2) - task_data.pop("assignee", None) - task_data.pop("claimed_at", None) - task_data.pop("claim_note", None) - - # Clear evidence from JSON - task_data.pop("evidence", None) - - atomic_write_json(task_json_path, task_data) + # Reset runtime state to baseline (overwrite, not merge - clears all runtime fields) + reset_task_runtime(task_id) + + # Also clear legacy runtime fields from definition file (for backward compat cleanup) + def_data = load_json_or_exit(task_json_path, f"Task {task_id}", use_json=args.json) + def_data.pop("blocked_reason", None) + def_data.pop("completed_at", 
None) + def_data.pop("assignee", None) + def_data.pop("claimed_at", None) + def_data.pop("claim_note", None) + def_data.pop("evidence", None) + def_data["status"] = "todo" # Keep in sync for backward compat + def_data["updated_at"] = now_iso() + atomic_write_json(task_json_path, def_data) # Clear evidence section from spec markdown clear_task_evidence(task_id) @@ -3016,23 +3297,28 @@ def cmd_task_reset(args: argparse.Namespace) -> None: dep_path = flow_dir / TASKS_DIR / f"{dep_id}.json" if not dep_path.exists(): continue - dep_data = load_json(dep_path) + # Load merged state for dependent + dep_data = load_task_with_state(dep_id, use_json=args.json) dep_status = dep_data.get("status", "todo") # Skip in_progress and already todo if dep_status == "in_progress" or dep_status == "todo": continue - dep_data["status"] = "todo" - dep_data["updated_at"] = now_iso() - dep_data.pop("blocked_reason", None) - dep_data.pop("completed_at", None) - dep_data.pop("assignee", None) - dep_data.pop("claimed_at", None) - dep_data.pop("claim_note", None) - dep_data.pop("evidence", None) - - atomic_write_json(dep_path, dep_data) + # Reset runtime state for dependent (overwrite, not merge) + reset_task_runtime(dep_id) + + # Also clear legacy fields from definition + dep_def = load_json(dep_path) + dep_def.pop("blocked_reason", None) + dep_def.pop("completed_at", None) + dep_def.pop("assignee", None) + dep_def.pop("claimed_at", None) + dep_def.pop("claim_note", None) + dep_def.pop("evidence", None) + dep_def["status"] = "todo" + dep_def["updated_at"] = now_iso() + atomic_write_json(dep_path, dep_def) clear_task_evidence(dep_id) reset_ids.append(dep_id) @@ -3119,7 +3405,7 @@ def cmd_ready(args: argparse.Namespace) -> None: # MU-2: Get current actor for display (marks your tasks) current_actor = get_actor() - # Get all tasks for epic + # Get all tasks for epic (with merged runtime state) tasks_dir = flow_dir / TASKS_DIR if not tasks_dir.exists(): error_exit( @@ -3128,9 +3414,10 @@ def 
cmd_ready(args: argparse.Namespace) -> None: ) tasks = {} for task_file in tasks_dir.glob(f"{args.epic}.*.json"): - task_data = normalize_task( - load_json_or_exit(task_file, f"Task {task_file.stem}", use_json=args.json) - ) + task_id = task_file.stem + if "." not in task_id: + continue + task_data = load_task_with_state(task_id, use_json=args.json) if "id" not in task_data: continue # Skip artifact files (GH-21) tasks[task_data["id"]] = task_data @@ -3321,11 +3608,11 @@ def sort_key(t: dict) -> tuple[int, int]: tasks: dict[str, dict] = {} for task_file in tasks_dir.glob(f"{epic_id}.*.json"): - task_data = normalize_task( - load_json_or_exit( - task_file, f"Task {task_file.stem}", use_json=args.json - ) - ) + task_id = task_file.stem + if "." not in task_id: + continue + # Load task with merged runtime state + task_data = load_task_with_state(task_id, use_json=args.json) if "id" not in task_data: continue # Skip artifact files (GH-21) tasks[task_data["id"]] = task_data @@ -3411,77 +3698,85 @@ def cmd_start(args: argparse.Namespace) -> None: f"Invalid task ID: {args.id}. Expected format: fn-N.M or fn-N-xxx.M", use_json=args.json ) - flow_dir = get_flow_dir() - task_path = flow_dir / TASKS_DIR / f"{args.id}.json" + # Load task definition for dependency info (outside lock) + # Normalize to handle legacy "deps" field + task_def = normalize_task(load_task_definition(args.id, use_json=args.json)) + depends_on = task_def.get("depends_on", []) or [] - task_data = load_json_or_exit(task_path, f"Task {args.id}", use_json=args.json) + # Validate all dependencies are done (outside lock - this is read-only check) + if not args.force: + for dep in depends_on: + dep_data = load_task_with_state(dep, use_json=args.json) + if dep_data["status"] != "done": + error_exit( + f"Cannot start task {args.id}: dependency {dep} is '{dep_data['status']}', not 'done'. 
" + f"Complete dependencies first or use --force to override.", + use_json=args.json, + ) - # MU-2: Soft-claim semantics current_actor = get_actor() - existing_assignee = task_data.get("assignee") - - # Cannot start done task - if task_data["status"] == "done": - error_exit( - f"Cannot start task {args.id}: status is 'done'.", use_json=args.json - ) - - # Blocked requires --force - if task_data["status"] == "blocked" and not args.force: - error_exit( - f"Cannot start task {args.id}: status is 'blocked'. Use --force to override.", - use_json=args.json, - ) + store = get_state_store() + + # Atomic claim: validation + write inside lock to prevent race conditions + with store.lock_task(args.id): + # Re-load runtime state inside lock for accurate check + runtime = store.load_runtime(args.id) + if runtime is None: + # Backward compat: extract from definition + runtime = {k: task_def[k] for k in RUNTIME_FIELDS if k in task_def} + if not runtime: + runtime = {"status": "todo"} + + status = runtime.get("status", "todo") + existing_assignee = runtime.get("assignee") + + # Cannot start done task + if status == "done": + error_exit( + f"Cannot start task {args.id}: status is 'done'.", use_json=args.json + ) - # Check if claimed by someone else (unless --force) - if not args.force and existing_assignee and existing_assignee != current_actor: - error_exit( - f"Cannot start task {args.id}: claimed by '{existing_assignee}'. " - f"Use --force to override.", - use_json=args.json, - ) + # Blocked requires --force + if status == "blocked" and not args.force: + error_exit( + f"Cannot start task {args.id}: status is 'blocked'. 
Use --force to override.", + use_json=args.json, + ) - # Validate task is in todo status (unless --force or resuming own task) - if not args.force and task_data["status"] != "todo": - # Allow resuming your own in_progress task - if not ( - task_data["status"] == "in_progress" and existing_assignee == current_actor - ): + # Check if claimed by someone else (unless --force) + if not args.force and existing_assignee and existing_assignee != current_actor: error_exit( - f"Cannot start task {args.id}: status is '{task_data['status']}', expected 'todo'. " + f"Cannot start task {args.id}: claimed by '{existing_assignee}'. " f"Use --force to override.", use_json=args.json, ) - # Validate all dependencies are done (unless --force) - if not args.force: - for dep in task_data.get("depends_on", []): - dep_path = flow_dir / TASKS_DIR / f"{dep}.json" - dep_data = load_json_or_exit( - dep_path, f"Dependency {dep}", use_json=args.json - ) - if dep_data["status"] != "done": + # Validate task is in todo status (unless --force or resuming own task) + if not args.force and status != "todo": + # Allow resuming your own in_progress task + if not (status == "in_progress" and existing_assignee == current_actor): error_exit( - f"Cannot start task {args.id}: dependency {dep} is '{dep_data['status']}', not 'done'. " - f"Complete dependencies first or use --force to override.", + f"Cannot start task {args.id}: status is '{status}', expected 'todo'. 
" + f"Use --force to override.", use_json=args.json, ) - # Set status and claim fields - task_data["status"] = "in_progress" - if not existing_assignee: - task_data["assignee"] = current_actor - task_data["claimed_at"] = now_iso() - if args.note: - task_data["claim_note"] = args.note - elif args.force and existing_assignee and existing_assignee != current_actor: - # Force override: note the takeover - task_data["assignee"] = current_actor - task_data["claimed_at"] = now_iso() - if not args.note: - task_data["claim_note"] = f"Taken over from {existing_assignee}" - task_data["updated_at"] = now_iso() - atomic_write_json(task_path, task_data) + # Build runtime state updates + runtime_updates = {**runtime, "status": "in_progress", "updated_at": now_iso()} + if not existing_assignee: + runtime_updates["assignee"] = current_actor + runtime_updates["claimed_at"] = now_iso() + if args.note: + runtime_updates["claim_note"] = args.note + elif args.force and existing_assignee and existing_assignee != current_actor: + # Force override: note the takeover + runtime_updates["assignee"] = current_actor + runtime_updates["claimed_at"] = now_iso() + if not args.note: + runtime_updates["claim_note"] = f"Taken over from {existing_assignee}" + + # Write inside lock + store.save_runtime(args.id, runtime_updates) # NOTE: We no longer update epic timestamp on task start/done. # Epic timestamp only changes on epic-level operations (set-plan, close). 
@@ -3512,11 +3807,10 @@ def cmd_done(args: argparse.Namespace) -> None: ) flow_dir = get_flow_dir() - task_json_path = flow_dir / TASKS_DIR / f"{args.id}.json" task_spec_path = flow_dir / TASKS_DIR / f"{args.id}.md" - # Load task JSON (fail early before any writes) - task_data = load_json_or_exit(task_json_path, f"Task {args.id}", use_json=args.json) + # Load task with merged runtime state (fail early before any writes) + task_data = load_task_with_state(args.id, use_json=args.json) # MU-2: Require in_progress status (unless --force) if not args.force and task_data["status"] != "in_progress": @@ -3606,13 +3900,11 @@ def to_list(val: Any) -> list: except ValueError as e: error_exit(str(e), use_json=args.json) - # All validation passed - now write (spec, task) + # All validation passed - now write (spec to tracked file, runtime to state-dir) atomic_write(task_spec_path, updated_spec) - task_data["status"] = "done" - task_data["updated_at"] = now_iso() - task_data["evidence"] = evidence # Store raw evidence dict for programmatic access - atomic_write_json(task_json_path, task_data) + # Write runtime state to state-dir (not definition file) + save_task_runtime(args.id, {"status": "done", "evidence": evidence}) # NOTE: We no longer update epic timestamp on task done. # This reduces merge conflicts in multi-user scenarios. 
@@ -3638,12 +3930,10 @@ def cmd_block(args: argparse.Namespace) -> None: ) flow_dir = get_flow_dir() - task_json_path = flow_dir / TASKS_DIR / f"{args.id}.json" task_spec_path = flow_dir / TASKS_DIR / f"{args.id}.md" - task_data = normalize_task( - load_json_or_exit(task_json_path, f"Task {args.id}", use_json=args.json) - ) + # Load task with merged runtime state + task_data = load_task_with_state(args.id, use_json=args.json) if task_data["status"] == "done": error_exit( @@ -3672,9 +3962,8 @@ def cmd_block(args: argparse.Namespace) -> None: atomic_write(task_spec_path, updated_spec) - task_data["status"] = "blocked" - task_data["updated_at"] = now_iso() - atomic_write_json(task_json_path, task_data) + # Write runtime state to state-dir (not definition file) + save_task_runtime(args.id, {"status": "blocked", "blocked_reason": reason}) if args.json: json_output( @@ -3684,6 +3973,97 @@ def cmd_block(args: argparse.Namespace) -> None: print(f"Task {args.id} blocked") +def cmd_state_path(args: argparse.Namespace) -> None: + """Show resolved state directory path.""" + state_dir = get_state_dir() + + if args.task: + if not is_task_id(args.task): + error_exit( + f"Invalid task ID: {args.task}. Expected format: fn-N.M or fn-N-xxx.M", + use_json=args.json, + ) + state_path = state_dir / "tasks" / f"{args.task}.state.json" + if args.json: + json_output({"state_dir": str(state_dir), "task_state_path": str(state_path)}) + else: + print(state_path) + else: + if args.json: + json_output({"state_dir": str(state_dir)}) + else: + print(state_dir) + + +def cmd_migrate_state(args: argparse.Namespace) -> None: + """Migrate runtime state from definition files to state-dir.""" + if not ensure_flow_exists(): + error_exit( + ".flow/ does not exist. 
Run 'flowctl init' first.", use_json=args.json + ) + + flow_dir = get_flow_dir() + tasks_dir = flow_dir / TASKS_DIR + store = get_state_store() + + migrated = [] + skipped = [] + + if not tasks_dir.exists(): + if args.json: + json_output({"migrated": [], "skipped": [], "message": "No tasks directory"}) + else: + print("No tasks directory found.") + return + + for task_file in tasks_dir.glob("fn-*.json"): + task_id = task_file.stem + if "." not in task_id: + continue # Skip non-task files + + # Check if state file already exists + if store.load_runtime(task_id) is not None: + skipped.append(task_id) + continue + + # Load definition and extract runtime fields + try: + definition = load_json(task_file) + except Exception: + skipped.append(task_id) + continue + + runtime = {k: definition[k] for k in RUNTIME_FIELDS if k in definition} + if not runtime or runtime.get("status") == "todo": + # No runtime state to migrate + skipped.append(task_id) + continue + + # Write runtime state + store.save_runtime(task_id, runtime) + migrated.append(task_id) + + # Optionally clean definition file (only with --clean flag) + if args.clean: + clean_def = {k: v for k, v in definition.items() if k not in RUNTIME_FIELDS} + atomic_write_json(task_file, clean_def) + + if args.json: + json_output({ + "migrated": migrated, + "skipped": skipped, + "cleaned": args.clean, + }) + else: + print(f"Migrated: {len(migrated)} tasks") + if migrated: + for t in migrated: + print(f" {t}") + print(f"Skipped: {len(skipped)} tasks (already migrated or no state)") + if args.clean: + print("Definition files cleaned (runtime fields removed)") + + def cmd_epic_close(args: argparse.Namespace) -> None: """Close an epic (all tasks must be done).""" if not ensure_flow_exists(): @@ -3702,7 +4082,7 @@ def cmd_epic_close(args: argparse.Namespace) -> None: if not epic_path.exists(): error_exit(f"Epic {args.id} not found", use_json=args.json) - # Check all tasks are done + # Check all tasks are done (with merged runtime 
state) tasks_dir = flow_dir / TASKS_DIR if not tasks_dir.exists(): error_exit( @@ -3711,9 +4091,10 @@ def cmd_epic_close(args: argparse.Namespace) -> None: ) incomplete = [] for task_file in tasks_dir.glob(f"{args.id}.*.json"): - task_data = load_json_or_exit( - task_file, f"Task {task_file.stem}", use_json=args.json - ) + task_id = task_file.stem + if "." not in task_id: + continue + task_data = load_task_with_state(task_id, use_json=args.json) if task_data["status"] != "done": incomplete.append(f"{task_data['id']} ({task_data['status']})") @@ -3805,25 +4186,26 @@ def validate_epic( if not dep_path.exists(): errors.append(f"Epic {epic_id}: depends_on_epics missing epic {dep}") - # Get all tasks + # Get all tasks (with merged runtime state for accurate status) tasks_dir = flow_dir / TASKS_DIR tasks = {} if tasks_dir.exists(): for task_file in tasks_dir.glob(f"{epic_id}.*.json"): - task_data = normalize_task( - load_json_or_exit( - task_file, f"Task {task_file.stem}", use_json=use_json - ) - ) + task_id = task_file.stem + if "." 
not in task_id: + continue # Skip non-task files + # Use merged state to get accurate status + task_data = load_task_with_state(task_id, use_json=use_json) if "id" not in task_data: continue # Skip artifact files (GH-21) tasks[task_data["id"]] = task_data # Validate each task for task_id, task in tasks.items(): - # Validate status - if task.get("status") not in TASK_STATUS: - errors.append(f"Task {task_id}: invalid status '{task.get('status')}'") + # Validate status (use merged state which defaults to "todo" if missing) + status = task.get("status", "todo") + if status not in TASK_STATUS: + errors.append(f"Task {task_id}: invalid status '{status}'") # Check task spec exists task_spec_path = flow_dir / TASKS_DIR / f"{task_id}.md" @@ -4483,12 +4865,24 @@ def cmd_codex_plan_review(args: argparse.Namespace) -> None: epic_spec = epic_spec_path.read_text(encoding="utf-8") + # Load task specs for this epic + tasks_dir = flow_dir / TASKS_DIR + task_specs_parts = [] + for task_file in sorted(tasks_dir.glob(f"{epic_id}.*.md")): + task_id = task_file.stem + task_content = task_file.read_text(encoding="utf-8") + task_specs_parts.append(f"### {task_id}\n\n{task_content}") + + task_specs = "\n\n---\n\n".join(task_specs_parts) if task_specs_parts else "" + # Get context hints (from main branch for plans) base_branch = args.base if hasattr(args, "base") and args.base else "main" context_hints = gather_context_hints(base_branch) # Build prompt - prompt = build_review_prompt("plan", epic_spec, context_hints) + prompt = build_review_prompt( + "plan", epic_spec, context_hints, task_specs=task_specs + ) # Check for existing session in receipt (indicates re-review) receipt_path = args.receipt if hasattr(args, "receipt") and args.receipt else None @@ -4504,10 +4898,13 @@ def cmd_codex_plan_review(args: argparse.Namespace) -> None: except (json.JSONDecodeError, Exception): pass - # For re-reviews, prepend instruction to re-read spec file + # For re-reviews, prepend instruction to re-read 
spec files if is_rereview: - # For plan reviews, the spec file is what changes + # For plan reviews, epic spec and task specs may change spec_files = [str(epic_spec_path)] + # Add task spec files + for task_file in sorted(tasks_dir.glob(f"{epic_id}.*.md")): + spec_files.append(str(task_file)) rereview_preamble = build_rereview_preamble(spec_files, "plan") prompt = rereview_preamble + prompt @@ -4680,12 +5077,24 @@ def cmd_opencode_plan_review(args: argparse.Namespace) -> None: flow_dir = get_flow_dir() epic_json_path = flow_dir / EPICS_DIR / f"{epic_id}.json" epic_spec_path = flow_dir / SPECS_DIR / f"{epic_id}.md" - plan_summary = epic_json_path.read_text(encoding="utf-8") if epic_json_path.exists() else "" - plan_spec = epic_spec_path.read_text(encoding="utf-8") if epic_spec_path.exists() else "" + plan_summary = ( + epic_json_path.read_text(encoding="utf-8") if epic_json_path.exists() else "" + ) + plan_spec = ( + epic_spec_path.read_text(encoding="utf-8") if epic_spec_path.exists() else "" + ) + + task_specs_parts = [] + for task_file in sorted((flow_dir / TASKS_DIR).glob(f"{epic_id}.*.md")): + task_id = task_file.stem + task_content = task_file.read_text(encoding="utf-8") + task_specs_parts.append(f"### {task_id}\n\n{task_content}") + task_specs = "\n\n---\n\n".join(task_specs_parts) if task_specs_parts else "" prompt = build_opencode_plan_prompt( plan_summary=plan_summary, plan_spec=plan_spec, + task_specs=task_specs, epic_id=epic_id, focus=focus, ) @@ -4765,25 +5174,34 @@ def cmd_checkpoint_save(args: argparse.Namespace) -> None: if spec_path.exists(): epic_spec = spec_path.read_text(encoding="utf-8") - # Load all tasks for this epic + # Load all tasks for this epic (including runtime state) tasks_dir = flow_dir / TASKS_DIR + store = get_state_store() tasks = [] if tasks_dir.exists(): for task_file in sorted(tasks_dir.glob(f"{epic_id}.*.json")): + task_id = task_file.stem + if "." 
not in task_id: + continue # Skip non-task files task_data = load_json(task_file) - task_spec_path = tasks_dir / f"{task_file.stem}.md" + task_spec_path = tasks_dir / f"{task_id}.md" task_spec = "" if task_spec_path.exists(): task_spec = task_spec_path.read_text(encoding="utf-8") - tasks.append({ - "id": task_file.stem, - "data": task_data, - "spec": task_spec, - }) + # Include runtime state in checkpoint + runtime_state = store.load_runtime(task_id) + tasks.append( + { + "id": task_id, + "data": task_data, + "spec": task_spec, + "runtime": runtime_state, # May be None if no state file + } + ) # Build checkpoint checkpoint = { - "schema_version": 1, + "schema_version": 2, # Bumped for runtime state support "created_at": now_iso(), "epic_id": epic_id, "epic": { @@ -4852,8 +5270,9 @@ def cmd_checkpoint_restore(args: argparse.Namespace) -> None: if checkpoint["epic"]["spec"]: atomic_write(spec_path, checkpoint["epic"]["spec"]) - # Restore tasks + # Restore tasks (including runtime state) tasks_dir = flow_dir / TASKS_DIR + store = get_state_store() restored_tasks = [] for task in checkpoint["tasks"]: task_id = task["id"] @@ -4866,6 +5285,17 @@ def cmd_checkpoint_restore(args: argparse.Namespace) -> None: if task["spec"]: atomic_write(task_spec_path, task["spec"]) + + # Restore runtime state from checkpoint (schema_version >= 2) + runtime = task.get("runtime") + if runtime is not None: + # Restore saved runtime state + with store.lock_task(task_id): + store.save_runtime(task_id, runtime) + else: + # No runtime in checkpoint - delete any existing runtime state + delete_task_runtime(task_id) + restored_tasks.append(task_id) if args.json: @@ -5070,6 +5500,26 @@ def main() -> None: p_status.add_argument("--json", action="store_true", help="JSON output") p_status.set_defaults(func=cmd_status) + p_state_path = subparsers.add_parser( + "state-path", help="Show resolved state directory path" + ) + p_state_path.add_argument( + "--task", help="Task ID (fn-N.M) to show state file 
path" + ) + p_state_path.add_argument("--json", action="store_true", help="JSON output") + p_state_path.set_defaults(func=cmd_state_path) + + p_migrate_state = subparsers.add_parser( + "migrate-state", help="Migrate runtime state from definition files to state-dir" + ) + p_migrate_state.add_argument( + "--clean", + action="store_true", + help="Remove runtime fields from definition files", + ) + p_migrate_state.add_argument("--json", action="store_true", help="JSON output") + p_migrate_state.set_defaults(func=cmd_migrate_state) + # config p_config = subparsers.add_parser("config", help="Config commands") config_sub = p_config.add_subparsers(dest="config_cmd", required=True) @@ -5206,9 +5656,12 @@ def main() -> None: p_task_acc.set_defaults(func=cmd_task_set_acceptance) p_task_set_spec = task_sub.add_parser( - "set-spec", help="Set description and/or acceptance in one call" + "set-spec", help="Set task spec (full file or sections)" ) p_task_set_spec.add_argument("id", help="Task ID (fn-N.M)") + p_task_set_spec.add_argument( + "--file", help="Full spec file (use '-' for stdin)" + ) p_task_set_spec.add_argument( "--description", help="Description file (use '-' for stdin)" ) diff --git a/.opencode/skill/flow-next-opencode-plan-review/SKILL.md b/.opencode/skill/flow-next-opencode-plan-review/SKILL.md index f065cdf..cce654a 100644 --- a/.opencode/skill/flow-next-opencode-plan-review/SKILL.md +++ b/.opencode/skill/flow-next-opencode-plan-review/SKILL.md @@ -140,7 +140,9 @@ $FLOWCTL opencode plan-review "$EPIC_ID" --receipt "$RECEIPT_PATH" # Output includes VERDICT=SHIP|NEEDS_WORK|MAJOR_RETHINK ``` -On NEEDS_WORK: fix plan via `$FLOWCTL epic set-plan`, then re-run (receipt enables session continuity). +On NEEDS_WORK: fix plan via `$FLOWCTL epic set-plan` AND sync affected task specs via `$FLOWCTL task set-spec`, then re-run (receipt enables session continuity). + +**Note**: `opencode plan-review` automatically includes task specs in the review prompt. 
### RepoPrompt Backend @@ -153,8 +155,12 @@ $FLOWCTL cat eval "$($FLOWCTL rp setup-review --repo-root "$REPO_ROOT" --summary "Review plan for <epic-id>: <title>")" # Outputs W=<window> T=<tab>. If fails → RETRY -# Step 3: Augment selection +# Step 3: Augment selection - add epic AND task specs $FLOWCTL rp select-add --window "$W" --tab "$T" .flow/specs/<epic-id>.md +# Add all task specs for this epic +for task_spec in .flow/tasks/${EPIC_ID}.*.md; do + [[ -f "$task_spec" ]] && $FLOWCTL rp select-add --window "$W" --tab "$T" "$task_spec" +done # Step 4: Build and send review prompt (see workflow.md) $FLOWCTL rp chat-send --window "$W" --tab "$T" --message-file /tmp/review-prompt.md --new-chat --chat-name "Plan Review: <epic-id>" @@ -169,10 +175,22 @@ $FLOWCTL epic set-plan-review-status --status ship --json If verdict is NEEDS_WORK, loop internally until SHIP: 1. **Parse issues** from reviewer feedback -2. **Fix plan** via `$FLOWCTL epic set-plan --file /tmp/updated-plan.md` -3. **Re-review**: +2. **Fix epic spec** via `$FLOWCTL epic set-plan --file /tmp/updated-plan.md` +3. **Sync affected task specs** - If epic changes affect task specs, update them: + ```bash + $FLOWCTL task set-spec <task-id> --file - --json <<'EOF' + <updated task spec markdown> + EOF + ``` + Task specs need updating when epic changes affect: + - State/enum values referenced in tasks + - Acceptance criteria that tasks implement + - Approach/design decisions tasks depend on + - Lock/retry/error handling semantics + - API signatures or type definitions +4. **Re-review**: - **OpenCode**: re-run reviewer subagent with updated plan - **RP**: `$FLOWCTL rp chat-send --window "$W" --tab "$T" --message-file /tmp/re-review.md` (NO `--new-chat`) -4. **Repeat** until `SHIP` +5. **Repeat** until `SHIP` **CRITICAL**: For RP, re-reviews must stay in the SAME chat so reviewer has context. Only use `--new-chat` on the FIRST review.
diff --git a/.opencode/skill/flow-next-opencode-plan-review/workflow.md b/.opencode/skill/flow-next-opencode-plan-review/workflow.md index 7407380..86627c6 100644 --- a/.opencode/skill/flow-next-opencode-plan-review/workflow.md +++ b/.opencode/skill/flow-next-opencode-plan-review/workflow.md @@ -158,14 +158,19 @@ Builder selects context automatically. Review and add must-haves: # See what builder selected $FLOWCTL rp select-get --window "$W" --tab "$T" -# Always add the plan spec +# Always add the epic spec $FLOWCTL rp select-add --window "$W" --tab "$T" .flow/specs/.md +# Always add ALL task specs for this epic +for task_spec in .flow/tasks/${EPIC_ID}.*.md; do + [[ -f "$task_spec" ]] && $FLOWCTL rp select-add --window "$W" --tab "$T" "$task_spec" +done + # Add PRD/architecture docs if found $FLOWCTL rp select-add --window "$W" --tab "$T" docs/prd.md ``` -**Why this matters:** Chat only sees selected files. +**Why this matters:** Chat only sees selected files. Reviewer needs both epic spec AND task specs to check for consistency. --- @@ -199,6 +204,18 @@ If you cannot find ``, ask for the files to be re-attached before ## Review Focus [USER'S FOCUS AREAS] +## Review Scope + +You are reviewing: +1. **Epic spec** - The high-level plan +2. **Task specs** - Individual task breakdowns + +**CRITICAL**: Check for consistency between epic and tasks. Flag if: +- Task specs contradict or miss epic requirements +- Task acceptance criteria don't align with epic acceptance criteria +- Task approaches would need to change based on epic design decisions +- Epic mentions states/enums/types that tasks don't account for + ## Review Criteria Conduct a John Carmack-level review: @@ -210,6 +227,7 @@ Conduct a John Carmack-level review: 5. **Risks** - Blockers identified? Security gaps? Mitigation? 6. **Scope** - Right-sized? Over/under-engineering? 7. **Testability** - How will we verify this works? +8. **Consistency** - Do task specs align with epic spec? 
## Output Format @@ -291,7 +309,20 @@ If verdict is NEEDS_WORK: $FLOWCTL checkpoint restore --epic <epic-id> --json ``` -4. **Re-review with fix summary** (only AFTER step 3): +4. **Sync affected task specs** - If epic changes affect task specs, update them: + ```bash + $FLOWCTL task set-spec <task-id> --file - --json <<'EOF' + <updated task spec markdown> + EOF + ``` + Task specs need updating when epic changes affect: + - State/enum values referenced in tasks + - Acceptance criteria that tasks implement + - Approach/design decisions tasks depend on + - Lock/retry/error handling semantics + - API signatures or type definitions + +5. **Re-review with fix summary** (only AFTER steps 3-4): **IMPORTANT**: Do NOT re-add files already in the selection. RepoPrompt auto-refreshes file contents on every message. Only use `select-add` for NEW files created during fixes: @@ -315,12 +346,14 @@ If verdict is NEEDS_WORK: $FLOWCTL rp chat-send --window "$W" --tab "$T" --message-file /tmp/re-review.md ``` -5. **Repeat** until Ship +6. **Repeat** until Ship **Anti-pattern**: Re-adding already-selected files before re-review. RP auto-refreshes; re-adding can cause issues. **Anti-pattern**: Re-reviewing without calling `epic set-plan` first. This wastes reviewer time and loops forever. +**Anti-pattern**: Updating epic spec without syncing affected task specs. Causes reviewer to flag consistency issues again. + --- ## Failure Recovery diff --git a/.opencode/skill/flow-next-opencode-setup/templates/usage.md b/.opencode/skill/flow-next-opencode-setup/templates/usage.md index 20a8942..54d89fd 100644 --- a/.opencode/skill/flow-next-opencode-setup/templates/usage.md +++ b/.opencode/skill/flow-next-opencode-setup/templates/usage.md @@ -22,6 +22,8 @@ Task tracking for AI agents. All state lives in `.flow/`. └── meta.json # Project metadata ``` +Runtime state (status, assignee, evidence) is stored in `.git/flow-state/` (not tracked). + ## IDs - Epics: `fn-N` (e.g., fn-1, fn-2) @@ -46,6 +48,8 @@ Task tracking for AI agents.
All state lives in `.flow/`. # Status .flow/bin/flowctl ready --epic fn-1 # What's ready to work on .flow/bin/flowctl validate --all # Check structure +.flow/bin/flowctl state-path # Show runtime state directory +.flow/bin/flowctl migrate-state --clean # Optional migration + cleanup # Create .flow/bin/flowctl epic create --title "..." diff --git a/.opencode/version b/.opencode/version index abd4105..3a4036f 100644 --- a/.opencode/version +++ b/.opencode/version @@ -1 +1 @@ -0.2.4 +0.2.5 diff --git a/CHANGELOG.md b/CHANGELOG.md index eb326f7..9f2762c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,18 @@ ## Unreleased +**Restores parity with upstream Claude Code plugin (flow-next 0.17.0 + 0.17.1).** + +### Ported from upstream 0.17.0 → 0.17.1 + +- **Runtime state store** — Task runtime fields move to `.git/flow-state/` (worktree-safe) +- **StateStore with locking** — fcntl-based per-task locks for race-safe updates +- **New commands** — `flowctl state-path` + `flowctl migrate-state [--clean]` +- **Checkpoint schema v2** — Includes runtime state in save/restore +- **Plan review includes task specs** — Both RP and OpenCode review epic + task specs for consistency +- **Plan review consistency checks** — New criteria + anti-pattern: update epic without syncing tasks +- **Task spec full replacement** — `flowctl task set-spec --file` (supports stdin) + **Restores parity with upstream Claude Code plugin (flow-next 0.12.10 + 0.13.0).** ### Planning Workflow Changes (ported from upstream 0.13.0) diff --git a/README.md b/README.md index aeb2630..a6e465c 100644 --- a/README.md +++ b/README.md @@ -288,6 +288,39 @@ Teams can work in parallel branches without coordination servers: * **Actor resolution**: Auto-detects from git email, `FLOW_ACTOR` env, or `$USER` * **Local validation**: `flowctl validate --all` catches issues before commit +### Parallel Worktrees + +Multiple agents can work simultaneously in different git worktrees, sharing task state: + +```bash +# Main 
repo +git worktree add ../feature-a fn-1-branch +git worktree add ../feature-b fn-2-branch + +# Both worktrees share task state via .git/flow-state/ +cd ../feature-a && flowctl start fn-1.1 # Agent A claims task +cd ../feature-b && flowctl start fn-2.1 # Agent B claims different task +``` + +**How it works:** +- Runtime state (status, assignee, evidence) lives in `.git/flow-state/` — shared across worktrees +- Definition files (title, description, deps) stay in `.flow/` — tracked in git +- Per-task `fcntl` locking prevents race conditions + +**State directory resolution:** +1. `FLOW_STATE_DIR` env (explicit override) +2. `git rev-parse --git-common-dir` + `/flow-state` (worktree-aware) +3. `.flow/state` fallback (non-git or old git) + +**Commands:** +```bash +flowctl state-path # Show resolved state directory +flowctl migrate-state # Migrate existing repo (optional) +flowctl migrate-state --clean # Migrate + remove runtime from tracked files +``` + +**Backward compatible** — existing repos work without migration. The merged read path automatically falls back to definition files when no state file exists. + ### Zero Dependencies Everything is bundled: @@ -515,6 +548,8 @@ This creates a complete audit trail: what was planned, what was done, how it was └── decisions.md ``` + +Runtime state (status, assignee, evidence) is stored in `.git/flow-state/` (not tracked). + ### ID Format * **Epic**: `fn-N-xxx` where `xxx` is a 3-character alphanumeric suffix @@ -540,6 +575,7 @@ flowctl epic close fn-1 flowctl task create --epic fn-1 --title "..."
--deps fn-1.2,fn-1.3 --priority 10 flowctl task set-description fn-1.1 --file desc.md flowctl task set-acceptance fn-1.1 --file accept.md +flowctl task set-spec fn-1.1 --file spec.md flowctl dep add fn-1.3 fn-1.2 @@ -555,6 +591,9 @@ flowctl cat fn-1 flowctl validate --epic fn-1 flowctl validate --all +flowctl state-path +flowctl migrate-state --clean + flowctl review-backend # Get configured review backend (ASK if not set) flowctl config set review.backend opencode # Set default backend ``` diff --git a/docs/flowctl.md b/docs/flowctl.md index 1c8c0e7..81a47f2 100644 --- a/docs/flowctl.md +++ b/docs/flowctl.md @@ -7,7 +7,7 @@ CLI for `.flow/` task tracking. Agents must use flowctl for all writes. ## Available Commands ``` -init, detect, epic, task, dep, show, epics, tasks, list, cat, ready, next, start, done, block, validate, config, memory, prep-chat, rp, opencode, codex, checkpoint, status +init, detect, epic, task, dep, show, epics, tasks, list, cat, ready, next, start, done, block, validate, config, memory, prep-chat, rp, opencode, codex, checkpoint, status, state-path, migrate-state ``` ## Multi-User Safety @@ -21,6 +21,23 @@ Works out of the box for parallel branches. No setup required. **Optional**: Add CI gate with `docs/ci-workflow-example.yml` to block bad PRs. +## Runtime State (Worktrees) + +Runtime task state (status, assignee, evidence) lives in `.git/flow-state/` and is shared across worktrees. +Definition files (title, deps, specs) remain in `.flow/` and are tracked in git. + +State directory resolution: +1. `FLOW_STATE_DIR` env (override) +2. `git rev-parse --git-common-dir` + `/flow-state` (worktree-aware) +3. `.flow/state` fallback (non-git) + +Useful commands: +```bash +flowctl state-path # Show resolved state directory +flowctl migrate-state # Migrate existing repo (optional) +flowctl migrate-state --clean # Migrate + remove runtime from tracked files +``` + ## File Structure ``` @@ -37,6 +54,8 @@ Works out of the box for parallel branches.
No setup required. └── usage.md # (optional) CLI reference via /flow-next:setup ``` +Runtime state (status, assignee, evidence) is stored in `.git/flow-state/` (not tracked). + Flowctl accepts schema v1 and v2; new fields are optional and defaulted. New fields: @@ -147,13 +166,19 @@ flowctl task set-acceptance fn-1.2 --file accept.md [--json] ### task set-spec -Set description and acceptance in one call (fewer writes). +Set task spec (full replacement or section patches). +Full replacement: +```bash +flowctl task set-spec fn-1.2 --file spec.md [--json] +``` + +Section patches: ```bash flowctl task set-spec fn-1.2 --description desc.md --acceptance accept.md [--json] ``` -Both `--description` and `--acceptance` are optional; supply one or both. +`--description` and `--acceptance` are optional; supply one or both. Use `--file -` for stdin. ### task reset @@ -554,7 +579,7 @@ References: src/middleware.py:45 (calls authenticate), tests/test_auth.py:12 | Review | Criteria | |--------|----------| -| Plan | Completeness, Feasibility, Clarity, Architecture, Risks, Scope, Testability | +| Plan | Completeness, Feasibility, Clarity, Architecture, Risks, Scope, Testability, Consistency | | Impl | Correctness, Simplicity, DRY, Architecture, Edge Cases, Tests, Security | **Receipt schema (Ralph-compatible):** @@ -586,7 +611,7 @@ flowctl checkpoint restore --epic fn-1 [--json] flowctl checkpoint delete --epic fn-1 [--json] ``` -Checkpoints preserve full epic + task state. Useful when compaction occurs during plan-review cycles. +Checkpoints preserve full epic + task state (including runtime state). Useful when compaction occurs during plan-review cycles. ### status @@ -603,6 +628,35 @@ Output: Human-readable output shows epic/task counts and any active Ralph runs. +### state-path + +Show resolved runtime state directory (worktree-aware). 
+ +```bash +flowctl state-path [--json] +flowctl state-path --task fn-1.2 [--json] +``` + +Example output: +```json +{"success": true, "state_dir": "/repo/.git/flow-state", "task_state_path": "/repo/.git/flow-state/tasks/fn-1.2.state.json"} +``` + +### migrate-state + +Migrate runtime state from tracked task definitions into the shared state directory. + +```bash +flowctl migrate-state [--clean] [--json] +``` + +What it does: +1. Scans `.flow/tasks/*.json` for runtime fields +2. Writes runtime state to `.git/flow-state/tasks/*.state.json` +3. Optional `--clean` removes runtime fields from tracked task JSONs + +Backward compatible: repos work without migration. Use `--clean` only if you want a clean diff. + ## Ralph Receipts Review receipts are **not** managed by flowctl. They are written by the review skills when `REVIEW_RECEIPT_PATH` is set (Ralph sets this env var).