diff --git a/.claude/commands/deepwork_jobs.define.md b/.claude/commands/deepwork_jobs.define.md
index fe95ebd0..76a5bf45 100644
--- a/.claude/commands/deepwork_jobs.define.md
+++ b/.claude/commands/deepwork_jobs.define.md
@@ -17,7 +17,7 @@ hooks:
4. **Concise Summary**: Is the summary under 200 characters and descriptive?
5. **Rich Description**: Does the description provide enough context for future refinement?
6. **Valid Schema**: Does the job.yml follow the required schema (name, version, summary, steps)?
- 7. **File Created**: Has the job.yml file been created in `deepwork/[job_name]/job.yml`?
+ 7. **File Created**: Has the job.yml file been created in `.deepwork/jobs/[job_name]/job.yml`?
If ANY criterion is not met, continue working to address it.
If ALL criteria are satisfied, include `QUALITY_COMPLETE` in your response.
@@ -192,9 +192,9 @@ stop_hooks:
Only after you have complete understanding, create the `job.yml` file:
-**File Location**: `deepwork/[job_name]/job.yml`
+**File Location**: `.deepwork/jobs/[job_name]/job.yml`
-(Where `[job_name]` is the name of the NEW job you're creating, e.g., `deepwork/competitive_research/job.yml`)
+(Where `[job_name]` is the name of the NEW job you're creating, e.g., `.deepwork/jobs/competitive_research/job.yml`)
**Format**:
```yaml
@@ -357,12 +357,12 @@ User: Yes, that's perfect!
Claude: Great! Creating the job.yml specification now...
-[Creates deepwork/competitive_research/job.yml with the complete spec]
+[Creates .deepwork/jobs/competitive_research/job.yml with the complete spec]
✓ Job specification created!
**File created:**
-- deepwork/competitive_research/job.yml
+- .deepwork/jobs/competitive_research/job.yml
**Next step:**
Run `/deepwork_jobs.implement` to generate the instruction files for each step based on this specification.
@@ -396,9 +396,9 @@ Before creating the job.yml, ensure:
### job.yml
-The complete YAML specification file (example shown in Step 4 above).
+The complete YAML specification file (example shown in Step 5 above).
-**Location**: `deepwork/[job_name]/job.yml`
+**Location**: `.deepwork/jobs/[job_name]/job.yml`
(Where `[job_name]` is the name of the new job being created)
@@ -466,7 +466,7 @@ Verify the job.yml output meets ALL quality criteria before completing:
4. **Concise Summary**: Is the summary under 200 characters and descriptive?
5. **Rich Description**: Does the description provide enough context for future refinement?
6. **Valid Schema**: Does the job.yml follow the required schema (name, version, summary, steps)?
-7. **File Created**: Has the job.yml file been created in `deepwork/[job_name]/job.yml`?
+7. **File Created**: Has the job.yml file been created in `.deepwork/jobs/[job_name]/job.yml`?
If ANY criterion is not met, continue working to address it.
If ALL criteria are satisfied, include `QUALITY_COMPLETE` in your response.
diff --git a/.claude/commands/deepwork_jobs.implement.md b/.claude/commands/deepwork_jobs.implement.md
index 70d920f2..f9a026b8 100644
--- a/.claude/commands/deepwork_jobs.implement.md
+++ b/.claude/commands/deepwork_jobs.implement.md
@@ -80,7 +80,7 @@ Read the `job.yml` specification file and create all the necessary files to make
### Step 1: Read and Validate the Specification
1. **Locate the job.yml file**
- - Read `deepwork/[job_name]/job.yml` from the define step (Where `[job_name]` is the name of the new job that was created in the define step)
+ - Read `.deepwork/jobs/[job_name]/job.yml` from the define step (Where `[job_name]` is the name of the new job that was created in the define step)
- Parse the YAML content
2. **Validate the specification**
@@ -213,13 +213,9 @@ If a step in the job.yml has `stop_hooks` defined, the generated instruction fil
This alignment ensures the AI agent knows exactly what will be validated and can self-check before completing.
-### Step 4: Copy job.yml to Job Directory
+### Step 4: Verify job.yml Location
-Copy the validated `job.yml` from the work directory to `.deepwork/jobs/[job_name]/job.yml`:
-
-```bash
-cp deepwork/[job_name]/job.yml .deepwork/jobs/[job_name]/job.yml
-```
+Verify that `job.yml` is in the correct location at `.deepwork/jobs/[job_name]/job.yml`. The define step should have created it there. If for some reason it's not there, you may need to create or move it.
### Step 5: Sync Commands
diff --git a/.deepwork/jobs/deepwork_jobs/job.yml b/.deepwork/jobs/deepwork_jobs/job.yml
index 603664ff..87d9c3ac 100644
--- a/.deepwork/jobs/deepwork_jobs/job.yml
+++ b/.deepwork/jobs/deepwork_jobs/job.yml
@@ -43,7 +43,7 @@ steps:
4. **Concise Summary**: Is the summary under 200 characters and descriptive?
5. **Rich Description**: Does the description provide enough context for future refinement?
6. **Valid Schema**: Does the job.yml follow the required schema (name, version, summary, steps)?
- 7. **File Created**: Has the job.yml file been created in `deepwork/[job_name]/job.yml`?
+ 7. **File Created**: Has the job.yml file been created in `.deepwork/jobs/[job_name]/job.yml`?
If ANY criterion is not met, continue working to address it.
If ALL criteria are satisfied, include `QUALITY_COMPLETE` in your response.
diff --git a/.deepwork/jobs/deepwork_jobs/steps/define.md b/.deepwork/jobs/deepwork_jobs/steps/define.md
index c63da0fe..bb1165dd 100644
--- a/.deepwork/jobs/deepwork_jobs/steps/define.md
+++ b/.deepwork/jobs/deepwork_jobs/steps/define.md
@@ -133,9 +133,9 @@ stop_hooks:
Only after you have complete understanding, create the `job.yml` file:
-**File Location**: `deepwork/[job_name]/job.yml`
+**File Location**: `.deepwork/jobs/[job_name]/job.yml`
-(Where `[job_name]` is the name of the NEW job you're creating, e.g., `deepwork/competitive_research/job.yml`)
+(Where `[job_name]` is the name of the NEW job you're creating, e.g., `.deepwork/jobs/competitive_research/job.yml`)
**Format**:
```yaml
@@ -298,12 +298,12 @@ User: Yes, that's perfect!
Claude: Great! Creating the job.yml specification now...
-[Creates deepwork/competitive_research/job.yml with the complete spec]
+[Creates .deepwork/jobs/competitive_research/job.yml with the complete spec]
✓ Job specification created!
**File created:**
-- deepwork/competitive_research/job.yml
+- .deepwork/jobs/competitive_research/job.yml
**Next step:**
Run `/deepwork_jobs.implement` to generate the instruction files for each step based on this specification.
@@ -337,9 +337,9 @@ Before creating the job.yml, ensure:
### job.yml
-The complete YAML specification file (example shown in Step 4 above).
+The complete YAML specification file (example shown in Step 5 above).
-**Location**: `deepwork/[job_name]/job.yml`
+**Location**: `.deepwork/jobs/[job_name]/job.yml`
(Where `[job_name]` is the name of the new job being created)
diff --git a/.deepwork/jobs/deepwork_jobs/steps/implement.md b/.deepwork/jobs/deepwork_jobs/steps/implement.md
index 99c6fba4..b87e8c5a 100644
--- a/.deepwork/jobs/deepwork_jobs/steps/implement.md
+++ b/.deepwork/jobs/deepwork_jobs/steps/implement.md
@@ -11,7 +11,7 @@ Read the `job.yml` specification file and create all the necessary files to make
### Step 1: Read and Validate the Specification
1. **Locate the job.yml file**
- - Read `deepwork/[job_name]/job.yml` from the define step (Where `[job_name]` is the name of the new job that was created in the define step)
+ - Read `.deepwork/jobs/[job_name]/job.yml` from the define step (Where `[job_name]` is the name of the new job that was created in the define step)
- Parse the YAML content
2. **Validate the specification**
@@ -144,13 +144,9 @@ If a step in the job.yml has `stop_hooks` defined, the generated instruction fil
This alignment ensures the AI agent knows exactly what will be validated and can self-check before completing.
-### Step 4: Copy job.yml to Job Directory
+### Step 4: Verify job.yml Location
-Copy the validated `job.yml` from the work directory to `.deepwork/jobs/[job_name]/job.yml`:
-
-```bash
-cp deepwork/[job_name]/job.yml .deepwork/jobs/[job_name]/job.yml
-```
+Verify that `job.yml` is in the correct location at `.deepwork/jobs/[job_name]/job.yml`. The define step should have created it there. If for some reason it's not there, you may need to create or move it.
### Step 5: Sync Commands
diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml
index 68c78a32..ac8cadd3 100644
--- a/.github/workflows/validate.yml
+++ b/.github/workflows/validate.yml
@@ -4,18 +4,12 @@ on:
pull_request:
branches: ["*"]
-permissions:
- contents: write
-
jobs:
- format:
+ tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- with:
- ref: ${{ github.head_ref }}
- token: ${{ secrets.GITHUB_TOKEN }}
- name: Install uv
uses: astral-sh/setup-uv@v4
@@ -30,56 +24,10 @@ jobs:
- name: Install dependencies
run: uv sync --extra dev
- - name: Run ruff formatting
- run: uv run ruff format src/ tests/
-
- - name: Run ruff linting with auto-fix
- run: uv run ruff check --fix src/ tests/
-
- - name: Re-run formatting after auto-fix
- run: uv run ruff format src/ tests/
-
- - name: Verify all issues are fixed
+ - name: Check formatting (ruff)
run: |
- uv run ruff check src/ tests/
uv run ruff format --check src/ tests/
-
- - name: Check for changes
- id: check_changes
- run: |
- if [[ -n "$(git status --porcelain)" ]]; then
- echo "has_changes=true" >> $GITHUB_OUTPUT
- else
- echo "has_changes=false" >> $GITHUB_OUTPUT
- fi
-
- - name: Commit and push formatting changes
- if: steps.check_changes.outputs.has_changes == 'true'
- run: |
- git config --local user.email "github-actions[bot]@users.noreply.github.com"
- git config --local user.name "github-actions[bot]"
- git add -A
- git commit -m "style: auto-format code with ruff"
- git push
-
- tests:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Install uv
- uses: astral-sh/setup-uv@v4
- with:
- version: "latest"
-
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: "3.11"
-
- - name: Install dependencies
- run: uv sync --extra dev
+ uv run ruff check src/ tests/
- name: Run tests
run: uv run pytest tests/ -v
diff --git a/README.md b/README.md
index 6e74cfdf..8a09646c 100644
--- a/README.md
+++ b/README.md
@@ -4,21 +4,6 @@
DeepWork is a tool for defining and executing multi-step workflows with AI coding assistants like Claude Code, Google Gemini, and GitHub Copilot. It enables you to decompose complex tasks into manageable steps, with clear inputs, outputs, and dependencies.
-## Status: Phase 1 MVP Complete ✅
-
-**Version**: 0.1.0
-**Test Coverage**: 166 tests passing
-
-### What's Implemented
-
-- ✅ Job definition parsing and validation
-- ✅ Job registry for tracking installed workflows
-- ✅ Multi-platform support (Claude Code, Gemini, Copilot)
-- ✅ Jinja2-based skill file generation
-- ✅ Git integration for work branch management
-- ✅ CLI with `install` command
-- ✅ Core skills: `deepwork.define` and `deepwork.refine`
-
## Installation
### Prerequisites
@@ -46,27 +31,35 @@ deepwork install --platform claude # or gemini, copilot
This will:
- Create `.deepwork/` directory structure
-- Initialize job registry
-- Generate core DeepWork skills
+- Generate core DeepWork jobs
+- Install DeepWork jobs for your AI assistant
+- Configure hooks for your AI assistant to enable policies
## Quick Start
-### 1. Define a Workflow
-Use Claude Code (or your AI assistant) to define a new job:
+
+### 1. Define a Job
+Jobs are multi-step workflows where each Step has clear input and output artifacts, making them easier to manage effectively.
+
+The process of defining a job itself is actually a DeepWork job. You can see it at `.deepwork/jobs/deepwork_jobs/`.
+
+To start the process, just run the first Step in the job:
```
-/deepwork.define
+/deepwork_jobs.define
```
Follow the interactive prompts to:
-- Name your workflow
+- Name your job
- Define steps with inputs/outputs
- Specify dependencies between steps
+It will also prompt you to go on to the next Step in the job.
+
### 2. Execute Steps
-Run individual steps of your workflow:
+Run individual steps of your job:
```
/your_job_name.step_1
@@ -83,7 +76,7 @@ The AI will:
Use the refine skill to update existing jobs:
```
-/deepwork.refine
+/deepwork_jobs.refine
```
## Example: Competitive Research Workflow
@@ -215,9 +208,6 @@ ruff format src/
## Documentation
- **[Architecture](doc/architecture.md)**: Complete design specification
-- **[Template Review](doc/TEMPLATE_REVIEW.md)**: Skill template documentation
-- **[Status](STATUS.md)**: Implementation progress
-- **[Next Steps](NEXT_STEPS.md)**: Future development roadmap
## Project Structure
@@ -227,7 +217,6 @@ deepwork/
│ ├── cli/ # Command-line interface
│ ├── core/ # Core functionality
│ │ ├── parser.py # Job definition parsing
-│ │ ├── registry.py # Job registry management
│ │ ├── detector.py # Platform detection
│ │ └── generator.py # Skill file generation
│ ├── templates/ # Jinja2 templates
diff --git a/claude.md b/claude.md
index 588fa483..85b6ef2d 100644
--- a/claude.md
+++ b/claude.md
@@ -155,7 +155,7 @@ my-project/
## Key Files to Reference
- `doc/architecture.md` - Comprehensive architecture documentation
-- `readme.md` - High-level project overview
+- `README.md` - High-level project overview
- `shell.nix` - Development environment setup
## Development Guidelines
@@ -206,9 +206,3 @@ If a job exists in `src/deepwork/standard_jobs/`, it is a standard job and MUST
3. **Performance**: Job import completes in <10 seconds
4. **Extensibility**: New AI platforms can be added in <2 days
5. **Quality**: 90%+ test coverage, zero critical bugs
-
-## Questions or Issues?
-
-- See `doc/architecture.md` for detailed design documentation
-- Check `readme.md` for high-level concepts
-- Reference implementation phases for current work focus
diff --git a/doc/architecture.md b/doc/architecture.md
index d316b513..7383f8f9 100644
--- a/doc/architecture.md
+++ b/doc/architecture.md
@@ -42,6 +42,7 @@ deepwork/ # DeepWork tool repository
│ │ ├── install.py # Install command
│ │ └── sync.py # Sync command
│ ├── core/
+│ │ ├── adapters.py # Agent adapters for AI platforms
│ │ ├── detector.py # AI platform detection
│ │ ├── generator.py # Command file generation
│ │ ├── parser.py # Job definition parsing
@@ -133,52 +134,81 @@ def install(platform: str):
print(f" Run /deepwork_jobs.define to create your first job")
```
-### 2. Platform Detector (`detector.py`)
+### 2. Agent Adapters (`adapters.py`)
-Identifies which AI platforms are available in the project.
+Defines the modular adapter architecture for AI platforms. Each adapter encapsulates platform-specific configuration and behavior.
-**Detection Logic**:
+**Adapter Architecture**:
```python
-@dataclass
-class PlatformConfig:
- """Configuration for an AI platform."""
- name: str # "claude", "gemini", "copilot"
- display_name: str # "Claude Code", "Google Gemini", "GitHub Copilot"
- config_dir: str # ".claude", ".gemini", ".github"
- commands_dir: str # "commands", "commands", "commands"
-
-PLATFORMS = {
- "claude": PlatformConfig(
- name="claude",
- display_name="Claude Code",
- config_dir=".claude",
- commands_dir="commands"
- ),
- "gemini": PlatformConfig(
- name="gemini",
- display_name="Google Gemini",
- config_dir=".gemini",
- commands_dir="commands"
- ),
- "copilot": PlatformConfig(
- name="copilot",
- display_name="GitHub Copilot",
- config_dir=".github",
- commands_dir="commands"
- )
-}
+class CommandLifecycleHook(str, Enum):
+ """Generic lifecycle hook events supported by DeepWork."""
+ AFTER_AGENT = "after_agent" # After agent finishes (quality validation)
+ BEFORE_TOOL = "before_tool" # Before tool execution
+ BEFORE_PROMPT = "before_prompt" # When user submits a prompt
+
+class AgentAdapter(ABC):
+ """Base class for AI agent platform adapters."""
+
+ # Auto-registration via __init_subclass__
+ _registry: ClassVar[dict[str, type[AgentAdapter]]] = {}
+
+ # Platform configuration (subclasses define as class attributes)
+ name: ClassVar[str] # "claude"
+ display_name: ClassVar[str] # "Claude Code"
+ config_dir: ClassVar[str] # ".claude"
+ commands_dir: ClassVar[str] = "commands"
+
+ # Mapping from generic hook names to platform-specific names
+ hook_name_mapping: ClassVar[dict[CommandLifecycleHook, str]] = {}
+
+ def detect(self, project_root: Path) -> bool:
+ """Check if this platform is available in the project."""
+
+ def get_platform_hook_name(self, hook: CommandLifecycleHook) -> str | None:
+ """Get platform-specific event name for a generic hook."""
+
+ @abstractmethod
+ def sync_hooks(self, project_path: Path, hooks: dict) -> int:
+ """Sync hooks to platform settings."""
+
+class ClaudeAdapter(AgentAdapter):
+ name = "claude"
+ display_name = "Claude Code"
+ config_dir = ".claude"
+
+ # Claude Code uses PascalCase event names
+ hook_name_mapping = {
+ CommandLifecycleHook.AFTER_AGENT: "Stop",
+ CommandLifecycleHook.BEFORE_TOOL: "PreToolUse",
+ CommandLifecycleHook.BEFORE_PROMPT: "UserPromptSubmit",
+ }
+```
+### 3. Platform Detector (`detector.py`)
+
+Uses adapters to identify which AI platforms are available in the project.
+
+**Detection Logic**:
+```python
class PlatformDetector:
- def detect_platform(self, platform_name: str) -> PlatformConfig | None:
+ def detect_platform(self, platform_name: str) -> AgentAdapter | None:
"""Check if a specific platform is available."""
- platform = PLATFORMS[platform_name]
- config_dir = self.project_root / platform.config_dir
- if config_dir.exists() and config_dir.is_dir():
- return platform
+ adapter_class = AgentAdapter.get(platform_name)
+ adapter = adapter_class(self.project_root)
+ if adapter.detect():
+ return adapter
return None
+
+ def detect_all_platforms(self) -> list[AgentAdapter]:
+ """Detect all available platforms."""
+ return [
+ adapter_class(self.project_root)
+ for adapter_class in AgentAdapter.get_all().values()
+ if adapter_class(self.project_root).detect()
+ ]
```
-### 3. Command Generator (`generator.py`)
+### 4. Command Generator (`generator.py`)
Generates AI-platform-specific command files from job definitions.
@@ -387,6 +417,42 @@ steps:
- comparative_report
```
+### Lifecycle Hooks in Job Definitions
+
+Steps can define lifecycle hooks that trigger at specific points during execution. Hooks are defined using generic event names that are mapped to platform-specific names by adapters:
+
+```yaml
+steps:
+ - id: build_report
+ name: "Build Report"
+ description: "Generate the final report"
+ instructions_file: steps/build_report.md
+ outputs:
+ - report.md
+ hooks:
+ after_agent: # Triggers after agent finishes (Claude: "Stop")
+ - prompt: |
+ Verify the report includes all required sections:
+ - Executive summary
+ - Data analysis
+ - Recommendations
+ - script: hooks/validate_report.sh
+ before_tool: # Triggers before tool use (Claude: "PreToolUse")
+ - prompt: "Confirm tool execution is appropriate"
+```
+
+**Supported Lifecycle Events**:
+- `after_agent` - Triggered after the agent finishes responding (quality validation)
+- `before_tool` - Triggered before the agent uses a tool
+- `before_prompt` - Triggered when user submits a new prompt
+
+**Hook Action Types**:
+- `prompt` - Inline prompt text
+- `prompt_file` - Path to a file containing the prompt
+- `script` - Path to a shell script
+
+**Note**: The deprecated `stop_hooks` field is still supported for backward compatibility but maps to `hooks.after_agent`.
+
### Step Instructions Example
`.deepwork/jobs/competitive_research/steps/identify_competitors.md`:
@@ -456,65 +522,8 @@ Create `competitors.md` with this structure:
When the job is defined and `sync` is run, DeepWork generates command files. Example for Claude Code:
-`.claude/commands/competitive_research.identify_competitors.md`:
-
-```markdown
----
-description: Research and identify direct and indirect competitors
----
-
-# competitive_research.identify_competitors
+In `.deepwork/jobs/competitive_research`, a step called `identify_competitors` will generate a command file at `.claude/commands/competitive_research.identify_competitors.md`:
-**Step 1 of 5** in the **competitive_research** workflow
-
-**Summary**: Systematic competitive analysis workflow
-
-## Job Overview
-
-[Job description and context...]
-
-## Instructions
-
-You are performing the "Identify Competitors" step of competitive research.
-
-### Prerequisites
-This step has no dependencies (it's the first step).
-
-Before starting, ensure you have:
-- Market segment defined
-- Product category specified
-
-### Input Parameters
-Ask the user for the following if not already provided:
-1. **market_segment**: The market segment to analyze
-2. **product_category**: Product category
-
-### Your Task
-
-[Content from .deepwork/jobs/competitive_research/steps/identify_competitors.md is embedded here]
-
-### Work Branch Management
-1. Check if we're on a work branch for this job
-2. If not, create a new branch: `deepwork/competitive_research-[instance]-[date]`
-3. All outputs should be created in the `deepwork/[branch-name]/` directory
-
-### Output Requirements
-Create the following file in the work directory:
-- `deepwork/[branch-name]/competitors.md`
-
-### After Completion
-1. Inform the user that step 1 is complete
-2. Recommend they review the competitors.md file
-3. Suggest running `/competitive_research.primary_research` to continue
-
----
-
-## Context Files
-- Job definition: `.deepwork/jobs/competitive_research/job.yml`
-- Step instructions: `.deepwork/jobs/competitive_research/steps/identify_competitors.md`
-```
-
----
# Part 3: Runtime Execution Model
@@ -660,17 +669,14 @@ This step requires outputs from:
- Step 2 (primary_research): primary_research.md
### Your Task
-1. Read `deepwork/[branch]/competitors.md`
-2. Read `deepwork/[branch]/primary_research.md`
-3. [Perform analysis]
-4. Write `deepwork/[branch]/secondary_research.md`
+Conduct web research on secondary sources for each competitor identified in competitors.md.
```
### 3. Git History
When working on similar jobs:
- User: "Do competitive research for Acme Corp, similar to our Widget Corp analysis"
-- Claude can read `deepwork/competitive_research-widget-corp-2026-01-05/` from git history
+- Claude can read old existing branches like `deepwork/competitive_research-widget-corp-2024-01-05/` from git history
- Uses it as a template for style, depth, format
### 4. No Environment Variables Needed
@@ -707,18 +713,17 @@ Where `instance-identifier` can be:
### Command Behavior
Commands should:
-1. Check if we're already on a work branch for this job
+1. Check if we're already on a branch for this job
2. If not, ask user for instance name or auto-generate from timestamp
3. Create branch: `git checkout -b deepwork/[job_name]-[instance]-[date]`
-4. Create work directory: `mkdir -p deepwork/[job_name]-[instance]-[date]`
-5. Perform work in that directory
+4. Perform the work on that branch
### Completion and Merge
-When all steps are done:
-1. User reviews all outputs in `work/[branch-name]/`
-2. Commits the work
-3. Creates PR to main branch
+When all steps are done, remind the user they should:
+1. Review all outputs
+2. Commit the work
+3. Create PR to main branch
4. After merge, the work products are in the repository
5. Future job instances can reference this work for context/templates
diff --git a/src/deepwork/cli/install.py b/src/deepwork/cli/install.py
index 030f5668..59b0e0c0 100644
--- a/src/deepwork/cli/install.py
+++ b/src/deepwork/cli/install.py
@@ -6,6 +6,7 @@
import click
from rich.console import Console
+from deepwork.core.adapters import AgentAdapter
from deepwork.core.detector import PlatformDetector
from deepwork.utils.fs import ensure_dir
from deepwork.utils.git import is_git_repo
@@ -112,13 +113,21 @@ def _create_deepwork_gitignore(deepwork_dir: Path) -> None:
gitignore_path.write_text(gitignore_content)
+class DynamicChoice(click.Choice):
+ """A Click Choice that gets its values dynamically from AgentAdapter."""
+
+ def __init__(self) -> None:
+ # Get choices at runtime from registered adapters
+ super().__init__(AgentAdapter.list_names(), case_sensitive=False)
+
+
@click.command()
@click.option(
"--platform",
"-p",
- type=click.Choice(["claude", "gemini", "copilot"], case_sensitive=False),
+ type=DynamicChoice(),
required=False,
- help="AI platform to install for (claude, gemini, or copilot). If not specified, will auto-detect.",
+ help="AI platform to install for. If not specified, will auto-detect.",
)
@click.option(
"--path",
@@ -171,43 +180,46 @@ def _install_deepwork(platform_name: str | None, project_path: Path) -> None:
if platform_name:
# User specified platform - check if it's available
console.print(f"[yellow]→[/yellow] Checking for {platform_name.title()}...")
- platform_config = detector.detect_platform(platform_name.lower())
+ adapter = detector.detect_platform(platform_name.lower())
- if platform_config is None:
+ if adapter is None:
# Platform not detected - provide helpful message
- platform_cfg = detector.get_platform_config(platform_name.lower())
+ adapter = detector.get_adapter(platform_name.lower())
raise InstallError(
- f"{platform_cfg.display_name} not detected in this project.\n"
- f"Expected to find '{platform_cfg.config_dir}/' directory.\n"
- f"Please ensure {platform_cfg.display_name} is set up in this project."
+ f"{adapter.display_name} not detected in this project.\n"
+ f"Expected to find '{adapter.config_dir}/' directory.\n"
+ f"Please ensure {adapter.display_name} is set up in this project."
)
- console.print(f" [green]✓[/green] {platform_config.display_name} detected")
- platform_to_add = platform_config.name
+ console.print(f" [green]✓[/green] {adapter.display_name} detected")
+ platform_to_add = adapter.name
else:
# Auto-detect platform
console.print("[yellow]→[/yellow] Auto-detecting AI platform...")
- available_platforms = detector.detect_all_platforms()
+ available_adapters = detector.detect_all_platforms()
- if not available_platforms:
+ if not available_adapters:
+ supported = ", ".join(
+ f"{AgentAdapter.get(name).display_name} ({AgentAdapter.get(name).config_dir}/)"
+ for name in AgentAdapter.list_names()
+ )
raise InstallError(
- "No AI platform detected.\n"
- "DeepWork supports: Claude Code (.claude/), Google Gemini (.gemini/), "
- "GitHub Copilot (.github/).\n"
+ f"No AI platform detected.\n"
+ f"DeepWork supports: {supported}.\n"
"Please set up one of these platforms first, or use --platform to specify."
)
- if len(available_platforms) > 1:
+ if len(available_adapters) > 1:
# Multiple platforms - ask user to specify
- platform_names = ", ".join(p.display_name for p in available_platforms)
+ platform_names = ", ".join(a.display_name for a in available_adapters)
raise InstallError(
f"Multiple AI platforms detected: {platform_names}\n"
"Please specify which platform to use with --platform option."
)
- platform_config = available_platforms[0]
- console.print(f" [green]✓[/green] {platform_config.display_name} detected")
- platform_to_add = platform_config.name
+ adapter = available_adapters[0]
+ console.print(f" [green]✓[/green] {adapter.display_name} detected")
+ platform_to_add = adapter.name
# Step 3: Create .deepwork/ directory structure
console.print("[yellow]→[/yellow] Creating DeepWork directory structure...")
@@ -247,9 +259,9 @@ def _install_deepwork(platform_name: str | None, project_path: Path) -> None:
# Add platform if not already present
if platform_to_add not in config_data["platforms"]:
config_data["platforms"].append(platform_to_add)
- console.print(f" [green]✓[/green] Added {platform_config.display_name} to platforms")
+ console.print(f" [green]✓[/green] Added {adapter.display_name} to platforms")
else:
- console.print(f" [dim]•[/dim] {platform_config.display_name} already configured")
+ console.print(f" [dim]•[/dim] {adapter.display_name} already configured")
save_yaml(config_file, config_data)
console.print(f" [green]✓[/green] Updated {config_file.relative_to(project_path)}")
@@ -269,7 +281,7 @@ def _install_deepwork(platform_name: str | None, project_path: Path) -> None:
# Success message
console.print()
console.print(
- f"[bold green]✓ DeepWork installed successfully for {platform_config.display_name}![/bold green]"
+ f"[bold green]✓ DeepWork installed successfully for {adapter.display_name}![/bold green]"
)
console.print()
console.print("[bold]Next steps:[/bold]")
diff --git a/src/deepwork/cli/sync.py b/src/deepwork/cli/sync.py
index c895819e..f5e46751 100644
--- a/src/deepwork/cli/sync.py
+++ b/src/deepwork/cli/sync.py
@@ -6,7 +6,7 @@
from rich.console import Console
from rich.table import Table
-from deepwork.core.detector import PLATFORMS
+from deepwork.core.adapters import AgentAdapter
from deepwork.core.generator import CommandGenerator
from deepwork.core.hooks_syncer import collect_job_hooks, sync_hooks_to_platform
from deepwork.core.parser import parse_job_definition
@@ -109,15 +109,17 @@ def sync_commands(project_path: Path) -> None:
stats = {"platforms": 0, "commands": 0, "hooks": 0}
for platform_name in platforms:
- if platform_name not in PLATFORMS:
+ try:
+ adapter_cls = AgentAdapter.get(platform_name)
+ except Exception:
console.print(f"[yellow]⚠[/yellow] Unknown platform '{platform_name}', skipping")
continue
- platform_config = PLATFORMS[platform_name]
- console.print(f"\n[yellow]→[/yellow] Syncing to {platform_config.display_name}...")
+ adapter = adapter_cls(project_path)
+ console.print(f"\n[yellow]→[/yellow] Syncing to {adapter.display_name}...")
- platform_dir = project_path / platform_config.config_dir
- commands_dir = platform_dir / platform_config.commands_dir
+ platform_dir = project_path / adapter.config_dir
+ commands_dir = platform_dir / adapter.commands_dir
# Create commands directory
ensure_dir(commands_dir)
@@ -127,7 +129,7 @@ def sync_commands(project_path: Path) -> None:
console.print(" [dim]•[/dim] Generating commands...")
for job in jobs:
try:
- job_paths = generator.generate_all_commands(job, platform_config, platform_dir)
+ job_paths = generator.generate_all_commands(job, adapter, platform_dir)
stats["commands"] += len(job_paths)
console.print(f" [green]✓[/green] {job.name} ({len(job_paths)} commands)")
except Exception as e:
@@ -137,7 +139,7 @@ def sync_commands(project_path: Path) -> None:
if job_hooks_list:
console.print(" [dim]•[/dim] Syncing hooks...")
try:
- hooks_count = sync_hooks_to_platform(project_path, platform_config, job_hooks_list)
+ hooks_count = sync_hooks_to_platform(project_path, adapter, job_hooks_list)
stats["hooks"] += hooks_count
if hooks_count > 0:
console.print(f" [green]✓[/green] Synced {hooks_count} hook(s)")
diff --git a/src/deepwork/core/adapters.py b/src/deepwork/core/adapters.py
new file mode 100644
index 00000000..700010b6
--- /dev/null
+++ b/src/deepwork/core/adapters.py
@@ -0,0 +1,299 @@
+"""Agent adapters for AI coding assistants."""
+
+from __future__ import annotations
+
+import json
+from abc import ABC, abstractmethod
+from enum import Enum
+from pathlib import Path
+from typing import Any, ClassVar
+
+
+class AdapterError(Exception):
+ """Exception raised for adapter errors."""
+
+ pass
+
+
+class CommandLifecycleHook(str, Enum):
+ """Generic command lifecycle hook events supported by DeepWork.
+
+ These represent hook points in the AI agent's command execution lifecycle.
+ Each adapter maps these generic names to platform-specific event names.
+ The enum values are the generic names used in job.yml files.
+ """
+
+ # Triggered after the agent finishes responding (before returning to user)
+ # Use for quality validation loops, output verification
+ AFTER_AGENT = "after_agent"
+
+ # Triggered before the agent uses a tool
+ # Use for tool-specific validation or pre-processing
+ BEFORE_TOOL = "before_tool"
+
+ # Triggered when the user submits a new prompt
+ # Use for session initialization, context setup
+ BEFORE_PROMPT = "before_prompt"
+
+
+# List of all supported command lifecycle hooks
+COMMAND_LIFECYCLE_HOOKS_SUPPORTED: list[CommandLifecycleHook] = list(CommandLifecycleHook)
+
+
+class AgentAdapter(ABC):
+ """Base class for AI agent platform adapters.
+
+ Subclasses are automatically registered when defined, enabling dynamic
+ discovery of supported platforms.
+ """
+
+ # Class-level registry for auto-discovery
+ _registry: ClassVar[dict[str, type[AgentAdapter]]] = {}
+
+ # Platform configuration (subclasses define as class attributes)
+ name: ClassVar[str]
+ display_name: ClassVar[str]
+ config_dir: ClassVar[str]
+ commands_dir: ClassVar[str] = "commands"
+ command_template: ClassVar[str] = "command-job-step.md.jinja"
+
+ # Mapping from generic CommandLifecycleHook to platform-specific event names.
+ # Subclasses should override this to provide platform-specific mappings.
+ hook_name_mapping: ClassVar[dict[CommandLifecycleHook, str]] = {}
+
+ def __init__(self, project_root: Path | str | None = None):
+ """
+ Initialize adapter with optional project root.
+
+ Args:
+ project_root: Path to project root directory
+ """
+ self.project_root = Path(project_root) if project_root else None
+
+ def __init_subclass__(cls, **kwargs: Any) -> None:
+ """Auto-register subclasses."""
+ super().__init_subclass__(**kwargs)
+ # Only register if the class has a name attribute set (not inherited default)
+ if "name" in cls.__dict__ and cls.name:
+ AgentAdapter._registry[cls.name] = cls
+
+ @classmethod
+ def get_all(cls) -> dict[str, type[AgentAdapter]]:
+ """
+ Return all registered adapter classes.
+
+ Returns:
+ Dict mapping adapter names to adapter classes
+ """
+ return cls._registry.copy()
+
+ @classmethod
+ def get(cls, name: str) -> type[AgentAdapter]:
+ """
+ Get adapter class by name.
+
+ Args:
+ name: Adapter name (e.g., "claude", "gemini", "copilot")
+
+ Returns:
+ Adapter class
+
+ Raises:
+ AdapterError: If adapter name is not registered
+ """
+ if name not in cls._registry:
+ raise AdapterError(
+ f"Unknown adapter '{name}'. Supported adapters: {', '.join(cls._registry.keys())}"
+ )
+ return cls._registry[name]
+
+ @classmethod
+ def list_names(cls) -> list[str]:
+ """
+ List all registered adapter names.
+
+ Returns:
+ List of adapter names
+ """
+ return list(cls._registry.keys())
+
+ def get_template_dir(self, templates_root: Path) -> Path:
+ """
+ Get the template directory for this adapter.
+
+ Args:
+ templates_root: Root directory containing platform templates
+
+ Returns:
+ Path to this adapter's template directory
+ """
+ return templates_root / self.name
+
+ def get_commands_dir(self, project_root: Path | None = None) -> Path:
+ """
+ Get the commands directory path.
+
+ Args:
+ project_root: Project root (uses instance's project_root if not provided)
+
+ Returns:
+ Path to commands directory
+
+ Raises:
+ AdapterError: If no project root specified
+ """
+ root = project_root or self.project_root
+ if not root:
+ raise AdapterError("No project root specified")
+ return root / self.config_dir / self.commands_dir
+
+ def get_command_filename(self, job_name: str, step_id: str) -> str:
+ """
+ Get the filename for a command.
+
+ Can be overridden for different file formats (e.g., TOML for Gemini).
+
+ Args:
+ job_name: Name of the job
+ step_id: ID of the step
+
+ Returns:
+ Command filename (e.g., "job_name.step_id.md")
+ """
+ return f"{job_name}.{step_id}.md"
+
+ def detect(self, project_root: Path | None = None) -> bool:
+ """
+ Check if this platform is available in the project.
+
+ Args:
+ project_root: Project root (uses instance's project_root if not provided)
+
+ Returns:
+ True if platform config directory exists
+ """
+ root = project_root or self.project_root
+ if not root:
+ return False
+ config_path = root / self.config_dir
+ return config_path.exists() and config_path.is_dir()
+
+ def get_platform_hook_name(self, hook: CommandLifecycleHook) -> str | None:
+ """
+ Get the platform-specific event name for a generic hook.
+
+ Args:
+ hook: Generic CommandLifecycleHook
+
+ Returns:
+ Platform-specific event name, or None if not supported
+ """
+ return self.hook_name_mapping.get(hook)
+
+ def supports_hook(self, hook: CommandLifecycleHook) -> bool:
+ """
+ Check if this adapter supports a specific hook.
+
+ Args:
+ hook: Generic CommandLifecycleHook
+
+ Returns:
+ True if the hook is supported
+ """
+ return hook in self.hook_name_mapping
+
+ @abstractmethod
+ def sync_hooks(self, project_path: Path, hooks: dict[str, list[dict[str, Any]]]) -> int:
+ """
+ Sync hooks to platform settings.
+
+ Args:
+ project_path: Path to project root
+ hooks: Dict mapping lifecycle events to hook configurations
+
+ Returns:
+ Number of hooks synced
+
+ Raises:
+ AdapterError: If sync fails
+ """
+ pass
+
+
+def _hook_already_present(hooks: list[dict[str, Any]], script_path: str) -> bool:
+ """Check if a hook with the given script path is already in the list."""
+ for hook in hooks:
+ hook_list = hook.get("hooks", [])
+ for h in hook_list:
+ if h.get("command") == script_path:
+ return True
+ return False
+
+
+class ClaudeAdapter(AgentAdapter):
+ """Adapter for Claude Code."""
+
+ name = "claude"
+ display_name = "Claude Code"
+ config_dir = ".claude"
+
+ # Claude Code uses PascalCase event names
+ hook_name_mapping: ClassVar[dict[CommandLifecycleHook, str]] = {
+ CommandLifecycleHook.AFTER_AGENT: "Stop",
+ CommandLifecycleHook.BEFORE_TOOL: "PreToolUse",
+ CommandLifecycleHook.BEFORE_PROMPT: "UserPromptSubmit",
+ }
+
+ def sync_hooks(self, project_path: Path, hooks: dict[str, list[dict[str, Any]]]) -> int:
+ """
+ Sync hooks to Claude Code settings.json.
+
+ Args:
+ project_path: Path to project root
+ hooks: Merged hooks configuration
+
+ Returns:
+ Number of hooks synced
+
+ Raises:
+ AdapterError: If sync fails
+ """
+ if not hooks:
+ return 0
+
+ settings_file = project_path / self.config_dir / "settings.json"
+
+ # Load existing settings or create new
+ existing_settings: dict[str, Any] = {}
+ if settings_file.exists():
+ try:
+ with open(settings_file, encoding="utf-8") as f:
+ existing_settings = json.load(f)
+ except (json.JSONDecodeError, OSError) as e:
+ raise AdapterError(f"Failed to read settings.json: {e}") from e
+
+ # Merge hooks into existing settings
+ if "hooks" not in existing_settings:
+ existing_settings["hooks"] = {}
+
+ for event, event_hooks in hooks.items():
+ if event not in existing_settings["hooks"]:
+ existing_settings["hooks"][event] = []
+
+ # Add new hooks that aren't already present
+ for hook in event_hooks:
+ script_path = hook.get("hooks", [{}])[0].get("command", "")
+ if not _hook_already_present(existing_settings["hooks"][event], script_path):
+ existing_settings["hooks"][event].append(hook)
+
+ # Write back to settings.json
+ try:
+ settings_file.parent.mkdir(parents=True, exist_ok=True)
+ with open(settings_file, "w", encoding="utf-8") as f:
+ json.dump(existing_settings, f, indent=2)
+ except OSError as e:
+ raise AdapterError(f"Failed to write settings.json: {e}") from e
+
+ # Count total requested hooks across all events (includes hooks that were already present)
+ total = sum(len(hooks_list) for hooks_list in hooks.values())
+ return total
diff --git a/src/deepwork/core/detector.py b/src/deepwork/core/detector.py
index 4082bb9a..683da40b 100644
--- a/src/deepwork/core/detector.py
+++ b/src/deepwork/core/detector.py
@@ -1,8 +1,9 @@
"""Platform detection for AI coding assistants."""
-from dataclasses import dataclass
from pathlib import Path
+from deepwork.core.adapters import AdapterError, AgentAdapter
+
class DetectorError(Exception):
"""Exception raised for platform detection errors."""
@@ -10,41 +11,8 @@ class DetectorError(Exception):
pass
-@dataclass
-class PlatformConfig:
- """Configuration for an AI platform."""
-
- name: str # "claude", "gemini", "copilot"
- display_name: str # "Claude Code", "Google Gemini", "GitHub Copilot"
- config_dir: str # ".claude", ".gemini", ".github"
- commands_dir: str # "commands", "commands", "commands"
-
-
-# Supported platform configurations
-PLATFORMS = {
- "claude": PlatformConfig(
- name="claude",
- display_name="Claude Code",
- config_dir=".claude",
- commands_dir="commands",
- ),
- "gemini": PlatformConfig(
- name="gemini",
- display_name="Google Gemini",
- config_dir=".gemini",
- commands_dir="commands",
- ),
- "copilot": PlatformConfig(
- name="copilot",
- display_name="GitHub Copilot",
- config_dir=".github",
- commands_dir="commands",
- ),
-}
-
-
class PlatformDetector:
- """Detects available AI coding platforms."""
+ """Detects available AI coding platforms using registered adapters."""
def __init__(self, project_root: Path | str):
"""
@@ -55,7 +23,7 @@ def __init__(self, project_root: Path | str):
"""
self.project_root = Path(project_root)
- def detect_platform(self, platform_name: str) -> PlatformConfig | None:
+ def detect_platform(self, platform_name: str) -> AgentAdapter | None:
"""
Check if a specific platform is available.
@@ -63,60 +31,56 @@ def detect_platform(self, platform_name: str) -> PlatformConfig | None:
platform_name: Platform name ("claude", "gemini", "copilot")
Returns:
- PlatformConfig if platform is available, None otherwise
+ AgentAdapter instance if platform is available, None otherwise
Raises:
DetectorError: If platform_name is not supported
"""
- if platform_name not in PLATFORMS:
- raise DetectorError(
- f"Unknown platform '{platform_name}'. "
- f"Supported platforms: {', '.join(PLATFORMS.keys())}"
- )
-
- platform = PLATFORMS[platform_name]
- config_dir = self.project_root / platform.config_dir
+ try:
+ adapter_cls = AgentAdapter.get(platform_name)
+ except AdapterError as e:
+ raise DetectorError(str(e)) from e
- if config_dir.exists() and config_dir.is_dir():
- return platform
+ adapter = adapter_cls(self.project_root)
+ if adapter.detect():
+ return adapter
return None
- def detect_all_platforms(self) -> list[PlatformConfig]:
+ def detect_all_platforms(self) -> list[AgentAdapter]:
"""
Detect all available platforms.
Returns:
- List of available platform configurations
+ List of available adapter instances
"""
available = []
- for platform_name in PLATFORMS:
- platform = self.detect_platform(platform_name)
- if platform is not None:
- available.append(platform)
+ for platform_name in AgentAdapter.list_names():
+ adapter = self.detect_platform(platform_name)
+ if adapter is not None:
+ available.append(adapter)
return available
- def get_platform_config(self, platform_name: str) -> PlatformConfig:
+ def get_adapter(self, platform_name: str) -> AgentAdapter:
"""
- Get configuration for a platform (without checking availability).
+ Get an adapter instance for a platform (without checking availability).
Args:
platform_name: Platform name
Returns:
- Platform configuration
+ AgentAdapter instance
Raises:
DetectorError: If platform_name is not supported
"""
- if platform_name not in PLATFORMS:
- raise DetectorError(
- f"Unknown platform '{platform_name}'. "
- f"Supported platforms: {', '.join(PLATFORMS.keys())}"
- )
+ try:
+ adapter_cls = AgentAdapter.get(platform_name)
+ except AdapterError as e:
+ raise DetectorError(str(e)) from e
- return PLATFORMS[platform_name]
+ return adapter_cls(self.project_root)
@staticmethod
def list_supported_platforms() -> list[str]:
@@ -126,4 +90,4 @@ def list_supported_platforms() -> list[str]:
Returns:
List of platform names
"""
- return list(PLATFORMS.keys())
+ return AgentAdapter.list_names()
diff --git a/src/deepwork/core/generator.py b/src/deepwork/core/generator.py
index 616dad78..380ab5b5 100644
--- a/src/deepwork/core/generator.py
+++ b/src/deepwork/core/generator.py
@@ -5,8 +5,9 @@
from jinja2 import Environment, FileSystemLoader, TemplateNotFound
-from deepwork.core.detector import PlatformConfig
+from deepwork.core.adapters import AgentAdapter, CommandLifecycleHook
from deepwork.core.parser import JobDefinition, Step
+from deepwork.schemas.job_schema import LIFECYCLE_HOOK_EVENTS
from deepwork.utils.fs import safe_read, safe_write
@@ -36,20 +37,20 @@ def __init__(self, templates_dir: Path | str | None = None):
if not self.templates_dir.exists():
raise GeneratorError(f"Templates directory not found: {self.templates_dir}")
- def _get_jinja_env(self, platform: PlatformConfig) -> Environment:
+ def _get_jinja_env(self, adapter: AgentAdapter) -> Environment:
"""
- Get Jinja2 environment for a platform.
+ Get Jinja2 environment for an adapter.
Args:
- platform: Platform configuration
+ adapter: Agent adapter
Returns:
Jinja2 Environment
"""
- platform_templates_dir = self.templates_dir / platform.name
+ platform_templates_dir = adapter.get_template_dir(self.templates_dir)
if not platform_templates_dir.exists():
raise GeneratorError(
- f"Templates for platform '{platform.name}' not found at {platform_templates_dir}"
+ f"Templates for platform '{adapter.name}' not found at {platform_templates_dir}"
)
return Environment(
@@ -82,8 +83,37 @@ def _is_standalone_step(self, job: JobDefinition, step: Step) -> bool:
return True
+ def _build_hook_context(self, job: JobDefinition, hook_action: Any) -> dict[str, Any]:
+ """
+ Build context for a single hook action.
+
+ Args:
+ job: Job definition
+ hook_action: HookAction instance
+
+ Returns:
+ Hook context dictionary
+ """
+ hook_ctx: dict[str, Any] = {}
+ if hook_action.is_prompt():
+ hook_ctx["type"] = "prompt"
+ hook_ctx["content"] = hook_action.prompt
+ elif hook_action.is_prompt_file():
+ hook_ctx["type"] = "prompt_file"
+ hook_ctx["path"] = hook_action.prompt_file
+ # Read the prompt file content
+ prompt_file_path = job.job_dir / hook_action.prompt_file
+ prompt_content = safe_read(prompt_file_path)
+ if prompt_content is None:
+ raise GeneratorError(f"Hook prompt file not found: {prompt_file_path}")
+ hook_ctx["content"] = prompt_content
+ elif hook_action.is_script():
+ hook_ctx["type"] = "script"
+ hook_ctx["path"] = hook_action.script
+ return hook_ctx
+
def _build_step_context(
- self, job: JobDefinition, step: Step, step_index: int
+ self, job: JobDefinition, step: Step, step_index: int, adapter: AgentAdapter
) -> dict[str, Any]:
"""
Build template context for a step.
@@ -92,6 +122,7 @@ def _build_step_context(
job: Job definition
step: Step to generate context for
step_index: Index of step in job (0-based)
+ adapter: Agent adapter for platform-specific hook name mapping
Returns:
Template context dictionary
@@ -126,26 +157,26 @@ def _build_step_context(
if step_index > 0:
prev_step = job.steps[step_index - 1].id
- # Build stop hooks context (array)
- stop_hooks = []
- for hook in step.stop_hooks:
- hook_ctx = {}
- if hook.is_prompt():
- hook_ctx["type"] = "prompt"
- hook_ctx["content"] = hook.prompt
- elif hook.is_prompt_file():
- hook_ctx["type"] = "prompt_file"
- hook_ctx["path"] = hook.prompt_file
- # Read the prompt file content
- prompt_file_path = job.job_dir / hook.prompt_file
- prompt_content = safe_read(prompt_file_path)
- if prompt_content is None:
- raise GeneratorError(f"Stop hook prompt file not found: {prompt_file_path}")
- hook_ctx["content"] = prompt_content
- elif hook.is_script():
- hook_ctx["type"] = "script"
- hook_ctx["path"] = hook.script
- stop_hooks.append(hook_ctx)
+ # Build hooks context for all lifecycle events
+ # Structure: {platform_event_name: [hook_contexts]}
+ hooks: dict[str, list[dict[str, Any]]] = {}
+ for event in LIFECYCLE_HOOK_EVENTS:
+ if event in step.hooks:
+ # Get platform-specific event name from adapter
+ hook_enum = CommandLifecycleHook(event)
+ platform_event_name = adapter.get_platform_hook_name(hook_enum)
+ if platform_event_name:
+ hook_contexts = [
+ self._build_hook_context(job, hook_action)
+ for hook_action in step.hooks[event]
+ ]
+ if hook_contexts:
+ hooks[platform_event_name] = hook_contexts
+
+ # Backward compatibility: expose the after_agent hooks under the legacy stop_hooks name
+ stop_hooks = hooks.get(
+ adapter.get_platform_hook_name(CommandLifecycleHook.AFTER_AGENT) or "Stop", []
+ )
return {
"job_name": job.name,
@@ -166,14 +197,15 @@ def _build_step_context(
"next_step": next_step,
"prev_step": prev_step,
"is_standalone": is_standalone,
- "stop_hooks": stop_hooks,
+ "hooks": hooks, # New: all hooks by platform event name
+ "stop_hooks": stop_hooks, # Backward compat: after_agent hooks only
}
def generate_step_command(
self,
job: JobDefinition,
step: Step,
- platform: PlatformConfig,
+ adapter: AgentAdapter,
output_dir: Path | str,
) -> Path:
"""
@@ -182,7 +214,7 @@ def generate_step_command(
Args:
job: Job definition
step: Step to generate command for
- platform: Platform configuration
+ adapter: Agent adapter for the target platform
output_dir: Directory to write command file to
Returns:
@@ -194,7 +226,7 @@ def generate_step_command(
output_dir = Path(output_dir)
# Create commands subdirectory if needed
- commands_dir = output_dir / platform.commands_dir
+ commands_dir = output_dir / adapter.commands_dir
commands_dir.mkdir(parents=True, exist_ok=True)
# Find step index
@@ -204,12 +236,12 @@ def generate_step_command(
raise GeneratorError(f"Step '{step.id}' not found in job '{job.name}'") from e
# Build context
- context = self._build_step_context(job, step, step_index)
+ context = self._build_step_context(job, step, step_index, adapter)
# Load and render template
- env = self._get_jinja_env(platform)
+ env = self._get_jinja_env(adapter)
try:
- template = env.get_template("command-job-step.md.jinja")
+ template = env.get_template(adapter.command_template)
except TemplateNotFound as e:
raise GeneratorError(f"Template not found: {e}") from e
@@ -219,7 +251,7 @@ def generate_step_command(
raise GeneratorError(f"Template rendering failed: {e}") from e
# Write command file
- command_filename = f"{job.name}.{step.id}.md"
+ command_filename = adapter.get_command_filename(job.name, step.id)
command_path = commands_dir / command_filename
try:
@@ -232,7 +264,7 @@ def generate_step_command(
def generate_all_commands(
self,
job: JobDefinition,
- platform: PlatformConfig,
+ adapter: AgentAdapter,
output_dir: Path | str,
) -> list[Path]:
"""
@@ -240,7 +272,7 @@ def generate_all_commands(
Args:
job: Job definition
- platform: Platform configuration
+ adapter: Agent adapter for the target platform
output_dir: Directory to write command files to
Returns:
@@ -252,7 +284,7 @@ def generate_all_commands(
command_paths = []
for step in job.steps:
- command_path = self.generate_step_command(job, step, platform, output_dir)
+ command_path = self.generate_step_command(job, step, adapter, output_dir)
command_paths.append(command_path)
return command_paths
diff --git a/src/deepwork/core/hooks_syncer.py b/src/deepwork/core/hooks_syncer.py
index 13e01954..65257ec2 100644
--- a/src/deepwork/core/hooks_syncer.py
+++ b/src/deepwork/core/hooks_syncer.py
@@ -1,13 +1,12 @@
"""Hooks syncer for DeepWork - collects and syncs hooks from jobs to platform settings."""
-import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
import yaml
-from deepwork.core.detector import PlatformConfig
+from deepwork.core.adapters import AgentAdapter
class HooksSyncError(Exception):
@@ -175,62 +174,9 @@ def _hook_already_present(hooks: list[dict[str, Any]], script_path: str) -> bool
return False
-def sync_hooks_to_claude(
- project_path: Path,
- platform_config: PlatformConfig,
- hooks: dict[str, list[dict[str, Any]]],
-) -> None:
- """
- Sync hooks to Claude Code settings.json.
-
- Args:
- project_path: Path to project root
- platform_config: Platform configuration
- hooks: Merged hooks configuration
-
- Raises:
- HooksSyncError: If sync fails
- """
- if not hooks:
- return
-
- settings_file = project_path / platform_config.config_dir / "settings.json"
-
- # Load existing settings or create new
- existing_settings: dict[str, Any] = {}
- if settings_file.exists():
- try:
- with open(settings_file, encoding="utf-8") as f:
- existing_settings = json.load(f)
- except (json.JSONDecodeError, OSError) as e:
- raise HooksSyncError(f"Failed to read settings.json: {e}") from e
-
- # Merge hooks into existing settings
- if "hooks" not in existing_settings:
- existing_settings["hooks"] = {}
-
- for event, event_hooks in hooks.items():
- if event not in existing_settings["hooks"]:
- existing_settings["hooks"][event] = []
-
- # Add new hooks that aren't already present
- for hook in event_hooks:
- script_path = hook.get("hooks", [{}])[0].get("command", "")
- if not _hook_already_present(existing_settings["hooks"][event], script_path):
- existing_settings["hooks"][event].append(hook)
-
- # Write back to settings.json
- try:
- settings_file.parent.mkdir(parents=True, exist_ok=True)
- with open(settings_file, "w", encoding="utf-8") as f:
- json.dump(existing_settings, f, indent=2)
- except OSError as e:
- raise HooksSyncError(f"Failed to write settings.json: {e}") from e
-
-
def sync_hooks_to_platform(
project_path: Path,
- platform_config: PlatformConfig,
+ adapter: AgentAdapter,
job_hooks_list: list[JobHooks],
) -> int:
"""
@@ -238,7 +184,7 @@ def sync_hooks_to_platform(
Args:
project_path: Path to project root
- platform_config: Platform configuration
+ adapter: Agent adapter for the target platform
job_hooks_list: List of JobHooks from jobs
Returns:
@@ -253,14 +199,8 @@ def sync_hooks_to_platform(
if not merged_hooks:
return 0
- # Currently only Claude Code is fully supported
- if platform_config.name == "claude":
- sync_hooks_to_claude(project_path, platform_config, merged_hooks)
- else:
- # For other platforms, we'd add support here
- # For now, just skip
- return 0
-
- # Count total hooks
- total = sum(len(hooks) for hooks in merged_hooks.values())
- return total
+ # Delegate to adapter's sync_hooks method
+ try:
+ return adapter.sync_hooks(project_path, merged_hooks)
+ except Exception as e:
+ raise HooksSyncError(f"Failed to sync hooks: {e}") from e
diff --git a/src/deepwork/core/parser.py b/src/deepwork/core/parser.py
index babf16fc..42fba818 100644
--- a/src/deepwork/core/parser.py
+++ b/src/deepwork/core/parser.py
@@ -4,7 +4,7 @@
from pathlib import Path
from typing import Any
-from deepwork.schemas.job_schema import JOB_SCHEMA
+from deepwork.schemas.job_schema import JOB_SCHEMA, LIFECYCLE_HOOK_EVENTS
from deepwork.utils.validation import ValidationError, validate_against_schema
from deepwork.utils.yaml_utils import YAMLError, load_yaml
@@ -47,14 +47,14 @@ def from_dict(cls, data: dict[str, Any]) -> "StepInput":
@dataclass
-class StopHook:
- """Represents a stop hook configuration for quality validation loops.
-
- Stop hooks enable iterative refinement by blocking step completion until
- quality criteria are met. Three types are supported:
- - prompt: Inline prompt text for validation
- - prompt_file: Path to a file containing the validation prompt
- - script: Path to a shell script for custom validation logic
+class HookAction:
+ """Represents a hook action configuration.
+
+ Hook actions define what happens when a lifecycle hook is triggered.
+ Three types are supported:
+ - prompt: Inline prompt text for validation/action
+ - prompt_file: Path to a file containing the prompt
+ - script: Path to a shell script for custom logic
"""
# Inline prompt
@@ -79,8 +79,8 @@ def is_script(self) -> bool:
return self.script is not None
@classmethod
- def from_dict(cls, data: dict[str, Any]) -> "StopHook":
- """Create StopHook from dictionary."""
+ def from_dict(cls, data: dict[str, Any]) -> "HookAction":
+ """Create HookAction from dictionary."""
return cls(
prompt=data.get("prompt"),
prompt_file=data.get("prompt_file"),
@@ -88,6 +88,10 @@ def from_dict(cls, data: dict[str, Any]) -> "StopHook":
)
+# Backward compatibility alias
+StopHook = HookAction
+
+
@dataclass
class Step:
"""Represents a single step in a job."""
@@ -99,11 +103,38 @@ class Step:
inputs: list[StepInput] = field(default_factory=list)
outputs: list[str] = field(default_factory=list)
dependencies: list[str] = field(default_factory=list)
- stop_hooks: list[StopHook] = field(default_factory=list)
+
+ # New: hooks dict mapping lifecycle event names to HookAction lists
+ # Event names: after_agent, before_tool, before_prompt
+ hooks: dict[str, list[HookAction]] = field(default_factory=dict)
+
+ @property
+ def stop_hooks(self) -> list[HookAction]:
+ """
+ Backward compatibility property for stop_hooks.
+
+ Returns hooks for after_agent event.
+ """
+ return self.hooks.get("after_agent", [])
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "Step":
"""Create Step from dictionary."""
+ # Parse new hooks structure
+ hooks: dict[str, list[HookAction]] = {}
+ if "hooks" in data:
+ hooks_data = data["hooks"]
+ for event in LIFECYCLE_HOOK_EVENTS:
+ if event in hooks_data:
+ hooks[event] = [HookAction.from_dict(h) for h in hooks_data[event]]
+
+ # Handle deprecated stop_hooks -> after_agent
+ if "stop_hooks" in data and data["stop_hooks"]:
+ # Merge with any existing after_agent hooks
+ after_agent_hooks = hooks.get("after_agent", [])
+ after_agent_hooks.extend([HookAction.from_dict(h) for h in data["stop_hooks"]])
+ hooks["after_agent"] = after_agent_hooks
+
return cls(
id=data["id"],
name=data["name"],
@@ -112,7 +143,7 @@ def from_dict(cls, data: dict[str, Any]) -> "Step":
inputs=[StepInput.from_dict(inp) for inp in data.get("inputs", [])],
outputs=data["outputs"],
dependencies=data.get("dependencies", []),
- stop_hooks=[StopHook.from_dict(h) for h in data.get("stop_hooks", [])],
+ hooks=hooks,
)
diff --git a/src/deepwork/schemas/job_schema.py b/src/deepwork/schemas/job_schema.py
index 0023a962..a55cbb71 100644
--- a/src/deepwork/schemas/job_schema.py
+++ b/src/deepwork/schemas/job_schema.py
@@ -2,6 +2,50 @@
from typing import Any
+# Supported lifecycle hook events (generic names, mapped to platform-specific by adapters)
+# These values must match CommandLifecycleHook enum in adapters.py
+LIFECYCLE_HOOK_EVENTS = ["after_agent", "before_tool", "before_prompt"]
+
+# Schema definition for a single hook action (prompt, prompt_file, or script)
+HOOK_ACTION_SCHEMA: dict[str, Any] = {
+ "type": "object",
+ "oneOf": [
+ {
+ "required": ["prompt"],
+ "properties": {
+ "prompt": {
+ "type": "string",
+ "minLength": 1,
+ "description": "Inline prompt for validation/action",
+ },
+ },
+ "additionalProperties": False,
+ },
+ {
+ "required": ["prompt_file"],
+ "properties": {
+ "prompt_file": {
+ "type": "string",
+ "minLength": 1,
+ "description": "Path to prompt file (relative to job directory)",
+ },
+ },
+ "additionalProperties": False,
+ },
+ {
+ "required": ["script"],
+ "properties": {
+ "script": {
+ "type": "string",
+ "minLength": 1,
+ "description": "Path to shell script (relative to job directory)",
+ },
+ },
+ "additionalProperties": False,
+ },
+ ],
+}
+
# JSON Schema for job.yml files
JOB_SCHEMA: dict[str, Any] = {
"$schema": "http://json-schema.org/draft-07/schema#",
@@ -132,47 +176,33 @@
},
"default": [],
},
+ "hooks": {
+ "type": "object",
+ "description": "Lifecycle hooks for this step, keyed by event type",
+ "properties": {
+ "after_agent": {
+ "type": "array",
+ "description": "Hooks triggered after the agent finishes (quality validation)",
+ "items": HOOK_ACTION_SCHEMA,
+ },
+ "before_tool": {
+ "type": "array",
+ "description": "Hooks triggered before a tool is used",
+ "items": HOOK_ACTION_SCHEMA,
+ },
+ "before_prompt": {
+ "type": "array",
+ "description": "Hooks triggered when user submits a prompt",
+ "items": HOOK_ACTION_SCHEMA,
+ },
+ },
+ "additionalProperties": False,
+ },
+ # DEPRECATED: Use hooks.after_agent instead
"stop_hooks": {
"type": "array",
- "description": "Stop hooks for quality validation loops (executed in order)",
- "items": {
- "type": "object",
- "oneOf": [
- {
- "required": ["prompt"],
- "properties": {
- "prompt": {
- "type": "string",
- "minLength": 1,
- "description": "Inline prompt for quality validation",
- },
- },
- "additionalProperties": False,
- },
- {
- "required": ["prompt_file"],
- "properties": {
- "prompt_file": {
- "type": "string",
- "minLength": 1,
- "description": "Path to prompt file (relative to job directory)",
- },
- },
- "additionalProperties": False,
- },
- {
- "required": ["script"],
- "properties": {
- "script": {
- "type": "string",
- "minLength": 1,
- "description": "Path to shell script (relative to job directory)",
- },
- },
- "additionalProperties": False,
- },
- ],
- },
+ "description": "DEPRECATED: Use hooks.after_agent instead. Stop hooks for quality validation loops.",
+ "items": HOOK_ACTION_SCHEMA,
},
},
"additionalProperties": False,
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/job.yml b/src/deepwork/standard_jobs/deepwork_jobs/job.yml
index 603664ff..87d9c3ac 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/job.yml
+++ b/src/deepwork/standard_jobs/deepwork_jobs/job.yml
@@ -43,7 +43,7 @@ steps:
4. **Concise Summary**: Is the summary under 200 characters and descriptive?
5. **Rich Description**: Does the description provide enough context for future refinement?
6. **Valid Schema**: Does the job.yml follow the required schema (name, version, summary, steps)?
- 7. **File Created**: Has the job.yml file been created in `deepwork/[job_name]/job.yml`?
+ 7. **File Created**: Has the job.yml file been created in `.deepwork/jobs/[job_name]/job.yml`?
If ANY criterion is not met, continue working to address it.
If ALL criteria are satisfied, include `QUALITY_COMPLETE` in your response.
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md b/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md
index c63da0fe..bb1165dd 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md
+++ b/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md
@@ -133,9 +133,9 @@ stop_hooks:
Only after you have complete understanding, create the `job.yml` file:
-**File Location**: `deepwork/[job_name]/job.yml`
+**File Location**: `.deepwork/jobs/[job_name]/job.yml`
-(Where `[job_name]` is the name of the NEW job you're creating, e.g., `deepwork/competitive_research/job.yml`)
+(Where `[job_name]` is the name of the NEW job you're creating, e.g., `.deepwork/jobs/competitive_research/job.yml`)
**Format**:
```yaml
@@ -298,12 +298,12 @@ User: Yes, that's perfect!
Claude: Great! Creating the job.yml specification now...
-[Creates deepwork/competitive_research/job.yml with the complete spec]
+[Creates .deepwork/jobs/competitive_research/job.yml with the complete spec]
✓ Job specification created!
**File created:**
-- deepwork/competitive_research/job.yml
+- .deepwork/jobs/competitive_research/job.yml
**Next step:**
Run `/deepwork_jobs.implement` to generate the instruction files for each step based on this specification.
@@ -337,9 +337,9 @@ Before creating the job.yml, ensure:
### job.yml
-The complete YAML specification file (example shown in Step 4 above).
+The complete YAML specification file (example shown in Step 5 above).
-**Location**: `deepwork/[job_name]/job.yml`
+**Location**: `.deepwork/jobs/[job_name]/job.yml`
(Where `[job_name]` is the name of the new job being created)
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/steps/implement.md b/src/deepwork/standard_jobs/deepwork_jobs/steps/implement.md
index 99c6fba4..b87e8c5a 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/steps/implement.md
+++ b/src/deepwork/standard_jobs/deepwork_jobs/steps/implement.md
@@ -11,7 +11,7 @@ Read the `job.yml` specification file and create all the necessary files to make
### Step 1: Read and Validate the Specification
1. **Locate the job.yml file**
- - Read `deepwork/[job_name]/job.yml` from the define step (Where `[job_name]` is the name of the new job that was created in the define step)
+ - Read `.deepwork/jobs/[job_name]/job.yml` from the define step (Where `[job_name]` is the name of the new job that was created in the define step)
- Parse the YAML content
2. **Validate the specification**
@@ -144,13 +144,9 @@ If a step in the job.yml has `stop_hooks` defined, the generated instruction fil
This alignment ensures the AI agent knows exactly what will be validated and can self-check before completing.
-### Step 4: Copy job.yml to Job Directory
+### Step 4: Verify job.yml Location
-Copy the validated `job.yml` from the work directory to `.deepwork/jobs/[job_name]/job.yml`:
-
-```bash
-cp deepwork/[job_name]/job.yml .deepwork/jobs/[job_name]/job.yml
-```
+Verify that `job.yml` is in the correct location at `.deepwork/jobs/[job_name]/job.yml`. The define step should have created it there. If for some reason it's not there, you may need to create or move it.
### Step 5: Sync Commands
diff --git a/src/deepwork/templates/claude/command-job-step.md.jinja b/src/deepwork/templates/claude/command-job-step.md.jinja
index a8fd172f..73426ba4 100644
--- a/src/deepwork/templates/claude/command-job-step.md.jinja
+++ b/src/deepwork/templates/claude/command-job-step.md.jinja
@@ -1,16 +1,18 @@
---
description: {{ step_description }}
-{% if stop_hooks %}
+{% if hooks %}
hooks:
- Stop:
+{% for event_name, event_hooks in hooks.items() %}
+ {{ event_name }}:
- hooks:
-{% for hook in stop_hooks %}
+{% for hook in event_hooks %}
{% if hook.type == "script" %}
- type: command
command: ".deepwork/jobs/{{ job_name }}/{{ hook.path }}"
{% else %}
- type: prompt
prompt: |
+{% if event_name == "Stop" %}
You must evaluate whether Claude has met all the below quality criteria for the request.
## Quality Criteria
@@ -27,8 +29,12 @@ hooks:
If criteria are NOT met AND the promise tag is missing, respond with:
{"ok": false, "reason": "Continue working. [specific feedback on what's wrong]"}
+{% else %}
+ {{ hook.content | indent(12) }}
+{% endif %}
{% endif %}
{% endfor %}
+{% endfor %}
{% endif %}
---
diff --git a/tests/integration/test_full_workflow.py b/tests/integration/test_full_workflow.py
index 7f2a5708..270c91c7 100644
--- a/tests/integration/test_full_workflow.py
+++ b/tests/integration/test_full_workflow.py
@@ -2,7 +2,7 @@
from pathlib import Path
-from deepwork.core.detector import PLATFORMS
+from deepwork.core.adapters import ClaudeAdapter
from deepwork.core.generator import CommandGenerator
from deepwork.core.parser import parse_job_definition
@@ -21,11 +21,11 @@ def test_parse_and_generate_workflow(self, fixtures_dir: Path, temp_dir: Path) -
# Step 2: Generate commands
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
commands_dir = temp_dir / ".claude"
commands_dir.mkdir()
- command_paths = generator.generate_all_commands(job, platform, commands_dir)
+ command_paths = generator.generate_all_commands(job, adapter, commands_dir)
assert len(command_paths) == 4
@@ -50,11 +50,11 @@ def test_simple_job_workflow(self, fixtures_dir: Path, temp_dir: Path) -> None:
# Generate
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
commands_dir = temp_dir / ".claude"
commands_dir.mkdir()
- command_paths = generator.generate_all_commands(job, platform, commands_dir)
+ command_paths = generator.generate_all_commands(job, adapter, commands_dir)
assert len(command_paths) == 1
@@ -72,11 +72,11 @@ def test_command_generation_with_dependencies(self, fixtures_dir: Path, temp_dir
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
commands_dir = temp_dir / ".claude"
commands_dir.mkdir()
- command_paths = generator.generate_all_commands(job, platform, commands_dir)
+ command_paths = generator.generate_all_commands(job, adapter, commands_dir)
# Check first step (no prerequisites)
step1_content = command_paths[0].read_text()
@@ -101,11 +101,11 @@ def test_command_generation_with_file_inputs(self, fixtures_dir: Path, temp_dir:
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
commands_dir = temp_dir / ".claude"
commands_dir.mkdir()
- command_paths = generator.generate_all_commands(job, platform, commands_dir)
+ command_paths = generator.generate_all_commands(job, adapter, commands_dir)
# Check step with file input
step2_content = command_paths[1].read_text() # primary_research
@@ -125,11 +125,11 @@ def test_command_generation_with_user_inputs(self, fixtures_dir: Path, temp_dir:
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
commands_dir = temp_dir / ".claude"
commands_dir.mkdir()
- command_paths = generator.generate_all_commands(job, platform, commands_dir)
+ command_paths = generator.generate_all_commands(job, adapter, commands_dir)
# Check step with user inputs
step1_content = command_paths[0].read_text() # identify_competitors
diff --git a/tests/integration/test_install_flow.py b/tests/integration/test_install_flow.py
index ac992f7b..fa5f9583 100644
--- a/tests/integration/test_install_flow.py
+++ b/tests/integration/test_install_flow.py
@@ -80,26 +80,8 @@ def test_install_fails_without_platform(self, mock_git_repo: Path) -> None:
assert result.exit_code != 0
assert "No AI platform detected" in result.output
- def test_install_fails_with_multiple_platforms(self, temp_dir: Path) -> None:
- """Test that install fails when multiple platforms detected without explicit choice."""
- from git import Repo
-
- # Create git repo with multiple platforms
- repo = Repo.init(temp_dir)
- (temp_dir / "README.md").write_text("# Test\n")
- repo.index.add(["README.md"])
- repo.index.commit("Initial commit")
-
- # Create both Claude and Gemini directories
- (temp_dir / ".claude").mkdir()
- (temp_dir / ".gemini").mkdir()
-
- runner = CliRunner()
-
- result = runner.invoke(cli, ["install", "--path", str(temp_dir)])
-
- assert result.exit_code != 0
- assert "Multiple AI platforms detected" in result.output
+ # NOTE: Multiple platform detection test removed since we currently only support Claude.
+ # When more adapters are added, this test should be reinstated.
def test_install_with_specified_platform_when_missing(self, mock_git_repo: Path) -> None:
"""Test that install fails when specified platform is not present."""
diff --git a/tests/unit/test_adapters.py b/tests/unit/test_adapters.py
new file mode 100644
index 00000000..b0717897
--- /dev/null
+++ b/tests/unit/test_adapters.py
@@ -0,0 +1,165 @@
+"""Tests for agent adapters."""
+
+import json
+from pathlib import Path
+
+import pytest
+
+from deepwork.core.adapters import (
+ AdapterError,
+ AgentAdapter,
+ ClaudeAdapter,
+)
+
+
+class TestAgentAdapterRegistry:
+ """Tests for AgentAdapter registry functionality."""
+
+ def test_get_all_returns_registered_adapters(self) -> None:
+ """Test that get_all returns all registered adapters."""
+ adapters = AgentAdapter.get_all()
+
+ assert "claude" in adapters
+ assert adapters["claude"] is ClaudeAdapter
+
+ def test_get_returns_correct_adapter(self) -> None:
+ """Test that get returns the correct adapter class."""
+ assert AgentAdapter.get("claude") is ClaudeAdapter
+
+ def test_get_raises_for_unknown_adapter(self) -> None:
+ """Test that get raises AdapterError for unknown adapter."""
+ with pytest.raises(AdapterError, match="Unknown adapter 'unknown'"):
+ AgentAdapter.get("unknown")
+
+ def test_list_names_returns_all_names(self) -> None:
+ """Test that list_names returns all registered adapter names."""
+ names = AgentAdapter.list_names()
+
+ assert "claude" in names
+ assert len(names) >= 1 # At least claude
+
+
+class TestClaudeAdapter:
+ """Tests for ClaudeAdapter."""
+
+ def test_class_attributes(self) -> None:
+ """Test Claude adapter class attributes."""
+ assert ClaudeAdapter.name == "claude"
+ assert ClaudeAdapter.display_name == "Claude Code"
+ assert ClaudeAdapter.config_dir == ".claude"
+ assert ClaudeAdapter.commands_dir == "commands"
+
+ def test_init_with_project_root(self, temp_dir: Path) -> None:
+ """Test initialization with project root."""
+ adapter = ClaudeAdapter(temp_dir)
+
+ assert adapter.project_root == temp_dir
+
+ def test_init_without_project_root(self) -> None:
+ """Test initialization without project root."""
+ adapter = ClaudeAdapter()
+
+ assert adapter.project_root is None
+
+ def test_detect_when_present(self, temp_dir: Path) -> None:
+ """Test detect when .claude directory exists."""
+ (temp_dir / ".claude").mkdir()
+ adapter = ClaudeAdapter(temp_dir)
+
+ assert adapter.detect() is True
+
+ def test_detect_when_absent(self, temp_dir: Path) -> None:
+ """Test detect when .claude directory doesn't exist."""
+ adapter = ClaudeAdapter(temp_dir)
+
+ assert adapter.detect() is False
+
+ def test_detect_with_explicit_project_root(self, temp_dir: Path) -> None:
+ """Test detect with explicit project root parameter."""
+ (temp_dir / ".claude").mkdir()
+ adapter = ClaudeAdapter()
+
+ assert adapter.detect(temp_dir) is True
+
+ def test_get_template_dir(self, temp_dir: Path) -> None:
+ """Test get_template_dir."""
+ adapter = ClaudeAdapter()
+ templates_root = temp_dir / "templates"
+
+ result = adapter.get_template_dir(templates_root)
+
+ assert result == templates_root / "claude"
+
+ def test_get_commands_dir(self, temp_dir: Path) -> None:
+ """Test get_commands_dir."""
+ adapter = ClaudeAdapter(temp_dir)
+
+ result = adapter.get_commands_dir()
+
+ assert result == temp_dir / ".claude" / "commands"
+
+ def test_get_commands_dir_with_explicit_root(self, temp_dir: Path) -> None:
+ """Test get_commands_dir with explicit project root."""
+ adapter = ClaudeAdapter()
+
+ result = adapter.get_commands_dir(temp_dir)
+
+ assert result == temp_dir / ".claude" / "commands"
+
+ def test_get_commands_dir_raises_without_root(self) -> None:
+ """Test get_commands_dir raises when no project root specified."""
+ adapter = ClaudeAdapter()
+
+ with pytest.raises(AdapterError, match="No project root specified"):
+ adapter.get_commands_dir()
+
+ def test_get_command_filename(self) -> None:
+ """Test get_command_filename."""
+ adapter = ClaudeAdapter()
+
+ result = adapter.get_command_filename("my_job", "step_one")
+
+ assert result == "my_job.step_one.md"
+
+ def test_sync_hooks_creates_settings_file(self, temp_dir: Path) -> None:
+ """Test sync_hooks creates settings.json when it doesn't exist."""
+ (temp_dir / ".claude").mkdir()
+ adapter = ClaudeAdapter(temp_dir)
+ hooks = {
+ "PreToolUse": [{"matcher": "", "hooks": [{"type": "command", "command": "test.sh"}]}]
+ }
+
+ count = adapter.sync_hooks(temp_dir, hooks)
+
+ assert count == 1
+ settings_file = temp_dir / ".claude" / "settings.json"
+ assert settings_file.exists()
+ settings = json.loads(settings_file.read_text())
+ assert "hooks" in settings
+ assert "PreToolUse" in settings["hooks"]
+
+ def test_sync_hooks_merges_with_existing(self, temp_dir: Path) -> None:
+ """Test sync_hooks merges with existing settings."""
+ claude_dir = temp_dir / ".claude"
+ claude_dir.mkdir()
+ settings_file = claude_dir / "settings.json"
+ settings_file.write_text(json.dumps({"existing_key": "value", "hooks": {}}))
+
+ adapter = ClaudeAdapter(temp_dir)
+ hooks = {
+ "PreToolUse": [{"matcher": "", "hooks": [{"type": "command", "command": "test.sh"}]}]
+ }
+
+ adapter.sync_hooks(temp_dir, hooks)
+
+ settings = json.loads(settings_file.read_text())
+ assert settings["existing_key"] == "value"
+ assert "PreToolUse" in settings["hooks"]
+
+ def test_sync_hooks_empty_hooks_returns_zero(self, temp_dir: Path) -> None:
+ """Test sync_hooks returns 0 for empty hooks."""
+ adapter = ClaudeAdapter(temp_dir)
+
+ count = adapter.sync_hooks(temp_dir, {})
+
+ assert count == 0
diff --git a/tests/unit/test_detector.py b/tests/unit/test_detector.py
index 67b6c855..1e51a01b 100644
--- a/tests/unit/test_detector.py
+++ b/tests/unit/test_detector.py
@@ -4,42 +4,8 @@
import pytest
-from deepwork.core.detector import (
- PLATFORMS,
- DetectorError,
- PlatformDetector,
-)
-
-
-class TestPlatformConfig:
- """Tests for PlatformConfig dataclass."""
-
- def test_claude_config(self) -> None:
- """Test Claude platform configuration."""
- config = PLATFORMS["claude"]
-
- assert config.name == "claude"
- assert config.display_name == "Claude Code"
- assert config.config_dir == ".claude"
- assert config.commands_dir == "commands"
-
- def test_gemini_config(self) -> None:
- """Test Gemini platform configuration."""
- config = PLATFORMS["gemini"]
-
- assert config.name == "gemini"
- assert config.display_name == "Google Gemini"
- assert config.config_dir == ".gemini"
- assert config.commands_dir == "commands"
-
- def test_copilot_config(self) -> None:
- """Test Copilot platform configuration."""
- config = PLATFORMS["copilot"]
-
- assert config.name == "copilot"
- assert config.display_name == "GitHub Copilot"
- assert config.config_dir == ".github"
- assert config.commands_dir == "commands"
+from deepwork.core.adapters import ClaudeAdapter
+from deepwork.core.detector import DetectorError, PlatformDetector
class TestPlatformDetector:
@@ -51,89 +17,65 @@ def test_detect_claude_present(self, temp_dir: Path) -> None:
claude_dir.mkdir()
detector = PlatformDetector(temp_dir)
- config = detector.detect_platform("claude")
+ adapter = detector.detect_platform("claude")
- assert config is not None
- assert config.name == "claude"
+ assert adapter is not None
+ assert isinstance(adapter, ClaudeAdapter)
+ assert adapter.name == "claude"
def test_detect_claude_absent(self, temp_dir: Path) -> None:
"""Test detecting Claude when .claude directory doesn't exist."""
detector = PlatformDetector(temp_dir)
- config = detector.detect_platform("claude")
-
- assert config is None
+ adapter = detector.detect_platform("claude")
- def test_detect_gemini_present(self, temp_dir: Path) -> None:
- """Test detecting Gemini when .gemini directory exists."""
- gemini_dir = temp_dir / ".gemini"
- gemini_dir.mkdir()
-
- detector = PlatformDetector(temp_dir)
- config = detector.detect_platform("gemini")
-
- assert config is not None
- assert config.name == "gemini"
-
- def test_detect_copilot_present(self, temp_dir: Path) -> None:
- """Test detecting Copilot when .github directory exists."""
- github_dir = temp_dir / ".github"
- github_dir.mkdir()
-
- detector = PlatformDetector(temp_dir)
- config = detector.detect_platform("copilot")
-
- assert config is not None
- assert config.name == "copilot"
+ assert adapter is None
def test_detect_platform_raises_for_unknown(self, temp_dir: Path) -> None:
"""Test that detecting unknown platform raises error."""
detector = PlatformDetector(temp_dir)
- with pytest.raises(DetectorError, match="Unknown platform"):
+ with pytest.raises(DetectorError, match="Unknown adapter"):
detector.detect_platform("unknown")
def test_detect_all_platforms_empty(self, temp_dir: Path) -> None:
"""Test detecting all platforms when none are present."""
detector = PlatformDetector(temp_dir)
- platforms = detector.detect_all_platforms()
+ adapters = detector.detect_all_platforms()
- assert platforms == []
+ assert adapters == []
- def test_detect_all_platforms_multiple(self, temp_dir: Path) -> None:
- """Test detecting all platforms when multiple are present."""
+ def test_detect_all_platforms_claude_present(self, temp_dir: Path) -> None:
+ """Test detecting all platforms when Claude is present."""
(temp_dir / ".claude").mkdir()
- (temp_dir / ".gemini").mkdir()
detector = PlatformDetector(temp_dir)
- platforms = detector.detect_all_platforms()
+ adapters = detector.detect_all_platforms()
- assert len(platforms) == 2
- names = {p.name for p in platforms}
- assert names == {"claude", "gemini"}
+ assert len(adapters) == 1
+ assert adapters[0].name == "claude"
- def test_get_platform_config(self, temp_dir: Path) -> None:
- """Test getting platform config without checking availability."""
+ def test_get_adapter(self, temp_dir: Path) -> None:
+ """Test getting adapter without checking availability."""
detector = PlatformDetector(temp_dir)
- config = detector.get_platform_config("claude")
+ adapter = detector.get_adapter("claude")
- assert config.name == "claude"
- assert config.display_name == "Claude Code"
+ assert isinstance(adapter, ClaudeAdapter)
+ assert adapter.name == "claude"
+ assert adapter.display_name == "Claude Code"
- def test_get_platform_config_raises_for_unknown(self, temp_dir: Path) -> None:
- """Test that getting unknown platform config raises error."""
+ def test_get_adapter_raises_for_unknown(self, temp_dir: Path) -> None:
+ """Test that getting unknown adapter raises error."""
detector = PlatformDetector(temp_dir)
- with pytest.raises(DetectorError, match="Unknown platform"):
- detector.get_platform_config("unknown")
+ with pytest.raises(DetectorError, match="Unknown adapter"):
+ detector.get_adapter("unknown")
def test_list_supported_platforms(self) -> None:
"""Test listing all supported platforms."""
platforms = PlatformDetector.list_supported_platforms()
assert "claude" in platforms
- assert "gemini" in platforms
- assert "copilot" in platforms
- assert len(platforms) == 3
+ assert len(platforms) >= 1 # At least claude
def test_detect_ignores_files(self, temp_dir: Path) -> None:
"""Test that detector ignores files with platform names."""
@@ -141,6 +83,16 @@ def test_detect_ignores_files(self, temp_dir: Path) -> None:
(temp_dir / ".claude").write_text("not a directory")
detector = PlatformDetector(temp_dir)
- config = detector.detect_platform("claude")
+ adapter = detector.detect_platform("claude")
+
+ assert adapter is None
+
+ def test_detected_adapter_has_project_root(self, temp_dir: Path) -> None:
+ """Test that detected adapter has project_root set."""
+ (temp_dir / ".claude").mkdir()
+
+ detector = PlatformDetector(temp_dir)
+ adapter = detector.detect_platform("claude")
- assert config is None
+ assert adapter is not None
+ assert adapter.project_root == temp_dir
diff --git a/tests/unit/test_generator.py b/tests/unit/test_generator.py
index 92f98a90..f83d20a2 100644
--- a/tests/unit/test_generator.py
+++ b/tests/unit/test_generator.py
@@ -4,7 +4,7 @@
import pytest
-from deepwork.core.detector import PLATFORMS
+from deepwork.core.adapters import ClaudeAdapter
from deepwork.core.generator import CommandGenerator, GeneratorError
from deepwork.core.parser import parse_job_definition
@@ -41,9 +41,9 @@ def test_generate_step_command_simple_job(self, fixtures_dir: Path, temp_dir: Pa
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
- command_path = generator.generate_step_command(job, job.steps[0], platform, temp_dir)
+ command_path = generator.generate_step_command(job, job.steps[0], adapter, temp_dir)
assert command_path.exists()
assert command_path.name == "simple_job.single_step.md"
@@ -63,9 +63,9 @@ def test_generate_step_command_complex_job_first_step(
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
- command_path = generator.generate_step_command(job, job.steps[0], platform, temp_dir)
+ command_path = generator.generate_step_command(job, job.steps[0], adapter, temp_dir)
content = command_path.read_text()
assert "# competitive_research.identify_competitors" in content
@@ -85,10 +85,10 @@ def test_generate_step_command_complex_job_middle_step(
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
# Generate primary_research (step 2)
- command_path = generator.generate_step_command(job, job.steps[1], platform, temp_dir)
+ command_path = generator.generate_step_command(job, job.steps[1], adapter, temp_dir)
content = command_path.read_text()
assert "# competitive_research.primary_research" in content
@@ -110,10 +110,10 @@ def test_generate_step_command_complex_job_final_step(
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
# Generate comparative_report (step 4)
- command_path = generator.generate_step_command(job, job.steps[3], platform, temp_dir)
+ command_path = generator.generate_step_command(job, job.steps[3], adapter, temp_dir)
content = command_path.read_text()
assert "# competitive_research.comparative_report" in content
@@ -135,7 +135,7 @@ def test_generate_step_command_raises_for_missing_step(
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
# Create a fake step not in the job
from deepwork.core.parser import Step
@@ -149,7 +149,7 @@ def test_generate_step_command_raises_for_missing_step(
)
with pytest.raises(GeneratorError, match="Step 'fake' not found"):
- generator.generate_step_command(job, fake_step, platform, temp_dir)
+ generator.generate_step_command(job, fake_step, adapter, temp_dir)
def test_generate_step_command_raises_for_missing_instructions(
self, fixtures_dir: Path, temp_dir: Path
@@ -167,10 +167,10 @@ def test_generate_step_command_raises_for_missing_instructions(
instructions_file.unlink()
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
with pytest.raises(GeneratorError, match="instructions file not found"):
- generator.generate_step_command(job, job.steps[0], platform, temp_dir)
+ generator.generate_step_command(job, job.steps[0], adapter, temp_dir)
finally:
# Restore the file
instructions_file.write_text(original_content)
@@ -181,9 +181,9 @@ def test_generate_all_commands(self, fixtures_dir: Path, temp_dir: Path) -> None
job = parse_job_definition(job_dir)
generator = CommandGenerator()
- platform = PLATFORMS["claude"]
+ adapter = ClaudeAdapter()
- command_paths = generator.generate_all_commands(job, platform, temp_dir)
+ command_paths = generator.generate_all_commands(job, adapter, temp_dir)
assert len(command_paths) == 4
assert all(p.exists() for p in command_paths)
@@ -197,57 +197,3 @@ def test_generate_all_commands(self, fixtures_dir: Path, temp_dir: Path) -> None
]
actual_names = [p.name for p in command_paths]
assert actual_names == expected_names
-
- def test_generate_step_command_different_platform(
- self, fixtures_dir: Path, temp_dir: Path
- ) -> None:
- """Test generating command for different platform (Gemini)."""
- # Create .gemini templates directory by copying from claude
- import shutil
-
- templates_dir = Path(__file__).parent.parent.parent / "src" / "deepwork" / "templates"
- gemini_templates = templates_dir / "gemini"
- gemini_templates.mkdir(exist_ok=True)
-
- try:
- # Copy templates from claude to gemini
- for template_file in (templates_dir / "claude").glob("*.jinja"):
- shutil.copy(template_file, gemini_templates / template_file.name)
-
- job_dir = fixtures_dir / "jobs" / "simple_job"
- job = parse_job_definition(job_dir)
-
- generator = CommandGenerator()
- platform = PLATFORMS["gemini"]
-
- command_path = generator.generate_step_command(job, job.steps[0], platform, temp_dir)
-
- # Gemini uses same filename format
- assert command_path.name == "simple_job.single_step.md"
- assert command_path.exists()
- finally:
- # Cleanup
- if gemini_templates.exists():
- shutil.rmtree(gemini_templates)
-
- def test_generate_raises_for_missing_platform_templates(
- self, fixtures_dir: Path, temp_dir: Path
- ) -> None:
- """Test that missing platform templates raises error."""
- import shutil
-
- # Ensure gemini templates don't exist
- templates_dir = Path(__file__).parent.parent.parent / "src" / "deepwork" / "templates"
- gemini_templates = templates_dir / "gemini"
- if gemini_templates.exists():
- shutil.rmtree(gemini_templates)
-
- job_dir = fixtures_dir / "jobs" / "simple_job"
- job = parse_job_definition(job_dir)
-
- generator = CommandGenerator()
- # Gemini templates don't exist
- platform = PLATFORMS["gemini"]
-
- with pytest.raises(GeneratorError, match="Templates for platform"):
- generator.generate_step_command(job, job.steps[0], platform, temp_dir)
diff --git a/tests/unit/test_hooks_syncer.py b/tests/unit/test_hooks_syncer.py
index 5a8d4ea0..0a1b1c0c 100644
--- a/tests/unit/test_hooks_syncer.py
+++ b/tests/unit/test_hooks_syncer.py
@@ -3,13 +3,13 @@
import json
from pathlib import Path
-from deepwork.core.detector import PlatformConfig
+from deepwork.core.adapters import ClaudeAdapter
from deepwork.core.hooks_syncer import (
HookEntry,
JobHooks,
collect_job_hooks,
merge_hooks_for_platform,
- sync_hooks_to_claude,
+ sync_hooks_to_platform,
)
@@ -179,32 +179,33 @@ def test_avoids_duplicate_hooks(self, temp_dir: Path) -> None:
assert len(result["Stop"]) == 1
-class TestSyncHooksToClaude:
- """Tests for sync_hooks_to_claude function."""
-
- def test_creates_settings_file(self, temp_dir: Path) -> None:
- """Test creating settings.json when it doesn't exist."""
- platform = PlatformConfig(
- name="claude",
- display_name="Claude Code",
- config_dir=".claude",
- commands_dir="commands",
- )
+class TestSyncHooksToPlatform:
+ """Tests for sync_hooks_to_platform function using adapters."""
+ def test_syncs_hooks_via_adapter(self, temp_dir: Path) -> None:
+ """Test syncing hooks to platform via adapter."""
# Create .claude directory
(temp_dir / ".claude").mkdir(parents=True)
- hooks = {
- "Stop": [
- {
- "matcher": "",
- "hooks": [{"type": "command", "command": "test_hook.sh"}],
- }
- ]
- }
+ adapter = ClaudeAdapter(temp_dir)
- sync_hooks_to_claude(temp_dir, platform, hooks)
+ # Create job directories
+ job_dir = temp_dir / ".deepwork" / "jobs" / "test_job"
+ job_dir.mkdir(parents=True)
+ job_hooks_list = [
+ JobHooks(
+ job_name="test_job",
+ job_dir=job_dir,
+ hooks={"Stop": ["test_hook.sh"]},
+ ),
+ ]
+
+ count = sync_hooks_to_platform(temp_dir, adapter, job_hooks_list)
+
+ assert count == 1
+
+ # Verify settings.json was created
settings_file = temp_dir / ".claude" / "settings.json"
assert settings_file.exists()
@@ -213,17 +214,17 @@ def test_creates_settings_file(self, temp_dir: Path) -> None:
assert "hooks" in settings
assert "Stop" in settings["hooks"]
- assert len(settings["hooks"]["Stop"]) == 1
+
+ def test_returns_zero_for_empty_hooks(self, temp_dir: Path) -> None:
+ """Test returns 0 when no hooks to sync."""
+ adapter = ClaudeAdapter(temp_dir)
+
+ count = sync_hooks_to_platform(temp_dir, adapter, [])
+
+ assert count == 0
def test_merges_with_existing_settings(self, temp_dir: Path) -> None:
"""Test merging hooks into existing settings.json."""
- platform = PlatformConfig(
- name="claude",
- display_name="Claude Code",
- config_dir=".claude",
- commands_dir="commands",
- )
-
# Create .claude directory with existing settings
claude_dir = temp_dir / ".claude"
claude_dir.mkdir(parents=True)
@@ -240,16 +241,20 @@ def test_merges_with_existing_settings(self, temp_dir: Path) -> None:
with open(settings_file, "w") as f:
json.dump(existing_settings, f)
- hooks = {
- "Stop": [
- {
- "matcher": "",
- "hooks": [{"type": "command", "command": "new_hook.sh"}],
- }
- ]
- }
+ adapter = ClaudeAdapter(temp_dir)
+
+ job_dir = temp_dir / ".deepwork" / "jobs" / "test_job"
+ job_dir.mkdir(parents=True)
+
+ job_hooks_list = [
+ JobHooks(
+ job_name="test_job",
+ job_dir=job_dir,
+ hooks={"Stop": ["new_hook.sh"]},
+ ),
+ ]
- sync_hooks_to_claude(temp_dir, platform, hooks)
+ sync_hooks_to_platform(temp_dir, adapter, job_hooks_list)
with open(settings_file) as f:
settings = json.load(f)
@@ -261,62 +266,3 @@ def test_merges_with_existing_settings(self, temp_dir: Path) -> None:
# Should add new hooks
assert "Stop" in settings["hooks"]
assert len(settings["hooks"]["Stop"]) == 1
-
- def test_does_not_duplicate_existing_hooks(self, temp_dir: Path) -> None:
- """Test that existing hooks are not duplicated."""
- platform = PlatformConfig(
- name="claude",
- display_name="Claude Code",
- config_dir=".claude",
- commands_dir="commands",
- )
-
- claude_dir = temp_dir / ".claude"
- claude_dir.mkdir(parents=True)
-
- # Settings with existing hook
- existing_settings = {
- "hooks": {
- "Stop": [{"matcher": "", "hooks": [{"type": "command", "command": "hook.sh"}]}]
- },
- }
- settings_file = claude_dir / "settings.json"
- with open(settings_file, "w") as f:
- json.dump(existing_settings, f)
-
- # Try to add the same hook
- hooks = {
- "Stop": [
- {
- "matcher": "",
- "hooks": [{"type": "command", "command": "hook.sh"}],
- }
- ]
- }
-
- sync_hooks_to_claude(temp_dir, platform, hooks)
-
- with open(settings_file) as f:
- settings = json.load(f)
-
- # Should still only have one hook
- assert len(settings["hooks"]["Stop"]) == 1
-
- def test_handles_empty_hooks(self, temp_dir: Path) -> None:
- """Test that empty hooks dict doesn't modify settings."""
- platform = PlatformConfig(
- name="claude",
- display_name="Claude Code",
- config_dir=".claude",
- commands_dir="commands",
- )
-
- claude_dir = temp_dir / ".claude"
- claude_dir.mkdir(parents=True)
-
- # No settings file exists initially
- sync_hooks_to_claude(temp_dir, platform, {})
-
- # Settings file should not be created
- settings_file = claude_dir / "settings.json"
- assert not settings_file.exists()
diff --git a/tests/unit/test_stop_hooks.py b/tests/unit/test_stop_hooks.py
index cd0fcc77..0e30117e 100644
--- a/tests/unit/test_stop_hooks.py
+++ b/tests/unit/test_stop_hooks.py
@@ -4,8 +4,9 @@
import pytest
+from deepwork.core.adapters import ClaudeAdapter
from deepwork.core.generator import CommandGenerator, GeneratorError
-from deepwork.core.parser import JobDefinition, Step, StopHook
+from deepwork.core.parser import HookAction, JobDefinition, Step, StopHook
from deepwork.schemas.job_schema import JOB_SCHEMA
from deepwork.utils.validation import ValidationError, validate_against_schema
@@ -74,31 +75,33 @@ def test_step_with_no_stop_hooks(self) -> None:
assert step.stop_hooks == []
def test_step_with_single_stop_hook(self) -> None:
- """Test step with single stop hook."""
+ """Test step with single stop hook (using hooks dict)."""
step = Step(
id="test",
name="Test Step",
description="A test step",
instructions_file="steps/test.md",
outputs=["output.md"],
- stop_hooks=[StopHook(prompt="Check quality")],
+ hooks={"after_agent": [HookAction(prompt="Check quality")]},
)
assert len(step.stop_hooks) == 1
assert step.stop_hooks[0].is_prompt()
assert step.stop_hooks[0].prompt == "Check quality"
def test_step_with_multiple_stop_hooks(self) -> None:
- """Test step with multiple stop hooks."""
+ """Test step with multiple stop hooks (using hooks dict)."""
step = Step(
id="test",
name="Test Step",
description="A test step",
instructions_file="steps/test.md",
outputs=["output.md"],
- stop_hooks=[
- StopHook(prompt="Check criteria 1"),
- StopHook(script="hooks/validate.sh"),
- ],
+ hooks={
+ "after_agent": [
+ HookAction(prompt="Check criteria 1"),
+ HookAction(script="hooks/validate.sh"),
+ ]
+ },
)
assert len(step.stop_hooks) == 2
assert step.stop_hooks[0].is_prompt()
@@ -134,6 +137,35 @@ def test_step_from_dict_without_stop_hooks(self) -> None:
step = Step.from_dict(data)
assert step.stop_hooks == []
+ def test_step_from_dict_with_hooks_structure(self) -> None:
+ """Test Step.from_dict parses new hooks structure with lifecycle events."""
+ data = {
+ "id": "test",
+ "name": "Test Step",
+ "description": "A test step",
+ "instructions_file": "steps/test.md",
+ "outputs": ["output.md"],
+ "hooks": {
+ "after_agent": [
+ {"prompt": "Check quality"},
+ {"script": "hooks/validate.sh"},
+ ],
+ "before_tool": [
+ {"prompt": "Pre-tool check"},
+ ],
+ },
+ }
+ step = Step.from_dict(data)
+ # stop_hooks property returns after_agent hooks
+ assert len(step.stop_hooks) == 2
+ assert step.stop_hooks[0].prompt == "Check quality"
+ assert step.stop_hooks[1].script == "hooks/validate.sh"
+ # Check full hooks dict
+ assert "after_agent" in step.hooks
+ assert "before_tool" in step.hooks
+ assert len(step.hooks["after_agent"]) == 2
+ assert len(step.hooks["before_tool"]) == 1
+
class TestSchemaValidation:
"""Tests for stop_hooks schema validation."""
@@ -258,6 +290,71 @@ def test_invalid_stop_hook_extra_fields(self) -> None:
with pytest.raises(ValidationError):
validate_against_schema(job_data, JOB_SCHEMA)
+ def test_valid_hooks_with_after_agent(self) -> None:
+ """Test schema accepts new hooks structure with after_agent event."""
+ job_data = {
+ "name": "test_job",
+ "version": "1.0.0",
+ "summary": "Test job",
+ "steps": [
+ {
+ "id": "step1",
+ "name": "Step 1",
+ "description": "A step",
+ "instructions_file": "steps/step1.md",
+ "outputs": ["output.md"],
+ "hooks": {
+ "after_agent": [{"prompt": "Check quality"}],
+ },
+ }
+ ],
+ }
+ validate_against_schema(job_data, JOB_SCHEMA)
+
+ def test_valid_hooks_with_multiple_events(self) -> None:
+ """Test schema accepts hooks with multiple lifecycle events."""
+ job_data = {
+ "name": "test_job",
+ "version": "1.0.0",
+ "summary": "Test job",
+ "steps": [
+ {
+ "id": "step1",
+ "name": "Step 1",
+ "description": "A step",
+ "instructions_file": "steps/step1.md",
+ "outputs": ["output.md"],
+ "hooks": {
+ "after_agent": [{"prompt": "Check quality"}],
+ "before_tool": [{"script": "hooks/validate.sh"}],
+ "before_prompt": [{"prompt": "Initialize context"}],
+ },
+ }
+ ],
+ }
+ validate_against_schema(job_data, JOB_SCHEMA)
+
+ def test_valid_hooks_with_script_action(self) -> None:
+ """Test schema accepts hooks with script action."""
+ job_data = {
+ "name": "test_job",
+ "version": "1.0.0",
+ "summary": "Test job",
+ "steps": [
+ {
+ "id": "step1",
+ "name": "Step 1",
+ "description": "A step",
+ "instructions_file": "steps/step1.md",
+ "outputs": ["output.md"],
+ "hooks": {
+ "before_tool": [{"script": "hooks/check.sh"}],
+ },
+ }
+ ],
+ }
+ validate_against_schema(job_data, JOB_SCHEMA)
+
class TestGeneratorStopHooks:
"""Tests for generator stop hooks context building."""
@@ -314,9 +411,9 @@ def job_with_hooks(self, tmp_path: Path) -> JobDefinition:
description="First step",
instructions_file="steps/step1.md",
outputs=["output.md"],
- stop_hooks=[
- StopHook(prompt="Verify quality criteria"),
- ],
+ hooks={
+ "after_agent": [HookAction(prompt="Verify quality criteria")],
+ },
),
],
job_dir=job_dir,
@@ -343,9 +440,9 @@ def job_with_script_hook(self, tmp_path: Path) -> JobDefinition:
description="First step",
instructions_file="steps/step1.md",
outputs=["output.md"],
- stop_hooks=[
- StopHook(script="hooks/validate.sh"),
- ],
+ hooks={
+ "after_agent": [HookAction(script="hooks/validate.sh")],
+ },
),
],
job_dir=job_dir,
@@ -375,9 +472,9 @@ def job_with_prompt_file_hook(self, tmp_path: Path) -> JobDefinition:
description="First step",
instructions_file="steps/step1.md",
outputs=["output.md"],
- stop_hooks=[
- StopHook(prompt_file="hooks/quality.md"),
- ],
+ hooks={
+ "after_agent": [HookAction(prompt_file="hooks/quality.md")],
+ },
),
],
job_dir=job_dir,
@@ -387,7 +484,8 @@ def test_build_context_with_prompt_hook(
self, generator: CommandGenerator, job_with_hooks: JobDefinition
) -> None:
"""Test context building includes prompt stop hook."""
- context = generator._build_step_context(job_with_hooks, job_with_hooks.steps[0], 0)
+ adapter = ClaudeAdapter()
+ context = generator._build_step_context(job_with_hooks, job_with_hooks.steps[0], 0, adapter)
assert "stop_hooks" in context
assert len(context["stop_hooks"]) == 1
assert context["stop_hooks"][0]["type"] == "prompt"
@@ -397,8 +495,9 @@ def test_build_context_with_script_hook(
self, generator: CommandGenerator, job_with_script_hook: JobDefinition
) -> None:
"""Test context building includes script stop hook."""
+ adapter = ClaudeAdapter()
context = generator._build_step_context(
- job_with_script_hook, job_with_script_hook.steps[0], 0
+ job_with_script_hook, job_with_script_hook.steps[0], 0, adapter
)
assert "stop_hooks" in context
assert len(context["stop_hooks"]) == 1
@@ -409,8 +508,9 @@ def test_build_context_with_prompt_file_hook(
self, generator: CommandGenerator, job_with_prompt_file_hook: JobDefinition
) -> None:
"""Test context building reads prompt file content."""
+ adapter = ClaudeAdapter()
context = generator._build_step_context(
- job_with_prompt_file_hook, job_with_prompt_file_hook.steps[0], 0
+ job_with_prompt_file_hook, job_with_prompt_file_hook.steps[0], 0, adapter
)
assert "stop_hooks" in context
assert len(context["stop_hooks"]) == 1
@@ -439,14 +539,17 @@ def test_build_context_with_missing_prompt_file(
description="Step",
instructions_file="steps/step1.md",
outputs=["out.md"],
- stop_hooks=[StopHook(prompt_file="missing.md")],
+ hooks={
+ "after_agent": [HookAction(prompt_file="missing.md")],
+ },
)
],
job_dir=job_dir,
)
+ adapter = ClaudeAdapter()
with pytest.raises(GeneratorError, match="prompt file not found"):
- generator._build_step_context(job, job.steps[0], 0)
+ generator._build_step_context(job, job.steps[0], 0, adapter)
def test_build_context_no_hooks(self, generator: CommandGenerator, tmp_path: Path) -> None:
"""Test context with no stop hooks."""
@@ -473,7 +576,8 @@ def test_build_context_no_hooks(self, generator: CommandGenerator, tmp_path: Pat
job_dir=job_dir,
)
- context = generator._build_step_context(job, job.steps[0], 0)
+ adapter = ClaudeAdapter()
+ context = generator._build_step_context(job, job.steps[0], 0, adapter)
assert context["stop_hooks"] == []
def test_build_context_multiple_hooks(
@@ -498,17 +602,20 @@ def test_build_context_multiple_hooks(
description="Step",
instructions_file="steps/step1.md",
outputs=["out.md"],
- stop_hooks=[
- StopHook(prompt="Check criteria 1"),
- StopHook(script="hooks/test.sh"),
- StopHook(prompt="Check criteria 2"),
- ],
+ hooks={
+ "after_agent": [
+ HookAction(prompt="Check criteria 1"),
+ HookAction(script="hooks/test.sh"),
+ HookAction(prompt="Check criteria 2"),
+ ],
+ },
)
],
job_dir=job_dir,
)
- context = generator._build_step_context(job, job.steps[0], 0)
+ adapter = ClaudeAdapter()
+ context = generator._build_step_context(job, job.steps[0], 0, adapter)
assert len(context["stop_hooks"]) == 3
assert context["stop_hooks"][0]["type"] == "prompt"
assert context["stop_hooks"][1]["type"] == "script"