diff --git a/.claude/commands/add_platform.add_capabilities.md b/.claude/commands/add_platform.add_capabilities.md new file mode 100644 index 00000000..902ea1fe --- /dev/null +++ b/.claude/commands/add_platform.add_capabilities.md @@ -0,0 +1,290 @@ +--- +description: Update job schema and adapters with any new hook events the platform supports +hooks: + Stop: + - hooks: + - type: prompt + prompt: | + You must evaluate whether Claude has met all the below quality criteria for the request. + + ## Quality Criteria + + Verify the capability additions meet ALL criteria: + 1. Any new hooks from the platform (for slash commands only) are added to src/deepwork/schemas/job_schema.py + 2. All existing adapters in src/deepwork/adapters.py are updated with the new hook fields + (set to None/null if the platform doesn't support that hook) + 3. Only hooks available on slash command definitions are added (not general CLI hooks) + 4. job_schema.py remains valid Python with no syntax errors + 5. adapters.py remains consistent - all adapters have the same hook fields + 6. If no new hooks are needed, document why in a comment + + If ALL criteria are met, include `QUALITY_COMPLETE`. + + + ## Instructions + + Review the conversation and determine if ALL quality criteria above have been satisfied. + Look for evidence that each criterion has been addressed. + + If the agent has included `QUALITY_COMPLETE` in their response AND + all criteria appear to be met, respond with: {"ok": true} + + If criteria are NOT met AND the promise tag is missing, respond with: + {"ok": false, "reason": "Continue working. [specific feedback on what's wrong]"} +--- + +# add_platform.add_capabilities + +**Step 2 of 4** in the **add_platform** workflow + +**Summary**: Add a new AI platform to DeepWork with adapter, templates, and tests + +## Job Overview + +A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + +This job guides you through four phases: +1. 
**Research**: Capture the platform's CLI configuration and hooks system documentation +2. **Add Capabilities**: Update the job schema and adapters with any new hook events +3. **Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates +4. **Verify**: Ensure installation works correctly and produces expected files + +The workflow ensures consistency across all supported platforms and maintains +comprehensive test coverage for new functionality. + +**Important Notes**: +- Only hooks available on slash command definitions should be captured +- Each existing adapter must be updated when new hooks are added (typically with null values) +- Tests must achieve 100% coverage for any new functionality +- Installation verification confirms the platform integrates correctly with existing jobs + + +## Prerequisites + +This step requires completion of the following step(s): +- `/add_platform.research` + +Please ensure these steps have been completed before proceeding. + +## Instructions + +# Add Hook Capabilities + +## Objective + +Update the DeepWork job schema and platform adapters to support any new hook events that the new platform provides for slash command definitions. + +## Task + +Analyze the hooks documentation from the research step and update the codebase to support any new hook capabilities, ensuring consistency across all existing adapters. + +### Prerequisites + +Read the hooks documentation created in the previous step: +- `doc/platforms//hooks_system.md` + +Also review the existing schema and adapters: +- `src/deepwork/schemas/job_schema.py` +- `src/deepwork/adapters.py` + +### Process + +1. **Analyze the new platform's hooks** + - Read `doc/platforms//hooks_system.md` + - List all hooks available for slash command definitions + - Compare with hooks already in `job_schema.py` + - Identify any NEW hooks not currently supported + +2. 
**Determine if schema changes are needed** + - If the platform has hooks that DeepWork doesn't currently support, add them + - If all hooks are already supported, document this finding + - Remember: Only add hooks that are available on slash command definitions + +3. **Update job_schema.py (if needed)** + - Add new hook fields to the step schema + - Follow existing patterns for hook definitions + - Add appropriate type hints and documentation + - Example addition: + ```python + # New hook from + new_hook_name: Optional[List[HookConfig]] = None + ``` + +4. **Update all existing adapters** + - Open `src/deepwork/adapters.py` + - For EACH existing adapter class: + - Add the new hook field (set to `None` if not supported) + - This maintains consistency across all adapters + - Document why each adapter does or doesn't support the hook + +5. **Validate the changes** + - Run Python syntax check: `python -m py_compile src/deepwork/schemas/job_schema.py` + - Run Python syntax check: `python -m py_compile src/deepwork/adapters.py` + - Ensure no import errors + +6. **Document the decision** + - If no new hooks were added, add a comment explaining why + - If new hooks were added, ensure they're documented in the schema + +## Output Format + +### job_schema.py + +Location: `src/deepwork/schemas/job_schema.py` + +If new hooks are added: +```python +@dataclass +class StepDefinition: + # ... existing fields ... + + # New hook from - [description of what it does] + new_hook_name: Optional[List[HookConfig]] = None +``` + +### adapters.py + +Location: `src/deepwork/adapters.py` + +For each existing adapter, add the new hook field: +```python +class ExistingPlatformAdapter(PlatformAdapter): + # ... existing code ... + + def get_hook_support(self) -> dict: + return { + # ... existing hooks ... 
+ "new_hook_name": None, # Not supported by this platform + } +``` + +Or if no changes are needed, add a documentation comment: +```python +# NOTE: hooks reviewed on YYYY-MM-DD +# No new hooks to add - all command hooks are already +# supported by the existing schema (stop_hooks covers their validation pattern) +``` + +## Quality Criteria + +- Hooks documentation from research step has been reviewed +- If new hooks exist: + - Added to `src/deepwork/schemas/job_schema.py` with proper typing + - ALL existing adapters updated in `src/deepwork/adapters.py` + - Each adapter indicates support level (implemented, None, or partial) +- If no new hooks needed: + - Decision documented with a comment explaining the analysis +- Only hooks available on slash command definitions are considered +- `job_schema.py` has no syntax errors (verified with py_compile) +- `adapters.py` has no syntax errors (verified with py_compile) +- All adapters have consistent hook fields (same fields across all adapters) +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +DeepWork supports multiple AI platforms, and each platform may have different capabilities for hooks within command definitions. The schema defines what hooks CAN exist, while adapters define what each platform actually SUPPORTS. + +This separation allows: +- Job definitions to use any hook (the schema is the superset) +- Platform-specific generation to only use supported hooks (adapters filter) +- Future platforms to add new hooks without breaking existing ones + +Maintaining consistency is critical - all adapters must have the same hook fields, even if they don't support them (use `None` for unsupported). 
+ +## Common Hook Types + +For reference, here are common hook patterns across platforms: + +| Hook Type | Purpose | Example Platforms | +|-----------|---------|-------------------| +| `stop_hooks` | Quality validation loops | Claude Code | +| `pre_hooks` | Run before command | Various | +| `post_hooks` | Run after command | Various | +| `validation_hooks` | Validate inputs/outputs | Various | + +When you find a new hook type, consider whether it maps to an existing pattern or is genuinely new functionality. + + +## Inputs + + +### Required Files + +This step requires the following files from previous steps: +- `hooks_system.md` (from step `research`) + +Make sure to read and use these files as context for this step. + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/add_platform-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/add_platform-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `job_schema.py`- `adapters.py` +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Quality Validation Loop + +This step uses an iterative quality validation loop. After completing your work, stop hook(s) will evaluate whether the outputs meet quality criteria. If criteria are not met, you will be prompted to continue refining. + +### Quality Criteria +Verify the capability additions meet ALL criteria: +1. Any new hooks from the platform (for slash commands only) are added to src/deepwork/schemas/job_schema.py +2. 
All existing adapters in src/deepwork/adapters.py are updated with the new hook fields + (set to None/null if the platform doesn't support that hook) +3. Only hooks available on slash command definitions are added (not general CLI hooks) +4. job_schema.py remains valid Python with no syntax errors +5. adapters.py remains consistent - all adapters have the same hook fields +6. If no new hooks are needed, document why in a comment + +If ALL criteria are met, include `QUALITY_COMPLETE`. + + +### Completion Promise + +To signal that all quality criteria have been met, include this tag in your final response: + +``` +QUALITY_COMPLETE +``` + +**Important**: Only include this promise tag when you have verified that ALL quality criteria above are satisfied. The validation loop will continue until this promise is detected. + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. **Inform the user**: + - Step 2 of 4 is complete + - Outputs created: job_schema.py, adapters.py + - Ready to proceed to next step: `/add_platform.implement` + +## Next Step + +To continue the workflow, run: +``` +/add_platform.implement +``` + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/add_platform/job.yml` +- Step instructions: `.deepwork/jobs/add_platform/steps/add_capabilities.md` \ No newline at end of file diff --git a/.claude/commands/add_platform.implement.md b/.claude/commands/add_platform.implement.md new file mode 100644 index 00000000..e204f9fe --- /dev/null +++ b/.claude/commands/add_platform.implement.md @@ -0,0 +1,393 @@ +--- +description: Add platform adapter, templates, tests with 100% coverage, and README documentation +hooks: + Stop: + - hooks: + - type: command + command: ".deepwork/jobs/add_platform/hooks/run_tests.sh" + - type: prompt + prompt: | + You must evaluate whether Claude has met all the below quality criteria for the request. 
+ + ## Quality Criteria + + Verify the implementation meets ALL criteria: + 1. Platform adapter class is added to src/deepwork/adapters.py + 2. Templates exist in src/deepwork/templates// with appropriate command structure + 3. Tests exist for all new functionality + 4. Test coverage is 100% for new code (run: uv run pytest --cov) + 5. All tests pass + 6. README.md is updated with: + - New platform listed in supported platforms + - Installation instructions for the platform + - Any platform-specific notes + + If ALL criteria are met, include `QUALITY_COMPLETE`. + + + ## Instructions + + Review the conversation and determine if ALL quality criteria above have been satisfied. + Look for evidence that each criterion has been addressed. + + If the agent has included `QUALITY_COMPLETE` in their response AND + all criteria appear to be met, respond with: {"ok": true} + + If criteria are NOT met AND the promise tag is missing, respond with: + {"ok": false, "reason": "Continue working. [specific feedback on what's wrong]"} +--- + +# add_platform.implement + +**Step 3 of 4** in the **add_platform** workflow + +**Summary**: Add a new AI platform to DeepWork with adapter, templates, and tests + +## Job Overview + +A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + +This job guides you through four phases: +1. **Research**: Capture the platform's CLI configuration and hooks system documentation +2. **Add Capabilities**: Update the job schema and adapters with any new hook events +3. **Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates +4. **Verify**: Ensure installation works correctly and produces expected files + +The workflow ensures consistency across all supported platforms and maintains +comprehensive test coverage for new functionality. 
+ +**Important Notes**: +- Only hooks available on slash command definitions should be captured +- Each existing adapter must be updated when new hooks are added (typically with null values) +- Tests must achieve 100% coverage for any new functionality +- Installation verification confirms the platform integrates correctly with existing jobs + + +## Prerequisites + +This step requires completion of the following step(s): +- `/add_platform.research` +- `/add_platform.add_capabilities` + +Please ensure these steps have been completed before proceeding. + +## Instructions + +# Implement Platform Support + +## Objective + +Create the complete platform implementation including the adapter class, command templates, comprehensive tests, and documentation updates. + +## Task + +Build the full platform support by implementing the adapter, creating templates, writing tests with 100% coverage, and updating the README. + +### Prerequisites + +Read the outputs from previous steps: +- `doc/platforms//cli_configuration.md` - For template structure +- `src/deepwork/schemas/job_schema.py` - For current schema +- `src/deepwork/adapters.py` - For adapter patterns + +Also review existing implementations for reference: +- `src/deepwork/templates/claude/` - Example templates +- `tests/` - Existing test patterns + +### Process + +1. **Create the platform adapter class** + + Add a new adapter class to `src/deepwork/adapters.py`: + + ```python + class NewPlatformAdapter(PlatformAdapter): + """Adapter for .""" + + platform_name = "" + command_directory = "" # e.g., ".cursor/commands" + command_extension = ".md" # or appropriate extension + + def get_hook_support(self) -> dict: + """Return which hooks this platform supports.""" + return { + "stop_hooks": True, # or False/None + # ... 
other hooks + } + + def generate_command(self, step: StepDefinition, job: JobDefinition) -> str: + """Generate command file content for this platform.""" + # Use Jinja2 template + template = self.env.get_template(f"{self.platform_name}/command.md.j2") + return template.render(step=step, job=job) + ``` + +2. **Create command templates** + + Create templates in `src/deepwork/templates//`: + + - `command.md.j2` - Main command template + - Any other templates needed for the platform's format + + Use the CLI configuration documentation to ensure the template matches the platform's expected format. + +3. **Register the adapter** + + Update the adapter registry in `src/deepwork/adapters.py`: + + ```python + PLATFORM_ADAPTERS = { + "claude": ClaudeAdapter, + "": NewPlatformAdapter, + # ... other adapters + } + ``` + +4. **Write comprehensive tests** + + Create tests in `tests/` that cover: + + - Adapter instantiation + - Hook support detection + - Command generation + - Template rendering + - Edge cases (empty inputs, special characters, etc.) + - Integration with the sync command + + **Critical**: Tests must achieve 100% coverage of new code. + +5. **Update README.md** + + Add the new platform to `README.md`: + + - Add to "Supported Platforms" list + - Add installation instructions: + ```bash + deepwork install --platform + ``` + - Document any platform-specific notes or limitations + +6. **Run tests and verify coverage** + + ```bash + uv run pytest --cov=src/deepwork --cov-report=term-missing + ``` + + - All tests must pass + - New code must have 100% coverage + - If coverage is below 100%, add more tests + +7. **Iterate until tests pass with full coverage** + + This step has a `stop_hooks` script that runs tests. 
Keep iterating until: + - All tests pass + - Coverage is 100% for new functionality + +## Output Format + +### templates/ + +Location: `src/deepwork/templates//` + +Create the following files: + +**command.md.j2**: +```jinja2 +{# Template for command files #} +{# Follows the platform's expected format from cli_configuration.md #} + +[Platform-specific frontmatter or metadata] + +# {{ step.name }} + +{{ step.description }} + +## Instructions + +{{ step.instructions_content }} + +[... rest of template based on platform format ...] +``` + +### tests/ + +Location: `tests/test__adapter.py` + +```python +"""Tests for the adapter.""" +import pytest +from deepwork.adapters import NewPlatformAdapter + +class TestNewPlatformAdapter: + """Test suite for NewPlatformAdapter.""" + + def test_adapter_initialization(self): + """Test adapter can be instantiated.""" + adapter = NewPlatformAdapter() + assert adapter.platform_name == "" + + def test_hook_support(self): + """Test hook support detection.""" + adapter = NewPlatformAdapter() + hooks = adapter.get_hook_support() + assert "stop_hooks" in hooks + # ... more assertions + + def test_command_generation(self): + """Test command file generation.""" + # ... test implementation + + # ... 
more tests for 100% coverage +``` + +### README.md + +Add to the existing README.md: + +```markdown +## Supported Platforms + +- **Claude Code** - Anthropic's CLI for Claude +- **** - [Brief description] + +## Installation + +### + +```bash +deepwork install --platform +``` + +[Any platform-specific notes] +``` + +## Quality Criteria + +- Platform adapter class added to `src/deepwork/adapters.py`: + - Inherits from `PlatformAdapter` + - Implements all required methods + - Registered in `PLATFORM_ADAPTERS` +- Templates created in `src/deepwork/templates//`: + - `command.md.j2` exists and renders correctly + - Format matches platform's expected command format +- Tests created in `tests/`: + - Cover all new adapter functionality + - Cover template rendering + - All tests pass +- Test coverage is 100% for new code: + - Run `uv run pytest --cov=src/deepwork --cov-report=term-missing` + - No uncovered lines in new code +- README.md updated: + - Platform listed in supported platforms + - Installation command documented + - Any platform-specific notes included +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +This is the core implementation step. The adapter you create will be responsible for: +- Determining where command files are placed +- Generating command file content from job definitions +- Handling platform-specific features and hooks + +The templates use Jinja2 and should produce files that match exactly what the platform expects. Reference the CLI configuration documentation frequently to ensure compatibility. 
+ +## Tips + +- Study the existing `ClaudeAdapter` as a reference implementation +- Run tests frequently as you implement +- Use `--cov-report=html` for a detailed coverage report +- If a test is hard to write, the code might need refactoring +- Template syntax errors often show up at runtime - test early + + +## Inputs + + +### Required Files + +This step requires the following files from previous steps: +- `job_schema.py` (from step `add_capabilities`) +- `adapters.py` (from step `add_capabilities`) +- `cli_configuration.md` (from step `research`) + +Make sure to read and use these files as context for this step. + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/add_platform-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/add_platform-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `templates/` (directory)- `tests/` (directory)- `README.md` +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Quality Validation Loop + +This step uses an iterative quality validation loop. After completing your work, stop hook(s) will evaluate whether the outputs meet quality criteria. If criteria are not met, you will be prompted to continue refining. + +**Validation Script**: `.deepwork/jobs/add_platform/hooks/run_tests.sh` + +The validation script will be executed automatically when you attempt to complete this step. +### Quality Criteria (2) +Verify the implementation meets ALL criteria: +1. Platform adapter class is added to src/deepwork/adapters.py +2. 
Templates exist in src/deepwork/templates// with appropriate command structure +3. Tests exist for all new functionality +4. Test coverage is 100% for new code (run: uv run pytest --cov) +5. All tests pass +6. README.md is updated with: + - New platform listed in supported platforms + - Installation instructions for the platform + - Any platform-specific notes + +If ALL criteria are met, include `QUALITY_COMPLETE`. + + +### Completion Promise + +To signal that all quality criteria have been met, include this tag in your final response: + +``` +QUALITY_COMPLETE +``` + +**Important**: Only include this promise tag when you have verified that ALL quality criteria above are satisfied. The validation loop will continue until this promise is detected. + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. **Inform the user**: + - Step 3 of 4 is complete + - Outputs created: templates/, tests/, README.md + - Ready to proceed to next step: `/add_platform.verify` + +## Next Step + +To continue the workflow, run: +``` +/add_platform.verify +``` + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/add_platform/job.yml` +- Step instructions: `.deepwork/jobs/add_platform/steps/implement.md` \ No newline at end of file diff --git a/.claude/commands/add_platform.research.md b/.claude/commands/add_platform.research.md new file mode 100644 index 00000000..303737b2 --- /dev/null +++ b/.claude/commands/add_platform.research.md @@ -0,0 +1,337 @@ +--- +description: Capture CLI configuration and hooks system documentation for the new platform +hooks: + Stop: + - hooks: + - type: prompt + prompt: | + You must evaluate whether Claude has met all the below quality criteria for the request. + + ## Quality Criteria + + Verify the research output meets ALL criteria: + 1. Both files exist in doc/platforms//: cli_configuration.md and hooks_system.md + 2. 
Each file has a comment at the top with: + - Last updated date + - Source URL where the documentation was obtained + 3. cli_configuration.md covers how the platform's CLI is configured + 4. hooks_system.md covers hooks available for slash command definitions ONLY + 5. No extraneous documentation (only these two specific topics) + 6. Documentation is comprehensive enough to implement the platform + + If ALL criteria are met, include `QUALITY_COMPLETE`. + + + ## Instructions + + Review the conversation and determine if ALL quality criteria above have been satisfied. + Look for evidence that each criterion has been addressed. + + If the agent has included `QUALITY_COMPLETE` in their response AND + all criteria appear to be met, respond with: {"ok": true} + + If criteria are NOT met AND the promise tag is missing, respond with: + {"ok": false, "reason": "Continue working. [specific feedback on what's wrong]"} +--- + +# add_platform.research + +**Step 1 of 4** in the **add_platform** workflow + +**Summary**: Add a new AI platform to DeepWork with adapter, templates, and tests + +## Job Overview + +A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + +This job guides you through four phases: +1. **Research**: Capture the platform's CLI configuration and hooks system documentation +2. **Add Capabilities**: Update the job schema and adapters with any new hook events +3. **Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates +4. **Verify**: Ensure installation works correctly and produces expected files + +The workflow ensures consistency across all supported platforms and maintains +comprehensive test coverage for new functionality. 
+ +**Important Notes**: +- Only hooks available on slash command definitions should be captured +- Each existing adapter must be updated when new hooks are added (typically with null values) +- Tests must achieve 100% coverage for any new functionality +- Installation verification confirms the platform integrates correctly with existing jobs + + + +## Instructions + +# Research Platform Documentation + +## Objective + +Capture comprehensive documentation for the new AI platform's CLI configuration and hooks system, creating a local reference that will guide the implementation phases. + +## Task + +Research the target platform's official documentation and create two focused documentation files that will serve as the foundation for implementing platform support in DeepWork. + +### Process + +1. **Identify the platform's documentation sources** + - Find the official documentation website + - Locate the CLI/agent configuration documentation + - Find the hooks or customization system documentation + - Note: Focus ONLY on slash command/custom command hooks, not general CLI hooks + +2. **Gather CLI configuration documentation** + - How is the CLI configured? (config files, environment variables, etc.) + - Where are custom commands/skills stored? + - What is the command file format? (markdown, YAML, etc.) + - What metadata or frontmatter is supported? + - How does the platform discover and load commands? + +3. **Gather hooks system documentation** + - What hooks are available for custom command definitions? + - Focus on hooks that trigger during or after command execution + - Examples: `stop_hooks`, `pre_hooks`, `post_hooks`, validation hooks + - Document the syntax and available hook types + - **Important**: Only document hooks available on slash command definitions, not general CLI hooks + +4. 
**Create the documentation files** + - Place files in `doc/platforms//` + - Each file must have a header comment with source and date + - Content should be comprehensive but focused + +## Output Format + +### cli_configuration.md + +Located at: `doc/platforms//cli_configuration.md` + +**Structure**: +```markdown + + +# CLI Configuration + +## Overview + +[Brief description of the platform and its CLI/agent system] + +## Configuration Files + +[Document where configuration lives and its format] + +### File Locations + +- [Location 1]: [Purpose] +- [Location 2]: [Purpose] + +### Configuration Format + +[Show the configuration file format with examples] + +## Custom Commands/Skills + +[Document how custom commands are defined] + +### Command Location + +[Where command files are stored] + +### Command File Format + +[The format of command files - markdown, YAML, etc.] + +### Metadata/Frontmatter + +[What metadata fields are supported in command files] + +```[format] +[Example of a minimal command file] +``` + +## Command Discovery + +[How the platform discovers and loads commands] + +## Platform-Specific Features + +[Any unique features relevant to command configuration] +``` + +### hooks_system.md + +Located at: `doc/platforms//hooks_system.md` + +**Structure**: +```markdown + + +# Hooks System (Command Definitions) + +## Overview + +[Brief description of hooks available for command definitions] + +**Important**: This document covers ONLY hooks available within slash command/skill definitions, not general CLI hooks. 
+ +## Available Hooks + +### [Hook Name 1] + +**Purpose**: [What this hook does] + +**Syntax**: +```yaml +[hook_name]: + - [configuration] +``` + +**Example**: +```yaml +[Complete example of using this hook] +``` + +**Behavior**: [When and how this hook executes] + +### [Hook Name 2] + +[Repeat for each available hook] + +## Hook Execution Order + +[Document the order in which hooks execute, if multiple are supported] + +## Comparison with Other Platforms + +| Feature | | Claude Code | Other | +|---------|-----------|-------------|-------| +| [Feature 1] | [Support] | [Support] | [Support] | + +## Limitations + +[Any limitations or caveats about the hooks system] +``` + +## Quality Criteria + +- Both files exist in `doc/platforms//` +- Each file has a header comment with: + - Last updated date (YYYY-MM-DD format) + - Source URL where documentation was obtained +- `cli_configuration.md` comprehensively covers: + - Configuration file locations and format + - Custom command file format and location + - Command discovery mechanism +- `hooks_system.md` comprehensively covers: + - All hooks available for slash command definitions + - Syntax and examples for each hook + - NOT general CLI hooks (only command-level hooks) +- Documentation is detailed enough to implement the platform adapter +- No extraneous topics (only CLI config and command hooks) +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +This is the foundation step for adding a new platform to DeepWork. The documentation you capture here will be referenced throughout the implementation process: +- CLI configuration informs how to generate command files +- Hooks documentation determines what features the adapter needs to support +- This documentation becomes a permanent reference in `doc/platforms/` + +Take time to be thorough - incomplete documentation will slow down subsequent steps. 
+ +## Tips + +- Use the platform's official documentation as the primary source +- If documentation is sparse, check GitHub repos, community guides, or changelog entries +- When in doubt about whether something is a "command hook" vs "CLI hook", err on the side of inclusion and note the ambiguity +- Include code examples from the official docs where available + + +## Inputs + +### User Parameters + +Please gather the following information from the user: +- **platform_name**: Clear identifier of the platform (e.g., 'cursor', 'windsurf-editor', 'github-copilot-chat') + + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/add_platform-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/add_platform-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `cli_configuration.md`- `hooks_system.md` +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Quality Validation Loop + +This step uses an iterative quality validation loop. After completing your work, stop hook(s) will evaluate whether the outputs meet quality criteria. If criteria are not met, you will be prompted to continue refining. + +### Quality Criteria +Verify the research output meets ALL criteria: +1. Both files exist in doc/platforms//: cli_configuration.md and hooks_system.md +2. Each file has a comment at the top with: + - Last updated date + - Source URL where the documentation was obtained +3. cli_configuration.md covers how the platform's CLI is configured +4. hooks_system.md covers hooks available for slash command definitions ONLY +5. 
No extraneous documentation (only these two specific topics) +6. Documentation is comprehensive enough to implement the platform + +If ALL criteria are met, include `QUALITY_COMPLETE`. + + +### Completion Promise + +To signal that all quality criteria have been met, include this tag in your final response: + +``` +QUALITY_COMPLETE +``` + +**Important**: Only include this promise tag when you have verified that ALL quality criteria above are satisfied. The validation loop will continue until this promise is detected. + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. **Inform the user**: + - Step 1 of 4 is complete + - Outputs created: cli_configuration.md, hooks_system.md + - Ready to proceed to next step: `/add_platform.add_capabilities` + +## Next Step + +To continue the workflow, run: +``` +/add_platform.add_capabilities +``` + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/add_platform/job.yml` +- Step instructions: `.deepwork/jobs/add_platform/steps/research.md` \ No newline at end of file diff --git a/.claude/commands/add_platform.verify.md b/.claude/commands/add_platform.verify.md new file mode 100644 index 00000000..f13f7957 --- /dev/null +++ b/.claude/commands/add_platform.verify.md @@ -0,0 +1,261 @@ +--- +description: Set up platform directories and verify deepwork install works correctly +hooks: + Stop: + - hooks: + - type: prompt + prompt: | + You must evaluate whether Claude has met all the below quality criteria for the request. + + ## Quality Criteria + + Verify the installation meets ALL criteria: + 1. Platform-specific directories/files are added to the deepwork repo as needed + 2. Running `deepwork install --platform ` completes without errors + 3. Expected command files are created in the platform's command directory + 4. Command file content matches the templates and job definitions + 5. 
Established DeepWork jobs (deepwork_jobs, deepwork_policy) are installed correctly + 6. The platform can be used alongside existing platforms without conflicts + + If ALL criteria are met, include `QUALITY_COMPLETE`. + + + ## Instructions + + Review the conversation and determine if ALL quality criteria above have been satisfied. + Look for evidence that each criterion has been addressed. + + If the agent has included `QUALITY_COMPLETE` in their response AND + all criteria appear to be met, respond with: {"ok": true} + + If criteria are NOT met AND the promise tag is missing, respond with: + {"ok": false, "reason": "Continue working. [specific feedback on what's wrong]"} +--- + +# add_platform.verify + +**Step 4 of 4** in the **add_platform** workflow + +**Summary**: Add a new AI platform to DeepWork with adapter, templates, and tests + +## Job Overview + +A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + +This job guides you through four phases: +1. **Research**: Capture the platform's CLI configuration and hooks system documentation +2. **Add Capabilities**: Update the job schema and adapters with any new hook events +3. **Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates +4. **Verify**: Ensure installation works correctly and produces expected files + +The workflow ensures consistency across all supported platforms and maintains +comprehensive test coverage for new functionality. 
+ +**Important Notes**: +- Only hooks available on slash command definitions should be captured +- Each existing adapter must be updated when new hooks are added (typically with null values) +- Tests must achieve 100% coverage for any new functionality +- Installation verification confirms the platform integrates correctly with existing jobs + + +## Prerequisites + +This step requires completion of the following step(s): +- `/add_platform.implement` + +Please ensure these steps have been completed before proceeding. + +## Instructions + +# Verify Installation + +## Objective + +Ensure the new platform integration works correctly by setting up necessary directories and running the full installation process. + +## Task + +Perform end-to-end verification that the new platform can be installed and that DeepWork's standard jobs work correctly with it. + +### Prerequisites + +Ensure the implementation step is complete: +- Adapter class exists in `src/deepwork/adapters.py` +- Templates exist in `src/deepwork/templates//` +- Tests pass with 100% coverage +- README.md is updated + +### Process + +1. **Set up platform directories in the DeepWork repo** + + The DeepWork repository itself should have the platform's command directory structure for testing: + + ```bash + mkdir -p + ``` + + For example: + - Claude: `.claude/commands/` + - Cursor: `.cursor/commands/` (or wherever Cursor stores commands) + +2. **Run deepwork install for the new platform** + + ```bash + deepwork install --platform + ``` + + Verify: + - Command completes without errors + - No Python exceptions or tracebacks + - Output indicates successful installation + +3. **Check that command files were created** + + List the generated command files: + ```bash + ls -la / + ``` + + Verify: + - `deepwork_jobs.define.md` exists (or equivalent for the platform) + - `deepwork_jobs.implement.md` exists + - `deepwork_jobs.refine.md` exists + - `deepwork_policy.define.md` exists + - All expected step commands exist + +4. 
**Validate command file content** + + Read each generated command file and verify: + - Content matches the expected format for the platform + - Job metadata is correctly included + - Step instructions are properly rendered + - Any platform-specific features (hooks, frontmatter) are present + +5. **Test alongside existing platforms** + + If other platforms are already installed, verify they still work: + ```bash + deepwork install --platform claude + ls -la .claude/commands/ + ``` + + Ensure: + - New platform doesn't break existing installations + - Each platform's commands are independent + - No file conflicts or overwrites + +## Quality Criteria + +- Platform-specific directories are set up in the DeepWork repo +- `deepwork install --platform ` completes without errors +- All expected command files are created: + - deepwork_jobs.define, implement, refine + - deepwork_policy.define + - Any other standard job commands +- Command file content is correct: + - Matches platform's expected format + - Job/step information is properly rendered + - No template errors or missing content +- Existing platforms still work (if applicable) +- No conflicts between platforms +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +This is the final validation step before the platform is considered complete. A thorough verification ensures: +- The platform actually works, not just compiles +- Standard DeepWork jobs install correctly +- The platform integrates properly with the existing system +- Users can confidently use the new platform + +Take time to verify each aspect - finding issues now is much better than having users discover them later. 
+ +## Common Issues to Check + +- **Template syntax errors**: May only appear when rendering specific content +- **Path issues**: Platform might expect different directory structure +- **Encoding issues**: Special characters in templates or content +- **Missing hooks**: Platform adapter might not handle all hook types +- **Permission issues**: Directory creation might fail in some cases + + +## Inputs + + +### Required Files + +This step requires the following files from previous steps: +- `templates/` (from step `implement`) + +Make sure to read and use these files as context for this step. + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/add_platform-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/add_platform-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +No specific files are output by this command. + +## Quality Validation Loop + +This step uses an iterative quality validation loop. After completing your work, stop hook(s) will evaluate whether the outputs meet quality criteria. If criteria are not met, you will be prompted to continue refining. + +### Quality Criteria +Verify the installation meets ALL criteria: +1. Platform-specific directories/files are added to the deepwork repo as needed +2. Running `deepwork install --platform ` completes without errors +3. Expected command files are created in the platform's command directory +4. Command file content matches the templates and job definitions +5. Established DeepWork jobs (deepwork_jobs, deepwork_policy) are installed correctly +6. 
The platform can be used alongside existing platforms without conflicts + +If ALL criteria are met, include `QUALITY_COMPLETE`. + + +### Completion Promise + +To signal that all quality criteria have been met, include this tag in your final response: + +``` +QUALITY_COMPLETE +``` + +**Important**: Only include this promise tag when you have verified that ALL quality criteria above are satisfied. The validation loop will continue until this promise is detected. + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. **Inform the user**: + - Step 4 of 4 is complete + - This is the final step - the job is complete! + +## Workflow Complete + +This is the final step in the add_platform workflow. All outputs should now be complete and ready for review. + +Consider: +- Reviewing all work products +- Creating a pull request to merge the work branch +- Documenting any insights or learnings + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/add_platform/job.yml` +- Step instructions: `.deepwork/jobs/add_platform/steps/verify.md` \ No newline at end of file diff --git a/.claude/commands/deepwork_jobs.define.md b/.claude/commands/deepwork_jobs.define.md index 76a5bf45..76723ba5 100644 --- a/.claude/commands/deepwork_jobs.define.md +++ b/.claude/commands/deepwork_jobs.define.md @@ -300,9 +300,9 @@ User: I need to specify the market segment and product category as inputs. The o Claude: Perfect. A few more questions about that output: 1. What should the file be called? Something like `competitors_list.md`? -2. Should it go directly in the deepwork/competitive_research/ folder, or in a subdirectory like `research/` or `data/`? +2. Should it go in the project root, or in a subdirectory like `research/` or `data/`? -User: Just call it `competitors_list.md` in the main folder. +User: Just call it `competitors_list.md` in the project root. Claude: Great. Now for the "Research each one" step: 1. 
Does it use the competitor list from the first step as input? @@ -440,17 +440,12 @@ All work for this job should be done on a dedicated work branch: ``` Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) -3. **All outputs go in the work directory**: - - Create files in: `deepwork/deepwork_jobs/` - - This keeps work products organized by job - ## Output Requirements -Create the following output(s) in the work directory: -- `deepwork/deepwork_jobs/job.yml` +Create the following output(s): +- `job.yml` Ensure all outputs are: - Well-formatted and complete -- Committed to the work branch - Ready for review or use by subsequent steps ## Quality Validation Loop @@ -486,15 +481,9 @@ To signal that all quality criteria have been met, include this tag in your fina After completing this step: -1. **Commit your work**: - ```bash - git add deepwork/deepwork_jobs/ - git commit -m "deepwork_jobs: Complete define step" - ``` - -2. **Verify outputs**: Confirm all required files have been created +1. **Verify outputs**: Confirm all required files have been created -3. **Inform the user**: +2. **Inform the user**: - Step 1 of 3 is complete - Outputs created: job.yml - Ready to proceed to next step: `/deepwork_jobs.implement` diff --git a/.claude/commands/deepwork_jobs.implement.md b/.claude/commands/deepwork_jobs.implement.md index f9a026b8..4f414427 100644 --- a/.claude/commands/deepwork_jobs.implement.md +++ b/.claude/commands/deepwork_jobs.implement.md @@ -103,7 +103,7 @@ mkdir -p .deepwork/jobs/[job_name]/steps ``` Files to create: -- `.deepwork/jobs/[job_name]/job.yml` - Copy from work directory +- `.deepwork/jobs/[job_name]/job.yml` - `.deepwork/jobs/[job_name]/steps/[step_id].md` - One for each step ### Step 3: Generate Step Instruction Files @@ -288,7 +288,7 @@ your strategy documentation. 
I'd suggest a policy like: - **Name**: "Update strategy when competitor analysis changes" -- **Trigger**: `deepwork/competitive_research-*/report.md` +- **Trigger**: `**/positioning_report.md` - **Action**: Prompt to review and update `docs/strategy.md` Would you like me to create this policy? I can run `/deepwork_policy.define` to set it up. @@ -511,7 +511,6 @@ Before marking this step complete, ensure: This step requires the following files from previous steps: - `job.yml` (from step `define`) - Location: `deepwork/deepwork_jobs/job.yml` Make sure to read and use these files as context for this step. @@ -529,17 +528,12 @@ All work for this job should be done on a dedicated work branch: ``` Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) -3. **All outputs go in the work directory**: - - Create files in: `deepwork/deepwork_jobs/` - - This keeps work products organized by job - ## Output Requirements -Create the following output(s) in the work directory: -- `deepwork/deepwork_jobs/implementation_summary.md` +Create the following output(s): +- `implementation_summary.md` Ensure all outputs are: - Well-formatted and complete -- Committed to the work branch - Ready for review or use by subsequent steps ## Quality Validation Loop @@ -579,15 +573,9 @@ To signal that all quality criteria have been met, include this tag in your fina After completing this step: -1. **Commit your work**: - ```bash - git add deepwork/deepwork_jobs/ - git commit -m "deepwork_jobs: Complete implement step" - ``` - -2. **Verify outputs**: Confirm all required files have been created +1. **Verify outputs**: Confirm all required files have been created -3. **Inform the user**: +2. 
**Inform the user**: - Step 2 of 3 is complete - Outputs created: implementation_summary.md - Ready to proceed to next step: `/deepwork_jobs.refine` diff --git a/.claude/commands/deepwork_jobs.refine.md b/.claude/commands/deepwork_jobs.refine.md index 763b50c2..7801c1f6 100644 --- a/.claude/commands/deepwork_jobs.refine.md +++ b/.claude/commands/deepwork_jobs.refine.md @@ -529,17 +529,12 @@ All work for this job should be done on a dedicated work branch: ``` Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) -3. **All outputs go in the work directory**: - - Create files in: `deepwork/deepwork_jobs/` - - This keeps work products organized by job - ## Output Requirements -Create the following output(s) in the work directory: -- `deepwork/deepwork_jobs/job.yml` +Create the following output(s): +- `job.yml` Ensure all outputs are: - Well-formatted and complete -- Committed to the work branch - Ready for review or use by subsequent steps ## Quality Validation Loop @@ -576,15 +571,9 @@ To signal that all quality criteria have been met, include this tag in your fina After completing this step: -1. **Commit your work**: - ```bash - git add deepwork/deepwork_jobs/ - git commit -m "deepwork_jobs: Complete refine step" - ``` - -2. **Verify outputs**: Confirm all required files have been created +1. **Verify outputs**: Confirm all required files have been created -3. **Inform the user**: +2. **Inform the user**: - The refine command is complete - Outputs created: job.yml - This command can be run again anytime to make further changes @@ -594,7 +583,7 @@ After completing this step: This is a standalone command that can be run anytime. The outputs are ready for use. 
Consider: -- Reviewing the outputs in `deepwork/deepwork_jobs/` +- Reviewing the outputs - Running `deepwork sync` if job definitions were changed - Re-running this command later if further changes are needed diff --git a/.claude/commands/deepwork_policy.define.md b/.claude/commands/deepwork_policy.define.md index 4269712a..6ff16c72 100644 --- a/.claude/commands/deepwork_policy.define.md +++ b/.claude/commands/deepwork_policy.define.md @@ -228,32 +228,21 @@ All work for this job should be done on a dedicated work branch: ``` Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) -3. **All outputs go in the work directory**: - - Create files in: `deepwork/deepwork_policy/` - - This keeps work products organized by job - ## Output Requirements -Create the following output(s) in the work directory: -- `deepwork/deepwork_policy/.deepwork.policy.yml` +Create the following output(s): +- `.deepwork.policy.yml` Ensure all outputs are: - Well-formatted and complete -- Committed to the work branch - Ready for review or use by subsequent steps ## Completion After completing this step: -1. **Commit your work**: - ```bash - git add deepwork/deepwork_policy/ - git commit -m "deepwork_policy: Complete define step" - ``` - -2. **Verify outputs**: Confirm all required files have been created +1. **Verify outputs**: Confirm all required files have been created -3. **Inform the user**: +2. **Inform the user**: - The define command is complete - Outputs created: .deepwork.policy.yml - This command can be run again anytime to make further changes @@ -263,7 +252,7 @@ After completing this step: This is a standalone command that can be run anytime. The outputs are ready for use. 
Consider: -- Reviewing the outputs in `deepwork/deepwork_policy/` +- Reviewing the outputs - Running `deepwork sync` if job definitions were changed - Re-running this command later if further changes are needed diff --git a/.deepwork/config.yml b/.deepwork/config.yml index 1ba9023c..94dcabe1 100644 --- a/.deepwork/config.yml +++ b/.deepwork/config.yml @@ -1,3 +1,4 @@ version: 1.0.0 platforms: - claude +- gemini diff --git a/.deepwork/jobs/add_platform/job.yml b/.deepwork/jobs/add_platform/job.yml new file mode 100644 index 00000000..1a6fc3ba --- /dev/null +++ b/.deepwork/jobs/add_platform/job.yml @@ -0,0 +1,132 @@ +name: add_platform +version: "1.0.0" +summary: "Add a new AI platform to DeepWork with adapter, templates, and tests" + +description: | + A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + + This job guides you through four phases: + 1. **Research**: Capture the platform's CLI configuration and hooks system documentation + 2. **Add Capabilities**: Update the job schema and adapters with any new hook events + 3. **Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates + 4. **Verify**: Ensure installation works correctly and produces expected files + + The workflow ensures consistency across all supported platforms and maintains + comprehensive test coverage for new functionality. 
+ + **Important Notes**: + - Only hooks available on slash command definitions should be captured + - Each existing adapter must be updated when new hooks are added (typically with null values) + - Tests must achieve 100% coverage for any new functionality + - Installation verification confirms the platform integrates correctly with existing jobs + +changelog: + - version: "1.0.0" + changes: "Initial job creation" + +steps: + - id: research + name: "Research Platform Documentation" + description: "Capture CLI configuration and hooks system documentation for the new platform" + instructions_file: steps/research.md + inputs: + - name: platform_name + description: "Clear identifier of the platform (e.g., 'cursor', 'windsurf-editor', 'github-copilot-chat')" + outputs: + - cli_configuration.md + - hooks_system.md + dependencies: [] + stop_hooks: + - prompt: | + Verify the research output meets ALL criteria: + 1. Both files exist in doc/platforms//: cli_configuration.md and hooks_system.md + 2. Each file has a comment at the top with: + - Last updated date + - Source URL where the documentation was obtained + 3. cli_configuration.md covers how the platform's CLI is configured + 4. hooks_system.md covers hooks available for slash command definitions ONLY + 5. No extraneous documentation (only these two specific topics) + 6. Documentation is comprehensive enough to implement the platform + + If ALL criteria are met, include `QUALITY_COMPLETE`. + + - id: add_capabilities + name: "Add Hook Capabilities" + description: "Update job schema and adapters with any new hook events the platform supports" + instructions_file: steps/add_capabilities.md + inputs: + - file: hooks_system.md + from_step: research + outputs: + - job_schema.py + - adapters.py + dependencies: + - research + stop_hooks: + - prompt: | + Verify the capability additions meet ALL criteria: + 1. Any new hooks from the platform (for slash commands only) are added to src/deepwork/schemas/job_schema.py + 2. 
All existing adapters in src/deepwork/adapters.py are updated with the new hook fields + (set to None/null if the platform doesn't support that hook) + 3. Only hooks available on slash command definitions are added (not general CLI hooks) + 4. job_schema.py remains valid Python with no syntax errors + 5. adapters.py remains consistent - all adapters have the same hook fields + 6. If no new hooks are needed, document why in a comment + + If ALL criteria are met, include `QUALITY_COMPLETE`. + + - id: implement + name: "Implement Platform Support" + description: "Add platform adapter, templates, tests with 100% coverage, and README documentation" + instructions_file: steps/implement.md + inputs: + - file: job_schema.py + from_step: add_capabilities + - file: adapters.py + from_step: add_capabilities + - file: cli_configuration.md + from_step: research + outputs: + - templates/ + - tests/ + - README.md + dependencies: + - research + - add_capabilities + stop_hooks: + - script: hooks/run_tests.sh + - prompt: | + Verify the implementation meets ALL criteria: + 1. Platform adapter class is added to src/deepwork/adapters.py + 2. Templates exist in src/deepwork/templates// with appropriate command structure + 3. Tests exist for all new functionality + 4. Test coverage is 100% for new code (run: uv run pytest --cov) + 5. All tests pass + 6. README.md is updated with: + - New platform listed in supported platforms + - Installation instructions for the platform + - Any platform-specific notes + + If ALL criteria are met, include `QUALITY_COMPLETE`. + + - id: verify + name: "Verify Installation" + description: "Set up platform directories and verify deepwork install works correctly" + instructions_file: steps/verify.md + inputs: + - file: templates/ + from_step: implement + outputs: [] + dependencies: + - implement + stop_hooks: + - prompt: | + Verify the installation meets ALL criteria: + 1. Platform-specific directories/files are added to the deepwork repo as needed + 2. 
Running `deepwork install --platform ` completes without errors + 3. Expected command files are created in the platform's command directory + 4. Command file content matches the templates and job definitions + 5. Established DeepWork jobs (deepwork_jobs, deepwork_policy) are installed correctly + 6. The platform can be used alongside existing platforms without conflicts + + If ALL criteria are met, include `QUALITY_COMPLETE`. diff --git a/.deepwork/jobs/add_platform/steps/add_capabilities.md b/.deepwork/jobs/add_platform/steps/add_capabilities.md new file mode 100644 index 00000000..46812245 --- /dev/null +++ b/.deepwork/jobs/add_platform/steps/add_capabilities.md @@ -0,0 +1,135 @@ +# Add Hook Capabilities + +## Objective + +Update the DeepWork job schema and platform adapters to support any new hook events that the new platform provides for slash command definitions. + +## Task + +Analyze the hooks documentation from the research step and update the codebase to support any new hook capabilities, ensuring consistency across all existing adapters. + +### Prerequisites + +Read the hooks documentation created in the previous step: +- `doc/platforms//hooks_system.md` + +Also review the existing schema and adapters: +- `src/deepwork/schemas/job_schema.py` +- `src/deepwork/adapters.py` + +### Process + +1. **Analyze the new platform's hooks** + - Read `doc/platforms//hooks_system.md` + - List all hooks available for slash command definitions + - Compare with hooks already in `job_schema.py` + - Identify any NEW hooks not currently supported + +2. **Determine if schema changes are needed** + - If the platform has hooks that DeepWork doesn't currently support, add them + - If all hooks are already supported, document this finding + - Remember: Only add hooks that are available on slash command definitions + +3. 
**Update job_schema.py (if needed)**
+   - Add new hook fields to the step schema
+   - Follow existing patterns for hook definitions
+   - Add appropriate type hints and documentation
+   - Example addition:
+     ```python
+     # New hook from <platform_name>
+     new_hook_name: Optional[List[HookConfig]] = None
+     ```
+
+4. **Update all existing adapters**
+   - Open `src/deepwork/adapters.py`
+   - For EACH existing adapter class:
+     - Add the new hook field (set to `None` if not supported)
+   - This maintains consistency across all adapters
+   - Document why each adapter does or doesn't support the hook
+
+5. **Validate the changes**
+   - Run Python syntax check: `python -m py_compile src/deepwork/schemas/job_schema.py`
+   - Run Python syntax check: `python -m py_compile src/deepwork/adapters.py`
+   - Ensure no import errors
+
+6. **Document the decision**
+   - If no new hooks were added, add a comment explaining why
+   - If new hooks were added, ensure they're documented in the schema
+
+## Output Format
+
+### job_schema.py
+
+Location: `src/deepwork/schemas/job_schema.py`
+
+If new hooks are added:
+```python
+@dataclass
+class StepDefinition:
+    # ... existing fields ...
+
+    # New hook from <platform_name> - [description of what it does]
+    new_hook_name: Optional[List[HookConfig]] = None
+```
+
+### adapters.py
+
+Location: `src/deepwork/adapters.py`
+
+For each existing adapter, add the new hook field:
+```python
+class ExistingPlatformAdapter(PlatformAdapter):
+    # ... existing code ...
+
+    def get_hook_support(self) -> dict:
+        return {
+            # ... existing hooks ...
+ "new_hook_name": None, # Not supported by this platform + } +``` + +Or if no changes are needed, add a documentation comment: +```python +# NOTE: hooks reviewed on YYYY-MM-DD +# No new hooks to add - all command hooks are already +# supported by the existing schema (stop_hooks covers their validation pattern) +``` + +## Quality Criteria + +- Hooks documentation from research step has been reviewed +- If new hooks exist: + - Added to `src/deepwork/schemas/job_schema.py` with proper typing + - ALL existing adapters updated in `src/deepwork/adapters.py` + - Each adapter indicates support level (implemented, None, or partial) +- If no new hooks needed: + - Decision documented with a comment explaining the analysis +- Only hooks available on slash command definitions are considered +- `job_schema.py` has no syntax errors (verified with py_compile) +- `adapters.py` has no syntax errors (verified with py_compile) +- All adapters have consistent hook fields (same fields across all adapters) +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +DeepWork supports multiple AI platforms, and each platform may have different capabilities for hooks within command definitions. The schema defines what hooks CAN exist, while adapters define what each platform actually SUPPORTS. + +This separation allows: +- Job definitions to use any hook (the schema is the superset) +- Platform-specific generation to only use supported hooks (adapters filter) +- Future platforms to add new hooks without breaking existing ones + +Maintaining consistency is critical - all adapters must have the same hook fields, even if they don't support them (use `None` for unsupported). 
+ +## Common Hook Types + +For reference, here are common hook patterns across platforms: + +| Hook Type | Purpose | Example Platforms | +|-----------|---------|-------------------| +| `stop_hooks` | Quality validation loops | Claude Code | +| `pre_hooks` | Run before command | Various | +| `post_hooks` | Run after command | Various | +| `validation_hooks` | Validate inputs/outputs | Various | + +When you find a new hook type, consider whether it maps to an existing pattern or is genuinely new functionality. diff --git a/.deepwork/jobs/add_platform/steps/implement.md b/.deepwork/jobs/add_platform/steps/implement.md new file mode 100644 index 00000000..760cc251 --- /dev/null +++ b/.deepwork/jobs/add_platform/steps/implement.md @@ -0,0 +1,226 @@ +# Implement Platform Support + +## Objective + +Create the complete platform implementation including the adapter class, command templates, comprehensive tests, and documentation updates. + +## Task + +Build the full platform support by implementing the adapter, creating templates, writing tests with 100% coverage, and updating the README. + +### Prerequisites + +Read the outputs from previous steps: +- `doc/platforms//cli_configuration.md` - For template structure +- `src/deepwork/schemas/job_schema.py` - For current schema +- `src/deepwork/adapters.py` - For adapter patterns + +Also review existing implementations for reference: +- `src/deepwork/templates/claude/` - Example templates +- `tests/` - Existing test patterns + +### Process + +1. **Create the platform adapter class** + + Add a new adapter class to `src/deepwork/adapters.py`: + + ```python + class NewPlatformAdapter(PlatformAdapter): + """Adapter for .""" + + platform_name = "" + command_directory = "" # e.g., ".cursor/commands" + command_extension = ".md" # or appropriate extension + + def get_hook_support(self) -> dict: + """Return which hooks this platform supports.""" + return { + "stop_hooks": True, # or False/None + # ... 
other hooks + } + + def generate_command(self, step: StepDefinition, job: JobDefinition) -> str: + """Generate command file content for this platform.""" + # Use Jinja2 template + template = self.env.get_template(f"{self.platform_name}/command.md.j2") + return template.render(step=step, job=job) + ``` + +2. **Create command templates** + + Create templates in `src/deepwork/templates//`: + + - `command.md.j2` - Main command template + - Any other templates needed for the platform's format + + Use the CLI configuration documentation to ensure the template matches the platform's expected format. + +3. **Register the adapter** + + Update the adapter registry in `src/deepwork/adapters.py`: + + ```python + PLATFORM_ADAPTERS = { + "claude": ClaudeAdapter, + "": NewPlatformAdapter, + # ... other adapters + } + ``` + +4. **Write comprehensive tests** + + Create tests in `tests/` that cover: + + - Adapter instantiation + - Hook support detection + - Command generation + - Template rendering + - Edge cases (empty inputs, special characters, etc.) + - Integration with the sync command + + **Critical**: Tests must achieve 100% coverage of new code. + +5. **Update README.md** + + Add the new platform to `README.md`: + + - Add to "Supported Platforms" list + - Add installation instructions: + ```bash + deepwork install --platform + ``` + - Document any platform-specific notes or limitations + +6. **Run tests and verify coverage** + + ```bash + uv run pytest --cov=src/deepwork --cov-report=term-missing + ``` + + - All tests must pass + - New code must have 100% coverage + - If coverage is below 100%, add more tests + +7. **Iterate until tests pass with full coverage** + + This step has a `stop_hooks` script that runs tests. 
Keep iterating until: + - All tests pass + - Coverage is 100% for new functionality + +## Output Format + +### templates/ + +Location: `src/deepwork/templates//` + +Create the following files: + +**command.md.j2**: +```jinja2 +{# Template for command files #} +{# Follows the platform's expected format from cli_configuration.md #} + +[Platform-specific frontmatter or metadata] + +# {{ step.name }} + +{{ step.description }} + +## Instructions + +{{ step.instructions_content }} + +[... rest of template based on platform format ...] +``` + +### tests/ + +Location: `tests/test__adapter.py` + +```python +"""Tests for the adapter.""" +import pytest +from deepwork.adapters import NewPlatformAdapter + +class TestNewPlatformAdapter: + """Test suite for NewPlatformAdapter.""" + + def test_adapter_initialization(self): + """Test adapter can be instantiated.""" + adapter = NewPlatformAdapter() + assert adapter.platform_name == "" + + def test_hook_support(self): + """Test hook support detection.""" + adapter = NewPlatformAdapter() + hooks = adapter.get_hook_support() + assert "stop_hooks" in hooks + # ... more assertions + + def test_command_generation(self): + """Test command file generation.""" + # ... test implementation + + # ... 
more tests for 100% coverage +``` + +### README.md + +Add to the existing README.md: + +```markdown +## Supported Platforms + +- **Claude Code** - Anthropic's CLI for Claude +- **** - [Brief description] + +## Installation + +### + +```bash +deepwork install --platform +``` + +[Any platform-specific notes] +``` + +## Quality Criteria + +- Platform adapter class added to `src/deepwork/adapters.py`: + - Inherits from `PlatformAdapter` + - Implements all required methods + - Registered in `PLATFORM_ADAPTERS` +- Templates created in `src/deepwork/templates//`: + - `command.md.j2` exists and renders correctly + - Format matches platform's expected command format +- Tests created in `tests/`: + - Cover all new adapter functionality + - Cover template rendering + - All tests pass +- Test coverage is 100% for new code: + - Run `uv run pytest --cov=src/deepwork --cov-report=term-missing` + - No uncovered lines in new code +- README.md updated: + - Platform listed in supported platforms + - Installation command documented + - Any platform-specific notes included +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +This is the core implementation step. The adapter you create will be responsible for: +- Determining where command files are placed +- Generating command file content from job definitions +- Handling platform-specific features and hooks + +The templates use Jinja2 and should produce files that match exactly what the platform expects. Reference the CLI configuration documentation frequently to ensure compatibility. 
+ +## Tips + +- Study the existing `ClaudeAdapter` as a reference implementation +- Run tests frequently as you implement +- Use `--cov-report=html` for a detailed coverage report +- If a test is hard to write, the code might need refactoring +- Template syntax errors often show up at runtime - test early diff --git a/.deepwork/jobs/add_platform/steps/research.md b/.deepwork/jobs/add_platform/steps/research.md new file mode 100644 index 00000000..4c7d1e0f --- /dev/null +++ b/.deepwork/jobs/add_platform/steps/research.md @@ -0,0 +1,188 @@ +# Research Platform Documentation + +## Objective + +Capture comprehensive documentation for the new AI platform's CLI configuration and hooks system, creating a local reference that will guide the implementation phases. + +## Task + +Research the target platform's official documentation and create two focused documentation files that will serve as the foundation for implementing platform support in DeepWork. + +### Process + +1. **Identify the platform's documentation sources** + - Find the official documentation website + - Locate the CLI/agent configuration documentation + - Find the hooks or customization system documentation + - Note: Focus ONLY on slash command/custom command hooks, not general CLI hooks + +2. **Gather CLI configuration documentation** + - How is the CLI configured? (config files, environment variables, etc.) + - Where are custom commands/skills stored? + - What is the command file format? (markdown, YAML, etc.) + - What metadata or frontmatter is supported? + - How does the platform discover and load commands? + +3. **Gather hooks system documentation** + - What hooks are available for custom command definitions? + - Focus on hooks that trigger during or after command execution + - Examples: `stop_hooks`, `pre_hooks`, `post_hooks`, validation hooks + - Document the syntax and available hook types + - **Important**: Only document hooks available on slash command definitions, not general CLI hooks + +4. 
**Create the documentation files** + - Place files in `doc/platforms//` + - Each file must have a header comment with source and date + - Content should be comprehensive but focused + +## Output Format + +### cli_configuration.md + +Located at: `doc/platforms//cli_configuration.md` + +**Structure**: +```markdown + + +# CLI Configuration + +## Overview + +[Brief description of the platform and its CLI/agent system] + +## Configuration Files + +[Document where configuration lives and its format] + +### File Locations + +- [Location 1]: [Purpose] +- [Location 2]: [Purpose] + +### Configuration Format + +[Show the configuration file format with examples] + +## Custom Commands/Skills + +[Document how custom commands are defined] + +### Command Location + +[Where command files are stored] + +### Command File Format + +[The format of command files - markdown, YAML, etc.] + +### Metadata/Frontmatter + +[What metadata fields are supported in command files] + +```[format] +[Example of a minimal command file] +``` + +## Command Discovery + +[How the platform discovers and loads commands] + +## Platform-Specific Features + +[Any unique features relevant to command configuration] +``` + +### hooks_system.md + +Located at: `doc/platforms//hooks_system.md` + +**Structure**: +```markdown + + +# Hooks System (Command Definitions) + +## Overview + +[Brief description of hooks available for command definitions] + +**Important**: This document covers ONLY hooks available within slash command/skill definitions, not general CLI hooks. 
+ +## Available Hooks + +### [Hook Name 1] + +**Purpose**: [What this hook does] + +**Syntax**: +```yaml +[hook_name]: + - [configuration] +``` + +**Example**: +```yaml +[Complete example of using this hook] +``` + +**Behavior**: [When and how this hook executes] + +### [Hook Name 2] + +[Repeat for each available hook] + +## Hook Execution Order + +[Document the order in which hooks execute, if multiple are supported] + +## Comparison with Other Platforms + +| Feature | | Claude Code | Other | +|---------|-----------|-------------|-------| +| [Feature 1] | [Support] | [Support] | [Support] | + +## Limitations + +[Any limitations or caveats about the hooks system] +``` + +## Quality Criteria + +- Both files exist in `doc/platforms//` +- Each file has a header comment with: + - Last updated date (YYYY-MM-DD format) + - Source URL where documentation was obtained +- `cli_configuration.md` comprehensively covers: + - Configuration file locations and format + - Custom command file format and location + - Command discovery mechanism +- `hooks_system.md` comprehensively covers: + - All hooks available for slash command definitions + - Syntax and examples for each hook + - NOT general CLI hooks (only command-level hooks) +- Documentation is detailed enough to implement the platform adapter +- No extraneous topics (only CLI config and command hooks) +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +This is the foundation step for adding a new platform to DeepWork. The documentation you capture here will be referenced throughout the implementation process: +- CLI configuration informs how to generate command files +- Hooks documentation determines what features the adapter needs to support +- This documentation becomes a permanent reference in `doc/platforms/` + +Take time to be thorough - incomplete documentation will slow down subsequent steps. 
+ +## Tips + +- Use the platform's official documentation as the primary source +- If documentation is sparse, check GitHub repos, community guides, or changelog entries +- When in doubt about whether something is a "command hook" vs "CLI hook", err on the side of inclusion and note the ambiguity +- Include code examples from the official docs where available diff --git a/.deepwork/jobs/add_platform/steps/verify.md b/.deepwork/jobs/add_platform/steps/verify.md new file mode 100644 index 00000000..7288d79b --- /dev/null +++ b/.deepwork/jobs/add_platform/steps/verify.md @@ -0,0 +1,111 @@ +# Verify Installation + +## Objective + +Ensure the new platform integration works correctly by setting up necessary directories and running the full installation process. + +## Task + +Perform end-to-end verification that the new platform can be installed and that DeepWork's standard jobs work correctly with it. + +### Prerequisites + +Ensure the implementation step is complete: +- Adapter class exists in `src/deepwork/adapters.py` +- Templates exist in `src/deepwork/templates//` +- Tests pass with 100% coverage +- README.md is updated + +### Process + +1. **Set up platform directories in the DeepWork repo** + + The DeepWork repository itself should have the platform's command directory structure for testing: + + ```bash + mkdir -p + ``` + + For example: + - Claude: `.claude/commands/` + - Cursor: `.cursor/commands/` (or wherever Cursor stores commands) + +2. **Run deepwork install for the new platform** + + ```bash + deepwork install --platform + ``` + + Verify: + - Command completes without errors + - No Python exceptions or tracebacks + - Output indicates successful installation + +3. 
**Check that command files were created** + + List the generated command files: + ```bash + ls -la / + ``` + + Verify: + - `deepwork_jobs.define.md` exists (or equivalent for the platform) + - `deepwork_jobs.implement.md` exists + - `deepwork_jobs.refine.md` exists + - `deepwork_policy.define.md` exists + - All expected step commands exist + +4. **Validate command file content** + + Read each generated command file and verify: + - Content matches the expected format for the platform + - Job metadata is correctly included + - Step instructions are properly rendered + - Any platform-specific features (hooks, frontmatter) are present + +5. **Test alongside existing platforms** + + If other platforms are already installed, verify they still work: + ```bash + deepwork install --platform claude + ls -la .claude/commands/ + ``` + + Ensure: + - New platform doesn't break existing installations + - Each platform's commands are independent + - No file conflicts or overwrites + +## Quality Criteria + +- Platform-specific directories are set up in the DeepWork repo +- `deepwork install --platform ` completes without errors +- All expected command files are created: + - deepwork_jobs.define, implement, refine + - deepwork_policy.define + - Any other standard job commands +- Command file content is correct: + - Matches platform's expected format + - Job/step information is properly rendered + - No template errors or missing content +- Existing platforms still work (if applicable) +- No conflicts between platforms +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +This is the final validation step before the platform is considered complete. 
A thorough verification ensures: +- The platform actually works, not just compiles +- Standard DeepWork jobs install correctly +- The platform integrates properly with the existing system +- Users can confidently use the new platform + +Take time to verify each aspect - finding issues now is much better than having users discover them later. + +## Common Issues to Check + +- **Template syntax errors**: May only appear when rendering specific content +- **Path issues**: Platform might expect different directory structure +- **Encoding issues**: Special characters in templates or content +- **Missing hooks**: Platform adapter might not handle all hook types +- **Permission issues**: Directory creation might fail in some cases diff --git a/.deepwork/jobs/deepwork_jobs/steps/define.md b/.deepwork/jobs/deepwork_jobs/steps/define.md index bb1165dd..aceb3677 100644 --- a/.deepwork/jobs/deepwork_jobs/steps/define.md +++ b/.deepwork/jobs/deepwork_jobs/steps/define.md @@ -241,9 +241,9 @@ User: I need to specify the market segment and product category as inputs. The o Claude: Perfect. A few more questions about that output: 1. What should the file be called? Something like `competitors_list.md`? -2. Should it go directly in the deepwork/competitive_research/ folder, or in a subdirectory like `research/` or `data/`? +2. Should it go in the project root, or in a subdirectory like `research/` or `data/`? -User: Just call it `competitors_list.md` in the main folder. +User: Just call it `competitors_list.md` in the project root. Claude: Great. Now for the "Research each one" step: 1. Does it use the competitor list from the first step as input? 
diff --git a/.deepwork/jobs/deepwork_jobs/steps/implement.md b/.deepwork/jobs/deepwork_jobs/steps/implement.md index b87e8c5a..bb100a3c 100644 --- a/.deepwork/jobs/deepwork_jobs/steps/implement.md +++ b/.deepwork/jobs/deepwork_jobs/steps/implement.md @@ -34,7 +34,7 @@ mkdir -p .deepwork/jobs/[job_name]/steps ``` Files to create: -- `.deepwork/jobs/[job_name]/job.yml` - Copy from work directory +- `.deepwork/jobs/[job_name]/job.yml` - `.deepwork/jobs/[job_name]/steps/[step_id].md` - One for each step ### Step 3: Generate Step Instruction Files @@ -219,7 +219,7 @@ your strategy documentation. I'd suggest a policy like: - **Name**: "Update strategy when competitor analysis changes" -- **Trigger**: `deepwork/competitive_research-*/report.md` +- **Trigger**: `**/positioning_report.md` - **Action**: Prompt to review and update `docs/strategy.md` Would you like me to create this policy? I can run `/deepwork_policy.define` to set it up. diff --git a/.gemini/commands/add_platform/add_capabilities.toml b/.gemini/commands/add_platform/add_capabilities.toml new file mode 100644 index 00000000..0a0f0ce1 --- /dev/null +++ b/.gemini/commands/add_platform/add_capabilities.toml @@ -0,0 +1,240 @@ +# add_platform:add_capabilities +# +# Update job schema and adapters with any new hook events the platform supports +# +# Generated by DeepWork - do not edit manually + +description = "Update job schema and adapters with any new hook events the platform supports" + +prompt = """ +# add_platform:add_capabilities + +**Step 2 of 4** in the **add_platform** workflow + +**Summary**: Add a new AI platform to DeepWork with adapter, templates, and tests + +## Job Overview + +A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + +This job guides you through four phases: +1. **Research**: Capture the platform's CLI configuration and hooks system documentation +2. **Add Capabilities**: Update the job schema and adapters with any new hook events +3. 
**Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates +4. **Verify**: Ensure installation works correctly and produces expected files + +The workflow ensures consistency across all supported platforms and maintains +comprehensive test coverage for new functionality. + +**Important Notes**: +- Only hooks available on slash command definitions should be captured +- Each existing adapter must be updated when new hooks are added (typically with null values) +- Tests must achieve 100% coverage for any new functionality +- Installation verification confirms the platform integrates correctly with existing jobs + + +## Prerequisites + +This step requires completion of the following step(s): +- `/add_platform:research` + +Please ensure these steps have been completed before proceeding. + +## Instructions + +# Add Hook Capabilities + +## Objective + +Update the DeepWork job schema and platform adapters to support any new hook events that the new platform provides for slash command definitions. + +## Task + +Analyze the hooks documentation from the research step and update the codebase to support any new hook capabilities, ensuring consistency across all existing adapters. + +### Prerequisites + +Read the hooks documentation created in the previous step: +- `doc/platforms//hooks_system.md` + +Also review the existing schema and adapters: +- `src/deepwork/schemas/job_schema.py` +- `src/deepwork/adapters.py` + +### Process + +1. **Analyze the new platform's hooks** + - Read `doc/platforms//hooks_system.md` + - List all hooks available for slash command definitions + - Compare with hooks already in `job_schema.py` + - Identify any NEW hooks not currently supported + +2. **Determine if schema changes are needed** + - If the platform has hooks that DeepWork doesn't currently support, add them + - If all hooks are already supported, document this finding + - Remember: Only add hooks that are available on slash command definitions + +3. 
**Update job_schema.py (if needed)** + - Add new hook fields to the step schema + - Follow existing patterns for hook definitions + - Add appropriate type hints and documentation + - Example addition: + ```python + # New hook from + new_hook_name: Optional[List[HookConfig]] = None + ``` + +4. **Update all existing adapters** + - Open `src/deepwork/adapters.py` + - For EACH existing adapter class: + - Add the new hook field (set to `None` if not supported) + - This maintains consistency across all adapters + - Document why each adapter does or doesn't support the hook + +5. **Validate the changes** + - Run Python syntax check: `python -m py_compile src/deepwork/schemas/job_schema.py` + - Run Python syntax check: `python -m py_compile src/deepwork/adapters.py` + - Ensure no import errors + +6. **Document the decision** + - If no new hooks were added, add a comment explaining why + - If new hooks were added, ensure they're documented in the schema + +## Output Format + +### job_schema.py + +Location: `src/deepwork/schemas/job_schema.py` + +If new hooks are added: +```python +@dataclass +class StepDefinition: + # ... existing fields ... + + # New hook from - [description of what it does] + new_hook_name: Optional[List[HookConfig]] = None +``` + +### adapters.py + +Location: `src/deepwork/adapters.py` + +For each existing adapter, add the new hook field: +```python +class ExistingPlatformAdapter(PlatformAdapter): + # ... existing code ... + + def get_hook_support(self) -> dict: + return { + # ... existing hooks ... 
+ "new_hook_name": None, # Not supported by this platform + } +``` + +Or if no changes are needed, add a documentation comment: +```python +# NOTE: hooks reviewed on YYYY-MM-DD +# No new hooks to add - all command hooks are already +# supported by the existing schema (stop_hooks covers their validation pattern) +``` + +## Quality Criteria + +- Hooks documentation from research step has been reviewed +- If new hooks exist: + - Added to `src/deepwork/schemas/job_schema.py` with proper typing + - ALL existing adapters updated in `src/deepwork/adapters.py` + - Each adapter indicates support level (implemented, None, or partial) +- If no new hooks needed: + - Decision documented with a comment explaining the analysis +- Only hooks available on slash command definitions are considered +- `job_schema.py` has no syntax errors (verified with py_compile) +- `adapters.py` has no syntax errors (verified with py_compile) +- All adapters have consistent hook fields (same fields across all adapters) +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +DeepWork supports multiple AI platforms, and each platform may have different capabilities for hooks within command definitions. The schema defines what hooks CAN exist, while adapters define what each platform actually SUPPORTS. + +This separation allows: +- Job definitions to use any hook (the schema is the superset) +- Platform-specific generation to only use supported hooks (adapters filter) +- Future platforms to add new hooks without breaking existing ones + +Maintaining consistency is critical - all adapters must have the same hook fields, even if they don't support them (use `None` for unsupported). 
+ +## Common Hook Types + +For reference, here are common hook patterns across platforms: + +| Hook Type | Purpose | Example Platforms | +|-----------|---------|-------------------| +| `stop_hooks` | Quality validation loops | Claude Code | +| `pre_hooks` | Run before command | Various | +| `post_hooks` | Run after command | Various | +| `validation_hooks` | Validate inputs/outputs | Various | + +When you find a new hook type, consider whether it maps to an existing pattern or is genuinely new functionality. + + +## Inputs + + +### Required Files + +This step requires the following files from previous steps: +- `hooks_system.md` (from step `research`) + +Make sure to read and use these files as context for this step. + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/add_platform-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/add_platform-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `job_schema.py` +- `adapters.py` + +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. 
**Inform the user**: + - Step 2 of 4 is complete + - Outputs created: job_schema.py, adapters.py + - Ready to proceed to next step: `/add_platform:implement` + +## Next Step + +To continue the workflow, run: +``` +/add_platform:implement +``` + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/add_platform/job.yml` +- Step instructions: `.deepwork/jobs/add_platform/steps/add_capabilities.md` +""" \ No newline at end of file diff --git a/.gemini/commands/add_platform/implement.toml b/.gemini/commands/add_platform/implement.toml new file mode 100644 index 00000000..39c5c379 --- /dev/null +++ b/.gemini/commands/add_platform/implement.toml @@ -0,0 +1,335 @@ +# add_platform:implement +# +# Add platform adapter, templates, tests with 100% coverage, and README documentation +# +# Generated by DeepWork - do not edit manually + +description = "Add platform adapter, templates, tests with 100% coverage, and README documentation" + +prompt = """ +# add_platform:implement + +**Step 3 of 4** in the **add_platform** workflow + +**Summary**: Add a new AI platform to DeepWork with adapter, templates, and tests + +## Job Overview + +A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + +This job guides you through four phases: +1. **Research**: Capture the platform's CLI configuration and hooks system documentation +2. **Add Capabilities**: Update the job schema and adapters with any new hook events +3. **Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates +4. **Verify**: Ensure installation works correctly and produces expected files + +The workflow ensures consistency across all supported platforms and maintains +comprehensive test coverage for new functionality. 
+ +**Important Notes**: +- Only hooks available on slash command definitions should be captured +- Each existing adapter must be updated when new hooks are added (typically with null values) +- Tests must achieve 100% coverage for any new functionality +- Installation verification confirms the platform integrates correctly with existing jobs + + +## Prerequisites + +This step requires completion of the following step(s): +- `/add_platform:research` +- `/add_platform:add_capabilities` + +Please ensure these steps have been completed before proceeding. + +## Instructions + +# Implement Platform Support + +## Objective + +Create the complete platform implementation including the adapter class, command templates, comprehensive tests, and documentation updates. + +## Task + +Build the full platform support by implementing the adapter, creating templates, writing tests with 100% coverage, and updating the README. + +### Prerequisites + +Read the outputs from previous steps: +- `doc/platforms//cli_configuration.md` - For template structure +- `src/deepwork/schemas/job_schema.py` - For current schema +- `src/deepwork/adapters.py` - For adapter patterns + +Also review existing implementations for reference: +- `src/deepwork/templates/claude/` - Example templates +- `tests/` - Existing test patterns + +### Process + +1. **Create the platform adapter class** + + Add a new adapter class to `src/deepwork/adapters.py`: + + ```python + class NewPlatformAdapter(PlatformAdapter): + """Adapter for .""" + + platform_name = "" + command_directory = "" # e.g., ".cursor/commands" + command_extension = ".md" # or appropriate extension + + def get_hook_support(self) -> dict: + """Return which hooks this platform supports.""" + return { + "stop_hooks": True, # or False/None + # ... 
other hooks + } + + def generate_command(self, step: StepDefinition, job: JobDefinition) -> str: + """Generate command file content for this platform.""" + # Use Jinja2 template + template = self.env.get_template(f"{self.platform_name}/command.md.j2") + return template.render(step=step, job=job) + ``` + +2. **Create command templates** + + Create templates in `src/deepwork/templates//`: + + - `command.md.j2` - Main command template + - Any other templates needed for the platform's format + + Use the CLI configuration documentation to ensure the template matches the platform's expected format. + +3. **Register the adapter** + + Update the adapter registry in `src/deepwork/adapters.py`: + + ```python + PLATFORM_ADAPTERS = { + "claude": ClaudeAdapter, + "": NewPlatformAdapter, + # ... other adapters + } + ``` + +4. **Write comprehensive tests** + + Create tests in `tests/` that cover: + + - Adapter instantiation + - Hook support detection + - Command generation + - Template rendering + - Edge cases (empty inputs, special characters, etc.) + - Integration with the sync command + + **Critical**: Tests must achieve 100% coverage of new code. + +5. **Update README.md** + + Add the new platform to `README.md`: + + - Add to "Supported Platforms" list + - Add installation instructions: + ```bash + deepwork install --platform + ``` + - Document any platform-specific notes or limitations + +6. **Run tests and verify coverage** + + ```bash + uv run pytest --cov=src/deepwork --cov-report=term-missing + ``` + + - All tests must pass + - New code must have 100% coverage + - If coverage is below 100%, add more tests + +7. **Iterate until tests pass with full coverage** + + This step has a `stop_hooks` script that runs tests. 
Keep iterating until: + - All tests pass + - Coverage is 100% for new functionality + +## Output Format + +### templates/ + +Location: `src/deepwork/templates//` + +Create the following files: + +**command.md.j2**: +```jinja2 +{# Template for command files #} +{# Follows the platform's expected format from cli_configuration.md #} + +[Platform-specific frontmatter or metadata] + +# {{ step.name }} + +{{ step.description }} + +## Instructions + +{{ step.instructions_content }} + +[... rest of template based on platform format ...] +``` + +### tests/ + +Location: `tests/test__adapter.py` + +```python +"""Tests for the adapter.""" +import pytest +from deepwork.adapters import NewPlatformAdapter + +class TestNewPlatformAdapter: + """Test suite for NewPlatformAdapter.""" + + def test_adapter_initialization(self): + """Test adapter can be instantiated.""" + adapter = NewPlatformAdapter() + assert adapter.platform_name == "" + + def test_hook_support(self): + """Test hook support detection.""" + adapter = NewPlatformAdapter() + hooks = adapter.get_hook_support() + assert "stop_hooks" in hooks + # ... more assertions + + def test_command_generation(self): + """Test command file generation.""" + # ... test implementation + + # ... 
more tests for 100% coverage +``` + +### README.md + +Add to the existing README.md: + +```markdown +## Supported Platforms + +- **Claude Code** - Anthropic's CLI for Claude +- **** - [Brief description] + +## Installation + +### + +```bash +deepwork install --platform +``` + +[Any platform-specific notes] +``` + +## Quality Criteria + +- Platform adapter class added to `src/deepwork/adapters.py`: + - Inherits from `PlatformAdapter` + - Implements all required methods + - Registered in `PLATFORM_ADAPTERS` +- Templates created in `src/deepwork/templates//`: + - `command.md.j2` exists and renders correctly + - Format matches platform's expected command format +- Tests created in `tests/`: + - Cover all new adapter functionality + - Cover template rendering + - All tests pass +- Test coverage is 100% for new code: + - Run `uv run pytest --cov=src/deepwork --cov-report=term-missing` + - No uncovered lines in new code +- README.md updated: + - Platform listed in supported platforms + - Installation command documented + - Any platform-specific notes included +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +This is the core implementation step. The adapter you create will be responsible for: +- Determining where command files are placed +- Generating command file content from job definitions +- Handling platform-specific features and hooks + +The templates use Jinja2 and should produce files that match exactly what the platform expects. Reference the CLI configuration documentation frequently to ensure compatibility. 
+ +## Tips + +- Study the existing `ClaudeAdapter` as a reference implementation +- Run tests frequently as you implement +- Use `--cov-report=html` for a detailed coverage report +- If a test is hard to write, the code might need refactoring +- Template syntax errors often show up at runtime - test early + + +## Inputs + + +### Required Files + +This step requires the following files from previous steps: +- `job_schema.py` (from step `add_capabilities`) +- `adapters.py` (from step `add_capabilities`) +- `cli_configuration.md` (from step `research`) + +Make sure to read and use these files as context for this step. + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/add_platform-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/add_platform-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `templates/` (directory) +- `tests/` (directory) +- `README.md` + +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. 
**Inform the user**: + - Step 3 of 4 is complete + - Outputs created: templates/, tests/, README.md + - Ready to proceed to next step: `/add_platform:verify` + +## Next Step + +To continue the workflow, run: +``` +/add_platform:verify +``` + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/add_platform/job.yml` +- Step instructions: `.deepwork/jobs/add_platform/steps/implement.md` +""" \ No newline at end of file diff --git a/.gemini/commands/add_platform/research.toml b/.gemini/commands/add_platform/research.toml new file mode 100644 index 00000000..4be39e9a --- /dev/null +++ b/.gemini/commands/add_platform/research.toml @@ -0,0 +1,285 @@ +# add_platform:research +# +# Capture CLI configuration and hooks system documentation for the new platform +# +# Generated by DeepWork - do not edit manually + +description = "Capture CLI configuration and hooks system documentation for the new platform" + +prompt = """ +# add_platform:research + +**Step 1 of 4** in the **add_platform** workflow + +**Summary**: Add a new AI platform to DeepWork with adapter, templates, and tests + +## Job Overview + +A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + +This job guides you through four phases: +1. **Research**: Capture the platform's CLI configuration and hooks system documentation +2. **Add Capabilities**: Update the job schema and adapters with any new hook events +3. **Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates +4. **Verify**: Ensure installation works correctly and produces expected files + +The workflow ensures consistency across all supported platforms and maintains +comprehensive test coverage for new functionality. 
+ +**Important Notes**: +- Only hooks available on slash command definitions should be captured +- Each existing adapter must be updated when new hooks are added (typically with null values) +- Tests must achieve 100% coverage for any new functionality +- Installation verification confirms the platform integrates correctly with existing jobs + + + +## Instructions + +# Research Platform Documentation + +## Objective + +Capture comprehensive documentation for the new AI platform's CLI configuration and hooks system, creating a local reference that will guide the implementation phases. + +## Task + +Research the target platform's official documentation and create two focused documentation files that will serve as the foundation for implementing platform support in DeepWork. + +### Process + +1. **Identify the platform's documentation sources** + - Find the official documentation website + - Locate the CLI/agent configuration documentation + - Find the hooks or customization system documentation + - Note: Focus ONLY on slash command/custom command hooks, not general CLI hooks + +2. **Gather CLI configuration documentation** + - How is the CLI configured? (config files, environment variables, etc.) + - Where are custom commands/skills stored? + - What is the command file format? (markdown, YAML, etc.) + - What metadata or frontmatter is supported? + - How does the platform discover and load commands? + +3. **Gather hooks system documentation** + - What hooks are available for custom command definitions? + - Focus on hooks that trigger during or after command execution + - Examples: `stop_hooks`, `pre_hooks`, `post_hooks`, validation hooks + - Document the syntax and available hook types + - **Important**: Only document hooks available on slash command definitions, not general CLI hooks + +4. 
**Create the documentation files**
+   - Place files in `doc/platforms/<platform_name>/`
+   - Each file must have a header comment with source and date
+   - Content should be comprehensive but focused
+
+## Output Format
+
+### cli_configuration.md
+
+Located at: `doc/platforms/<platform_name>/cli_configuration.md`
+
+**Structure**:
+```markdown
+<!-- Last updated: YYYY-MM-DD -->
+<!-- Source: [documentation URL] -->
+
+# CLI Configuration
+
+## Overview
+
+[Brief description of the platform and its CLI/agent system]
+
+## Configuration Files
+
+[Document where configuration lives and its format]
+
+### File Locations
+
+- [Location 1]: [Purpose]
+- [Location 2]: [Purpose]
+
+### Configuration Format
+
+[Show the configuration file format with examples]
+
+## Custom Commands/Skills
+
+[Document how custom commands are defined]
+
+### Command Location
+
+[Where command files are stored]
+
+### Command File Format
+
+[The format of command files - markdown, YAML, etc.]
+
+### Metadata/Frontmatter
+
+[What metadata fields are supported in command files]
+
+```[format]
+[Example of a minimal command file]
+```
+
+## Command Discovery
+
+[How the platform discovers and loads commands]
+
+## Platform-Specific Features
+
+[Any unique features relevant to command configuration]
+```
+
+### hooks_system.md
+
+Located at: `doc/platforms/<platform_name>/hooks_system.md`
+
+**Structure**:
+```markdown
+<!-- Last updated: YYYY-MM-DD -->
+<!-- Source: [documentation URL] -->
+
+# Hooks System (Command Definitions)
+
+## Overview
+
+[Brief description of hooks available for command definitions]
+
+**Important**: This document covers ONLY hooks available within slash command/skill definitions, not general CLI hooks. 
+
+## Available Hooks
+
+### [Hook Name 1]
+
+**Purpose**: [What this hook does]
+
+**Syntax**:
+```yaml
+[hook_name]:
+  - [configuration]
+```
+
+**Example**:
+```yaml
+[Complete example of using this hook]
+```
+
+**Behavior**: [When and how this hook executes]
+
+### [Hook Name 2]
+
+[Repeat for each available hook]
+
+## Hook Execution Order
+
+[Document the order in which hooks execute, if multiple are supported]
+
+## Comparison with Other Platforms
+
+| Feature | <platform_name> | Claude Code | Other |
+|---------|-----------|-------------|-------|
+| [Feature 1] | [Support] | [Support] | [Support] |
+
+## Limitations
+
+[Any limitations or caveats about the hooks system]
+```
+
+## Quality Criteria
+
+- Both files exist in `doc/platforms/<platform_name>/`
+- Each file has a header comment with:
+  - Last updated date (YYYY-MM-DD format)
+  - Source URL where documentation was obtained
+- `cli_configuration.md` comprehensively covers:
+  - Configuration file locations and format
+  - Custom command file format and location
+  - Command discovery mechanism
+- `hooks_system.md` comprehensively covers:
+  - All hooks available for slash command definitions
+  - Syntax and examples for each hook
+  - NOT general CLI hooks (only command-level hooks)
+- Documentation is detailed enough to implement the platform adapter
+- No extraneous topics (only CLI config and command hooks)
+- When all criteria are met, include `QUALITY_COMPLETE` in your response
+
+## Context
+
+This is the foundation step for adding a new platform to DeepWork. The documentation you capture here will be referenced throughout the implementation process:
+- CLI configuration informs how to generate command files
+- Hooks documentation determines what features the adapter needs to support
+- This documentation becomes a permanent reference in `doc/platforms/`
+
+Take time to be thorough - incomplete documentation will slow down subsequent steps. 
+ +## Tips + +- Use the platform's official documentation as the primary source +- If documentation is sparse, check GitHub repos, community guides, or changelog entries +- When in doubt about whether something is a "command hook" vs "CLI hook", err on the side of inclusion and note the ambiguity +- Include code examples from the official docs where available + + +## Inputs + +### User Parameters + +Please gather the following information from the user: +- **platform_name**: Clear identifier of the platform (e.g., 'cursor', 'windsurf-editor', 'github-copilot-chat') + + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/add_platform-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/add_platform-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `cli_configuration.md` +- `hooks_system.md` + +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. 
**Inform the user**: + - Step 1 of 4 is complete + - Outputs created: cli_configuration.md, hooks_system.md + - Ready to proceed to next step: `/add_platform:add_capabilities` + +## Next Step + +To continue the workflow, run: +``` +/add_platform:add_capabilities +``` + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/add_platform/job.yml` +- Step instructions: `.deepwork/jobs/add_platform/steps/research.md` +""" \ No newline at end of file diff --git a/.gemini/commands/add_platform/verify.toml b/.gemini/commands/add_platform/verify.toml new file mode 100644 index 00000000..040fd991 --- /dev/null +++ b/.gemini/commands/add_platform/verify.toml @@ -0,0 +1,211 @@ +# add_platform:verify +# +# Set up platform directories and verify deepwork install works correctly +# +# Generated by DeepWork - do not edit manually + +description = "Set up platform directories and verify deepwork install works correctly" + +prompt = """ +# add_platform:verify + +**Step 4 of 4** in the **add_platform** workflow + +**Summary**: Add a new AI platform to DeepWork with adapter, templates, and tests + +## Job Overview + +A workflow for adding support for a new AI platform (like Cursor, Windsurf, etc.) to DeepWork. + +This job guides you through four phases: +1. **Research**: Capture the platform's CLI configuration and hooks system documentation +2. **Add Capabilities**: Update the job schema and adapters with any new hook events +3. **Implement**: Create the platform adapter, templates, tests (100% coverage), and README updates +4. **Verify**: Ensure installation works correctly and produces expected files + +The workflow ensures consistency across all supported platforms and maintains +comprehensive test coverage for new functionality. 
+
+**Important Notes**:
+- Only hooks available on slash command definitions should be captured
+- Each existing adapter must be updated when new hooks are added (typically with null values)
+- Tests must achieve 100% coverage for any new functionality
+- Installation verification confirms the platform integrates correctly with existing jobs
+
+
+## Prerequisites
+
+This step requires completion of the following step(s):
+- `/add_platform:implement`
+
+Please ensure these steps have been completed before proceeding.
+
+## Instructions
+
+# Verify Installation
+
+## Objective
+
+Ensure the new platform integration works correctly by setting up necessary directories and running the full installation process.
+
+## Task
+
+Perform end-to-end verification that the new platform can be installed and that DeepWork's standard jobs work correctly with it.
+
+### Prerequisites
+
+Ensure the implementation step is complete:
+- Adapter class exists in `src/deepwork/adapters.py`
+- Templates exist in `src/deepwork/templates/<platform_name>/`
+- Tests pass with 100% coverage
+- README.md is updated
+
+### Process
+
+1. **Set up platform directories in the DeepWork repo**
+
+   The DeepWork repository itself should have the platform's command directory structure for testing:
+
+   ```bash
+   mkdir -p <platform_command_directory>
+   ```
+
+   For example:
+   - Claude: `.claude/commands/`
+   - Cursor: `.cursor/commands/` (or wherever Cursor stores commands)
+
+2. **Run deepwork install for the new platform**
+
+   ```bash
+   deepwork install --platform <platform_name>
+   ```
+
+   Verify:
+   - Command completes without errors
+   - No Python exceptions or tracebacks
+   - Output indicates successful installation
+
+3. **Check that command files were created**
+
+   List the generated command files:
+   ```bash
+   ls -la <platform_command_directory>/
+   ```
+
+   Verify:
+   - `deepwork_jobs.define.md` exists (or equivalent for the platform)
+   - `deepwork_jobs.implement.md` exists
+   - `deepwork_jobs.refine.md` exists
+   - `deepwork_policy.define.md` exists
+   - All expected step commands exist
+
+4. 
**Validate command file content** + + Read each generated command file and verify: + - Content matches the expected format for the platform + - Job metadata is correctly included + - Step instructions are properly rendered + - Any platform-specific features (hooks, frontmatter) are present + +5. **Test alongside existing platforms** + + If other platforms are already installed, verify they still work: + ```bash + deepwork install --platform claude + ls -la .claude/commands/ + ``` + + Ensure: + - New platform doesn't break existing installations + - Each platform's commands are independent + - No file conflicts or overwrites + +## Quality Criteria + +- Platform-specific directories are set up in the DeepWork repo +- `deepwork install --platform ` completes without errors +- All expected command files are created: + - deepwork_jobs.define, implement, refine + - deepwork_policy.define + - Any other standard job commands +- Command file content is correct: + - Matches platform's expected format + - Job/step information is properly rendered + - No template errors or missing content +- Existing platforms still work (if applicable) +- No conflicts between platforms +- When all criteria are met, include `QUALITY_COMPLETE` in your response + +## Context + +This is the final validation step before the platform is considered complete. A thorough verification ensures: +- The platform actually works, not just compiles +- Standard DeepWork jobs install correctly +- The platform integrates properly with the existing system +- Users can confidently use the new platform + +Take time to verify each aspect - finding issues now is much better than having users discover them later. 
+ +## Common Issues to Check + +- **Template syntax errors**: May only appear when rendering specific content +- **Path issues**: Platform might expect different directory structure +- **Encoding issues**: Special characters in templates or content +- **Missing hooks**: Platform adapter might not handle all hook types +- **Permission issues**: Directory creation might fail in some cases + + +## Inputs + + +### Required Files + +This step requires the following files from previous steps: +- `templates/` (from step `implement`) + +Make sure to read and use these files as context for this step. + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/add_platform-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/add_platform-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +No specific files are output by this command. + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. **Inform the user**: + - Step 4 of 4 is complete + - This is the final step - the job is complete! + +## Workflow Complete + +This is the final step in the add_platform workflow. All outputs should now be complete and ready for review. 
+ +Consider: +- Reviewing all work products +- Creating a pull request to merge the work branch +- Documenting any insights or learnings + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/add_platform/job.yml` +- Step instructions: `.deepwork/jobs/add_platform/steps/verify.md` +""" \ No newline at end of file diff --git a/.gemini/commands/deepwork_jobs/define.toml b/.gemini/commands/deepwork_jobs/define.toml new file mode 100644 index 00000000..5e7ea51b --- /dev/null +++ b/.gemini/commands/deepwork_jobs/define.toml @@ -0,0 +1,448 @@ +# deepwork_jobs:define +# +# Create the job.yml specification file by understanding workflow requirements +# +# Generated by DeepWork - do not edit manually + +description = "Create the job.yml specification file by understanding workflow requirements" + +prompt = """ +# deepwork_jobs:define + +**Step 1 of 3** in the **deepwork_jobs** workflow + +**Summary**: DeepWork job management commands + +## Job Overview + +Core commands for managing DeepWork jobs. These commands help you define new multi-step +workflows and refine existing ones. + +The `define` command guides you through an interactive process to create a new job by +asking detailed questions about your workflow, understanding each step's inputs and outputs, +and generating all necessary files. + +The `refine` command helps you modify existing jobs safely by understanding what you want +to change, validating the impact, and ensuring consistency across your workflow. + + + +## Instructions + +# Define Job Specification + +## Objective + +Create a `job.yml` specification file that defines the structure of a new DeepWork job by thoroughly understanding the user's workflow requirements through an interactive question-and-answer process. + +## Task + +Guide the user through defining a job specification by asking clarifying questions. 
**Do not attempt to create the specification without first fully understanding the user's needs.** + +The output of this step is **only** the `job.yml` file - a complete specification of the workflow. The actual step instruction files will be created in the next step (`implement`). + +### Step 1: Understand the Job Purpose + +Start by asking questions to understand what the user wants to accomplish: + +1. **What is the overall goal of this workflow?** + - What complex task are they trying to accomplish? + - What domain is this in? (e.g., research, marketing, development, reporting) + - How often will they run this workflow? + +2. **What does success look like?** + - What's the final deliverable or outcome? + - Who is the audience for the output? + - What quality criteria matter most? + +3. **What are the major phases?** + - Ask them to describe the workflow at a high level + - What are the distinct stages from start to finish? + - Are there any dependencies between phases? + +### Step 2: Define Each Step + +For each major phase they mentioned, ask detailed questions: + +1. **Step Purpose** + - What exactly does this step accomplish? + - What is the input to this step? + - What is the output from this step? + +2. **Step Inputs** + - What information is needed to start this step? + - Does it need user-provided parameters? (e.g., topic, target audience) + - Does it need files from previous steps? + - What format should inputs be in? + +3. **Step Outputs** + - What files or artifacts does this step produce? + - What format should the output be in? (markdown, YAML, JSON, etc.) + - Where should each output be saved? (filename/path) + - Should outputs be organized in subdirectories? (e.g., `reports/`, `data/`, `drafts/`) + - Will other steps need this output? + +4. **Step Dependencies** + - Which previous steps must complete before this one? + - Are there any ordering constraints? + +5. 
**Step Process** (high-level understanding) + - What are the key activities in this step? + - Are there any quality checks or validation needed? + - What makes a good vs. bad output for this step? + +**Note**: You're gathering this information to understand what instructions will be needed, but you won't create the instruction files yet - that happens in the `implement` step. + +### Step 3: Validate the Workflow + +After gathering information about all steps: + +1. **Review the flow** + - Summarize the complete workflow + - Show how outputs from one step feed into the next + - Ask if anything is missing + +2. **Check for gaps** + - Are there any steps where the input isn't clearly defined? + - Are there any outputs that aren't used by later steps? + - Are there circular dependencies? + +3. **Confirm details** + - Job name (lowercase, underscores, descriptive) + - Job summary (one clear sentence, max 200 chars) + - Job description (detailed multi-line explanation) + - Version number (start with 1.0.0) + +### Step 4: Define Quality Validation (Stop Hooks) + +For each step, consider whether it would benefit from **quality validation loops**. Stop hooks allow the AI agent to iteratively refine its work until quality criteria are met. + +**Ask the user about quality validation:** +- "Are there specific quality criteria that must be met for this step?" +- "Would you like the agent to validate its work before completing?" +- "What would make you send the work back for revision?" + +**Stop hooks are particularly valuable for:** +- Steps with complex outputs that need multiple checks +- Steps where quality is critical (final deliverables) +- Steps with subjective quality criteria that benefit from AI self-review + +**Three types of stop hooks are supported:** + +1. **Inline Prompt** (`prompt`) - Best for simple quality criteria + ```yaml + stop_hooks: + - prompt: | + Verify the output meets these criteria: + 1. Contains at least 5 competitors + 2. 
Each competitor has a description + 3. Selection rationale is clear + ``` + +2. **Prompt File** (`prompt_file`) - For detailed/reusable criteria + ```yaml + stop_hooks: + - prompt_file: hooks/quality_check.md + ``` + +3. **Script** (`script`) - For programmatic validation (tests, linting) + ```yaml + stop_hooks: + - script: hooks/run_tests.sh + ``` + +**Multiple hooks can be combined:** +```yaml +stop_hooks: + - script: hooks/lint_output.sh + - prompt: "Verify the content is comprehensive and well-organized" +``` + +**Encourage prompt-based hooks** - They leverage the AI's ability to understand context and make nuanced quality judgments. Script hooks are best for objective checks (syntax, format, tests). + +### Step 5: Create the job.yml Specification + +Only after you have complete understanding, create the `job.yml` file: + +**File Location**: `.deepwork/jobs/[job_name]/job.yml` + +(Where `[job_name]` is the name of the NEW job you're creating, e.g., `.deepwork/jobs/competitive_research/job.yml`) + +**Format**: +```yaml +name: [job_name] +version: "1.0.0" +summary: "[Brief one-line summary of what this job accomplishes]" +description: | + [Detailed multi-line description of the job's purpose, process, and goals. 
+ + This should explain: + - What problem this workflow solves + - What the overall process looks like + - What the end result will be + - Who the intended users are + - Any important context about the workflow] + +changelog: + - version: "1.0.0" + changes: "Initial job creation" + +steps: + - id: [step_id] + name: "[Step Name]" + description: "[What this step does]" + instructions_file: steps/[step_id].md + inputs: + - name: [param_name] + description: "[What user needs to provide]" + # OR for file inputs from previous steps: + # - file: [filename_or_path] + # from_step: [previous_step_id] + outputs: + - [output_filename_or_path] # e.g., "report.md" or "reports/analysis.md" + dependencies: [] # List of step IDs that must complete first + # Optional: Quality validation hooks + stop_hooks: + - prompt: | + Verify this step's output meets quality criteria: + 1. [Criterion 1] + 2. [Criterion 2] + If ALL criteria are met, include `QUALITY_COMPLETE`. + + - id: [another_step] + name: "[Another Step]" + description: "[What this step does]" + instructions_file: steps/[another_step].md + inputs: + - file: [output_filename_or_path] + from_step: [step_id] + outputs: + - [another_output_path] + dependencies: + - [step_id] # This step requires the previous step +``` + +**Important**: +- Use lowercase with underscores for job name and step IDs +- Ensure file inputs reference steps in dependencies +- No circular dependencies +- At least one output per step +- The `summary` should be concise (max 200 chars) +- The `description` should provide rich context for future refinement +- Include a `changelog` section with an initial entry for version 1.0.0 + +## Example Dialog + +``` +Claude: I'll help you define a new DeepWork job. Let's start by understanding what you're trying to accomplish. + +What complex task or workflow are you trying to create? What's the overall goal? + +User: I want to create competitive research reports for my company + +Claude: Great! Let's dig into that. 
A few questions: + +1. What's the final deliverable? Is it a written report, a presentation, or something else? +2. Who is the audience? Your team, executives, clients? +3. How comprehensive should these reports be? + +User: It's a written report for our product team. Should be comprehensive - analyzing 3-5 competitors. + +Claude: Perfect. Now let's think about the major phases of creating this report. What are the main stages you go through? For example: +- Identifying which competitors to analyze? +- Gathering information about them? +- Analyzing the data? +- Writing the report? + +Or is your process different? + +User: Yes, that's close. I would say: +1. Identify competitors +2. Research each one +3. Create comparison analysis +4. Write positioning recommendations + +Claude: Excellent! Now let's define each step in detail. Starting with "Identify competitors": + +1. What inputs do you need to start? Do you already have a market segment in mind, or do you need to specify that? +2. What should the output look like? A list of competitor names? More detailed? +3. How many competitors should be identified? + +User: I need to specify the market segment and product category as inputs. The output should be a markdown file with 5-7 competitors and brief descriptions of each. + +Claude: Perfect. A few more questions about that output: +1. What should the file be called? Something like `competitors_list.md`? +2. Should it go in the project root, or in a subdirectory like `research/` or `data/`? + +User: Just call it `competitors_list.md` in the project root. + +Claude: Great. Now for the "Research each one" step: +1. Does it use the competitor list from the first step as input? +2. What kind of research - product features, pricing, marketing, all of the above? +3. What format should the research output be in? +4. Where should that research be saved? + +[continues for each step...] 
+ +[After gathering all information:] + +Claude: Let me summarize the workflow I've designed based on our conversation: + +**Job: competitive_research** +Summary: Systematic competitive analysis workflow for product positioning + +Description: +A comprehensive workflow for analyzing competitors in your market segment. This job helps product teams understand the competitive landscape by systematically identifying competitors, researching their offerings, creating comparison matrices, and developing strategic positioning recommendations. + +The workflow produces: +- A vetted list of key competitors +- Detailed research notes on each competitor +- A comparison matrix highlighting key differentiators +- Strategic positioning recommendations + +Designed for product teams conducting quarterly competitive analysis. + +**Steps:** + +Step 1: identify_competitors +- Input: market_segment (user), product_category (user) +- Output: competitors_list.md (5-7 competitors with descriptions) + +Step 2: research_competitors +- Input: competitors_list.md (from step 1) +- Output: research_notes.md (detailed findings for each competitor) +- Depends on: identify_competitors + +Step 3: comparative_analysis +- Input: research_notes.md (from step 2) +- Output: comparison_matrix.md (side-by-side analysis) +- Depends on: research_competitors + +Step 4: positioning_recommendations +- Input: comparison_matrix.md (from step 3) +- Output: positioning_report.md (strategic recommendations) +- Depends on: comparative_analysis + +Does this accurately capture your workflow? Any changes needed? + +User: Yes, that's perfect! + +Claude: Great! Creating the job.yml specification now... + +[Creates .deepwork/jobs/competitive_research/job.yml with the complete spec] + +✓ Job specification created! + +**File created:** +- .deepwork/jobs/competitive_research/job.yml + +**Next step:** +Run `/deepwork_jobs.implement` to generate the instruction files for each step based on this specification. 
+``` + +## Important Guidelines + +1. **Focus on specification only** - Don't create instruction files yet +2. **Ask clarifying questions** - Never skip the discovery phase +3. **Rich context in description** - This helps with future refinement +4. **Validate understanding** - Summarize and confirm before creating +5. **Use examples** - Help users understand what good specifications look like +6. **Understand file organization** - Always ask where outputs should be saved and if subdirectories are needed + +## Validation Rules + +Before creating the job.yml, ensure: +- Job name: lowercase, underscores, no spaces +- Version: semantic versioning (1.0.0) +- Summary: concise, under 200 characters +- Description: detailed, provides context +- Step IDs: unique, descriptive, lowercase with underscores +- Dependencies: must reference existing step IDs +- File inputs: `from_step` must be in dependencies +- At least one output per step +- Outputs can be filenames (e.g., `report.md`) or paths (e.g., `reports/analysis.md`) +- File paths in outputs should match where files will actually be created +- No circular dependencies + +## Output Format + +### job.yml + +The complete YAML specification file (example shown in Step 5 above). + +**Location**: `.deepwork/jobs/[job_name]/job.yml` + +(Where `[job_name]` is the name of the new job being created) + +After creating the file: +1. Inform the user that the specification is complete +2. Recommend that they review the job.yml file +3. 
Tell them to run `/deepwork_jobs.implement` next + +## Quality Criteria + +- User fully understands what job they're creating +- All steps have clear inputs and outputs +- Dependencies make logical sense +- Summary is concise and descriptive +- Description provides rich context for future refinement +- Specification is valid YAML and follows the schema +- Ready for implementation step + + +## Inputs + +### User Parameters + +Please gather the following information from the user: +- **job_purpose**: What complex task or workflow are you trying to accomplish? + + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/deepwork_jobs-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/deepwork_jobs-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `job.yml` + +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. 
**Inform the user**: + - Step 1 of 3 is complete + - Outputs created: job.yml + - Ready to proceed to next step: `/deepwork_jobs:implement` + +## Next Step + +To continue the workflow, run: +``` +/deepwork_jobs:implement +``` + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/deepwork_jobs/job.yml` +- Step instructions: `.deepwork/jobs/deepwork_jobs/steps/define.md` +""" \ No newline at end of file diff --git a/.gemini/commands/deepwork_jobs/implement.toml b/.gemini/commands/deepwork_jobs/implement.toml new file mode 100644 index 00000000..3c1958e8 --- /dev/null +++ b/.gemini/commands/deepwork_jobs/implement.toml @@ -0,0 +1,532 @@ +# deepwork_jobs:implement +# +# Generate instruction files for each step based on the job.yml specification +# +# Generated by DeepWork - do not edit manually + +description = "Generate instruction files for each step based on the job.yml specification" + +prompt = """ +# deepwork_jobs:implement + +**Step 2 of 3** in the **deepwork_jobs** workflow + +**Summary**: DeepWork job management commands + +## Job Overview + +Core commands for managing DeepWork jobs. These commands help you define new multi-step +workflows and refine existing ones. + +The `define` command guides you through an interactive process to create a new job by +asking detailed questions about your workflow, understanding each step's inputs and outputs, +and generating all necessary files. + +The `refine` command helps you modify existing jobs safely by understanding what you want +to change, validating the impact, and ensuring consistency across your workflow. + + +## Prerequisites + +This step requires completion of the following step(s): +- `/deepwork_jobs:define` + +Please ensure these steps have been completed before proceeding. + +## Instructions + +# Implement Job Steps + +## Objective + +Generate the DeepWork job directory structure and instruction files for each step based on the `job.yml` specification created in the previous step. 
+ +## Task + +Read the `job.yml` specification file and create all the necessary files to make the job functional, including directory structure and step instruction files. Then sync the commands to make them available. + +### Step 1: Read and Validate the Specification + +1. **Locate the job.yml file** + - Read `.deepwork/jobs/[job_name]/job.yml` from the define step (Where `[job_name]` is the name of the new job that was created in the define step) + - Parse the YAML content + +2. **Validate the specification** + - Ensure it follows the schema (name, version, summary, description, steps) + - Check that all dependencies reference existing steps + - Verify no circular dependencies + - Confirm file inputs match dependencies + +3. **Extract key information** + - Job name, version, summary, description + - List of all steps with their details + - Understand the workflow structure + +### Step 2: Create Directory Structure + +Create the job directory in `.deepwork/jobs/[job_name]/`: + +```bash +mkdir -p .deepwork/jobs/[job_name]/steps +``` + +Files to create: +- `.deepwork/jobs/[job_name]/job.yml` +- `.deepwork/jobs/[job_name]/steps/[step_id].md` - One for each step + +### Step 3: Generate Step Instruction Files + +For each step in the job.yml, create a comprehensive instruction file at `.deepwork/jobs/[job_name]/steps/[step_id].md`. + +Each instruction file should follow this structure: + +```markdown +# [Step Name] + +## Objective + +[Clear statement of what this step accomplishes, derived from the step's description] + +## Task + +[Detailed instructions for completing this step, based on: +- The step's purpose +- Expected inputs and outputs +- The job's overall context +] + +### Process + +[Break down the step into substeps. Use the information gathered during define about: +- What needs to be done +- What makes a good output +- Any quality criteria +] + +1. [Substep 1] +2. [Substep 2] +3. 
[Substep 3] + +[If this step has user inputs, explain how to gather them] +[If this step has file inputs, explain how to use them] + +## Output Format + +### [output_filename_1] + +[Description of what should be in this output file] + +**Structure**: +```[file format] +[Example or template of what the output should look like] +``` + +[Repeat for each output file] + +## Quality Criteria + +[List what makes this step's output high quality: +- Completeness checks +- Format requirements +- Content requirements +] + +- [Quality criterion 1] +- [Quality criterion 2] +- [Quality criterion 3] + +## Context + +[Provide context from the job's overall description to help understand why this step matters and how it fits into the bigger picture] +``` + +**Guidelines for generating instructions:** + +1. **Use the job description** - The detailed description from job.yml provides crucial context +2. **Be specific** - Don't write generic instructions; tailor them to the step's purpose +3. **Provide examples** - Show what good output looks like +4. **Explain the "why"** - Help the user understand the step's role in the workflow +5. **Quality over quantity** - Detailed, actionable instructions are better than vague ones +6. **Align with stop hooks** - If the step has `stop_hooks` defined, ensure the quality criteria in the instruction file match the validation criteria in the hooks + +### Handling Stop Hooks + +If a step in the job.yml has `stop_hooks` defined, the generated instruction file should: + +1. **Mirror the quality criteria** - The "Quality Criteria" section should match what the stop hooks will validate +2. **Be explicit about success** - Help the agent understand when the step is truly complete +3. 
**Include the promise pattern** - Mention that `QUALITY_COMPLETE` should be included when criteria are met + +**Example: If the job.yml has:** +```yaml +- id: research_competitors + name: "Research Competitors" + stop_hooks: + - prompt: | + Verify the research meets criteria: + 1. Each competitor has at least 3 data points + 2. Sources are cited + 3. Information is current (within last year) +``` + +**The instruction file should include:** +```markdown +## Quality Criteria + +- Each competitor has at least 3 distinct data points +- All information is sourced with citations +- Data is current (from within the last year) +- When all criteria are met, include `QUALITY_COMPLETE` in your response +``` + +This alignment ensures the AI agent knows exactly what will be validated and can self-check before completing. + +### Step 4: Verify job.yml Location + +Verify that `job.yml` is in the correct location at `.deepwork/jobs/[job_name]/job.yml`. The define step should have created it there. If for some reason it's not there, you may need to create or move it. + +### Step 5: Sync Commands + +Run `deepwork sync` to generate the slash-commands for this job: + +```bash +deepwork sync +``` + +This will: +- Parse the job definition +- Generate slash-commands for each step +- Make the commands available in `.claude/commands/` (or appropriate platform directory) + +### Step 6: Reload Commands + +Instruct the user to reload commands in their current session: +- Run `/reload` command (if available) +- Or restart the Claude session + +### Step 7: Consider Policies for the New Job + +After implementing the job, consider whether there are **policies** that would help enforce quality or consistency when working with this job's domain. + +**What are policies?** + +Policies are automated guardrails defined in `.deepwork.policy.yml` that trigger when certain files change during an AI session. 
They help ensure: +- Documentation stays in sync with code +- Team guidelines are followed +- Architectural decisions are respected +- Quality standards are maintained + +**When to suggest policies:** + +Think about the job you just implemented and ask: +- Does this job produce outputs that other files depend on? +- Are there documentation files that should be updated when this job's outputs change? +- Are there quality checks or reviews that should happen when certain files in this domain change? +- Could changes to the job's output files impact other parts of the project? + +**Examples of policies that might make sense:** + +| Job Type | Potential Policy | +|----------|------------------| +| API Design | "Update API docs when endpoint definitions change" | +| Database Schema | "Review migrations when schema files change" | +| Competitive Research | "Update strategy docs when competitor analysis changes" | +| Feature Development | "Update changelog when feature files change" | +| Configuration Management | "Update install guide when config files change" | + +**How to offer policy creation:** + +If you identify one or more policies that would benefit the user, explain: +1. **What the policy would do** - What triggers it and what action it prompts +2. **Why it would help** - How it prevents common mistakes or keeps things in sync +3. **What files it would watch** - The trigger patterns + +Then ask the user: + +> "Would you like me to create this policy for you? I can run `/deepwork_policy.define` to set it up." + +If the user agrees, invoke the `/deepwork_policy.define` command to guide them through creating the policy. + +**Example dialogue:** + +``` +Based on the competitive_research job you just created, I noticed that when +competitor analysis files change, it would be helpful to remind you to update +your strategy documentation. 
+ +I'd suggest a policy like: +- **Name**: "Update strategy when competitor analysis changes" +- **Trigger**: `**/positioning_report.md` +- **Action**: Prompt to review and update `docs/strategy.md` + +Would you like me to create this policy? I can run `/deepwork_policy.define` to set it up. +``` + +**Note:** Not every job needs policies. Only suggest them when they would genuinely help maintain consistency or quality. Don't force policies where they don't make sense. + +## Example Implementation + +**Given this job.yml:** +```yaml +name: competitive_research +version: "1.0.0" +summary: "Systematic competitive analysis workflow" +description: | + A comprehensive workflow for analyzing competitors in your market segment. + Helps product teams understand the competitive landscape through systematic + identification, research, comparison, and positioning recommendations. + +steps: + - id: identify_competitors + name: "Identify Competitors" + description: "Identify 5-7 key competitors in the target market" + instructions_file: steps/identify_competitors.md + inputs: + - name: market_segment + description: "The market segment to analyze" + - name: product_category + description: "The product category" + outputs: + - competitors_list.md + dependencies: [] +``` + +**Generate this instruction file** (`.deepwork/jobs/competitive_research/steps/identify_competitors.md`): + +```markdown +# Identify Competitors + +## Objective + +Identify 5-7 key competitors in the target market segment to analyze for competitive positioning. + +## Task + +Research and identify the most relevant competitors in the specified market segment and product category. Focus on companies that directly compete for the same customer base and solve similar problems. + +### Process + +1. **Understand the market context** + - Review the market segment provided by the user + - Understand the product category boundaries + - Consider direct and indirect competitors + +2. 
**Research competitors** + - Search for companies in this space + - Look for market leaders and emerging players + - Consider different competitive dimensions (features, price, target market) + +3. **Select 5-7 key competitors** + - Prioritize direct competitors + - Include at least one market leader + - Include 1-2 emerging/innovative players + - Ensure diversity in the competitive set + +4. **Document each competitor** + - Company name + - Brief description (2-3 sentences) + - Why they're a relevant competitor + - Primary competitive dimension + +## Output Format + +### competitors_list.md + +A markdown document listing each competitor with context. + +**Structure**: +```markdown +# Competitor Analysis: [Market Segment] + +## Market Context +- **Segment**: [market segment] +- **Category**: [product category] +- **Analysis Date**: [current date] + +## Identified Competitors + +### 1. [Competitor Name] +**Description**: [2-3 sentence description of what they do] + +**Why Relevant**: [Why they're a key competitor in this space] + +**Competitive Dimension**: [What they compete on - e.g., price, features, market segment] + +[Repeat for each competitor, 5-7 total] + +## Selection Rationale + +[Brief paragraph explaining why these specific competitors were chosen and what dimensions of competition they represent] +``` + +## Quality Criteria + +- 5-7 competitors identified (not too few, not too many) +- Mix of established and emerging players +- All competitors are genuinely relevant to the market segment +- Each competitor has clear, specific description +- Selection rationale explains the competitive landscape +- Output is well-formatted and ready for use by next step + +## Context + +This is the foundation step for competitive analysis. The competitors identified here will be deeply researched in subsequent steps, so it's important to choose the right set. Focus on competitors that will provide strategic insights for positioning decisions. 
+``` + +## Important Guidelines + +1. **Read the spec carefully** - Understand the job's intent from the description +2. **Generate complete instructions** - Don't create placeholder or stub files +3. **Maintain consistency** - Use the same structure for all step instruction files +4. **Provide examples** - Show what good output looks like +5. **Use context** - The job description provides valuable context for each step +6. **Be specific** - Tailor instructions to the specific step, not generic advice + +## Validation Before Sync + +Before running `deepwork sync`, verify: +- All directories exist +- `job.yml` is in place +- All step instruction files exist (one per step) +- No file system errors + +## Output Format + +### implementation_summary.md + +After successful implementation, create a summary: + +```markdown +# Job Implementation Complete: [job_name] + +## Overview + +Successfully implemented the **[job_name]** workflow with [N] steps. + +**Summary**: [job summary] + +**Version**: [version] + +## Files Created + +### Job Definition +- `.deepwork/jobs/[job_name]/job.yml` + +### Step Instructions +- `.deepwork/jobs/[job_name]/steps/[step1_id].md` +- `.deepwork/jobs/[job_name]/steps/[step2_id].md` +[... list all step files ...] + + +## Generated Commands + +After running `deepwork sync`, the following slash-commands are now available: + +- `/[job_name].[step1_id]` - [step description] +- `/[job_name].[step2_id]` - [step description] +[... list all commands ...] + +## Next Steps + +1. **Reload commands**: Run `/reload` or restart your Claude session +2. **Start the workflow**: Run `/[job_name].[first_step_id]` to begin +3. **Test the job**: Try executing the first step to ensure everything works + +## Job Structure + +[Show the workflow diagram with step names and dependencies] + +Step 1: [step_name] + ↓ +Step 2: [step_name] + ↓ +Step 3: [step_name] + ↓ +[Final output] + +The job is now ready for use! 
+``` + +## Completion Checklist + +Before marking this step complete, ensure: +- [ ] job.yml validated and copied to job directory +- [ ] All step instruction files created +- [ ] Each instruction file is complete and actionable +- [ ] `deepwork sync` executed successfully +- [ ] Commands generated in platform directory +- [ ] User informed of next steps (reload commands) +- [ ] implementation_summary.md created +- [ ] Considered whether policies would benefit this job (Step 7) +- [ ] If policies suggested, offered to run `/deepwork_policy.define` + +## Quality Criteria + +- Job directory structure is correct +- All instruction files are complete (not stubs) +- Instructions are specific and actionable +- Output examples are provided in each instruction file +- Quality criteria defined for each step +- Sync completed successfully +- Commands available for use +- Thoughtfully considered relevant policies for the job domain + + +## Inputs + + +### Required Files + +This step requires the following files from previous steps: +- `job.yml` (from step `define`) + +Make sure to read and use these files as context for this step. + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/deepwork_jobs-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/deepwork_jobs-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `implementation_summary.md` + +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. 
**Inform the user**: + - Step 2 of 3 is complete + - Outputs created: implementation_summary.md + - Ready to proceed to next step: `/deepwork_jobs:refine` + +## Next Step + +To continue the workflow, run: +``` +/deepwork_jobs:refine +``` + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/deepwork_jobs/job.yml` +- Step instructions: `.deepwork/jobs/deepwork_jobs/steps/implement.md` +""" \ No newline at end of file diff --git a/.gemini/commands/deepwork_jobs/refine.toml b/.gemini/commands/deepwork_jobs/refine.toml new file mode 100644 index 00000000..5a29751b --- /dev/null +++ b/.gemini/commands/deepwork_jobs/refine.toml @@ -0,0 +1,538 @@ +# deepwork_jobs:refine +# +# Modify an existing job definition +# +# Generated by DeepWork - do not edit manually + +description = "Modify an existing job definition" + +prompt = """ +# deepwork_jobs:refine + +**Standalone command** in the **deepwork_jobs** job - can be run anytime + +**Summary**: DeepWork job management commands + +## Job Overview + +Core commands for managing DeepWork jobs. These commands help you define new multi-step +workflows and refine existing ones. + +The `define` command guides you through an interactive process to create a new job by +asking detailed questions about your workflow, understanding each step's inputs and outputs, +and generating all necessary files. + +The `refine` command helps you modify existing jobs safely by understanding what you want +to change, validating the impact, and ensuring consistency across your workflow. + + + +## Instructions + +# Refine Existing DeepWork Job + +## Objective + +Help the user modify an existing DeepWork job definition by understanding what they want to change and ensuring the modifications maintain consistency and validity. + +## Task + +Guide the user through refining a job by first understanding their existing job, then what they want to change, and finally implementing those changes safely. + +### Step 1: Select and Load Job + +1. 
**List available jobs** + - Scan `.deepwork/jobs/` directory for installed jobs + - Display installed jobs with versions and descriptions + - Ask which job to refine + +2. **Load job definition** + - Read `.deepwork/jobs/[job_name]/job.yml` + - Parse and understand the current structure + +3. **Show current structure** + - Display all steps with their names and descriptions + - Show the dependency flow + - Highlight key inputs and outputs + +### Step 2: Understand Desired Changes + +Ask the user what they want to change: + +**Change Options:** +1. Add a new step to the workflow +2. Modify step instructions +3. Change step inputs/outputs +4. Update dependencies between steps +5. Update job metadata (description, version) +6. Remove a step +7. Add or modify stop hooks (quality validation) + +**For each change, ask clarifying questions:** +- Why do they want to make this change? +- How will it improve the workflow? +- Are there dependencies or side effects to consider? + +### Step 3: Make Changes Safely + +Based on the user's selection: + +#### Adding a Step + +1. **Gather step details** (same as define command) + - What does this step accomplish? + - What are the inputs? User parameters or file inputs? + - What outputs does it produce? + - What are the dependencies? + +2. **Determine placement** + - Where should it go in the workflow? + - Before which existing step? + - Or at the end? + +3. **Validate placement** + - Can't depend on later steps if inserted early + - File inputs must come from dependencies + - Check for circular dependencies + +4. **Update files** + - Update `job.yml` with new step + - Create step instructions file in `steps/[step_id].md` + - Prepare changelog entry describing the addition + +#### Modifying Step Instructions + +1. **Select step to modify** + - Show list of steps + - Ask which one to modify + +2. **Understand the change** + - What's not working with current instructions? + - What should be different? + - Show current instructions + +3. 
**Update instructions** + - Modify `.deepwork/jobs/[job_name]/steps/[step_id].md` + - Keep the same structure (Objective, Task, Process, Output Format, Quality Criteria) + - Prepare changelog entry describing the modification + +#### Changing Inputs/Outputs + +1. **Select step to modify** +2. **Show current inputs and outputs** +3. **Understand the change** + - Adding or removing? + - Why is this needed? + +4. **Validate impact** + - If removing output: check if other steps depend on it (BREAKING CHANGE) + - If adding file input: ensure from_step is in dependencies + - If removing input: ensure it's not critical + +5. **Update job.yml** + - Prepare changelog entry describing the input/output changes + +#### Updating Dependencies + +1. **Select step to modify** +2. **Show current dependency graph** +3. **Understand the change** + - Adding or removing dependency? + - Why? + +4. **Validate** + - Check for circular dependencies + - Ensure all file inputs have matching dependencies + - Ensure dependency chain makes logical sense + +5. **Update job.yml** + - Prepare changelog entry describing the dependency changes + +#### Updating Metadata + +1. **Ask what to change** + - Description? + - Version? + +2. **If version change, explain semantic versioning** + - Major (x.0.0): Breaking changes (removing steps, removing outputs) + - Minor (0.x.0): New features, backwards compatible (adding steps) + - Patch (0.0.x): Bug fixes, improvements + +3. **Update job.yml** + - Prepare changelog entry describing the metadata changes + +#### Removing a Step + +1. **Select step to remove** + +2. **CRITICAL: Validate safety** + - Check if other steps depend on this step + - Check if other steps use outputs from this step + - If dependencies exist: **WARN** and suggest updating dependents first + - This is a BREAKING CHANGE - requires major version bump + +3. 
**If safe to remove** + - Remove from `job.yml` + - Delete step instructions file + - Suggest version bump + - Prepare changelog entry describing the removal + +#### Adding or Modifying Stop Hooks + +Stop hooks provide quality validation loops that ensure step outputs meet criteria before completing. + +1. **Select step to modify** + - Show list of steps + - Ask which one to add/modify hooks for + +2. **Understand the need** + - What quality criteria should be validated? + - Is the output subjective (use prompt hook) or objective (use script hook)? + - Should validation happen automatically or only on specific conditions? + +3. **Choose hook type** + + **Prompt hooks** (recommended for most cases): + - Best for subjective quality criteria + - AI evaluates the output against criteria + - Example: "Verify the report is comprehensive and well-organized" + ```yaml + stop_hooks: + - prompt: | + Verify the output meets criteria: + 1. Contains all required sections + 2. Analysis is thorough + 3. Recommendations are actionable + ``` + + **Prompt file hooks**: + - For reusable or complex validation criteria + - Stores criteria in a separate markdown file + ```yaml + stop_hooks: + - prompt_file: hooks/quality_check.md + ``` + + **Script hooks**: + - For objective, programmatic validation + - Best for tests, linting, format checking + ```yaml + stop_hooks: + - script: hooks/run_tests.sh + ``` + +4. **Multiple hooks can be combined** + ```yaml + stop_hooks: + - script: hooks/lint.sh # First: objective checks + - prompt: "Verify content quality" # Then: subjective review + ``` + +5. **Update files** + - Add/modify `stop_hooks` array in job.yml + - Create hook files if using prompt_file or script types + - Update step instructions to match quality criteria + - Prepare changelog entry + +6. 
**Encourage prompt-based hooks** + - They leverage the AI's ability to understand context + - More flexible than rigid script checks + - Can evaluate nuanced quality criteria + +### Step 4: Update Changelog + +After making changes to the job.yml: + +1. **Add a changelog entry** + - Add a new entry to the `changelog` array in the job.yml + - Use the new version number + - List all changes made in this refinement + +2. **Changelog entry format**: + ```yaml + - version: "[new_version]" + changes: "[Description of all changes in this version]" + ``` + +3. **Example changelog entries**: + - "Added step: validate_positioning" + - "Modified step instructions for research_competitors to improve clarity and add quality criteria" + - "Removed step: duplicate_analysis (consolidated into comparative_analysis)" + - "Updated dependencies: positioning_recommendations now depends on validate_positioning" + - "Changed output filename: comparison_matrix.md → comparison_table.md" + - "Added step: validate_positioning; Updated dependencies for positioning_recommendations" + +### Step 5: Validate Changes + +After updating the changelog: + +1. **Review the updated structure** + - Show the complete updated workflow + - Highlight what changed + - Check for consistency + +2. **Validate job definition** + - No circular dependencies + - All file inputs match dependencies + - All step IDs unique + - All outputs defined + +3. **Confirm version update** + - Ensure version was bumped appropriately + - Breaking changes? Major version + - New features? Minor version + - Improvements? Patch version + +### Step 6: Sync and Reload + +1. **Run sync** + ```bash + deepwork sync + ``` + +2. **Reload commands** + - Instruct user to run `/reload` or restart session + +3. 
**Provide summary** + - Show what changed + - List updated files + - Display the changelog entry + - Explain next steps + +## Safe Modification Patterns + +Guide users toward safe changes: + +### Safe Changes (Always OK) +- Adding steps at the end with new outputs +- Updating step instructions (content only) +- Changing job description +- Adding user inputs to a step +- Adding outputs (not removing) +- Patch version bumps + +### Potentially Breaking Changes (Warn User) +- Removing steps +- Removing outputs +- Removing inputs +- Changing dependencies +- Renaming outputs (other steps may reference them) +- Requires major or minor version bump + +### Always Validate +- Circular dependencies after any dependency change +- File inputs match dependencies after any change +- Step IDs remain unique +- At least one output per step + +## Example Dialog + +``` +Claude: I'll help you refine an existing job. Here are your installed jobs: + +1. competitive_research v1.0.0 - Systematic competitive analysis workflow +2. blog_post v1.0.0 - Blog post creation workflow + +Which job would you like to refine? + +User: 1 + +Claude: Loading competitive_research v1.0.0... + +Current structure: +- Step 1: identify_competitors - Identify 5-7 competitors in target market +- Step 2: research_competitors - Deep research on each competitor +- Step 3: comparative_analysis - Side-by-side comparison matrix +- Step 4: positioning_recommendations - Strategic positioning report + +What would you like to change? +1. Add a new step +2. Modify step instructions +3. Change inputs/outputs +4. Update dependencies +5. Update job metadata +6. Remove a step +7. Add or modify stop hooks + +User: 1 + +Claude: Great! Let's add a new step. First, help me understand: + +1. What should this new step accomplish? +2. Where should it fit in the workflow? + - Should it go between existing steps or at the end? + +User: I want to add a step that validates our product positioning before the final recommendations. 
It should go between step 3 and step 4. + +Claude: Excellent idea. So this would be: +- Step 4: validate_positioning +- Step 5: positioning_recommendations (renumbered) + +For this validation step: +1. What inputs does it need? I assume it needs the comparison_matrix.md from step 3? +2. What should it output? +3. What exactly should this validation check for? + +User: Yes, it needs the comparison matrix. It should output a validation_report.md that identifies gaps in our positioning. + +Claude: Perfect. Let me confirm the details: + +**New Step: validate_positioning** +- Name: "Validate Product Positioning" +- Description: "Identify gaps and opportunities in current positioning" +- Inputs: comparison_matrix.md (from comparative_analysis step) +- Outputs: validation_report.md +- Dependencies: comparative_analysis +- Position: Between step 3 and 4 + +Should the positioning_recommendations step also use the validation_report.md as input? + +User: Yes, definitely + +Claude: Great! So I'll also update the positioning_recommendations step to include validation_report.md as an input and add validate_positioning as a dependency. + +This is a new feature, so I'll bump the version to 1.1.0 (minor version). + +Creating the new step... ✓ + +Updated files: +- .deepwork/jobs/competitive_research/job.yml (added step, updated dependencies, version → 1.1.0, updated changelog) +- .deepwork/jobs/competitive_research/steps/validate_positioning.md (created) + +Changelog entry added: +```yaml +- version: "1.1.0" + changes: "Added step: validate_positioning between comparative_analysis and positioning_recommendations; Updated dependencies for positioning_recommendations" +``` + +Now run: +```bash +deepwork sync +``` + +Then reload your session and you'll have: +- /competitive_research.validate_positioning (new!) 
+- All other commands updated +``` + +## Validation Rules + +Enforce the same rules as job definition: +- Job name: lowercase, underscores +- Version: semantic versioning +- Step IDs: unique within job +- Dependencies: must reference existing step IDs +- File inputs: `from_step` must be in dependencies +- At least one output per step +- No circular dependencies + +## Error Handling + +If issues arise, provide clear guidance: +- **Dependency conflict**: "Step X depends on step Y which you're trying to remove. You must update step X first, or remove both steps." +- **Circular dependency**: "Adding this dependency would create a cycle: A → B → C → A. Please choose a different dependency structure." +- **Missing file input**: "Step X requires file.md from step Y, but Y is not in its dependencies. I'll add Y to the dependencies." +- **Breaking change**: "Removing this output is a breaking change. Other steps depend on it. I recommend against this change unless you update the dependent steps first." + +## Changelog Entry Format + +Instead of creating a separate refinement_summary.md file, add the changes directly to the job.yml changelog section. This creates a permanent version history within the job definition itself. 
+ +**Location**: `.deepwork/jobs/[job_name]/job.yml` + +**Add to the `changelog` array**: + +```yaml +changelog: + - version: "1.0.0" + changes: "Initial job creation" + - version: "[new_version]" + changes: "[Concise description of all changes in this version]" +``` + +**Guidelines for changelog entries**: +- Be concise but descriptive +- Use action verbs (Added, Modified, Removed, Updated, Changed, Fixed) +- Reference specific step names when relevant +- For breaking changes, prefix with "BREAKING:" +- If multiple changes, separate with semicolons or use clear phrasing + +**Examples**: +- "Added step: validate_positioning between comparative_analysis and positioning_recommendations" +- "Modified step instructions for research_competitors to improve clarity and add quality criteria" +- "Removed step: duplicate_analysis (consolidated into comparative_analysis)" +- "Updated dependencies: positioning_recommendations now depends on validate_positioning" +- "Changed output filename: comparison_matrix.md → comparison_table.md" +- "BREAKING: Removed output file shared_data.json from identify_competitors step" +- "Fixed circular dependency between steps A and B" +- "Updated job description to reflect new validation phase" +- "Added validate_positioning step; Updated dependencies for positioning_recommendations" + +## Quality Criteria + +- Changes maintain job consistency +- Dependencies are logically valid +- Version bump follows semantic versioning +- No circular dependencies introduced +- User understands impact of changes +- Breaking changes are clearly communicated + + +## Inputs + +### User Parameters + +Please gather the following information from the user: +- **job_name**: Name of the job to refine + + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. 
**Check current branch**: + - If already on a work branch for this job (format: `deepwork/deepwork_jobs-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/deepwork_jobs-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `job.yml` + +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. **Inform the user**: + - The refine command is complete + - Outputs created: job.yml + - This command can be run again anytime to make further changes + +## Command Complete + +This is a standalone command that can be run anytime. The outputs are ready for use. + +Consider: +- Reviewing the outputs +- Running `deepwork sync` if job definitions were changed +- Re-running this command later if further changes are needed + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/deepwork_jobs/job.yml` +- Step instructions: `.deepwork/jobs/deepwork_jobs/steps/refine.md` +""" \ No newline at end of file diff --git a/.gemini/commands/deepwork_policy/define.toml b/.gemini/commands/deepwork_policy/define.toml new file mode 100644 index 00000000..9333a940 --- /dev/null +++ b/.gemini/commands/deepwork_policy/define.toml @@ -0,0 +1,271 @@ +# deepwork_policy:define +# +# Create or update policy entries in .deepwork.policy.yml +# +# Generated by DeepWork - do not edit manually + +description = "Create or update policy entries in .deepwork.policy.yml" + +prompt = """ +# deepwork_policy:define + +**Standalone command** in the **deepwork_policy** job - can be run anytime + +**Summary**: Policy enforcement for AI agent sessions + +## Job Overview + +Manages policies that 
automatically trigger when certain files change during an AI agent session. +Policies help ensure that code changes follow team guidelines, documentation is updated, +and architectural decisions are respected. + +Policies are defined in a `.deepwork.policy.yml` file at the root of your project. Each policy +specifies: +- Trigger patterns: Glob patterns for files that, when changed, should trigger the policy +- Safety patterns: Glob patterns for files that, if also changed, mean the policy doesn't need to fire +- Instructions: What the agent should do when the policy triggers + +Example use cases: +- Update installation docs when configuration files change +- Require security review when authentication code is modified +- Ensure API documentation stays in sync with API code +- Remind developers to update changelogs + + + +## Instructions + +# Define Policy + +## Objective + +Create or update policy entries in the `.deepwork.policy.yml` file to enforce team guidelines, documentation requirements, or other constraints when specific files change. + +## Task + +Guide the user through defining a new policy by asking clarifying questions. **Do not create the policy without first understanding what they want to enforce.** + +### Step 1: Understand the Policy Purpose + +Start by asking questions to understand what the user wants to enforce: + +1. **What guideline or constraint should this policy enforce?** + - What situation triggers the need for action? + - What files or directories, when changed, should trigger this policy? + - Examples: "When config files change", "When API code changes", "When database schema changes" + +2. **What action should be taken?** + - What should the agent do when the policy triggers? + - Update documentation? Perform a security review? Update tests? + - Is there a specific file or process that needs attention? + +3. **Are there any "safety" conditions?** + - Are there files that, if also changed, mean the policy doesn't need to fire? 
+ - For example: If config changes AND install_guide.md changes, assume docs are already updated + - This prevents redundant prompts when the user has already done the right thing + +### Step 2: Define the Trigger Patterns + +Help the user define glob patterns for files that should trigger the policy: + +**Common patterns:** +- `src/**/*.py` - All Python files in src directory (recursive) +- `app/config/**/*` - All files in app/config directory +- `*.md` - All markdown files in root +- `src/api/**/*` - All files in the API directory +- `migrations/**/*.sql` - All SQL migrations + +**Pattern syntax:** +- `*` - Matches any characters within a single path segment +- `**` - Matches any characters across multiple path segments (recursive) +- `?` - Matches a single character + +### Step 3: Define Safety Patterns (Optional) + +If there are files that, when also changed, mean the policy shouldn't fire: + +**Examples:** +- Policy: "Update install guide when config changes" + - Trigger: `app/config/**/*` + - Safety: `docs/install_guide.md` (if already updated, don't prompt) + +- Policy: "Security review for auth changes" + - Trigger: `src/auth/**/*` + - Safety: `SECURITY.md`, `docs/security_review.md` + +### Step 4: Write the Instructions + +Create clear, actionable instructions for what the agent should do when the policy fires. + +**Good instructions include:** +- What to check or review +- What files might need updating +- Specific actions to take +- Quality criteria for completion + +**Example:** +``` +Configuration files have changed. Please: +1. Review docs/install_guide.md for accuracy +2. Update any installation steps that reference changed config +3. Verify environment variable documentation is current +4. Test that installation instructions still work +``` + +### Step 5: Create the Policy Entry + +Create or update `.deepwork.policy.yml` in the project root. 
+ +**File Location**: `.deepwork.policy.yml` (root of project) + +**Format**: +```yaml +- name: "[Friendly name for the policy]" + trigger: "[glob pattern]" # or array: ["pattern1", "pattern2"] + safety: "[glob pattern]" # optional, or array + instructions: | + [Multi-line instructions for the agent...] +``` + +**Alternative with instructions_file**: +```yaml +- name: "[Friendly name for the policy]" + trigger: "[glob pattern]" + safety: "[glob pattern]" + instructions_file: "path/to/instructions.md" +``` + +### Step 6: Verify the Policy + +After creating the policy: + +1. **Check the YAML syntax** - Ensure valid YAML formatting +2. **Test trigger patterns** - Verify patterns match intended files +3. **Review instructions** - Ensure they're clear and actionable +4. **Check for conflicts** - Ensure the policy doesn't conflict with existing ones + +## Example Policies + +### Update Documentation on Config Changes +```yaml +- name: "Update install guide on config changes" + trigger: "app/config/**/*" + safety: "docs/install_guide.md" + instructions: | + Configuration files have been modified. Please review docs/install_guide.md + and update it if any installation instructions need to change based on the + new configuration. +``` + +### Security Review for Auth Code +```yaml +- name: "Security review for authentication changes" + trigger: + - "src/auth/**/*" + - "src/security/**/*" + safety: + - "SECURITY.md" + - "docs/security_audit.md" + instructions: | + Authentication or security code has been changed. Please: + 1. Review for hardcoded credentials or secrets + 2. Check input validation on user inputs + 3. Verify access control logic is correct + 4. Update security documentation if needed +``` + +### API Documentation Sync +```yaml +- name: "API documentation update" + trigger: "src/api/**/*.py" + safety: "docs/api/**/*.md" + instructions: | + API code has changed. Please verify that API documentation in docs/api/ + is up to date with the code changes. 
Pay special attention to: + - New or changed endpoints + - Modified request/response schemas + - Updated authentication requirements +``` + +## Output Format + +### .deepwork.policy.yml +Create or update this file at the project root with the new policy entry. + +## Quality Criteria + +- Policy name is clear and descriptive +- Trigger patterns accurately match the intended files +- Safety patterns prevent unnecessary triggering +- Instructions are actionable and specific +- YAML is valid and properly formatted + +## Context + +Policies are evaluated automatically when you finish working on a task. The system: +1. Tracks which files you changed during the session +2. Checks if any changes match policy trigger patterns +3. Skips policies where safety patterns also matched +4. Prompts you with instructions for any triggered policies + +You can mark a policy as addressed by including `considered` in your response. This tells the system you've already handled that policy's requirements. + + +## Inputs + +### User Parameters + +Please gather the following information from the user: +- **policy_purpose**: What guideline or constraint should this policy enforce? + + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/deepwork_policy-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/deepwork_policy-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +Create the following output(s): +- `.deepwork.policy.yml` + +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps + +## Completion + +After completing this step: + +1. 
**Verify outputs**: Confirm all required files have been created + +2. **Inform the user**: + - The define command is complete + - Outputs created: .deepwork.policy.yml + - This command can be run again anytime to make further changes + +## Command Complete + +This is a standalone command that can be run anytime. The outputs are ready for use. + +Consider: +- Reviewing the outputs +- Running `deepwork sync` if job definitions were changed +- Re-running this command later if further changes are needed + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/deepwork_policy/job.yml` +- Step instructions: `.deepwork/jobs/deepwork_policy/steps/define.md` +""" \ No newline at end of file diff --git a/README.md b/README.md index 8a09646c..6dfe6011 100644 --- a/README.md +++ b/README.md @@ -4,13 +4,21 @@ DeepWork is a tool for defining and executing multi-step workflows with AI coding assistants like Claude Code, Google Gemini, and GitHub Copilot. It enables you to decompose complex tasks into manageable steps, with clear inputs, outputs, and dependencies. +## Supported Platforms + +| Platform | Status | Command Format | Hooks Support | +|----------|--------|----------------|---------------| +| **Claude Code** | Full Support | Markdown | Yes (stop_hooks, pre/post) | +| **Gemini CLI** | Full Support | TOML | No (global only) | +| GitHub Copilot | Planned | - | - | + ## Installation ### Prerequisites - Python 3.11 or higher - Git repository -- One of: Claude Code, Google Gemini, or GitHub Copilot +- One of: Claude Code or Gemini CLI ### Install DeepWork @@ -24,11 +32,26 @@ pip install -e . 
### Install in Your Project +#### Claude Code + +```bash +cd your-project/ +deepwork install --platform claude +``` + +#### Gemini CLI + ```bash cd your-project/ -deepwork install --platform claude # or gemini, copilot +deepwork install --platform gemini ``` +**Gemini CLI Notes**: +- Commands are generated as TOML files in `.gemini/commands/` +- Commands use colon (`:`) namespacing: `/job_name:step_id` +- Gemini CLI does not support command-level hooks; quality validation is embedded in prompts +- See [Gemini CLI documentation](https://geminicli.com/docs/) for more details + This will: - Create `.deepwork/` directory structure - Generate core DeepWork jobs @@ -155,15 +178,18 @@ your-project/ │ └── job_name/ │ ├── job.yml # Job metadata │ └── steps/ # Step instructions -├── .claude/ # Claude Code skills (auto-generated) -│ ├── skill-deepwork.define.md -│ ├── skill-deepwork.refine.md -│ └── skill-job_name.step_name.md -└── work/ # Work products (Git branches) - └── job-instance-date/ - └── outputs... +├── .claude/ # Claude Code commands (auto-generated) +│ └── commands/ +│ ├── deepwork_jobs.define.md +│ └── job_name.step_name.md +└── .gemini/ # Gemini CLI commands (auto-generated) + └── commands/ + └── job_name/ + └── step_name.toml ``` +**Note**: Work outputs are created on dedicated Git branches (e.g., `deepwork/job_name-instance-date`), not in a separate directory. + ## Development ### Setup Development Environment @@ -220,7 +246,8 @@ deepwork/ │ │ ├── detector.py # Platform detection │ │ └── generator.py # Skill file generation │ ├── templates/ # Jinja2 templates -│ │ └── claude/ # Claude Code templates +│ │ ├── claude/ # Claude Code templates +│ │ └── gemini/ # Gemini CLI templates │ ├── schemas/ # JSON schemas │ └── utils/ # Utilities (fs, yaml, git, validation) ├── tests/ diff --git a/claude.md b/claude.md index 85b6ef2d..75077a1e 100644 --- a/claude.md +++ b/claude.md @@ -120,7 +120,7 @@ Each step: - Suggests next step ### 4. 
Work Completion -- Review outputs in `deepwork/[branch-name]/` +- Review outputs on the work branch - Commit artifacts as you progress - Create PR for team review - Merge to preserve work products for future context @@ -136,21 +136,20 @@ my-project/ │ ├── deepwork_jobs.implement.md │ ├── deepwork_jobs.refine.md │ └── [job].[step].md -├── .deepwork/ # DeepWork configuration -│ ├── config.yml # version, platforms[] -│ └── jobs/ -│ ├── deepwork_jobs/ # Built-in job -│ │ ├── job.yml -│ │ └── steps/ -│ └── [job-name]/ -│ ├── job.yml -│ └── steps/ -│ └── [step].md -└── deepwork/ # Work products (on branches) - └── [job-name]-[instance]-[date]/ - └── [outputs].md +└── .deepwork/ # DeepWork configuration + ├── config.yml # version, platforms[] + └── jobs/ + ├── deepwork_jobs/ # Built-in job + │ ├── job.yml + │ └── steps/ + └── [job-name]/ + ├── job.yml + └── steps/ + └── [step].md ``` +**Note**: Work outputs are created on dedicated Git branches (e.g., `deepwork/job_name-instance-date`), not in a separate directory. + ## Key Files to Reference diff --git a/doc/architecture.md b/doc/architecture.md index 7383f8f9..e98ef5d1 100644 --- a/doc/architecture.md +++ b/doc/architecture.md @@ -300,15 +300,12 @@ my-project/ # User's project (target) │ └── ad_campaign/ │ └── ... ├── .deepwork.policy.yml # Policy definitions (project root) -├── deepwork/ # Work products (Git branches) -│ ├── competitive_research-acme-2026-01-11/ -│ │ └── ... -│ └── ad_campaign-q1-2026-01-11/ -│ └── ... ├── (rest of user's project files) └── README.md ``` +**Note**: Work outputs are created directly in the project on dedicated Git branches (e.g., `deepwork/competitive_research-acme-2026-01-11`). The branch naming convention is `deepwork/[job_name]-[instance]-[date]`. 
+ ## Configuration Files ### `.deepwork/config.yml` @@ -579,7 +576,7 @@ This section describes how AI agents (like Claude Code) actually execute jobs us Product category: Project Management Claude: [Performs research using web tools, analysis, etc.] - ✓ Created deepwork/competitive_research-acme-2026-01-11/competitors.md + ✓ Created competitors.md Found 8 direct competitors and 4 indirect competitors. Review the file and run /competitive_research.primary_research when ready. @@ -640,8 +637,10 @@ Since there's no DeepWork runtime process, context is passed through: ### 1. Filesystem (Primary Mechanism) +On a work branch like `deepwork/competitive_research-acme-2026-01-11`, outputs are created in the project: + ``` -deepwork/competitive_research-acme-2026-01-11/ +(project root on work branch) ├── competitors.md ← Step 1 output ├── primary_research.md ← Step 2 output ├── competitor_profiles/ ← Step 2 output @@ -656,7 +655,7 @@ deepwork/competitive_research-acme-2026-01-11/ Each command instructs Claude to: - Read specific input files from previous steps - Write specific output files for this step -- All within the same work directory +- All on the same work branch ### 2. Command Instructions @@ -676,7 +675,7 @@ Conduct web research on secondary sources for each competitor identified in comp When working on similar jobs: - User: "Do competitive research for Acme Corp, similar to our Widget Corp analysis" -- Claude can read old existing branches like`deepwork/competitive_research-widget-corp-2024-01-05/` from git history +- Claude can read old existing branches like `deepwork/competitive_research-widget-corp-2024-01-05` from git history - Uses it as a template for style, depth, format ### 4. No Environment Variables Needed @@ -930,200 +929,19 @@ tests/ ### Test Strategy #### 1. 
Unit Tests - -**Job Parser** (`test_job_parser.py`): -```python -def test_parse_valid_job_definition(): - """Verify parser correctly loads valid job.yml""" - -def test_parse_rejects_invalid_schema(): - """Ensure invalid job definitions raise ValidationError""" - -def test_parse_resolves_dependencies(): - """Check dependency graph construction""" - -def test_parse_handles_optional_steps(): - """Verify optional steps are marked correctly""" -``` - -**Registry** (`test_registry.py`): -```python -def test_add_job_to_registry(): - """Test job registration""" - -def test_detect_version_conflicts(): - """Ensure version conflicts are caught""" - -def test_list_installed_jobs(): - """Verify registry query operations""" - -def test_remove_job_from_registry(): - """Test job uninstallation""" -``` - -**Runtime Engine** (`test_runtime_engine.py`): -```python -def test_create_work_branch(): - """Verify branch creation with correct naming""" - -def test_step_initialization(): - """Check step setup and context preparation""" - -def test_validate_step_outputs(): - """Test output validation against schemas""" - -def test_handle_missing_dependencies(): - """Ensure proper error when dependencies not met""" -``` - -**Template Renderer** (`test_template_renderer.py`): -```python -def test_render_basic_template(): - """Test simple variable substitution""" - -def test_render_with_loops(): - """Verify loop constructs work correctly""" - -def test_render_with_conditionals(): - """Check conditional rendering""" - -def test_escape_special_characters(): - """Ensure proper escaping of Markdown""" -``` +Use unit tests for small pieces of functionality that don't depend on external systems. #### 2. 
Integration Tests - -**Job Import** (`test_job_import.py`): -```python -def test_import_from_github(): - """Test importing job from GitHub repository""" - # Mock git clone operation - # Verify job files copied correctly - # Check registry updated - -def test_import_local_job(): - """Test importing from local directory""" - -def test_import_with_dependencies(): - """Verify transitive job dependencies handled""" -``` - -**Workflow Execution** (`test_workflow_execution.py`): -```python -def test_single_step_execution(): - """Run a single step end-to-end""" - # Create mock AI responses - # Execute step - # Verify outputs created - -def test_multi_step_workflow(): - """Execute complete job workflow""" - # Mock all steps - # Verify context passes between steps - # Check final outputs - -def test_resume_after_interruption(): - """Test workflow can resume mid-execution""" -``` - -**Git Integration** (`test_git_integration.py`): -```python -def test_branch_management(): - """Verify branch creation, switching, cleanup""" - -def test_commit_integration(): - """Test auto-commit functionality""" - -def test_merge_detection(): - """Check detection of merged work""" -``` +Use integration tests for larger pieces of functionality that depend on external systems. #### 3. End-to-End Tests - -**Full Workflow** (`test_full_workflow.py`): -```python -@pytest.mark.e2e -def test_complete_competitive_research_job(): - """ - Full simulation of competitive research job: - 1. Install DeepWork - 2. Import job definition - 3. Start job - 4. Execute all steps with mocked AI responses - 5. Validate final outputs - 6. 
Verify Git state - """ - # Setup test repository - # Run: deepwork install --claude - # Run: deepwork import github.com/deepwork-jobs/competitive-research - # Run: /competitive_research.identify_competitors (mocked) - # Validate outputs - # Run: /competitive_research.primary_research (mocked) - # Continue through all steps - # Assert final state correct -``` - -**Multi-Platform** (`test_multi_platform.py`): -```python -@pytest.mark.e2e -def test_claude_and_gemini_compatibility(): - """Verify same job works on different platforms""" - # Install for Claude - # Execute job - # Capture outputs - # Reinstall for Gemini - # Execute same job - # Compare outputs (should be structurally similar) -``` +Use end-to-end tests to verify the entire workflow from start to finish. #### 4. Mock AI Agents +Use mock AI agents to simulate AI agent responses. -Since testing requires AI agent interactions, create mock agents: - -```python -# tests/mocks/claude_mock.py -class MockClaudeAgent: - """Simulates Claude Code agent behavior""" - - def __init__(self, response_fixtures_dir): - self.fixtures = load_fixtures(response_fixtures_dir) - - def execute_skill(self, skill_name, context): - """ - Simulates skill execution by returning pre-recorded responses - based on the skill name and context - """ - fixture_key = self._get_fixture_key(skill_name, context) - return self.fixtures[fixture_key] - - def _get_fixture_key(self, skill_name, context): - # Match to appropriate fixture based on patterns - # e.g., "competitive_research.identify_competitors" -> - # "competitive_research_identify_competitors_001.md" - ... -``` - -Fixtures stored as markdown files with expected outputs: -``` -tests/fixtures/mock_responses/ -├── competitive_research_identify_competitors_001.md -├── competitive_research_primary_research_001.md -└── ... -``` - -#### 5. 
Validation Testing - -**Schema Validation** (`test_validation.py`): -```python -def test_json_schema_validation(): - """Test validating outputs against JSON schemas""" - -def test_completeness_validation(): - """Verify required sections are present""" - -def test_custom_validation_scripts(): - """Run custom validation scripts""" -``` +#### 5. Fixtures +Use fixtures to provide test data. #### 6. Performance Testing @@ -1148,63 +966,7 @@ def full_simple_cycle(): ### CI/CD Integration -**GitHub Actions Workflow** (`.github/workflows/test.yml`): -```yaml -name: DeepWork Tests - -on: [push, pull_request] - -jobs: - test: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ["3.11", "3.12"] - - steps: - - uses: actions/checkout@v4 - - - name: Install uv - run: curl -LsSf https://astral.sh/uv/install.sh | sh - - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - run: uv sync - - - name: Run unit tests - run: uv run pytest tests/unit -v --cov=deepwork --cov-report=xml - - - name: Run integration tests - run: uv run pytest tests/integration -v - - - name: Run E2E tests - run: uv run pytest tests/e2e -v --timeout=300 - - - name: Upload coverage - uses: codecov/codecov-action@v3 - with: - file: ./coverage.xml - - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: uv tool install ruff - - run: ruff check deepwork/ - - run: ruff format --check deepwork/ - - type-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: uv tool install mypy - - run: mypy deepwork/ -``` +Github Actions are used for all CI/CD tasks. ### Test Coverage Goals @@ -1333,36 +1095,6 @@ Claude: ✓ Created policy "API documentation update" in .deepwork.policy.yml --- -## Implementation Status - -**Completed**: Phases 1 & 2 are complete. 
The core runtime, CLI, installation, and command generation systems are fully functional. - -### Current Focus: Phase 5 - Job Ecosystem -- ✅ Standard job `deepwork_jobs` with define, implement, refine commands -- 📋 Reference job library (competitive research, ad campaigns, etc.) -- 📋 Job sharing via Git repositories - -### Future Work - -**Phase 3: Runtime Engine** (Optional enhancements) -- Step execution tracking and monitoring -- Output validation system -- Advanced state management - -**Phase 4: Multi-Platform Support** -- Gemini command generation -- GitHub Copilot integration -- Multi-platform testing - -**Phase 6: Polish and Release** -- Performance optimization -- Enhanced error handling -- Comprehensive user documentation -- Tutorial content -- Beta and public release - ---- - ## Technical Decisions ### Language: Python 3.11+ @@ -1392,25 +1124,6 @@ Claude: ✓ Created policy "API documentation update" in .deepwork.policy.yml --- -## Open Questions - -1. **Job Versioning**: How do we handle breaking changes in job definitions? - - Proposal: Semantic versioning + migration scripts - -2. **Concurrent Jobs**: Should users be able to run multiple jobs simultaneously? - - Proposal: Yes, using separate Git branches - -3. **Job Composition**: Can jobs include other jobs as steps? - - Proposal: Phase 2 feature - support nested jobs - -4. **Private Jobs**: How do we support proprietary job definitions? - - Proposal: Support private Git repos + local job directories - -5. **AI Agent Abstraction**: Should we abstract AI platforms behind a common interface? - - Proposal: No - embrace platform-specific strengths via custom templates - ---- - ## Success Metrics 1. 
**Usability**: User can define and execute a new job type in <30 minutes diff --git a/doc/platforms/gemini/cli_configuration.md b/doc/platforms/gemini/cli_configuration.md new file mode 100644 index 00000000..06f54d22 --- /dev/null +++ b/doc/platforms/gemini/cli_configuration.md @@ -0,0 +1,206 @@ + + +# Gemini CLI Configuration + +## Overview + +Gemini CLI is an open-source AI agent from Google that brings Gemini directly into your terminal for coding, problem-solving, and task management. It uses a reason-and-act (ReAct) loop with built-in tools and supports Model Context Protocol (MCP) servers for extensibility. + +## Configuration Files + +Gemini CLI uses JSON-based configuration with a hierarchical precedence system. + +### File Locations + +Configuration is applied in this order (lowest to highest priority): + +| Priority | File Type | Linux/macOS | Windows | +|----------|-----------|-------------|---------| +| 1 | Default values | Hardcoded | Hardcoded | +| 2 | System defaults | `/etc/gemini-cli/system-defaults.json` | `C:\ProgramData\gemini-cli\system-defaults.json` | +| 3 | User settings | `~/.gemini/settings.json` | `~/.gemini/settings.json` | +| 4 | Project settings | `.gemini/settings.json` | `.gemini/settings.json` | +| 5 | System settings | `/etc/gemini-cli/settings.json` | `C:\ProgramData\gemini-cli\settings.json` | +| 6 | Environment variables | System or `.env` file | System or `.env` file | +| 7 | Command-line arguments | Runtime flags | Runtime flags | + +### Configuration Format + +The `settings.json` uses nested category objects. 
A JSON schema is available at: +`https://raw.githubusercontent.com/google-gemini/gemini-cli/main/schemas/settings.schema.json` + +Key configuration categories include: + +- **general**: Core behavior (vimMode, preferredEditor, checkpointing) +- **model**: Model selection, session turn limits, compression thresholds +- **context**: Context file discovery and filtering +- **tools**: Sandbox configuration, tool discovery, hooks enablement +- **mcp**: Model Context Protocol server configurations +- **security**: YOLO mode, tool approval, folder trust +- **hooks**: Hook system activation and event-specific hook arrays + +## Custom Commands/Skills + +Custom commands allow you to create personalized shortcuts for your most-used prompts. + +### Command Location + +Commands are discovered from two locations with specific precedence: + +1. **Global commands**: `~/.gemini/commands/` - Available across all projects +2. **Project commands**: `<project>/.gemini/commands/` - Project-specific, can be version-controlled + +Project commands override identically-named global commands. + +### Command File Format + +Commands use **TOML format** with `.toml` extension. + +### Metadata/Frontmatter + +Commands support two fields: + +| Field | Required | Type | Description | +|-------|----------|------|-------------| +| `prompt` | Yes | String | The prompt sent to Gemini (single or multi-line) | +| `description` | No | String | One-line description shown in `/help` menu | + +```toml +# Example: ~/.gemini/commands/refactor/pure.toml +# Invoked via: /refactor:pure + +description = "Refactors code into pure functions." + +prompt = """ +Please analyze the code I've provided in the current context. +Refactor it into a pure function. + +Your response should include: +1. The refactored, pure function code block. +2. A brief explanation of the key changes you made. +""" +``` + +### Argument Handling + +Custom commands support several methods for dynamic content: + +#### 1. 
Argument Injection with `{{args}}` + +When `{{args}}` appears in the prompt, it's replaced with user-provided text: + +```toml +prompt = "Review the following code: {{args}}" +``` + +- Outside shell blocks: Arguments inject raw as typed +- Inside `!{...}` blocks: Arguments are automatically shell-escaped + +#### 2. Shell Command Execution with `!{...}` + +Execute shell commands and inject their output: + +```toml +prompt = """ +Here are the staged changes: +!{git diff --staged} + +Please review and suggest a commit message. +""" +``` + +#### 3. File Content Injection with `@{...}` + +Embed file or directory content: + +```toml +prompt = """ +Review this configuration: +@{config/settings.json} +""" +``` + +Features: +- Supports multimodal input (images, PDFs, audio, video) +- Directory traversal respects `.gitignore` and `.geminiignore` + +## Command Discovery + +### Naming & Namespacing + +Command names derive from file paths relative to the commands directory. Subdirectories create namespaced commands using colons: + +| File Path | Command | +|-----------|---------| +| `~/.gemini/commands/test.toml` | `/test` | +| `<project>/.gemini/commands/git/commit.toml` | `/git:commit` | +| `<project>/.gemini/commands/review/pr.toml` | `/review:pr` | + +### Discovery Order + +1. User global commands (`~/.gemini/commands/`) +2. Project commands (`<project>/.gemini/commands/`) + +Project commands take precedence over global commands with the same name. + +## Context Files (GEMINI.md) + +Context files provide persistent instructions to the model: + +### Loading Hierarchy + +1. **Global context**: `~/.gemini/GEMINI.md` - Instructions for all projects +2. **Project root & ancestors**: Searches from current directory up to `.git` or home +3. **Subdirectories**: Scans below current working directory (limit: 200 dirs) + +The CLI concatenates all discovered files with origin separators. Use `/memory refresh` to reload and `/memory show` to inspect. 
+ +### Modular Imports + +Supports `@path/to/file.md` syntax for including other files. + +## Platform-Specific Features + +### Shell History + +Per-project shell history stored at: `~/.gemini/tmp/<project_hash>/shell_history` + +### Sandboxing + +Enable via: +- `--sandbox` flag +- `GEMINI_SANDBOX` environment variable +- `tools.sandbox` setting in settings.json + +Custom Dockerfile support at `.gemini/sandbox.Dockerfile`. + +### MCP Server Integration + +Configure MCP servers in settings.json: + +```json +{ + "mcpServers": { + "my-server": { + "command": "npx", + "args": ["-y", "@my-org/mcp-server"], + "includeTools": ["tool1", "tool2"] + } + } +} +``` + +## Key Differences from Claude Code + +| Feature | Gemini CLI | Claude Code | +|---------|------------|-------------| +| Command format | TOML | Markdown | +| Command directory | `.gemini/commands/` | `.claude/commands/` | +| Context file | `GEMINI.md` | `CLAUDE.md` | +| Config format | JSON (`settings.json`) | JSON/YAML | +| Namespacing | Colon (`:`) | Dot (`.`) | diff --git a/doc/platforms/gemini/hooks_system.md b/doc/platforms/gemini/hooks_system.md new file mode 100644 index 00000000..9c40649f --- /dev/null +++ b/doc/platforms/gemini/hooks_system.md @@ -0,0 +1,183 @@ + + +# Gemini CLI Hooks System (Command Definitions) + +## Overview + +**Important**: Gemini CLI does **NOT** support hooks within slash command definitions. Unlike Claude Code's `stop_hooks` that can be defined per-command in markdown frontmatter, Gemini CLI's hooks are configured globally or at the project level in `settings.json`. + +This document describes the hooks system as it relates to custom commands, and clarifies what is and isn't available for command-level customization. 
+ +## Custom Command Hooks - NOT SUPPORTED + +Gemini CLI custom commands (defined in `.toml` files) only support two fields: + +- `prompt` (required): The prompt text +- `description` (optional): Description shown in help + +There are **no hook fields** available in the TOML command definition format: +- No `pre_hooks` or `before_hooks` +- No `post_hooks` or `after_hooks` +- No `stop_hooks` or validation hooks +- No `on_complete` or lifecycle callbacks + +## Global Hooks System (For Reference) + +While not applicable to individual command definitions, Gemini CLI does have a global hooks system that triggers at various points in the agent lifecycle. + +### Available Hook Events + +| Event | Trigger Point | +|-------|---------------| +| `SessionStart` | Session begins | +| `SessionEnd` | Session closes | +| `BeforeAgent` | Post-prompt, pre-planning | +| `AfterAgent` | Agent loop completion | +| `BeforeModel` | Before LLM request | +| `AfterModel` | After LLM response | +| `BeforeToolSelection` | Pre-tool selection | +| `BeforeTool` | Before tool execution | +| `AfterTool` | After tool execution | +| `PreCompress` | Before context compression | +| `Notification` | Permission/notification events | + +### Hook Configuration (Global/Project Level) + +Hooks are defined in `settings.json`, not in command files: + +```json +{ + "hooks": { + "enabled": true, + "BeforeTool": [ + { + "matcher": "write_file|replace", + "hooks": [ + { + "name": "security-check", + "type": "command", + "command": "./hooks/validate.sh", + "timeout": 30000 + } + ] + } + ], + "AfterTool": [ + { + "matcher": "FileEdit", + "hooks": [ + { + "name": "format-code", + "type": "command", + "command": "./hooks/prettier.sh" + } + ] + } + ] + } +} +``` + +### Hook Input/Output Contract + +- **Communication**: JSON via stdin; exit codes + stdout/stderr responses +- **Exit Codes**: + - `0` = Success (output shown/injected) + - `2` = Blocking error (stderr shown, operation may be blocked) + - Other = 
Non-blocking warning (logged, continues) + +## Workarounds for Command-Level Hooks + +Since per-command hooks aren't supported, here are alternative approaches: + +### 1. Shell Command Injection + +Use `!{...}` in the prompt to execute validation/setup commands: + +```toml +prompt = """ +!{./scripts/pre-check.sh} + +Now proceed with the task... +""" +``` + +**Limitation**: This runs at prompt expansion time, not as a hook with control flow. + +### 2. Global Hooks with Matchers + +Configure global hooks that pattern-match on specific conditions: + +```json +{ + "hooks": { + "AfterAgent": [ + { + "hooks": [ + { + "name": "run-tests", + "type": "command", + "command": "./scripts/run-tests.sh" + } + ] + } + ] + } +} +``` + +### 3. Prompt-Based Validation + +Include validation instructions directly in the prompt: + +```toml +prompt = """ +Before completing this task, ensure: +1. All tests pass (run: npm test) +2. No linting errors (run: npm run lint) + +Only mark the task complete if all checks pass. +""" +``` + +## Comparison with Other Platforms + +| Feature | Gemini CLI | Claude Code | +|---------|------------|-------------| +| Command-level hooks | No | Yes (`stop_hooks` in frontmatter) | +| Global hooks | Yes (settings.json) | Yes (CLAUDE.md hooks) | +| Hook types | `command` only | `prompt`, `script` | +| Hook events | 11 events | 1 event (`stop`) | +| Per-command customization | None | Full | + +## Implications for DeepWork + +Since Gemini CLI doesn't support command-level hooks: + +1. **`stop_hooks` cannot be implemented** per-command as they are in Claude Code +2. **Quality validation loops** would need to be: + - Embedded in the prompt instructions + - Handled by global AfterAgent hooks + - Managed through explicit user confirmation +3. **Platform adapter** should set hook-related fields to `None`/`null` + +## Limitations + +1. **No command-level lifecycle hooks**: All hooks are global/project-scoped +2. 
**No hook filtering by command**: Cannot trigger hooks only for specific slash commands +3. **Experimental status**: The entire hooks system is marked as experimental +4. **Command type only**: No plugin/npm-based hooks yet (planned for future) + +## Future Considerations + +Based on GitHub issues, there are proposals for: +- Extension-level hooks (`hooks/hooks.json` convention) +- More granular hook matchers +- Plugin-based hook types + +Monitor the Gemini CLI repository for updates to the hooks system that might enable command-level hooks in the future. diff --git a/src/deepwork/cli/sync.py b/src/deepwork/cli/sync.py index f5e46751..e127e49b 100644 --- a/src/deepwork/cli/sync.py +++ b/src/deepwork/cli/sync.py @@ -91,6 +91,7 @@ def sync_commands(project_path: Path) -> None: # Parse all jobs jobs = [] + failed_jobs: list[tuple[str, str]] = [] for job_dir in job_dirs: try: job_def = parse_job_definition(job_dir) @@ -98,6 +99,15 @@ def sync_commands(project_path: Path) -> None: console.print(f" [green]✓[/green] Loaded {job_def.name} v{job_def.version}") except Exception as e: console.print(f" [red]✗[/red] Failed to load {job_dir.name}: {e}") + failed_jobs.append((job_dir.name, str(e))) + + # Fail early if any jobs failed to parse + if failed_jobs: + console.print() + console.print("[bold red]Sync aborted due to job parsing errors:[/bold red]") + for job_name, error in failed_jobs: + console.print(f" • {job_name}: {error}") + raise SyncError(f"Failed to parse {len(failed_jobs)} job(s)") # Collect hooks from all jobs job_hooks_list = collect_job_hooks(jobs_dir) diff --git a/src/deepwork/core/adapters.py b/src/deepwork/core/adapters.py index 700010b6..1ddcb537 100644 --- a/src/deepwork/core/adapters.py +++ b/src/deepwork/core/adapters.py @@ -230,6 +230,22 @@ def _hook_already_present(hooks: list[dict[str, Any]], script_path: str) -> bool return False +# ============================================================================= +# Platform Adapters +# 
============================================================================= +# +# Each adapter must define hook_name_mapping to indicate which hooks it supports. +# Use an empty dict {} for platforms that don't support command-level hooks. +# +# Hook support reviewed: +# - Claude Code: Full support (Stop, PreToolUse, UserPromptSubmit) - 2025-01 +# - Gemini CLI: No command-level hooks (reviewed 2026-01-12) +# Gemini's hooks are global/project-level in settings.json, not per-command. +# TOML command files only support 'prompt' and 'description' fields. +# See: doc/platforms/gemini/hooks_system.md +# ============================================================================= + + class ClaudeAdapter(AgentAdapter): """Adapter for Claude Code.""" @@ -297,3 +313,61 @@ def sync_hooks(self, project_path: Path, hooks: dict[str, list[dict[str, Any]]]) # Count total hooks total = sum(len(hooks_list) for hooks_list in hooks.values()) return total + + +class GeminiAdapter(AgentAdapter): + """Adapter for Gemini CLI. + + Gemini CLI uses TOML format for custom commands stored in .gemini/commands/. + Commands use colon (:) for namespacing instead of dot (.). + + Note: Gemini CLI does NOT support command-level hooks. Hooks are configured + globally in settings.json, not per-command. Therefore, hook_name_mapping + is empty and sync_hooks returns 0. + + See: doc/platforms/gemini/hooks_system.md + """ + + name = "gemini" + display_name = "Gemini CLI" + config_dir = ".gemini" + command_template = "command-job-step.toml.jinja" + + # Gemini CLI does NOT support command-level hooks + # Hooks are global/project-level in settings.json, not per-command + hook_name_mapping: ClassVar[dict[CommandLifecycleHook, str]] = {} + + def get_command_filename(self, job_name: str, step_id: str) -> str: + """ + Get the filename for a Gemini command. + + Gemini uses TOML files and colon namespacing via subdirectories. 
+ For job "my_job" and step "step_one", creates: my_job/step_one.toml + + Args: + job_name: Name of the job + step_id: ID of the step + + Returns: + Command filename path (e.g., "my_job/step_one.toml") + """ + return f"{job_name}/{step_id}.toml" + + def sync_hooks(self, project_path: Path, hooks: dict[str, list[dict[str, Any]]]) -> int: + """ + Sync hooks to Gemini CLI settings. + + Gemini CLI does not support command-level hooks. All hooks are + configured globally in settings.json. This method is a no-op + that always returns 0. + + Args: + project_path: Path to project root + hooks: Dict mapping lifecycle events to hook configurations (ignored) + + Returns: + 0 (Gemini does not support command-level hooks) + """ + # Gemini CLI does not support command-level hooks + # Hooks are configured globally in settings.json, not per-command + return 0 diff --git a/src/deepwork/schemas/job_schema.py b/src/deepwork/schemas/job_schema.py index a55cbb71..4127abba 100644 --- a/src/deepwork/schemas/job_schema.py +++ b/src/deepwork/schemas/job_schema.py @@ -161,7 +161,6 @@ }, "outputs": { "type": "array", - "minItems": 1, "description": "List of output files/directories", "items": { "type": "string", diff --git a/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md b/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md index bb1165dd..aceb3677 100644 --- a/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md +++ b/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md @@ -241,9 +241,9 @@ User: I need to specify the market segment and product category as inputs. The o Claude: Perfect. A few more questions about that output: 1. What should the file be called? Something like `competitors_list.md`? -2. Should it go directly in the deepwork/competitive_research/ folder, or in a subdirectory like `research/` or `data/`? +2. Should it go in the project root, or in a subdirectory like `research/` or `data/`? -User: Just call it `competitors_list.md` in the main folder. 
+User: Just call it `competitors_list.md` in the project root. Claude: Great. Now for the "Research each one" step: 1. Does it use the competitor list from the first step as input? diff --git a/src/deepwork/standard_jobs/deepwork_jobs/steps/implement.md b/src/deepwork/standard_jobs/deepwork_jobs/steps/implement.md index b87e8c5a..bb100a3c 100644 --- a/src/deepwork/standard_jobs/deepwork_jobs/steps/implement.md +++ b/src/deepwork/standard_jobs/deepwork_jobs/steps/implement.md @@ -34,7 +34,7 @@ mkdir -p .deepwork/jobs/[job_name]/steps ``` Files to create: -- `.deepwork/jobs/[job_name]/job.yml` - Copy from work directory +- `.deepwork/jobs/[job_name]/job.yml` - `.deepwork/jobs/[job_name]/steps/[step_id].md` - One for each step ### Step 3: Generate Step Instruction Files @@ -219,7 +219,7 @@ your strategy documentation. I'd suggest a policy like: - **Name**: "Update strategy when competitor analysis changes" -- **Trigger**: `deepwork/competitive_research-*/report.md` +- **Trigger**: `**/positioning_report.md` - **Action**: Prompt to review and update `docs/strategy.md` Would you like me to create this policy? I can run `/deepwork_policy.define` to set it up. diff --git a/src/deepwork/templates/claude/command-job-step.md.jinja b/src/deepwork/templates/claude/command-job-step.md.jinja index 73426ba4..c7e6853e 100644 --- a/src/deepwork/templates/claude/command-job-step.md.jinja +++ b/src/deepwork/templates/claude/command-job-step.md.jinja @@ -87,7 +87,6 @@ Please gather the following information from the user: This step requires the following files from previous steps: {% for input in file_inputs %} - `{{ input.file }}` (from step `{{ input.from_step }}`) - Location: `deepwork/{{ job_name }}/{{ input.file }}` {% endfor %} Make sure to read and use these files as context for this step. @@ -108,21 +107,20 @@ All work for this job should be done on a dedicated work branch: ``` Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) -3. 
**All outputs go in the work directory**: - - Create files in: `deepwork/{{ job_name }}/` - - This keeps work products organized by job - ## Output Requirements -Create the following output(s) in the work directory: +{% if outputs %} +Create the following output(s): {% for output in outputs %} -- `deepwork/{{ job_name }}/{{ output }}`{% if output.endswith('/') %} (directory){% endif %} +- `{{ output }}`{% if output.endswith('/') %} (directory){% endif %} {% endfor %} Ensure all outputs are: - Well-formatted and complete -- Committed to the work branch - Ready for review or use by subsequent steps +{% else %} +No specific files are output by this command. +{% endif %} {% if stop_hooks %} ## Quality Validation Loop @@ -156,22 +154,20 @@ To signal that all quality criteria have been met, include this tag in your fina After completing this step: -1. **Commit your work**: - ```bash - git add deepwork/{{ job_name }}/ - git commit -m "{{ job_name }}: Complete {{ step_id }} step" - ``` - -2. **Verify outputs**: Confirm all required files have been created +1. **Verify outputs**: Confirm all required files have been created -3. **Inform the user**: +2. **Inform the user**: {% if is_standalone %} - The {{ step_id }} command is complete +{% if outputs %} - Outputs created: {{ outputs | join(', ') }} +{% endif %} - This command can be run again anytime to make further changes {% else %} - Step {{ step_number }} of {{ total_steps }} is complete +{% if outputs %} - Outputs created: {{ outputs | join(', ') }} +{% endif %} {% if next_step %} - Ready to proceed to next step: `/{{ job_name }}.{{ next_step }}` {% else %} @@ -185,7 +181,7 @@ After completing this step: This is a standalone command that can be run anytime. The outputs are ready for use. 
Consider: -- Reviewing the outputs in `deepwork/{{ job_name }}/` +- Reviewing the outputs - Running `deepwork sync` if job definitions were changed - Re-running this command later if further changes are needed {% elif next_step %} @@ -201,7 +197,7 @@ To continue the workflow, run: This is the final step in the {{ job_name }} workflow. All outputs should now be complete and ready for review. Consider: -- Reviewing all work products in `deepwork/{{ job_name }}/` +- Reviewing all work products - Creating a pull request to merge the work branch - Documenting any insights or learnings {% endif %} diff --git a/src/deepwork/templates/gemini/command-job-step.toml.jinja b/src/deepwork/templates/gemini/command-job-step.toml.jinja new file mode 100644 index 00000000..bb5d69be --- /dev/null +++ b/src/deepwork/templates/gemini/command-job-step.toml.jinja @@ -0,0 +1,169 @@ +# {{ job_name }}:{{ step_id }} +# +# {{ step_description }} +# +# Generated by DeepWork - do not edit manually + +description = "{{ step_description | replace('"', '\\"') }}" + +prompt = """ +# {{ job_name }}:{{ step_id }} + +{% if is_standalone %} +**Standalone command** in the **{{ job_name }}** job - can be run anytime +{% else %} +**Step {{ step_number }} of {{ total_steps }}** in the **{{ job_name }}** workflow +{% endif %} + +**Summary**: {{ job_summary }} + +{% if job_description %} +## Job Overview + +{{ job_description }} +{% endif %} + +{% if dependencies %} +## Prerequisites + +This step requires completion of the following step(s): +{% for dep in dependencies %} +- `/{{ job_name }}:{{ dep }}` +{% endfor %} + +Please ensure these steps have been completed before proceeding. 
+{% endif %} + +## Instructions + +{{ instructions_content }} + +{% if user_inputs or file_inputs %} +## Inputs + +{% if user_inputs %} +### User Parameters + +Please gather the following information from the user: +{% for input in user_inputs %} +- **{{ input.name }}**: {{ input.description }} +{% endfor %} +{% endif %} + +{% if file_inputs %} +### Required Files + +This step requires the following files from previous steps: +{% for input in file_inputs %} +- `{{ input.file }}` (from step `{{ input.from_step }}`) +{% endfor %} + +Make sure to read and use these files as context for this step. +{% endif %} +{% endif %} + +## Work Branch Management + +All work for this job should be done on a dedicated work branch: + +1. **Check current branch**: + - If already on a work branch for this job (format: `deepwork/{{ job_name }}-[instance]-[date]`), continue using it + - If on main/master, create a new work branch + +2. **Create work branch** (if needed): + ```bash + git checkout -b deepwork/{{ job_name }}-[instance]-$(date +%Y%m%d) + ``` + Replace `[instance]` with a descriptive identifier (e.g., `acme`, `q1-launch`, etc.) + +## Output Requirements + +{% if outputs %} +Create the following output(s): +{% for output in outputs %} +- `{{ output }}`{% if output.endswith('/') %} (directory){% endif %} + +{% endfor %} + +Ensure all outputs are: +- Well-formatted and complete +- Ready for review or use by subsequent steps +{% else %} +No specific files are output by this command. +{% endif %} + +{% if stop_hooks %} +## Quality Validation + +This step has quality criteria that should be verified before completion. + +{% for hook in stop_hooks %} +{% if hook.type != "script" %} +### Quality Criteria{% if stop_hooks | length > 1 %} ({{ loop.index }}){% endif %} + +{{ hook.content }} +{% endif %} +{% endfor %} + +**Note**: Gemini CLI does not support automated validation hooks. Please manually verify the criteria above before proceeding. 
+ +{% endif %} +## Completion + +After completing this step: + +1. **Verify outputs**: Confirm all required files have been created + +2. **Inform the user**: +{% if is_standalone %} + - The {{ step_id }} command is complete +{% if outputs %} + - Outputs created: {{ outputs | join(', ') }} +{% endif %} + - This command can be run again anytime to make further changes +{% else %} + - Step {{ step_number }} of {{ total_steps }} is complete +{% if outputs %} + - Outputs created: {{ outputs | join(', ') }} +{% endif %} + {% if next_step %} + - Ready to proceed to next step: `/{{ job_name }}:{{ next_step }}` + {% else %} + - This is the final step - the job is complete! + {% endif %} +{% endif %} + +{% if is_standalone %} +## Command Complete + +This is a standalone command that can be run anytime. The outputs are ready for use. + +Consider: +- Reviewing the outputs +- Running `deepwork sync` if job definitions were changed +- Re-running this command later if further changes are needed +{% elif next_step %} +## Next Step + +To continue the workflow, run: +``` +/{{ job_name }}:{{ next_step }} +``` +{% else %} +## Workflow Complete + +This is the final step in the {{ job_name }} workflow. All outputs should now be complete and ready for review. 
+ +Consider: +- Reviewing all work products +- Creating a pull request to merge the work branch +- Documenting any insights or learnings +{% endif %} + +--- + +## Context Files + +- Job definition: `.deepwork/jobs/{{ job_name }}/job.yml` +- Step instructions: `.deepwork/jobs/{{ job_name }}/{{ instructions_file }}` +""" diff --git a/tests/unit/test_adapters.py b/tests/unit/test_adapters.py index b0717897..a1d06f32 100644 --- a/tests/unit/test_adapters.py +++ b/tests/unit/test_adapters.py @@ -9,6 +9,8 @@ AdapterError, AgentAdapter, ClaudeAdapter, + CommandLifecycleHook, + GeminiAdapter, ) @@ -21,10 +23,13 @@ def test_get_all_returns_registered_adapters(self) -> None: assert "claude" in adapters assert adapters["claude"] is ClaudeAdapter + assert "gemini" in adapters + assert adapters["gemini"] is GeminiAdapter def test_get_returns_correct_adapter(self) -> None: """Test that get returns the correct adapter class.""" assert AgentAdapter.get("claude") is ClaudeAdapter + assert AgentAdapter.get("gemini") is GeminiAdapter def test_get_raises_for_unknown_adapter(self) -> None: """Test that get raises AdapterError for unknown adapter.""" @@ -36,7 +41,8 @@ def test_list_names_returns_all_names(self) -> None: names = AgentAdapter.list_names() assert "claude" in names - assert len(names) >= 1 # At least claude + assert "gemini" in names + assert len(names) >= 2 # At least claude and gemini class TestClaudeAdapter: @@ -163,3 +169,148 @@ def test_sync_hooks_empty_hooks_returns_zero(self, temp_dir: Path) -> None: count = adapter.sync_hooks(temp_dir, {}) assert count == 0 + + +class TestGeminiAdapter: + """Tests for GeminiAdapter.""" + + def test_class_attributes(self) -> None: + """Test Gemini adapter class attributes.""" + assert GeminiAdapter.name == "gemini" + assert GeminiAdapter.display_name == "Gemini CLI" + assert GeminiAdapter.config_dir == ".gemini" + assert GeminiAdapter.commands_dir == "commands" + assert GeminiAdapter.command_template == "command-job-step.toml.jinja" 
+ + def test_init_with_project_root(self, temp_dir: Path) -> None: + """Test initialization with project root.""" + adapter = GeminiAdapter(temp_dir) + + assert adapter.project_root == temp_dir + + def test_init_without_project_root(self) -> None: + """Test initialization without project root.""" + adapter = GeminiAdapter() + + assert adapter.project_root is None + + def test_detect_when_present(self, temp_dir: Path) -> None: + """Test detect when .gemini directory exists.""" + (temp_dir / ".gemini").mkdir() + adapter = GeminiAdapter(temp_dir) + + assert adapter.detect() is True + + def test_detect_when_absent(self, temp_dir: Path) -> None: + """Test detect when .gemini directory doesn't exist.""" + adapter = GeminiAdapter(temp_dir) + + assert adapter.detect() is False + + def test_detect_with_explicit_project_root(self, temp_dir: Path) -> None: + """Test detect with explicit project root parameter.""" + (temp_dir / ".gemini").mkdir() + adapter = GeminiAdapter() + + assert adapter.detect(temp_dir) is True + + def test_get_template_dir(self, temp_dir: Path) -> None: + """Test get_template_dir.""" + adapter = GeminiAdapter() + templates_root = temp_dir / "templates" + + result = adapter.get_template_dir(templates_root) + + assert result == templates_root / "gemini" + + def test_get_commands_dir(self, temp_dir: Path) -> None: + """Test get_commands_dir.""" + adapter = GeminiAdapter(temp_dir) + + result = adapter.get_commands_dir() + + assert result == temp_dir / ".gemini" / "commands" + + def test_get_commands_dir_with_explicit_root(self, temp_dir: Path) -> None: + """Test get_commands_dir with explicit project root.""" + adapter = GeminiAdapter() + + result = adapter.get_commands_dir(temp_dir) + + assert result == temp_dir / ".gemini" / "commands" + + def test_get_commands_dir_raises_without_root(self) -> None: + """Test get_commands_dir raises when no project root specified.""" + adapter = GeminiAdapter() + + with pytest.raises(AdapterError, match="No project root 
specified"): + adapter.get_commands_dir() + + def test_get_command_filename(self) -> None: + """Test get_command_filename returns TOML with subdirectory.""" + adapter = GeminiAdapter() + + result = adapter.get_command_filename("my_job", "step_one") + + # Gemini uses subdirectories for namespacing (colon becomes path) + assert result == "my_job/step_one.toml" + + def test_get_command_filename_with_underscores(self) -> None: + """Test get_command_filename with underscores in names.""" + adapter = GeminiAdapter() + + result = adapter.get_command_filename("competitive_research", "identify_competitors") + + assert result == "competitive_research/identify_competitors.toml" + + def test_hook_name_mapping_is_empty(self) -> None: + """Test that Gemini has no command-level hooks.""" + assert GeminiAdapter.hook_name_mapping == {} + + def test_supports_hook_returns_false_for_all_hooks(self) -> None: + """Test that Gemini doesn't support any command-level hooks.""" + adapter = GeminiAdapter() + + for hook in CommandLifecycleHook: + assert adapter.supports_hook(hook) is False + + def test_get_platform_hook_name_returns_none(self) -> None: + """Test that get_platform_hook_name returns None for all hooks.""" + adapter = GeminiAdapter() + + for hook in CommandLifecycleHook: + assert adapter.get_platform_hook_name(hook) is None + + def test_sync_hooks_returns_zero(self, temp_dir: Path) -> None: + """Test sync_hooks always returns 0 (no hook support).""" + (temp_dir / ".gemini").mkdir() + adapter = GeminiAdapter(temp_dir) + hooks = { + "SomeEvent": [{"matcher": "", "hooks": [{"type": "command", "command": "test.sh"}]}] + } + + count = adapter.sync_hooks(temp_dir, hooks) + + assert count == 0 + + def test_sync_hooks_empty_hooks_returns_zero(self, temp_dir: Path) -> None: + """Test sync_hooks returns 0 for empty hooks.""" + adapter = GeminiAdapter(temp_dir) + + count = adapter.sync_hooks(temp_dir, {}) + + assert count == 0 + + def test_sync_hooks_does_not_create_settings_file(self, 
temp_dir: Path) -> None: + """Test that sync_hooks doesn't create settings.json (unlike Claude).""" + gemini_dir = temp_dir / ".gemini" + gemini_dir.mkdir() + adapter = GeminiAdapter(temp_dir) + hooks = { + "AfterAgent": [{"matcher": "", "hooks": [{"type": "command", "command": "test.sh"}]}] + } + + adapter.sync_hooks(temp_dir, hooks) + + settings_file = gemini_dir / "settings.json" + assert not settings_file.exists()