diff --git a/.claude/settings.json b/.claude/settings.json
index c6158219..6bc2c067 100644
--- a/.claude/settings.json
+++ b/.claude/settings.json
@@ -99,7 +99,6 @@
"Edit(./.deepwork/**)",
"Write(./.deepwork/**)",
"Bash(deepwork:*)",
- "Bash(./.deepwork/jobs/deepwork_jobs/make_new_job.sh:*)",
"WebSearch",
"Skill(deepwork)",
"mcp__deepwork__get_workflows",
diff --git a/.deepwork/config.yml b/.deepwork/config.yml
index 9de79eea..30c250ec 100644
--- a/.deepwork/config.yml
+++ b/.deepwork/config.yml
@@ -1,4 +1,4 @@
-version: 0.1.0
+version: '1.0'
platforms:
- claude
- gemini
diff --git a/.deepwork/doc_specs/job_spec.md b/.deepwork/doc_specs/job_spec.md
index 23fd9fc7..9bbd795f 100644
--- a/.deepwork/doc_specs/job_spec.md
+++ b/.deepwork/doc_specs/job_spec.md
@@ -12,10 +12,8 @@ quality_criteria:
description: "Version must follow semantic versioning format X.Y.Z (e.g., `1.0.0`, `2.1.3`)"
- name: Concise Summary
description: "Summary must be under 200 characters and clearly describe what the job accomplishes"
- - name: Rich Description
- description: "Description must be multi-line and explain: the problem solved, the process, expected outcomes, and target users"
- - name: Changelog Present
- description: "Must include a changelog array with at least the initial version entry. Changelog should only include one entry per branch at most"
+ - name: Common Job Info
+ description: "common_job_info_provided_to_all_steps_at_runtime must be present and provide shared context for all steps"
- name: Complete Steps
description: "Each step must have: id (lowercase_underscores), name, description, instructions_file, outputs (at least one), and dependencies array"
- name: Valid Dependencies
@@ -40,19 +38,9 @@ A `job.yml` file defines a complete multi-step workflow that AI agents can execu
name: job_name # lowercase, underscores only
version: "1.0.0" # semantic versioning
summary: "Brief description" # max 200 characters
-description: | # detailed multi-line explanation
- [Explain what this workflow does, why it exists,
- what outputs it produces, and who should use it]
-```
-
-### Changelog
-
-```yaml
-changelog:
- - version: "1.0.0"
- changes: "Initial job creation"
- - version: "1.1.0"
- changes: "Added quality validation hooks"
+common_job_info_provided_to_all_steps_at_runtime: |
+ [Common context shared across all steps at runtime.
+ Include key terminology, constraints, and shared knowledge.]
```
### Steps Array
@@ -128,21 +116,11 @@ steps:
name: competitive_research
version: "1.0.0"
summary: "Systematic competitive analysis workflow"
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
A comprehensive workflow for analyzing competitors in your market segment.
Helps product teams understand the competitive landscape through systematic
identification, research, comparison, and positioning recommendations.
- Produces:
- - Vetted competitor list
- - Research notes per competitor
- - Comparison matrix
- - Strategic positioning report
-
-changelog:
- - version: "1.0.0"
- changes: "Initial job creation"
-
steps:
- id: identify_competitors
name: "Identify Competitors"
diff --git a/.deepwork/jobs/test_job_flow/job.yml b/.deepwork/jobs/test_job_flow/job.yml
index 46eee8e8..c599ae43 100644
--- a/.deepwork/jobs/test_job_flow/job.yml
+++ b/.deepwork/jobs/test_job_flow/job.yml
@@ -1,7 +1,7 @@
name: test_job_flow
-version: "1.0.1"
+version: "1.0.2"
summary: "End-to-end test of the DeepWork job creation workflow with friction analysis"
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
A meta-workflow that tests the DeepWork job creation process itself. This job:
1. Creates a new job ("detailed_test_review") via a nested sub-agent workflow,
@@ -14,16 +14,6 @@ description: |
This is a diagnostic/improvement workflow for the DeepWork framework. The final
output is a set of actionable recommendations for reducing job creation friction.
- The "detailed_test_review" job created in step 1 has two steps:
- - Run all tests with coverage reporting, with per-file and per-step quality reviews
- - Update the README with coverage numbers and an as-of date
-
-changelog:
- - version: "1.0.1"
- changes: "Updated create_test_review_job instructions: added explicit `required` field to all output examples to prevent schema validation failures. Added description fields to YAML example outputs."
- - version: "1.0.0"
- changes: "Initial job creation"
-
workflows:
- name: run
summary: "Create a test job via sub-agent, review the process, and identify improvements"
diff --git a/.deepwork/jobs/test_job_flow/steps/create_test_review_job.md b/.deepwork/jobs/test_job_flow/steps/create_test_review_job.md
index 2c62b39b..db797be0 100644
--- a/.deepwork/jobs/test_job_flow/steps/create_test_review_job.md
+++ b/.deepwork/jobs/test_job_flow/steps/create_test_review_job.md
@@ -107,17 +107,6 @@ steps:
"Date Included": "..."
```
-## Quality Criteria
-
-- The nested workflow ran to completion (all steps finished)
-- The `detailed_test_review` job.yml exists and is valid YAML
-- It defines exactly two steps: `run_tests` and `update_readme`
-- `run_tests` has both `test_files` (files) and `coverage_report` (file) outputs
-- `run_tests` has a for_each file review on `test_files` and a for_each step review for coverage
-- `update_readme` takes `coverage_report` as input from `run_tests`
-- `update_readme` produces a `readme` output
-- When all criteria are met, include `Quality Criteria Met` in your response
-
## Context
This step is the core exercise of the test_job_flow. By running the full job creation workflow as a nested sub-agent, we can observe the entire process end-to-end and identify any friction points. The transcript from this step will be reviewed in the next step.
diff --git a/.deepwork/jobs/test_job_flow/steps/identify_improvements.md b/.deepwork/jobs/test_job_flow/steps/identify_improvements.md
index 51ec39b0..cabb6c52 100644
--- a/.deepwork/jobs/test_job_flow/steps/identify_improvements.md
+++ b/.deepwork/jobs/test_job_flow/steps/identify_improvements.md
@@ -76,15 +76,6 @@ A markdown file at `.deepwork/tmp/improvement_recommendations.md`.
[Suggested sequence for implementing the recommendations, noting dependencies]
```
-## Quality Criteria
-
-- Every recommendation maps to a specific friction point from the friction report
-- Recommendations point to specific files and code paths (not vague suggestions)
-- Each recommendation includes effort and risk assessment
-- Recommendations are technically feasible given the DeepWork architecture
-- Prioritization is logical (quick wins first, high-impact items ranked higher)
-- When all criteria are met, include `Quality Criteria Met` in your response
-
## Context
This is the final step of the test_job_flow. Its output is a decision document for the user — they will review these recommendations and decide which ones to implement. The quality of this output determines whether the entire test_job_flow exercise produces actionable value. Be thorough but practical; the user wants recommendations they can act on, not a theoretical analysis.
diff --git a/.deepwork/jobs/test_job_flow/steps/review_creation_process.md b/.deepwork/jobs/test_job_flow/steps/review_creation_process.md
index 96c0d057..26da64f5 100644
--- a/.deepwork/jobs/test_job_flow/steps/review_creation_process.md
+++ b/.deepwork/jobs/test_job_flow/steps/review_creation_process.md
@@ -77,15 +77,6 @@ A markdown file at `.deepwork/tmp/job_creation_friction.md`.
[Was the process smooth enough for production use? What's the biggest single improvement that could be made?]
```
-## Quality Criteria
-
-- The friction report references specific events from the sub-agent's transcript (not vague generalities)
-- Each friction point is described concretely enough that a developer could reproduce and fix it
-- The completion status section accurately reflects what happened
-- Both problems AND successes are documented (balanced view)
-- The overall assessment provides a clear priority for improvement
-- When all criteria are met, include `Quality Criteria Met` in your response
-
## Context
This step bridges observation and action. The friction points documented here will be the input to step 3, where we investigate the actual code to find improvements. The more specific and concrete the friction descriptions, the more targeted the improvements can be.
diff --git a/.deepwork/jobs/update_job_schema/AGENTS.md b/.deepwork/jobs/update_job_schema/AGENTS.md
new file mode 100644
index 00000000..7feb4a29
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/AGENTS.md
@@ -0,0 +1,32 @@
+# Job Management
+
+This folder and its subfolders are managed using `deepwork_jobs` workflows.
+
+## Recommended Workflows
+
+- `deepwork_jobs/new_job` - Full lifecycle: define → implement → test → iterate
+- `deepwork_jobs/learn` - Improve instructions based on execution learnings
+- `deepwork_jobs/repair` - Clean up and migrate from prior DeepWork versions
+
+## Directory Structure
+
+```
+.
+├── AGENTS.md # This file - project context and guidance
+├── job.yml # Job specification (created by define step)
+├── steps/ # Step instruction files (created by implement step)
+│ └── *.md # One file per step
+├── hooks/ # Custom validation scripts and prompts
+│ └── *.md|*.sh # Hook files referenced in job.yml
+├── scripts/ # Reusable scripts and utilities created during job execution
+│ └── *.sh|*.py # Helper scripts referenced in step instructions
+└── templates/ # Example file formats and templates
+ └── *.md|*.yml # Templates referenced in step instructions
+```
+
+## Editing Guidelines
+
+1. **Use workflows** for structural changes (adding steps, modifying job.yml)
+2. **Direct edits** are fine for minor instruction tweaks
+3. **Run `deepwork_jobs/learn`** after executing job steps to capture improvements
+4. **Run `deepwork install`** after any changes to regenerate commands
diff --git a/.deepwork/jobs/update_job_schema/hooks/.gitkeep b/.deepwork/jobs/update_job_schema/hooks/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/.deepwork/jobs/update_job_schema/job.yml b/.deepwork/jobs/update_job_schema/job.yml
new file mode 100644
index 00000000..b1fa8cf3
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/job.yml
@@ -0,0 +1,260 @@
+name: update_job_schema
+version: "1.0.2"
+summary: "Update the job.yml JSON Schema and propagate changes through all dependent code and files"
+common_job_info_provided_to_all_steps_at_runtime: |
+ A repeatable workflow for making changes to the job.yml JSON Schema
+ (src/deepwork/schemas/job.schema.json) and systematically propagating those
+ changes through every dependent layer of the DeepWork codebase.
+
+ The schema is the single source of truth for job.yml structure. Changes to it
+ cascade through: the parser (dataclasses + validation), MCP Pydantic models,
+ all fixture and real job.yml files, unit/integration tests, documentation,
+ and the repair migration workflow.
+
+ This job enforces a strict ordering: the schema file is always updated first,
+ then downstream code and files are updated layer by layer, and finally the
+ repair workflow is updated and end-to-end verification confirms everything
+ still parses correctly.
+
+ Based on historical analysis, a typical schema change touches 20-40 files
+ across the codebase. This workflow ensures nothing is missed.
+
+ ## Shared References
+
+ - **Change summary**: `.deepwork/tmp/schema_change_summary.md` — written in
+ step 1 (update_schema) and consumed by every subsequent step. Always read
+ this first to understand what changed.
+ - **Schema file**: `src/deepwork/schemas/job.schema.json`
+ - **Parser**: `src/deepwork/core/parser.py`
+ - **MCP schemas**: `src/deepwork/mcp/schemas.py`
+ - **MCP tools**: `src/deepwork/mcp/tools.py`
+
+ ## Quality Criteria Convention
+
+ When all quality criteria listed in a step's reviews are met, include
+ `✓ Quality Criteria Met` in your response.
+
+workflows:
+ - name: run
+ summary: "Update the job.yml schema and propagate changes through all dependencies"
+ steps:
+ - update_schema
+ - update_parser
+ - update_mcp_schemas
+ - [update_fixtures, update_jobs, update_docs]
+ - update_tests
+ - verify_and_repair
+
+steps:
+ - id: update_schema
+ name: "Update JSON Schema"
+ description: "Edit the job.schema.json file — the single source of truth for job.yml structure"
+ instructions_file: steps/update_schema.md
+ inputs:
+ - name: change_description
+ description: "Description of the schema change to make (e.g., 'Add optional timeout field to steps')"
+ - name: motivation
+ description: "Why this change is needed — what problem it solves or what capability it enables"
+ outputs:
+ schema_file:
+ type: file
+ description: "The updated src/deepwork/schemas/job.schema.json file"
+ required: true
+ change_summary:
+ type: file
+ description: "A brief markdown summary of what changed in the schema, saved to .deepwork/tmp/schema_change_summary.md"
+ required: true
+ dependencies: []
+ reviews:
+ - run_each: schema_file
+ quality_criteria:
+ "Valid JSON Schema": "Is the file valid JSON Schema Draft 7? Are $ref references correct?"
+ "Backwards Compatible": "If intended as backwards-compatible, are new fields optional with sensible defaults? If breaking, is this clearly documented in the change summary?"
+ "Consistent Naming": "Do new field names follow existing conventions (snake_case, descriptive)?"
+ - run_each: change_summary
+ quality_criteria:
+ "Clear Description": "Does it clearly describe what fields were added, removed, or modified?"
+ "Breaking Change Noted": "If the change is breaking, does the summary explicitly say so and describe what existing files will need to change?"
+
+ - id: update_parser
+ name: "Update Parser and Dataclasses"
+ description: "Update src/deepwork/core/parser.py dataclasses and parsing logic, and src/deepwork/schemas/job_schema.py if needed"
+ instructions_file: steps/update_parser.md
+ inputs:
+ - file: schema_file
+ from_step: update_schema
+ - file: change_summary
+ from_step: update_schema
+ outputs:
+ parser_file:
+ type: file
+ description: "The updated src/deepwork/core/parser.py"
+ required: true
+ job_schema_py:
+ type: file
+ description: "The updated src/deepwork/schemas/job_schema.py (even if unchanged, provide the path)"
+ required: true
+ dependencies:
+ - update_schema
+ reviews:
+ - run_each: parser_file
+ additional_review_guidance: "Read .deepwork/tmp/schema_change_summary.md for context on what schema fields changed. Verify the parser handles all new/modified fields."
+ quality_criteria:
+ "Schema Parity": "Do the dataclasses and parsing logic handle every field added/modified in the schema change?"
+ "Validation Logic": "Are new fields validated appropriately (e.g., enum values, required fields, cross-references)?"
+ "No Regressions": "Are existing fields and parsing paths left intact unless intentionally changed?"
+
+ - id: update_mcp_schemas
+ name: "Update MCP Schemas"
+ description: "Update src/deepwork/mcp/schemas.py Pydantic models and src/deepwork/mcp/tools.py if the change affects MCP tool responses"
+ instructions_file: steps/update_mcp_schemas.md
+ inputs:
+ - file: change_summary
+ from_step: update_schema
+ - file: parser_file
+ from_step: update_parser
+ outputs:
+ mcp_schemas_file:
+ type: file
+ description: "The updated src/deepwork/mcp/schemas.py"
+ required: true
+ mcp_tools_file:
+ type: file
+ description: "The updated src/deepwork/mcp/tools.py (even if unchanged, provide the path)"
+ required: true
+ dependencies:
+ - update_schema
+ - update_parser
+ reviews:
+ - run_each: step
+ additional_review_guidance: "Read .deepwork/tmp/schema_change_summary.md for what changed. Read src/deepwork/core/parser.py to see the updated dataclasses that MCP schemas must reflect."
+ quality_criteria:
+ "Model Parity": "Do the Pydantic models reflect any new fields that should be exposed through MCP tool responses?"
+ "Serialization": "Are new fields properly serialized/deserialized in tool input/output handling?"
+
+ - id: update_fixtures
+ name: "Update Test Fixtures"
+ description: "Update all test fixture job.yml files to conform to the updated schema"
+ instructions_file: steps/update_fixtures.md
+ inputs:
+ - file: change_summary
+ from_step: update_schema
+ - file: schema_file
+ from_step: update_schema
+ outputs:
+ fixture_files:
+ type: files
+ description: "All updated fixture job.yml files under tests/fixtures/jobs/"
+ required: true
+ dependencies:
+ - update_schema
+ reviews:
+ - run_each: fixture_files
+ additional_review_guidance: "Read .deepwork/tmp/schema_change_summary.md and src/deepwork/schemas/job.schema.json to verify each fixture conforms to the updated schema."
+ quality_criteria:
+ "Schema Conformance": "Does this fixture job.yml conform to the updated schema? Are new required fields present?"
+ "Test Intent Preserved": "Does the fixture still test what it was designed to test (e.g., invalid_job should still be invalid, complex_job should still be complex)?"
+
+ - id: update_jobs
+ name: "Update Standard, Library, and Instance Jobs"
+ description: "Update real job.yml files: standard_jobs (source of truth), library jobs, and .deepwork/jobs instances"
+ instructions_file: steps/update_jobs.md
+ inputs:
+ - file: change_summary
+ from_step: update_schema
+ - file: schema_file
+ from_step: update_schema
+ outputs:
+ job_files:
+ type: files
+ description: "All updated real job.yml files (standard_jobs, library, .deepwork/jobs)"
+ required: true
+ dependencies:
+ - update_schema
+ reviews:
+ - run_each: job_files
+ additional_review_guidance: "Read .deepwork/tmp/schema_change_summary.md to verify each job.yml has been updated for the schema change."
+ quality_criteria:
+ "Schema Conformance": "Does this job.yml conform to the updated schema?"
+ "Functionality Preserved": "Does the job still work as intended after the update?"
+
+ - id: update_tests
+ name: "Update Unit and Integration Tests"
+ description: "Update test files to cover new schema fields and ensure existing tests pass with the changes"
+ instructions_file: steps/update_tests.md
+ inputs:
+ - file: change_summary
+ from_step: update_schema
+ - file: parser_file
+ from_step: update_parser
+ - file: mcp_schemas_file
+ from_step: update_mcp_schemas
+ outputs:
+ test_files:
+ type: files
+ description: "All updated test files (test_parser.py, test_validation.py, test_schemas.py, test_tools.py, etc.)"
+ required: true
+ dependencies:
+ - update_schema
+ - update_parser
+ - update_mcp_schemas
+ - update_fixtures
+ reviews:
+ - run_each: step
+ additional_review_guidance: "Read .deepwork/tmp/schema_change_summary.md for what changed. The tests should cover both new functionality and ensure no regressions."
+ quality_criteria:
+ "New Field Coverage": "Are there tests for the new/modified schema fields in both parser and validation tests?"
+ "No Broken Tests": "Have existing tests been updated to work with the schema change rather than just deleted?"
+
+ - id: update_docs
+ name: "Update Documentation"
+ description: "Update architecture.md, mcp_interface.md, and any other documentation affected by the schema change"
+ instructions_file: steps/update_docs.md
+ inputs:
+ - file: change_summary
+ from_step: update_schema
+ outputs:
+ doc_files:
+ type: files
+ description: "All updated documentation files"
+ required: true
+ dependencies:
+ - update_schema
+ reviews:
+ - run_each: doc_files
+ additional_review_guidance: "Read .deepwork/tmp/schema_change_summary.md to verify docs reflect the schema change."
+ quality_criteria:
+ "Accuracy": "Does the documentation accurately describe the current schema structure after the change?"
+ "Completeness": "Are all affected sections of the documentation updated?"
+
+ - id: verify_and_repair
+ name: "Verify and Update Repair Workflow"
+ description: "Update the deepwork_jobs repair workflow to handle migration for the schema change, then restart MCP and call get_workflows to verify everything parses"
+ instructions_file: steps/verify_and_repair.md
+ inputs:
+ - file: change_summary
+ from_step: update_schema
+ - file: schema_file
+ from_step: update_schema
+ outputs:
+ repair_step_file:
+ type: file
+ description: "The updated repair workflow step file (if migration logic was needed)"
+ required: false
+ verification_log:
+ type: file
+ description: "Log of the get_workflows verification result, saved to .deepwork/tmp/verification_log.md"
+ required: true
+ dependencies:
+ - update_schema
+ - update_parser
+ - update_mcp_schemas
+ - update_fixtures
+ - update_jobs
+ - update_tests
+ - update_docs
+ reviews:
+ - run_each: verification_log
+ quality_criteria:
+ "Parses Successfully": "Does the verification log show that get_workflows returned successfully with all jobs parsed?"
+ "No Errors": "Are there any parsing errors or warnings in the log?"
diff --git a/.deepwork/jobs/update_job_schema/scripts/.gitkeep b/.deepwork/jobs/update_job_schema/scripts/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/.deepwork/jobs/update_job_schema/steps/update_docs.md b/.deepwork/jobs/update_job_schema/steps/update_docs.md
new file mode 100644
index 00000000..319d2093
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/steps/update_docs.md
@@ -0,0 +1,85 @@
+# Update Documentation
+
+## Objective
+
+Update all documentation files affected by the schema change, including architecture docs, MCP interface docs, and any other files that reference job.yml structure.
+
+## Task
+
+Documentation must accurately reflect the current schema. Outdated docs cause confusion for both users and AI agents working with DeepWork.
+
+### Process
+
+1. **Read the change summary** and identify affected documentation
+ - `doc/architecture.md` — comprehensive architecture documentation, references job.yml structure, JobDefinition dataclasses, parsing process
+ - `doc/mcp_interface.md` — MCP tool documentation, references job fields through tool descriptions
+ - `CLAUDE.md` — project instructions, references standard_jobs structure
+ - `README.md` — project overview
+   - Search for other files that reference the changed fields using grep (see the example command after this list)
+
+2. **Update architecture.md**
+ - Update any sections describing job.yml structure
+ - Update JobDefinition dataclass documentation if fields changed
+ - Update parsing process descriptions if parsing logic changed
+ - Update any example job.yml snippets
+
+3. **Update mcp_interface.md**
+ - Update tool response descriptions if MCP schemas changed
+ - Update any example responses that include changed fields
+
+4. **Update other docs as needed**
+ - Search for references to changed field names across all .md files
+ - Update CLAUDE.md if the project structure description needs updating
+ - Update README.md if the change affects user-facing workflow descriptions
+
+5. **Verify consistency**
+ - Ensure all docs tell the same story about the schema
+ - Check that example snippets in docs match the actual schema
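+
+A minimal search sketch for the grep pass in step 1 (the `timeout` field name is only a running example; substitute the fields from your change):
+
+```bash
+# Hypothetical invocation: surface files that still mention the changed field
+grep -rn "timeout" doc/ README.md CLAUDE.md
+```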
+
+## Output Format
+
+### doc_files
+
+All updated documentation files. Each should accurately reflect the current schema state.
+
+**Example — updating architecture.md when adding a `timeout` field to steps:**
+
+Before (outdated):
+```markdown
+### Step Fields
+- `id` — Step identifier
+- `name` — Human-readable name
+- `dependencies` — List of prerequisite step IDs
+```
+
+After (updated):
+```markdown
+### Step Fields
+- `id` — Step identifier
+- `name` — Human-readable name
+- `dependencies` — List of prerequisite step IDs
+- `timeout` — Optional maximum execution time in seconds
+```
+
+**Common mistake to avoid**: Updating a field description in one section of a doc but missing the same field mentioned in a different section (e.g., updating the "Step Fields" table but missing a YAML example snippet that shows step structure). Search the entire document for references to the changed area.
+
+**Also check**: If the doc contains example YAML snippets showing job.yml structure, update those snippets to include new required fields. Example snippets that don't match the real schema are confusing.
+
+## Quality Criteria
+
+- Architecture doc accurately describes the current schema structure
+- MCP interface doc reflects any changes to tool responses
+- Example snippets in documentation match the actual schema
+- No documentation references removed or renamed fields by old names
+- All affected sections are updated (not just the first one found)
+
+## Context
+
+Documentation is read by both humans and AI agents. The CLAUDE.md and architecture.md files are particularly important because they are loaded into AI agent context when working on this project. Stale documentation here means AI agents will make incorrect assumptions about the codebase.
diff --git a/.deepwork/jobs/update_job_schema/steps/update_fixtures.md b/.deepwork/jobs/update_job_schema/steps/update_fixtures.md
new file mode 100644
index 00000000..dd9e46b0
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/steps/update_fixtures.md
@@ -0,0 +1,114 @@
+# Update Test Fixtures
+
+## Objective
+
+Update all test fixture job.yml files under `tests/fixtures/jobs/` to conform to the updated schema, while preserving each fixture's intended test purpose.
+
+## Task
+
+Test fixtures are job.yml files used in unit and integration tests. They must conform to the schema or intentionally violate it (in the case of `invalid_job`). After a schema change, fixtures need updating to prevent test failures.
+
+### Process
+
+1. **Read the change summary** and identify whether the change adds required fields, modifies existing fields, or removes fields
+
+2. **Find all fixture job.yml files**
+ - Use glob to find all `tests/fixtures/jobs/*/job.yml` files
+ - Current known fixtures:
+ - `tests/fixtures/jobs/simple_job/job.yml` — minimal single-step job
+ - `tests/fixtures/jobs/complex_job/job.yml` — multi-step with dependencies
+ - `tests/fixtures/jobs/concurrent_steps_job/job.yml` — concurrent workflow steps
+ - `tests/fixtures/jobs/exposed_step_job/job.yml` — step visibility testing
+ - `tests/fixtures/jobs/fruits/job.yml` — simple two-step job
+ - `tests/fixtures/jobs/invalid_job/job.yml` — intentionally invalid for validation tests
+ - `tests/fixtures/jobs/job_with_doc_spec/job.yml` — doc_spec functionality
+ - There may be additional fixtures — always glob to find them all
+
+3. **Update each fixture**
+ - Read each fixture file
+ - Apply the schema change (add new fields, modify existing ones, etc.)
+ - **Preserve test intent**: Each fixture exists for a specific testing purpose:
+ - `simple_job`: Keep it minimal — only add new required fields
+ - `complex_job`: Add new fields in a realistic multi-step context
+ - `invalid_job`: Keep intentionally invalid, but update the *type* of invalidity if the old invalid fields are now valid or vice versa
+ - Others: Update to conform while maintaining their special characteristics
+
+4. **Verify schema conformance**
+ - For valid fixtures: ensure they would pass validation against the updated schema
+ - For `invalid_job`: ensure it still fails validation, and the reason is still testable
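+
+To spot-check conformance after updating, a minimal validation sketch (assuming `jsonschema` and `pyyaml` are importable in the dev environment):
+
+```python
+import json
+import yaml
+from jsonschema import ValidationError, validate
+
+# Validate one fixture against the updated schema
+schema = json.load(open("src/deepwork/schemas/job.schema.json"))
+job = yaml.safe_load(open("tests/fixtures/jobs/simple_job/job.yml"))
+try:
+    validate(instance=job, schema=schema)
+    print("simple_job conforms")
+except ValidationError as err:
+    print(f"violation: {err.message}")
+```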
+
+## Output Format
+
+### fixture_files
+
+All updated fixture job.yml files. Each should be a valid (or intentionally invalid) job.yml that conforms to (or intentionally violates) the updated schema.
+
+**Example — adding a new required `reviews` field to fixtures:**
+
+Before (simple_job/job.yml, missing new required field):
+```yaml
+steps:
+ - id: greet
+ name: "Greet"
+ outputs:
+ greeting:
+ type: file
+ description: "A greeting"
+ required: true
+```
+
+After (simple_job/job.yml, with new required field added minimally):
+```yaml
+steps:
+ - id: greet
+ name: "Greet"
+ outputs:
+ greeting:
+ type: file
+ description: "A greeting"
+ required: true
+ reviews: [] # Required field, empty for simple fixtures
+```
+
+**Example — updating invalid_job to test invalidity of the new field:**
+
+```yaml
+steps:
+ - id: bad_step
+ reviews: "not_an_array" # Invalid type — should be array, not string
+```
+
+**Common mistake to avoid**: Don't add optional fields to `simple_job` — keep it minimal. Add optional fields to `complex_job` where realistic multi-step usage is demonstrated. Don't accidentally fix the `invalid_job` — it must remain intentionally invalid.
+
+**For valid fixtures**, ensure all new required fields are present and new optional fields are used in at least one fixture (preferably `complex_job`) to ensure they're testable.
+
+**For invalid_job**, verify the file still triggers a validation error, and consider adding a test of the new field being invalid if applicable.
+
+## Quality Criteria
+
+- All valid fixture job.yml files conform to the updated schema
+- The `invalid_job` fixture still fails validation intentionally
+- Each fixture preserves its original test purpose and characteristics
+- New required fields are added to all valid fixtures
+- At least one fixture demonstrates usage of new optional fields
+
+## Context
+
+Test fixtures are the foundation of the test suite. If they don't match the schema, parser tests will fail before we even get to test the new functionality. Updating fixtures early (before tests) means we can run the test suite incrementally as we update test code.
diff --git a/.deepwork/jobs/update_job_schema/steps/update_jobs.md b/.deepwork/jobs/update_job_schema/steps/update_jobs.md
new file mode 100644
index 00000000..91c20e50
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/steps/update_jobs.md
@@ -0,0 +1,96 @@
+# Update Standard, Library, and Instance Jobs
+
+## Objective
+
+Update all real job.yml files — standard jobs (source of truth in `src/deepwork/standard_jobs/`), library jobs (`library/jobs/`), and installed instances (`.deepwork/jobs/`) — to conform to the updated schema.
+
+## Task
+
+Real job.yml files are used at runtime by the DeepWork MCP server and CLI. They must conform to the updated schema or the system will fail to parse them.
+
+### Process
+
+1. **Read the change summary** and identify what fields changed
+
+2. **Find all real job.yml files**
+ - Standard jobs: `src/deepwork/standard_jobs/*/job.yml`
+ - Library jobs: `library/jobs/*/job.yml`
+ - Instance jobs: `.deepwork/jobs/*/job.yml`
+
+3. **Update standard jobs first** (source of truth)
+ - Read and update `src/deepwork/standard_jobs/deepwork_jobs/job.yml`
+ - This is the most important file — it defines the core DeepWork workflows
+ - Be careful to preserve all existing workflow and step definitions
+ - Add new fields thoughtfully — these serve as examples for users
+
+4. **Update library jobs**
+ - Read and update `library/jobs/spec_driven_development/job.yml`
+ - Preserve the job's intended workflow while adding new schema fields
+
+5. **Update instance jobs**
+ - Update `.deepwork/jobs/deepwork_jobs/job.yml` (installed copy of standard job)
+ - Update `.deepwork/jobs/test_job_flow/job.yml`
+ - Update any other instance jobs found
+   - **Note**: the installed copy of the standard job should match the source of truth after these changes
+
+6. **Sync the install**
+ - After updating standard jobs source, run `deepwork install` to sync to `.deepwork/jobs/`
+ - Or manually copy changed files if install isn't available
+
+## Output Format
+
+### job_files
+
+All updated job.yml files across all three locations. Each should conform to the updated schema.
+
+**Example — adding a new optional `timeout` field to a step in a real job:**
+
+Before:
+```yaml
+ - id: define
+ name: "Define Job Specification"
+ description: "Create a job.yml specification..."
+ instructions_file: steps/define.md
+ outputs:
+ job.yml:
+ type: file
+ description: "Definition of the job"
+ required: true
+ reviews:
+ - run_each: job.yml
+ quality_criteria:
+ "Complete": "Is the job.yml complete?"
+```
+
+After (with new optional field used where it makes sense):
+```yaml
+ - id: define
+ name: "Define Job Specification"
+ description: "Create a job.yml specification..."
+ instructions_file: steps/define.md
+ timeout: 600 # Optional — added where meaningful
+ outputs:
+ job.yml:
+ type: file
+ description: "Definition of the job"
+ required: true
+ reviews:
+ - run_each: job.yml
+ quality_criteria:
+ "Complete": "Is the job.yml complete?"
+```
+
+**Common mistake to avoid**: Don't add new required fields without defaults — this breaks backwards compatibility for users who haven't updated their job files yet. If the schema change adds a required field, the repair workflow (final step) must handle migration.
+
+## Quality Criteria
+
+- All standard job files conform to the updated schema
+- All library job files conform to the updated schema
+- All instance job files conform to the updated schema
+- Standard job source of truth and installed copies are in sync
+- Existing workflows and step definitions are preserved
+- New fields are used appropriately in each job's context
+
+## Context
+
+Standard jobs are auto-installed to every user's project. Library jobs serve as examples users can adopt. Instance jobs are the working copies read by the MCP server at runtime. All three must conform to the schema or the system breaks. Standard jobs are the source of truth — update them first, then propagate.
diff --git a/.deepwork/jobs/update_job_schema/steps/update_mcp_schemas.md b/.deepwork/jobs/update_job_schema/steps/update_mcp_schemas.md
new file mode 100644
index 00000000..0f33c0a1
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/steps/update_mcp_schemas.md
@@ -0,0 +1,79 @@
+# Update MCP Schemas
+
+## Objective
+
+Update `src/deepwork/mcp/schemas.py` Pydantic models and `src/deepwork/mcp/tools.py` tool logic to reflect the schema change, ensuring MCP tool responses correctly expose new fields to clients.
+
+## Task
+
+The MCP layer is how external tools (like Claude Code) interact with DeepWork. If the schema change affects what information should be returned by MCP tools (get_workflows, start_workflow, finished_step), the Pydantic models and tool logic must be updated.
+
+### Process
+
+1. **Read the change summary and current MCP schemas**
+ - Key models in `src/deepwork/mcp/schemas.py`: `JobInfo`, `WorkflowInfo`, `StepInfo`, `StartWorkflowInput`, `FinishedStepInput`, `GetWorkflowsResponse`
+ - Also read the updated `src/deepwork/core/parser.py` to see the new dataclass fields
+
+2. **Determine what needs updating**
+ - Not every schema change affects MCP responses. Ask:
+ - Does the new field need to be visible to MCP clients?
+ - Does it change how workflows are started or steps are completed?
+ - Does it affect the information returned by `get_workflows`?
+ - If the change is purely internal (e.g., a validation-only field), MCP schemas may not need changes
+
+3. **Update Pydantic models** (if needed)
+ - Add new fields to the appropriate Pydantic model(s)
+ - Use `Optional` for fields that may not always be present
+ - Match field types to the parser dataclass types
+
+4. **Update tools.py** (if needed)
+ - Read `src/deepwork/mcp/tools.py`
+ - Update the conversion logic that transforms `JobDefinition` → MCP response models
+ - Ensure new fields are correctly mapped from parser dataclasses to Pydantic models
+
+## Output Format
+
+### mcp_schemas_file
+
+The updated `src/deepwork/mcp/schemas.py`. If no changes were needed, provide the unchanged file path.
+
+**Example — adding an optional `timeout` field to StepInfo:**
+
+Pydantic model change in schemas.py:
+```python
+class StepInfo(BaseModel):
+ step_id: str
+ # ... existing fields ...
+ timeout: Optional[int] = None # New field exposed to MCP clients
+```
+
+**Common mistake to avoid**: Adding a field to the Pydantic model but forgetting to populate it in tools.py. Both files must be updated together.
+
+### mcp_tools_file
+
+The updated `src/deepwork/mcp/tools.py`. If no changes were needed, provide the unchanged file path.
+
+**Example — populating the new field in the conversion logic:**
+
+In tools.py where `Step` dataclass is converted to `StepInfo`:
+```python
+StepInfo(
+ step_id=step.id,
+ # ... existing fields ...
+ timeout=step.timeout, # Map from parser dataclass to Pydantic model
+)
+```
+
+**When NOT to update MCP schemas**: If the new field is purely for internal validation or parsing (e.g., a field that only affects how the parser processes other fields), it doesn't need to be in the MCP response.
+
+## Quality Criteria
+
+- If the schema change adds fields that should be visible to MCP clients, they are present in the Pydantic models
+- Field types in Pydantic models match the parser dataclass types
+- Conversion logic in tools.py correctly maps new dataclass fields to Pydantic fields
+- No existing MCP response fields are broken or removed unintentionally
+- The header comment in schemas.py about staying in sync with mcp_interface.md is respected
+
+## Context
+
+The MCP server is the primary interface for AI agents interacting with DeepWork. The Pydantic models in schemas.py define the contract between the MCP server and its clients. Changes here should be minimal and deliberate — only expose fields that clients actually need.
diff --git a/.deepwork/jobs/update_job_schema/steps/update_parser.md b/.deepwork/jobs/update_job_schema/steps/update_parser.md
new file mode 100644
index 00000000..92f30d55
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/steps/update_parser.md
@@ -0,0 +1,81 @@
+# Update Parser and Dataclasses
+
+## Objective
+
+Update `src/deepwork/core/parser.py` to handle the new/modified schema fields, including any needed dataclass changes, parsing logic, and validation. Also update `src/deepwork/schemas/job_schema.py` if the change affects schema loading or exported constants.
+
+## Task
+
+The parser is the runtime representation of job.yml files. It converts raw YAML into typed Python dataclasses that the rest of the codebase uses. Any schema change must be reflected here.
+
+### Process
+
+1. **Read the change summary and current parser**
+ - Read `src/deepwork/core/parser.py` in full
+ - Understand the existing dataclass hierarchy: `Step`, `OutputSpec`, `StepInput`, `HookAction`, `Review`, `Workflow`, `WorkflowStepEntry`, `JobDefinition`
+ - Understand the parsing flow in `parse_job_definition()`
+
+2. **Update dataclasses**
+ - Add new fields to the appropriate dataclass(es)
+ - Use appropriate Python types (str, bool, list, dict, Optional, etc.)
+ - Set defaults for optional fields
+ - Follow existing patterns (e.g., how `Optional` fields default to `None`)
+
+3. **Update parsing logic**
+ - Modify the parsing code in `parse_job_definition()` or related functions
+ - Handle the new field extraction from raw YAML data
+ - Add validation logic if needed (e.g., cross-referencing between fields)
+
+4. **Check job_schema.py**
+ - Read `src/deepwork/schemas/job_schema.py`
+ - If the change adds new enum values or constants that the schema uses, update `LIFECYCLE_HOOK_EVENTS` or add new constants as needed
+ - If no changes needed, note this in the output
+
+## Output Format
+
+### parser_file
+
+The updated `src/deepwork/core/parser.py`. Follow the existing patterns in the file.
+
+**Example — adding an optional `timeout` field to steps:**
+
+Dataclass change:
+```python
+@dataclass
+class Step:
+ id: str
+ name: str
+ # ... existing fields ...
+ timeout: Optional[int] = None # New optional field with None default
+```
+
+Parsing logic change (in the step-parsing section of `parse_job_definition`):
+```python
+timeout = step_data.get("timeout") # Optional — returns None if missing
+```
+
+**Common mistake to avoid**: Don't add step-level fields to `JobDefinition`, or job-level fields to `Step`. Match the schema hierarchy — if the field is under `steps[].properties` in the JSON Schema, it belongs on the `Step` dataclass.
+
+**For required fields**, follow the pattern of raising `ParseError` when missing:
+```python
+new_field = step_data.get("new_field")
+if new_field is None:
+ raise ParseError(f"Step '{step_id}' missing required field 'new_field'")
+```
+
+### job_schema_py
+
+The updated `src/deepwork/schemas/job_schema.py` (provide the path even if unchanged).
+
+## Quality Criteria
+
+- Every field added/modified in the schema has a corresponding dataclass field
+- Parsing logic correctly extracts the new field from raw YAML data
+- Optional schema fields have sensible Python defaults (typically `None`)
+- Required schema fields raise clear errors when missing
+- Validation logic catches invalid values (e.g., bad enum values, invalid cross-references)
+- Existing parsing behavior is preserved for all unchanged fields
+
+## Context
+
+The parser defines the in-memory representation used by everything downstream: the MCP server reads `JobDefinition` objects, the CLI uses them for sync/install, and the skill generator uses them to create slash commands. Getting the dataclasses right here is critical — all subsequent steps depend on this representation.
diff --git a/.deepwork/jobs/update_job_schema/steps/update_schema.md b/.deepwork/jobs/update_job_schema/steps/update_schema.md
new file mode 100644
index 00000000..6ca33334
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/steps/update_schema.md
@@ -0,0 +1,102 @@
+# Update JSON Schema
+
+## Objective
+
+Edit `src/deepwork/schemas/job.schema.json` — the single source of truth for all job.yml file structure — to implement the desired schema change.
+
+## Task
+
+Apply the requested change to the JSON Schema file. This is always the first step because every other file in the codebase derives its understanding of job.yml structure from this schema.
+
+### Process
+
+1. **Understand the change**
+ - Read the `change_description` and `motivation` inputs
+ - Ask structured questions if the change is ambiguous (e.g., "Should this field be required or optional?", "What type should this field be?", "Should it have a default value?")
+
+2. **Read the current schema**
+ - Read `src/deepwork/schemas/job.schema.json` in full
+ - Understand the existing structure, `$defs`, and how fields relate to each other
+
+3. **Plan the change**
+ - Identify exactly which section(s) of the schema need modification
+ - Determine if new `$defs` entries are needed
+ - Consider whether the change is backwards-compatible (new optional fields) or breaking (new required fields, removed fields, changed types)
+ - If breaking, plan what existing files will need to change
+
+4. **Apply the change**
+ - Edit the JSON Schema file
+ - Follow existing patterns for field definitions (look at peer fields for style)
+ - Use `$ref` for reusable sub-schemas
+ - Add `description` fields for new properties to make the schema self-documenting
+
+5. **Write the change summary**
+ - Create `.deepwork/tmp/schema_change_summary.md` documenting what changed
+ - This file is critical — every subsequent step reads it to know what to update
+
+## Output Format
+
+### schema_file
+
+The updated `src/deepwork/schemas/job.schema.json`. Must be valid JSON Schema Draft 7.
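+
+**Example: adding an optional `timeout` property to the step definition** (a sketch of the shape only, reusing this workflow's running `timeout` example; match the style of peer fields in the real file):
+
+```json
+"timeout": {
+  "type": "integer",
+  "minimum": 1,
+  "description": "Optional maximum execution time in seconds"
+}
+```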
+
+### change_summary
+
+A markdown file at `.deepwork/tmp/schema_change_summary.md`:
+
+```markdown
+# Schema Change Summary
+
+## Change Description
+[What was changed in plain English]
+
+## Motivation
+[Why this change was made]
+
+## Breaking Change
+[Yes/No — and if yes, what existing files will need to change]
+
+## Fields Modified
+- **[field_path]**: [description of change]
+ - Type: [type]
+ - Required: [yes/no]
+ - Default: [value, if any]
+
+## Impact on Existing Files
+- [ ] Parser dataclasses need: [what]
+- [ ] MCP schemas need: [what]
+- [ ] Fixture job.yml files need: [what]
+- [ ] Standard/library jobs need: [what]
+- [ ] Tests need: [what]
+- [ ] Docs need: [what]
+- [ ] Repair workflow needs: [migration logic description, or "no migration needed"]
+```
+
+## Quality Criteria
+
+- The schema file is valid JSON Schema Draft 7
+- New fields follow existing naming conventions (snake_case)
+- If backwards-compatible, new fields are optional with sensible defaults
+- If breaking, the change summary clearly documents what needs to change
+- The change summary has a complete impact checklist for all downstream files
+- `$ref` is used for reusable sub-schemas rather than inline duplication
+
+## Context
+
+The JSON Schema at `src/deepwork/schemas/job.schema.json` is the single source of truth for job.yml structure. It is used by:
+- `src/deepwork/utils/validation.py` for schema validation via jsonschema
+- `src/deepwork/schemas/job_schema.py` which loads and exports it as `JOB_SCHEMA`
+- `src/deepwork/core/parser.py` which parses job.yml into dataclasses
+- The installed copy at `.deepwork/schemas/job.schema.json`
+
+Historical pattern: schema changes typically touch 20-40 files across the codebase. The change summary you write here is the roadmap for all subsequent steps.
diff --git a/.deepwork/jobs/update_job_schema/steps/update_tests.md b/.deepwork/jobs/update_job_schema/steps/update_tests.md
new file mode 100644
index 00000000..caafda12
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/steps/update_tests.md
@@ -0,0 +1,95 @@
+# Update Unit and Integration Tests
+
+## Objective
+
+Update test files to cover new/modified schema fields: add new test cases, update existing tests that reference changed structure, and ensure the full test suite passes.
+
+## Task
+
+Tests validate that the schema, parser, MCP layer, and validation logic all work correctly. After a schema change, tests need updating to cover the new fields and ensure no regressions.
+
+### Process
+
+1. **Read the change summary**, the updated parser, and MCP schemas to understand the new code
+
+2. **Identify affected test files**
+ - `tests/unit/test_parser.py` — parser and dataclass tests
+ - `tests/unit/test_validation.py` — schema validation tests
+ - `tests/unit/mcp/test_schemas.py` — MCP Pydantic model tests
+ - `tests/unit/mcp/test_tools.py` — MCP tool logic tests
+ - `tests/unit/mcp/test_state.py` — workflow state management tests
+ - `tests/unit/mcp/test_quality_gate.py` — quality review tests
+ - `tests/unit/mcp/test_async_interface.py` — async interface tests
+ - `tests/integration/test_install_flow.py` — install flow tests
+ - There may be additional test files — always check with glob
+
+3. **Update test_parser.py**
+ - Add tests for new dataclass fields
+ - Test parsing of the new field from YAML
+ - Test default values for optional fields
+ - Test validation errors for invalid values
+ - Update existing test fixtures/assertions if the change modified existing behavior
+
+4. **Update test_validation.py**
+ - Add tests that validate the new schema field
+ - Test valid values pass validation
+ - Test invalid values fail validation
+ - Update the `complex_job` fixture used in validation tests if needed
+
+5. **Update MCP test files** (if MCP schemas changed)
+ - Update test_schemas.py for new Pydantic model fields
+ - Update test_tools.py if tool logic changed
+ - Update test_state.py if workflow state handling changed
+
+6. **Run the test suite**
+ - Run `uv run pytest tests/` to verify all tests pass
+ - Fix any failures from the schema change
+ - Do NOT delete tests that fail — fix them to work with the new schema
+
+## Output Format
+
+### test_files
+
+All updated test files. Each should add new test cases, pass against the updated code, and retain existing tests rather than deleting them.
+
+**Example — test patterns for a new optional `timeout` field:**
+
+Parser test (test_parser.py):
+```python
+def test_step_timeout_parsed():
+ """Test that timeout field is parsed from step data."""
+ step = make_step({"id": "s1", "timeout": 300, ...})
+ assert step.timeout == 300
+
+def test_step_timeout_defaults_to_none():
+ """Test that timeout defaults to None when not specified."""
+ step = make_step({"id": "s1", ...}) # No timeout field
+ assert step.timeout is None
+```
+
+Validation test (test_validation.py):
+```python
+def test_timeout_invalid_type():
+ """Test that non-integer timeout fails validation."""
+ job_data = make_valid_job()
+ job_data["steps"][0]["timeout"] = "not_a_number"
+ with pytest.raises(ValidationError):
+ validate_against_schema(job_data, JOB_SCHEMA)
+```
+
+**Common mistake to avoid**: Only testing the happy path. Always include tests for:
+- Valid values (field present and correct)
+- Default behavior (field absent)
+- Invalid values (wrong type, out of range, etc.)
+
+## Quality Criteria
+
+- New schema fields have dedicated test cases in test_parser.py and test_validation.py
+- Tests cover both valid and invalid values for new fields
+- Existing tests are updated (not deleted) to work with the schema change
+- The full test suite passes: `uv run pytest tests/`
+- If MCP schemas changed, MCP test files are updated accordingly
+
+## Context
+
+Tests are the safety net that catches bugs before they reach users. A schema change that isn't properly tested will eventually cause hard-to-debug failures. The test suite should provide confidence that the change works correctly across all layers.
diff --git a/.deepwork/jobs/update_job_schema/steps/verify_and_repair.md b/.deepwork/jobs/update_job_schema/steps/verify_and_repair.md
new file mode 100644
index 00000000..3b579377
--- /dev/null
+++ b/.deepwork/jobs/update_job_schema/steps/verify_and_repair.md
@@ -0,0 +1,93 @@
+# Verify and Update Repair Workflow
+
+## Objective
+
+Update the `deepwork_jobs` repair workflow to handle migration for the schema change (if needed), then verify the entire system works by restarting the MCP server and calling `get_workflows` to confirm all jobs parse successfully.
+
+## Task
+
+This is the final verification step. It ensures that (1) existing user installations can be migrated to the new schema via the repair workflow, and (2) the full system works end-to-end after all changes.
+
+### Process
+
+1. **Read the change summary** and determine if the change is breaking (requires migration) or non-breaking (optional fields only)
+
+2. **Update the repair workflow** (if migration needed)
+ - Read the repair workflow step files:
+ - `src/deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md` — fixes job.yml files
+ - `src/deepwork/standard_jobs/deepwork_jobs/steps/fix_settings.md` — fixes settings
+ - `src/deepwork/standard_jobs/deepwork_jobs/steps/errata.md` — post-repair verification
+ - If the schema change is breaking, add migration instructions to `fix_jobs.md`:
+ - Describe what old job.yml files look like (before the change)
+ - Describe what they should look like after migration
+ - Provide specific transformation rules
+ - If the change is non-breaking (new optional fields), migration may not be needed — document this decision
+
+3. **Sync standard jobs**
+ - After updating repair workflow files in `src/deepwork/standard_jobs/`, run `deepwork install` to sync to `.deepwork/jobs/`
+ - This ensures the MCP server picks up the updated repair workflow
+
+4. **Verify with get_workflows**
+ - Call the `get_workflows` MCP tool
+ - Confirm ALL jobs are listed and parsed correctly
+ - Check that no parsing errors or warnings appear
+ - If any jobs fail to parse, diagnose and fix the issue before completing
+
+5. **Run the test suite one final time**
+ - Run `uv run pytest tests/` to confirm everything passes
+ - This is the final safety check
+
+6. **Write the verification log**
+ - Document the get_workflows output
+ - Document the test suite results
+ - Note any issues encountered and how they were resolved
+
+## Output Format
+
+### repair_step_file
+
+The updated repair workflow step file (typically `fix_jobs.md`), if migration logic was added. This output is optional — if no migration was needed, skip it.
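+
+**Example: migration guidance added to fix_jobs.md** (illustrative wording only; the rename shown mirrors a `description` to `common_job_info_provided_to_all_steps_at_runtime` migration):
+
+```markdown
+### Migrate job.yml files to the new schema
+
+- Before: jobs have a `description` field and an optional `changelog` array.
+- After: jobs require `common_job_info_provided_to_all_steps_at_runtime`; `changelog` is removed.
+- Rule: rename `description:` to `common_job_info_provided_to_all_steps_at_runtime:` and delete any `changelog:` block.
+```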
+
+### verification_log
+
+A markdown file at `.deepwork/tmp/verification_log.md`:
+
+```markdown
+# Verification Log
+
+## Schema Change
+[Brief description of what was changed]
+
+## get_workflows Result
+[Full output from the get_workflows MCP tool call, showing all jobs parsed successfully]
+
+## Test Suite Result
+[Summary of pytest results — number of tests passed/failed/skipped]
+
+## Issues Encountered
+[Any issues found during verification and how they were resolved, or "None"]
+
+## Migration Status
+[Whether the repair workflow was updated with migration logic, and why/why not]
+```
+
+## Quality Criteria
+
+- The get_workflows MCP call succeeds and returns all jobs without parsing errors
+- The full test suite passes (`uv run pytest tests/`)
+- If the change is breaking, the repair workflow has migration instructions
+- The verification log documents all results clearly
+
+## Context
+
+This step is the final gate. If get_workflows fails, it means something in the chain is broken — a job.yml doesn't conform, the parser can't handle a field, or the MCP layer has a mismatch. The repair workflow is also critical: when users upgrade DeepWork, the repair workflow is how their existing job.yml files get migrated to the new schema version.
diff --git a/.deepwork/jobs/update_job_schema/templates/.gitkeep b/.deepwork/jobs/update_job_schema/templates/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/.deepwork/schemas/job.schema.json b/.deepwork/schemas/job.schema.json
index 4f2227cc..92055d36 100644
--- a/.deepwork/schemas/job.schema.json
+++ b/.deepwork/schemas/job.schema.json
@@ -8,6 +8,7 @@
"name",
"version",
"summary",
+ "common_job_info_provided_to_all_steps_at_runtime",
"steps"
],
"additionalProperties": false,
@@ -28,10 +29,10 @@
"maxLength": 200,
"description": "Brief one-line summary of what this job accomplishes. Used in skill descriptions."
},
- "description": {
+ "common_job_info_provided_to_all_steps_at_runtime": {
"type": "string",
"minLength": 1,
- "description": "Detailed multi-line description of the job's purpose, process, and goals"
+ "description": "Common context and information provided to all steps at runtime. Use this for shared knowledge that every step needs (e.g., project background, key terminology, constraints, or conventions) rather than duplicating it in individual step instructions."
},
"workflows": {
"type": "array",
@@ -40,13 +41,6 @@
"$ref": "#/$defs/workflow"
}
},
- "changelog": {
- "type": "array",
- "description": "Version history documenting changes to the job definition",
- "items": {
- "$ref": "#/$defs/changelogEntry"
- }
- },
"steps": {
"type": "array",
"minItems": 1,
@@ -108,26 +102,6 @@
}
]
},
- "changelogEntry": {
- "type": "object",
- "required": [
- "version",
- "changes"
- ],
- "additionalProperties": false,
- "properties": {
- "version": {
- "type": "string",
- "pattern": "^\\d+\\.\\d+\\.\\d+$",
- "description": "Version number for this change"
- },
- "changes": {
- "type": "string",
- "minLength": 1,
- "description": "Description of changes made in this version"
- }
- }
- },
"step": {
"type": "object",
"required": [
diff --git a/doc/architecture.md b/doc/architecture.md
index 2325cd4e..77c4de49 100644
--- a/doc/architecture.md
+++ b/doc/architecture.md
@@ -304,24 +304,10 @@ platforms:
name: competitive_research
version: "1.0.0"
summary: "Systematic competitive analysis workflow"
-description: |
- A comprehensive workflow for analyzing competitors in your market segment. This job
- helps product teams understand the competitive landscape by systematically identifying
- competitors, researching their offerings, creating comparison matrices, and developing
- strategic positioning recommendations.
-
- The workflow produces:
- - A vetted list of key competitors
- - Detailed research notes on each competitor (primary and secondary sources)
- - A comparison matrix highlighting key differentiators
- - Strategic positioning recommendations
-
+common_job_info_provided_to_all_steps_at_runtime: |
+ A comprehensive workflow for analyzing competitors in your market segment.
Designed for product teams conducting quarterly competitive analysis.
-changelog:
- - version: "1.0.0"
- changes: "Initial job creation"
-
# Workflows define named sequences of steps that form complete processes.
# Steps not in any workflow are "standalone skills" that can be run anytime.
# Steps can be listed as simple strings (sequential) or arrays (concurrent execution).
diff --git a/flake.lock b/flake.lock
index 8a9dd522..79497ddd 100644
--- a/flake.lock
+++ b/flake.lock
@@ -6,11 +6,11 @@
"nixpkgs": "nixpkgs"
},
"locked": {
- "lastModified": 1770362224,
- "narHash": "sha256-glZjGWSy+LpalbwlsQ3iWNpWU4TlEOandYWOpl8sMt8=",
+ "lastModified": 1771013219,
+ "narHash": "sha256-EyR1pOL4vWyefDij3/HyZd3qjG6gzfk2BcjbAFQiXdY=",
"owner": "sadjow",
"repo": "claude-code-nix",
- "rev": "f4f8d6e7cc59e34e5a85550f017ead83ab925b22",
+ "rev": "3f8cff15b530cc4f09ed2497932a66662f7a8422",
"type": "github"
},
"original": {
@@ -39,11 +39,11 @@
},
"nixpkgs": {
"locked": {
- "lastModified": 1770169770,
- "narHash": "sha256-awR8qIwJxJJiOmcEGgP2KUqYmHG4v/z8XpL9z8FnT1A=",
+ "lastModified": 1770843696,
+ "narHash": "sha256-LovWTGDwXhkfCOmbgLVA10bvsi/P8eDDpRudgk68HA8=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "aa290c9891fa4ebe88f8889e59633d20cc06a5f2",
+ "rev": "2343bbb58f99267223bc2aac4fc9ea301a155a16",
"type": "github"
},
"original": {
@@ -55,11 +55,11 @@
},
"nixpkgs_2": {
"locked": {
- "lastModified": 1770197578,
- "narHash": "sha256-AYqlWrX09+HvGs8zM6ebZ1pwUqjkfpnv8mewYwAo+iM=",
+ "lastModified": 1771008912,
+ "narHash": "sha256-gf2AmWVTs8lEq7z/3ZAsgnZDhWIckkb+ZnAo5RzSxJg=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "00c21e4c93d963c50d4c0c89bfa84ed6e0694df2",
+ "rev": "a82ccc39b39b621151d6732718e3e250109076fa",
"type": "github"
},
"original": {
@@ -82,11 +82,11 @@
]
},
"locked": {
- "lastModified": 1763662255,
- "narHash": "sha256-4bocaOyLa3AfiS8KrWjZQYu+IAta05u3gYZzZ6zXbT0=",
+ "lastModified": 1771039651,
+ "narHash": "sha256-WZOfX4APbc6vmL14ZWJXgBeRfEER8H+OIX0D0nSmv0M=",
"owner": "pyproject-nix",
"repo": "build-system-pkgs",
- "rev": "042904167604c681a090c07eb6967b4dd4dae88c",
+ "rev": "69bc2b53b79cbd6ce9f66f506fc962b45b5e68b9",
"type": "github"
},
"original": {
@@ -149,11 +149,11 @@
]
},
"locked": {
- "lastModified": 1770331927,
- "narHash": "sha256-jlOvO++uvne/lTgWqdI4VhTV5OpVWi70ZDVBlT6vGSs=",
+ "lastModified": 1770770348,
+ "narHash": "sha256-A2GzkmzdYvdgmMEu5yxW+xhossP+txrYb7RuzRaqhlg=",
"owner": "pyproject-nix",
"repo": "uv2nix",
- "rev": "5b43a934e15b23bfba6c408cba1c570eccf80080",
+ "rev": "5d1b2cb4fe3158043fbafbbe2e46238abbc954b0",
"type": "github"
},
"original": {
diff --git a/library/jobs/spec_driven_development/job.yml b/library/jobs/spec_driven_development/job.yml
index 6d575879..4ae6de5d 100644
--- a/library/jobs/spec_driven_development/job.yml
+++ b/library/jobs/spec_driven_development/job.yml
@@ -2,35 +2,18 @@
name: spec_driven_development
version: "1.0.0"
summary: "Spec-driven development workflow that turns specifications into working implementations through structured planning."
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
A comprehensive workflow inspired by GitHub's spec-kit that enables "spec-driven development" -
a methodology where executable specifications generate working implementations rather than
merely guiding them.
This job inverts traditional development: instead of starting with code, you first create
detailed specifications that directly generate implementations. The workflow progresses through
- six phases:
-
- 1. **Constitution**: Establish project governance principles and development guidelines
- 2. **Specification**: Define functional requirements focusing on "what" and "why"
- 3. **Clarification**: Resolve ambiguities before technical planning
- 4. **Planning**: Create technical implementation strategy and architecture
- 5. **Task Generation**: Break plans into actionable, ordered development tasks
- 6. **Implementation**: Execute tasks to deliver the complete feature
+ six phases: Constitution, Specification, Clarification, Planning, Task Generation, and Implementation.
The workflow produces all artifacts in a `specs/[feature-name]/` directory structure,
keeping specifications versioned alongside the implementation they generate.
- Ideal for:
- - New feature development requiring upfront design
- - Complex features with multiple stakeholders
- - Projects where specification quality directly impacts implementation success
- - Teams wanting to capture design decisions for future reference
-
-changelog:
- - version: "1.0.0"
- changes: "Initial version based on GitHub spec-kit workflow"
-
steps:
- id: constitution
name: "Establish Constitution"
diff --git a/src/deepwork/core/jobs.py b/src/deepwork/core/jobs.py
index cb9436ac..ef0cef44 100644
--- a/src/deepwork/core/jobs.py
+++ b/src/deepwork/core/jobs.py
@@ -14,12 +14,23 @@
import logging
import os
+from dataclasses import dataclass
from pathlib import Path
from deepwork.core.parser import JobDefinition, ParseError, parse_job_definition
logger = logging.getLogger("deepwork.core.jobs")
+
+@dataclass
+class JobLoadError:
+ """A job that failed to load."""
+
+ job_name: str
+ job_dir: str
+ error: str
+
+
# Environment variable for additional job folders (colon-delimited)
ENV_ADDITIONAL_JOBS_FOLDERS = "DEEPWORK_ADDITIONAL_JOBS_FOLDERS"
@@ -52,7 +63,9 @@ def get_job_folders(project_root: Path) -> list[Path]:
return folders
-def load_all_jobs(project_root: Path) -> list[JobDefinition]:
+def load_all_jobs(
+ project_root: Path,
+) -> tuple[list[JobDefinition], list[JobLoadError]]:
"""Load all job definitions from all configured job folders.
Jobs are discovered from each folder returned by :func:`get_job_folders`.
@@ -60,10 +73,11 @@ def load_all_jobs(project_root: Path) -> list[JobDefinition]:
earlier folder wins (project-local overrides standard, etc.).
Returns:
- List of successfully parsed ``JobDefinition`` objects.
+ Tuple of (successfully parsed jobs, errors for jobs that failed to load).
"""
seen_names: set[str] = set()
jobs: list[JobDefinition] = []
+ errors: list[JobLoadError] = []
for folder in get_job_folders(project_root):
if not folder.exists() or not folder.is_dir():
@@ -82,8 +96,15 @@ def load_all_jobs(project_root: Path) -> list[JobDefinition]:
seen_names.add(job_dir.name)
except ParseError as e:
logger.warning("Skipping invalid job '%s': %s", job_dir.name, e)
-
- return jobs
+ errors.append(
+ JobLoadError(
+ job_name=job_dir.name,
+ job_dir=str(job_dir),
+ error=str(e),
+ )
+ )
+
+ return jobs, errors
def find_job_dir(project_root: Path, job_name: str) -> Path | None:
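Since `load_all_jobs` now returns a tuple, every caller has to unpack both lists. A minimal sketch of the new calling convention, based on the signatures and `JobLoadError` fields shown above (the printing is illustrative, not part of this change):

```python
from pathlib import Path

from deepwork.core.jobs import load_all_jobs

# Unpack both halves of the new return value; ignoring the second element
# would silently discard parse failures that were previously only logged.
jobs, errors = load_all_jobs(Path.cwd())
for job in jobs:
    print(f"loaded: {job.name}")
for err in errors:
    # JobLoadError carries job_name, job_dir, and the ParseError message.
    print(f"failed: {err.job_name} ({err.job_dir}): {err.error}")
```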
diff --git a/src/deepwork/core/parser.py b/src/deepwork/core/parser.py
index b6f6a380..8c5ddf38 100644
--- a/src/deepwork/core/parser.py
+++ b/src/deepwork/core/parser.py
@@ -280,7 +280,7 @@ class JobDefinition:
name: str
version: str
summary: str
- description: str | None
+ common_job_info_provided_to_all_steps_at_runtime: str
steps: list[Step]
job_dir: Path
workflows: list[Workflow] = field(default_factory=list)
@@ -583,7 +583,9 @@ def from_dict(cls, data: dict[str, Any], job_dir: Path) -> "JobDefinition":
name=data["name"],
version=data["version"],
summary=data["summary"],
- description=data.get("description"),
+ common_job_info_provided_to_all_steps_at_runtime=data[
+ "common_job_info_provided_to_all_steps_at_runtime"
+ ],
steps=[Step.from_dict(step_data) for step_data in data["steps"]],
job_dir=job_dir,
workflows=workflows,
diff --git a/src/deepwork/mcp/schemas.py b/src/deepwork/mcp/schemas.py
index 0e210698..bd0240df 100644
--- a/src/deepwork/mcp/schemas.py
+++ b/src/deepwork/mcp/schemas.py
@@ -66,7 +66,6 @@ class JobInfo(BaseModel):
name: str = Field(description="Job identifier")
summary: str = Field(description="Short summary of the job")
- description: str | None = Field(default=None, description="Full description")
workflows: list[WorkflowInfo] = Field(default_factory=list)
@@ -218,12 +217,27 @@ class ActiveStepInfo(BaseModel):
default_factory=list, description="Reviews to run when step completes"
)
step_instructions: str = Field(description="Instructions for the step")
+ common_job_info: str = Field(
+ description="Common context and information shared across all steps in this job"
+ )
+
+
+class JobLoadErrorInfo(BaseModel):
+ """A job that failed to load due to a parse or validation error."""
+
+ job_name: str = Field(description="Directory name of the job that failed")
+ job_dir: str = Field(description="Absolute path to the job directory")
+ error: str = Field(description="Detailed error message explaining why the job failed to load")
class GetWorkflowsResponse(BaseModel):
"""Response from get_workflows tool."""
jobs: list[JobInfo] = Field(description="List of all jobs with their workflows")
+ errors: list[JobLoadErrorInfo] = Field(
+ default_factory=list,
+ description="Jobs that failed to load, with detailed error messages",
+ )
class StackEntry(BaseModel):
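For reference, a sketch of what the new error surface looks like when serialized. Field names follow the models above; the concrete values are invented, and `model_dump_json` assumes Pydantic v2:

```python
from deepwork.mcp.schemas import GetWorkflowsResponse, JobLoadErrorInfo

# Hypothetical payload illustrating the new `errors` field on the
# get_workflows response; the job directory and message are made up.
response = GetWorkflowsResponse(
    jobs=[],
    errors=[
        JobLoadErrorInfo(
            job_name="broken_job",
            job_dir="/repo/.deepwork/jobs/broken_job",
            error="Missing required field: common_job_info_provided_to_all_steps_at_runtime",
        )
    ],
)
print(response.model_dump_json(indent=2))
```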
diff --git a/src/deepwork/mcp/tools.py b/src/deepwork/mcp/tools.py
index 75075df1..f0fd930a 100644
--- a/src/deepwork/mcp/tools.py
+++ b/src/deepwork/mcp/tools.py
@@ -14,7 +14,7 @@
import aiofiles
-from deepwork.core.jobs import find_job_dir, load_all_jobs
+from deepwork.core.jobs import JobLoadError, find_job_dir, load_all_jobs
from deepwork.core.parser import (
JobDefinition,
OutputSpec,
@@ -31,6 +31,7 @@
FinishedStepResponse,
GetWorkflowsResponse,
JobInfo,
+ JobLoadErrorInfo,
ReviewInfo,
StartWorkflowInput,
StartWorkflowResponse,
@@ -78,11 +79,11 @@ def __init__(
self.max_quality_attempts = max_quality_attempts
self.external_runner = external_runner
- def _load_all_jobs(self) -> list[JobDefinition]:
+ def _load_all_jobs(self) -> tuple[list[JobDefinition], list[JobLoadError]]:
"""Load all job definitions from all configured job folders.
Returns:
- List of parsed JobDefinition objects
+ Tuple of (parsed JobDefinition objects, errors for jobs that failed)
"""
return load_all_jobs(self.project_root)
@@ -107,7 +108,6 @@ def _job_to_info(self, job: JobDefinition) -> JobInfo:
return JobInfo(
name=job.name,
summary=job.summary,
- description=job.description,
workflows=workflows,
)
@@ -285,10 +285,18 @@ def get_workflows(self) -> GetWorkflowsResponse:
Returns:
GetWorkflowsResponse with all jobs and their workflows
"""
- jobs = self._load_all_jobs()
+ jobs, load_errors = self._load_all_jobs()
job_infos = [self._job_to_info(job) for job in jobs]
+ error_infos = [
+ JobLoadErrorInfo(
+ job_name=e.job_name,
+ job_dir=e.job_dir,
+ error=e.error,
+ )
+ for e in load_errors
+ ]
- return GetWorkflowsResponse(jobs=job_infos)
+ return GetWorkflowsResponse(jobs=job_infos, errors=error_infos)
async def start_workflow(self, input_data: StartWorkflowInput) -> StartWorkflowResponse:
"""Start a new workflow session.
@@ -348,6 +356,7 @@ async def start_workflow(self, input_data: StartWorkflowInput) -> StartWorkflowR
for r in first_step.reviews
],
step_instructions=instructions,
+ common_job_info=job.common_job_info_provided_to_all_steps_at_runtime,
),
stack=self.state_manager.get_stack(),
)
@@ -542,6 +551,7 @@ async def finished_step(self, input_data: FinishedStepInput) -> FinishedStepResp
for r in next_step.reviews
],
step_instructions=instructions,
+ common_job_info=job.common_job_info_provided_to_all_steps_at_runtime,
),
stack=self.state_manager.get_stack(),
)
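Both `start_workflow` and `finished_step` now thread the job-level context into `ActiveStepInfo`. A hypothetical sketch of how a platform client might combine the two fields into a prompt (`render_step_prompt` is invented for illustration):

```python
from deepwork.mcp.schemas import ActiveStepInfo


def render_step_prompt(step_info: ActiveStepInfo) -> str:
    """Invented helper: prepend the shared job context to step instructions."""
    # The shared context comes first so every step sees identical background,
    # mirroring the intent of common_job_info_provided_to_all_steps_at_runtime.
    return (
        "## Common job context\n"
        f"{step_info.common_job_info}\n\n"
        "## Step instructions\n"
        f"{step_info.step_instructions}"
    )
```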
diff --git a/src/deepwork/schemas/job.schema.json b/src/deepwork/schemas/job.schema.json
index 4f2227cc..92055d36 100644
--- a/src/deepwork/schemas/job.schema.json
+++ b/src/deepwork/schemas/job.schema.json
@@ -8,6 +8,7 @@
"name",
"version",
"summary",
+ "common_job_info_provided_to_all_steps_at_runtime",
"steps"
],
"additionalProperties": false,
@@ -28,10 +29,10 @@
"maxLength": 200,
"description": "Brief one-line summary of what this job accomplishes. Used in skill descriptions."
},
- "description": {
+ "common_job_info_provided_to_all_steps_at_runtime": {
"type": "string",
"minLength": 1,
- "description": "Detailed multi-line description of the job's purpose, process, and goals"
+ "description": "Common context and information provided to all steps at runtime. Use this for shared knowledge that every step needs (e.g., project background, key terminology, constraints, or conventions) rather than duplicating it in individual step instructions."
},
"workflows": {
"type": "array",
@@ -40,13 +41,6 @@
"$ref": "#/$defs/workflow"
}
},
- "changelog": {
- "type": "array",
- "description": "Version history documenting changes to the job definition",
- "items": {
- "$ref": "#/$defs/changelogEntry"
- }
- },
"steps": {
"type": "array",
"minItems": 1,
@@ -108,26 +102,6 @@
}
]
},
- "changelogEntry": {
- "type": "object",
- "required": [
- "version",
- "changes"
- ],
- "additionalProperties": false,
- "properties": {
- "version": {
- "type": "string",
- "pattern": "^\\d+\\.\\d+\\.\\d+$",
- "description": "Version number for this change"
- },
- "changes": {
- "type": "string",
- "minLength": 1,
- "description": "Description of changes made in this version"
- }
- }
- },
"step": {
"type": "object",
"required": [
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/AGENTS.md b/src/deepwork/standard_jobs/deepwork_jobs/AGENTS.md
index abb9fe9f..46402f3a 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/AGENTS.md
+++ b/src/deepwork/standard_jobs/deepwork_jobs/AGENTS.md
@@ -69,7 +69,6 @@ The github_outreach `final_report` step had `analyze_repos` as a file input but
- Version is tracked in `job.yml`
- Bump patch version (0.0.x) for instruction improvements
- Bump minor version (0.x.0) for new features or structural changes
-- Always update changelog when bumping version
## Last Updated
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/job.yml b/src/deepwork/standard_jobs/deepwork_jobs/job.yml
index ec8b9e61..1c7062c8 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/job.yml
+++ b/src/deepwork/standard_jobs/deepwork_jobs/job.yml
@@ -2,7 +2,7 @@
name: deepwork_jobs
version: "1.4.0"
summary: "Creates and manages multi-step AI workflows. Use when defining, implementing, testing, or improving DeepWork jobs."
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
Core commands for managing DeepWork jobs. These commands help you define new multi-step
workflows, test them on real use cases, and learn from running them.
@@ -37,34 +37,6 @@ workflows:
steps:
- learn
-changelog:
- - version: "1.4.0"
- changes: "Fixed implement step review criteria that caused severe friction: 'Ask Structured Questions' now auto-passes for steps without user inputs; 'Output Examples' renamed to 'Output Format Examples' and accepts template placeholders; added detailed review guidance to prevent misapplication of criteria"
- - version: "1.3.0"
- changes: "Migrated quality_criteria to reviews system with run_each targeting and map-format criteria"
- - version: "1.2.1"
- changes: "Removed deprecated exposed field from learn step; added learn workflow to make step accessible via MCP"
- - version: "1.2.0"
- changes: "Added repair workflow with fix_settings, fix_jobs, and errata steps for migrating old DeepWork configurations to current format"
- - version: "1.1.0"
- changes: "Added test and iterate steps to new_job workflow; test runs the workflow on a real use case and gathers feedback; iterate improves the job definition based on what happened"
- - version: "1.0.1"
- changes: "Removed review_job_spec step from new_job workflow; implement now follows directly from define"
- - version: "1.0.0"
- changes: "Added workflows section to distinguish new_job workflow (define→review_job_spec→implement) from standalone learn skill"
- - version: "0.1.0"
- changes: "Initial version"
- - version: "0.2.0"
- changes: "Replaced refine command with learn command for conversation-driven improvement"
- - version: "0.3.0"
- changes: "Added make_new_job.sh script and templates directory; updated instructions to reference templates instead of inline examples"
- - version: "0.4.0"
- changes: "Removed implementation_summary and learning_summary outputs; simplified step outputs"
- - version: "0.5.0"
- changes: "Standardized on 'ask structured questions' phrasing for user input; Updated quality criteria hooks to verify phrase usage; Added guidance in implement.md to use phrase in generated instructions"
- - version: "0.9.0"
- changes: "Improved skill descriptions with third-person voice and 'Use when...' triggers for better discoverability"
-
steps:
- id: define
name: "Define Job Specification"
@@ -112,6 +84,7 @@ steps:
"Quality Criteria": "Does the instruction file define quality criteria for its outputs?"
"Ask Structured Questions": "If this step gathers user input, do instructions explicitly use the phrase 'ask structured questions'? If the step has no user inputs, this criterion passes automatically."
"Prompt Engineering": "Does the instruction file follow Anthropic's best practices for prompt engineering?"
+ "No Redundant Info": "Does the instruction file avoid duplicating information that belongs in the job.yml's common_job_info_provided_to_all_steps_at_runtime section? Shared context (project background, terminology, conventions) should be in common_job_info, not repeated in each step."
- id: test
name: "Test the New Workflow"
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md b/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md
index 361c9f0f..d870f1f8 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md
+++ b/src/deepwork/standard_jobs/deepwork_jobs/steps/define.md
@@ -290,8 +290,7 @@ Only after you have complete understanding, create the job directory and `job.ym
- No circular dependencies
- At least one output per step
- The `summary` should be concise (max 200 chars)
-- The `description` should provide rich context for future refinement
-- Include a `changelog` section with an initial entry for version 1.0.0
+- The `common_job_info_provided_to_all_steps_at_runtime` should provide rich context shared across all steps
## Example Dialog
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md b/src/deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md
index 64984b1c..213588f4 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md
+++ b/src/deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md
@@ -33,7 +33,9 @@ For each job directory, you'll need to check and potentially fix the `job.yml` f
```
Be concise. Output minimal text — only report changes made or confirm no changes needed. Do not echo back file contents, do not explain what each migration rule means, and do not narrate your process.
-Audit and repair the job at `.deepwork/jobs/[job_name]/job.yml`:
+First, read the job.yml JSON Schema at `.deepwork/schemas/job.schema.json` to understand the current valid structure. Use it as the source of truth.
+
+Then audit and repair the job at `.deepwork/jobs/[job_name]/job.yml`:
1. Remove any `exposed: true` fields from steps
2. Migrate `stop_hooks` to `hooks.after_agent` format
3. Remove references to deleted steps (like `review_job_spec`)
@@ -41,8 +43,12 @@ Audit and repair the job at `.deepwork/jobs/[job_name]/job.yml`:
5. Migrate `outputs` from array format to map format with `type` and `description`
6. Update any `file` inputs that reference renamed output keys
7. Migrate `quality_criteria` arrays to `reviews` format (run_each + map criteria)
-8. Bump version and add changelog entry if changes were made
-9. Validate YAML syntax
+8. Remove any `changelog` section (no longer in schema)
+9. Replace `description:` with `common_job_info_provided_to_all_steps_at_runtime:` if present
+10. Remove any information from `common_job_info_provided_to_all_steps_at_runtime` that is not relevant to most steps
+11. Read the step instructions; move any content that is repeated across many steps into `common_job_info_provided_to_all_steps_at_runtime`
+12. Bump version if changes were made
+13. Validate YAML syntax
Report only: which checks passed with no changes, and which changes were made (one line each).
```
@@ -253,11 +259,6 @@ If you made significant changes to a job, bump its version number:
```yaml
# Bump patch version for minor fixes
version: "1.0.0" -> version: "1.0.1"
-
-# Add changelog entry
-changelog:
- - version: "1.0.1"
- changes: "Migrated to current DeepWork format; removed deprecated fields"
```
## Common Issues and Fixes
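As an aside, migration rules 8 and 9 from the prompt above reduce to a small mechanical transform. A sketch in Python, illustrative only (the repair step itself is an agent instruction, not a script, and the job path is hypothetical):

```python
from pathlib import Path

import yaml

# Illustrative migration of rules 8-9: drop `changelog`, rename `description`.
path = Path(".deepwork/jobs/my_job/job.yml")  # hypothetical job path
data = yaml.safe_load(path.read_text())

data.pop("changelog", None)
if "description" in data:
    data["common_job_info_provided_to_all_steps_at_runtime"] = data.pop("description")

path.write_text(yaml.safe_dump(data, sort_keys=False))
```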
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/steps/iterate.md b/src/deepwork/standard_jobs/deepwork_jobs/steps/iterate.md
index 73dcb589..33a80c8a 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/steps/iterate.md
+++ b/src/deepwork/standard_jobs/deepwork_jobs/steps/iterate.md
@@ -112,7 +112,7 @@ Examples:
- If data processing was slow, suggest a different method or tool
- If file generation had issues, recommend a different library or format
-### Step 6: Update Job Version and Changelog
+### Step 6: Update Job Version
After making improvements:
@@ -120,13 +120,6 @@ After making improvements:
- Patch version (x.x.1) for minor instruction tweaks
- Minor version (x.1.0) for quality criteria changes or significant improvements
-2. **Add a changelog entry** describing what was improved:
- ```yaml
- changelog:
- - version: "1.0.1"
- changes: "Improved [step] instructions based on test run: added examples, clarified criteria, updated tool recommendations"
- ```
-
### Step 7: Provide Recap
Summarize the improvements made:
@@ -158,7 +151,6 @@ Example recap format:
**job.yml:**
- Updated version to 1.0.1
-- Added changelog entry
### Expected Impact
Future runs should produce reports with better visual design and clearer summaries, reducing the need for post-generation corrections.
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/steps/learn.md b/src/deepwork/standard_jobs/deepwork_jobs/steps/learn.md
index dc546110..0d9bc406 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/steps/learn.md
+++ b/src/deepwork/standard_jobs/deepwork_jobs/steps/learn.md
@@ -100,10 +100,6 @@ For each generalizable learning:
- Add to appropriate sections rather than restructuring
- Maintain consistency with other steps
-5. **Track changes for changelog**
- - Note what was changed and why
- - Prepare changelog entry for job.yml
-
### Step 4b: Extract Shared Content into Referenced Files
Review all instruction files for the job and identify content that:
@@ -152,7 +148,7 @@ The AGENTS.md file captures project-specific knowledge that helps future agent r
- Use line numbers when referencing specific code: `file.ext:42`
- Group related learnings together
-### Step 6: Update Job Version and Changelog
+### Step 6: Update Job Version
If instruction files were modified:
@@ -160,12 +156,6 @@ If instruction files were modified:
- Patch version (0.0.x) for instruction improvements
- Minor version (0.x.0) if quality criteria changed
-2. **Add changelog entry**
- ```yaml
- - version: "[new_version]"
- changes: "Improved [step] instructions based on execution learnings: [brief description]"
- ```
-
## File Reference Patterns
When adding entries to AGENTS.md, prefer these patterns:
@@ -235,7 +225,7 @@ I found the following job executions:
- From conversation about: Initial competitive analysis run
```
-3. Updated job.yml version to 1.0.1 with changelog entry
+3. Updated job.yml version to 1.0.1
**Summary**
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/templates/job.yml.example b/src/deepwork/standard_jobs/deepwork_jobs/templates/job.yml.example
index dac1aba8..5ba528ad 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/templates/job.yml.example
+++ b/src/deepwork/standard_jobs/deepwork_jobs/templates/job.yml.example
@@ -5,15 +5,11 @@
name: competitive_research
version: "1.0.0"
summary: "Systematic competitive analysis workflow"
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
A comprehensive workflow for analyzing competitors in your market segment.
Helps product teams understand the competitive landscape through systematic
identification, research, comparison, and positioning recommendations.
-changelog:
- - version: "1.0.0"
- changes: "Initial job creation"
-
workflows:
- name: full_analysis
summary: "Complete competitive research from identification to positioning"
diff --git a/src/deepwork/standard_jobs/deepwork_jobs/templates/job.yml.template b/src/deepwork/standard_jobs/deepwork_jobs/templates/job.yml.template
index f0f87bfb..386d83da 100644
--- a/src/deepwork/standard_jobs/deepwork_jobs/templates/job.yml.template
+++ b/src/deepwork/standard_jobs/deepwork_jobs/templates/job.yml.template
@@ -6,19 +6,14 @@
name: [job_name]
version: "1.0.0"
summary: "[Brief one-line summary of what this job accomplishes - max 200 chars]"
-description: |
- [Detailed multi-line description of the job's purpose, process, and goals.
+common_job_info_provided_to_all_steps_at_runtime: |
+ [Common context and information shared across all steps at runtime.
- This should explain:
+ This should include:
- What problem this workflow solves
- - What the overall process looks like
- - What the end result will be
- - Who the intended users are
- - Any important context about the workflow]
-
-changelog:
- - version: "1.0.0"
- changes: "Initial job creation"
+ - Key terminology and conventions
+ - Constraints or guidelines that apply to every step
+ - Any important shared context about the workflow]
workflows:
- name: [workflow_name]
diff --git a/tests/e2e/test_claude_code_integration.py b/tests/e2e/test_claude_code_integration.py
index a11d6659..76ca429f 100644
--- a/tests/e2e/test_claude_code_integration.py
+++ b/tests/e2e/test_claude_code_integration.py
@@ -205,7 +205,7 @@ def test_get_workflows_returns_jobs(self, project_with_job: Path) -> None:
# Find fruits job and check structure
fruits_job = next(j for j in response.jobs if j.name == "fruits")
- assert fruits_job.description is not None
+ assert fruits_job.summary is not None
# The fruits fixture has a "full" workflow
assert len(fruits_job.workflows) >= 1
diff --git a/tests/fixtures/jobs/complex_job/job.yml b/tests/fixtures/jobs/complex_job/job.yml
index 9fbc86c9..1e4ff1ab 100644
--- a/tests/fixtures/jobs/complex_job/job.yml
+++ b/tests/fixtures/jobs/complex_job/job.yml
@@ -2,24 +2,10 @@
name: competitive_research
version: "0.1.0"
summary: "Systematic competitive analysis workflow"
-description: |
- A comprehensive workflow for analyzing competitors in your market segment. This job
- helps product teams understand the competitive landscape by systematically identifying
- competitors, researching their offerings, creating comparison matrices, and developing
- strategic positioning recommendations.
-
- The workflow produces:
- - A vetted list of key competitors
- - Detailed research notes on each competitor (primary and secondary sources)
- - A comparison matrix highlighting key differentiators
- - Strategic positioning recommendations
-
+common_job_info_provided_to_all_steps_at_runtime: |
+ A comprehensive workflow for analyzing competitors in your market segment.
Designed for product teams conducting quarterly competitive analysis.
-changelog:
- - version: "0.1.0"
- changes: "Initial version"
-
steps:
- id: identify_competitors
name: "Identify Competitors"
diff --git a/tests/fixtures/jobs/concurrent_steps_job/job.yml b/tests/fixtures/jobs/concurrent_steps_job/job.yml
index 21707dec..ccf4d2a4 100644
--- a/tests/fixtures/jobs/concurrent_steps_job/job.yml
+++ b/tests/fixtures/jobs/concurrent_steps_job/job.yml
@@ -2,7 +2,7 @@
name: concurrent_workflow
version: "1.0.0"
summary: "Workflow with concurrent steps for testing"
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
A test workflow that demonstrates concurrent step execution.
Some steps run sequentially while others run in parallel.
diff --git a/tests/fixtures/jobs/exposed_step_job/job.yml b/tests/fixtures/jobs/exposed_step_job/job.yml
index 3e59a980..8152163a 100644
--- a/tests/fixtures/jobs/exposed_step_job/job.yml
+++ b/tests/fixtures/jobs/exposed_step_job/job.yml
@@ -2,15 +2,10 @@
name: exposed_job
version: "0.1.0"
summary: "A job with exposed and hidden steps for testing"
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
A test job demonstrating the exposed step feature.
-
This job has two steps: one hidden by default and one explicitly exposed.
-changelog:
- - version: "0.1.0"
- changes: "Initial version"
-
steps:
- id: hidden_step
name: "Hidden Step"
diff --git a/tests/fixtures/jobs/fruits/job.yml b/tests/fixtures/jobs/fruits/job.yml
index 012fd9df..72ce9c2c 100644
--- a/tests/fixtures/jobs/fruits/job.yml
+++ b/tests/fixtures/jobs/fruits/job.yml
@@ -2,19 +2,10 @@
name: fruits
version: "1.0.0"
summary: "Identify and classify fruits from a mixed list of items"
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
A simple, deterministic job for CI testing of the DeepWork framework.
-
- This job takes a list of mixed items (fruits and non-fruits) and:
- 1. Identifies which items are fruits
- 2. Classifies those fruits into categories (citrus, berries, tropical, etc.)
-
- This workflow is designed to produce predictable, verifiable outputs
- making it ideal for automated testing.
-
-changelog:
- - version: "1.0.0"
- changes: "Initial version for CI testing"
+ This job takes a list of mixed items (fruits and non-fruits) and identifies
+ and classifies them. Designed to produce predictable, verifiable outputs.
workflows:
- name: full
diff --git a/tests/fixtures/jobs/invalid_job/job.yml b/tests/fixtures/jobs/invalid_job/job.yml
index 192cb942..67f53f48 100644
--- a/tests/fixtures/jobs/invalid_job/job.yml
+++ b/tests/fixtures/jobs/invalid_job/job.yml
@@ -1,7 +1,7 @@
# This is an invalid job definition for testing validation
name: invalid-job-name-with-dashes
version: not-semver
-# missing description
+# missing common_job_info_provided_to_all_steps_at_runtime
steps:
- id: step_with_invalid_dependency
name: "Invalid Step"
diff --git a/tests/fixtures/jobs/job_with_doc_spec/job.yml b/tests/fixtures/jobs/job_with_doc_spec/job.yml
index 5eb56f1e..1c145c07 100644
--- a/tests/fixtures/jobs/job_with_doc_spec/job.yml
+++ b/tests/fixtures/jobs/job_with_doc_spec/job.yml
@@ -2,8 +2,8 @@
name: job_with_doc_spec
version: "1.0.0"
summary: "Job with typed output for testing"
-description: |
- A test job that produces a report document.
+common_job_info_provided_to_all_steps_at_runtime: |
+ Reports should always include citations.
steps:
- id: generate_report
diff --git a/tests/fixtures/jobs/simple_job/job.yml b/tests/fixtures/jobs/simple_job/job.yml
index ca5a9c27..cc5ccd95 100644
--- a/tests/fixtures/jobs/simple_job/job.yml
+++ b/tests/fixtures/jobs/simple_job/job.yml
@@ -2,15 +2,9 @@
name: simple_job
version: "0.1.0"
summary: "A simple single-step job for testing"
-description: |
+common_job_info_provided_to_all_steps_at_runtime: |
A simple single-step job for testing the DeepWork framework.
-
- This job demonstrates the minimal structure required for a valid job definition,
- including a single step with user input and file output.
-
-changelog:
- - version: "0.1.0"
- changes: "Initial version"
+ This job demonstrates the minimal structure required for a valid job definition.
steps:
- id: single_step
diff --git a/tests/unit/mcp/test_schemas.py b/tests/unit/mcp/test_schemas.py
index 9b6389dc..a690aaf8 100644
--- a/tests/unit/mcp/test_schemas.py
+++ b/tests/unit/mcp/test_schemas.py
@@ -106,7 +106,6 @@ def test_basic_job(self) -> None:
assert job.name == "test_job"
assert job.summary == "A test job"
- assert job.description is None
assert job.workflows == []
@@ -313,6 +312,7 @@ def test_basic_step_info(self) -> None:
)
],
step_instructions="Do something",
+ common_job_info="Test job info",
)
assert step_info.session_id == "abc123"
@@ -326,6 +326,7 @@ def test_basic_step_info(self) -> None:
assert len(step_info.step_reviews) == 1
assert step_info.step_reviews[0].run_each == "step"
assert step_info.step_instructions == "Do something"
+ assert step_info.common_job_info == "Test job info"
def test_default_reviews(self) -> None:
"""Test default empty reviews."""
@@ -344,6 +345,7 @@ def test_default_reviews(self) -> None:
)
],
step_instructions="Do something",
+ common_job_info="Test job info",
)
assert step_info.step_reviews == []
@@ -370,6 +372,7 @@ def test_basic_response(self) -> None:
)
],
step_instructions="Do something",
+ common_job_info="Test job info",
)
)
@@ -423,6 +426,7 @@ def test_next_step_status(self) -> None:
)
],
step_instructions="Next step instructions",
+ common_job_info="Test job info",
),
)
diff --git a/tests/unit/mcp/test_tools.py b/tests/unit/mcp/test_tools.py
index ca275805..fe4b3425 100644
--- a/tests/unit/mcp/test_tools.py
+++ b/tests/unit/mcp/test_tools.py
@@ -47,7 +47,7 @@ def project_root(tmp_path: Path) -> Path:
name: test_job
version: "1.0.0"
summary: A test job
-description: This is a test job for unit tests
+common_job_info_provided_to_all_steps_at_runtime: This is a test job for unit tests
steps:
- id: step1
@@ -215,7 +215,7 @@ async def test_start_workflow_invalid_workflow_multiple(
name: multi_wf_job
version: "1.0.0"
summary: A job with multiple workflows
-description: Test job with multiple workflows
+common_job_info_provided_to_all_steps_at_runtime: Test job with multiple workflows
steps:
- id: step_a
@@ -493,7 +493,7 @@ async def test_finished_step_allows_omitting_optional_outputs(
name: optional_job
version: "1.0.0"
summary: Job with optional output
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: produce
@@ -558,7 +558,7 @@ async def test_finished_step_rejects_missing_required_but_not_optional(
name: mixed_job
version: "1.0.0"
summary: Job with mixed required/optional outputs
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: produce
@@ -618,7 +618,7 @@ async def test_finished_step_accepts_optional_outputs_when_provided(
name: optional_provided_job
version: "1.0.0"
summary: Job with optional output that gets provided
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: produce
@@ -680,7 +680,7 @@ async def test_expected_outputs_include_required_field(
name: req_field_job
version: "1.0.0"
summary: Job to test required field in expected outputs
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: produce
@@ -774,7 +774,7 @@ async def test_finished_step_empty_outputs_for_step_with_no_outputs(
name: no_output_job
version: "1.0.0"
summary: Job with no-output step
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: cleanup
@@ -823,7 +823,7 @@ async def test_finished_step_validates_files_type_output(
name: files_job
version: "1.0.0"
summary: Job with files output
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: generate
@@ -875,7 +875,7 @@ async def test_finished_step_validates_files_type_existence(
name: files_job2
version: "1.0.0"
summary: Job with files output
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: generate
@@ -931,7 +931,7 @@ async def test_finished_step_files_type_success(
name: files_job3
version: "1.0.0"
summary: Job with files output
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: generate
@@ -992,7 +992,7 @@ async def test_quality_reviewer_receives_only_current_step_outputs(
name: chain_job
version: "1.0.0"
summary: Three-step chain to test input filtering
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: step1
@@ -1111,7 +1111,7 @@ async def test_additional_review_guidance_reaches_reviewer(
name: guided_job
version: "1.0.0"
summary: Job with review guidance
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: write
@@ -1176,7 +1176,7 @@ async def test_review_guidance_in_start_workflow_response(
name: guided_job2
version: "1.0.0"
summary: Job with review guidance
-description: Test job
+common_job_info_provided_to_all_steps_at_runtime: Test job
steps:
- id: analyze
@@ -1245,7 +1245,7 @@ def project_root(self, tmp_path: Path) -> Path:
name: job_a
version: "1.0.0"
summary: Job A
-description: Test job A
+common_job_info_provided_to_all_steps_at_runtime: Test job A
steps:
- id: a_step1
@@ -1290,7 +1290,7 @@ def project_root(self, tmp_path: Path) -> Path:
name: job_b
version: "1.0.0"
summary: Job B
-description: Test job B
+common_job_info_provided_to_all_steps_at_runtime: Test job B
steps:
- id: b_step1
diff --git a/tests/unit/test_jobs.py b/tests/unit/test_jobs.py
index 55532c18..de60cb4e 100644
--- a/tests/unit/test_jobs.py
+++ b/tests/unit/test_jobs.py
@@ -24,7 +24,7 @@ def _create_minimal_job(parent: Path, job_name: str) -> Path:
name: {job_name}
version: "1.0.0"
summary: Test job {job_name}
-description: A test job
+common_job_info_provided_to_all_steps_at_runtime: A test job
steps:
- id: step1
@@ -92,9 +92,10 @@ def test_loads_from_project_jobs(self, tmp_path: Path, monkeypatch: pytest.Monke
"deepwork.core.jobs.get_job_folders",
lambda pr: [pr / ".deepwork" / "jobs"],
)
- jobs = load_all_jobs(tmp_path)
+ jobs, errors = load_all_jobs(tmp_path)
assert len(jobs) == 1
assert jobs[0].name == "my_job"
+ assert len(errors) == 0
def test_loads_from_multiple_folders(
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
@@ -107,9 +108,10 @@ def test_loads_from_multiple_folders(
"deepwork.core.jobs.get_job_folders",
lambda pr: [folder_a, folder_b],
)
- jobs = load_all_jobs(tmp_path)
+ jobs, errors = load_all_jobs(tmp_path)
names = {j.name for j in jobs}
assert names == {"job_a", "job_b"}
+ assert len(errors) == 0
def test_first_folder_wins_for_duplicate_name(
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
@@ -128,7 +130,7 @@ def test_first_folder_wins_for_duplicate_name(
"deepwork.core.jobs.get_job_folders",
lambda pr: [folder_a, folder_b],
)
- jobs = load_all_jobs(tmp_path)
+ jobs, errors = load_all_jobs(tmp_path)
assert len(jobs) == 1
assert jobs[0].summary == "Test job same_name"
@@ -139,8 +141,9 @@ def test_skips_nonexistent_folders(
"deepwork.core.jobs.get_job_folders",
lambda pr: [tmp_path / "does_not_exist"],
)
- jobs = load_all_jobs(tmp_path)
+ jobs, errors = load_all_jobs(tmp_path)
assert len(jobs) == 0
+ assert len(errors) == 0
def test_skips_invalid_jobs(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
folder = tmp_path / "jobs"
@@ -151,8 +154,12 @@ def test_skips_invalid_jobs(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatc
"deepwork.core.jobs.get_job_folders",
lambda pr: [folder],
)
- jobs = load_all_jobs(tmp_path)
+ jobs, errors = load_all_jobs(tmp_path)
assert len(jobs) == 0
+ assert len(errors) == 1
+ assert errors[0].job_name == "bad_job"
+ assert errors[0].job_dir == str(bad_job)
+ assert errors[0].error # non-empty error message
class TestFindJobDir:
diff --git a/tests/unit/test_parser.py b/tests/unit/test_parser.py
index aba569c6..c0e5d41c 100644
--- a/tests/unit/test_parser.py
+++ b/tests/unit/test_parser.py
@@ -329,7 +329,7 @@ def test_validate_dependencies_missing_step(self) -> None:
name="test_job",
version="1.0.0",
summary="Test job",
- description="Test",
+ common_job_info_provided_to_all_steps_at_runtime="Test",
steps=[
Step(
id="step1",
@@ -356,7 +356,7 @@ def test_validate_dependencies_circular(self) -> None:
name="test_job",
version="1.0.0",
summary="Test job",
- description="Test",
+ common_job_info_provided_to_all_steps_at_runtime="Test",
steps=[
Step(
id="step1",
@@ -403,7 +403,7 @@ def test_validate_file_inputs_missing_step(self) -> None:
name="test_job",
version="1.0.0",
summary="Test job",
- description="Test",
+ common_job_info_provided_to_all_steps_at_runtime="Test",
steps=[
Step(
id="step1",
@@ -431,7 +431,7 @@ def test_validate_reviews_valid(self) -> None:
name="test_job",
version="1.0.0",
summary="Test job",
- description="Test",
+ common_job_info_provided_to_all_steps_at_runtime="Test",
steps=[
Step(
id="step1",
@@ -461,7 +461,7 @@ def test_validate_reviews_invalid_run_each(self) -> None:
name="test_job",
version="1.0.0",
summary="Test job",
- description="Test",
+ common_job_info_provided_to_all_steps_at_runtime="Test",
steps=[
Step(
id="step1",
@@ -493,7 +493,7 @@ def test_validate_file_inputs_not_in_dependencies(self) -> None:
name="test_job",
version="1.0.0",
summary="Test job",
- description="Test",
+ common_job_info_provided_to_all_steps_at_runtime="Test",
steps=[
Step(
id="step1",
@@ -538,7 +538,7 @@ def test_parses_simple_job(self, fixtures_dir: Path) -> None:
assert job.name == "simple_job"
assert job.summary == "A simple single-step job for testing"
- assert "DeepWork framework" in job.description # Multi-line description
+ assert "DeepWork framework" in job.common_job_info_provided_to_all_steps_at_runtime
assert len(job.steps) == 1
assert job.steps[0].id == "single_step"
assert job.job_dir == job_dir
diff --git a/tests/unit/test_validation.py b/tests/unit/test_validation.py
index 811f4582..eb4bb9af 100644
--- a/tests/unit/test_validation.py
+++ b/tests/unit/test_validation.py
@@ -15,7 +15,7 @@ def test_validates_simple_job(self) -> None:
"name": "simple_job",
"version": "1.0.0",
"summary": "A simple job for testing",
- "description": "A simple job",
+ "common_job_info_provided_to_all_steps_at_runtime": "A simple job",
"steps": [
{
"id": "step1",
@@ -40,7 +40,7 @@ def test_validates_job_with_user_inputs(self) -> None:
"name": "job_with_inputs",
"version": "1.0.0",
"summary": "Job with user inputs",
- "description": "Job with inputs",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job with inputs",
"steps": [
{
"id": "step1",
@@ -68,7 +68,7 @@ def test_validates_job_with_file_inputs(self) -> None:
"name": "job_with_deps",
"version": "1.0.0",
"summary": "Job with dependencies",
- "description": "Job with dependencies",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job with dependencies",
"steps": [
{
"id": "step1",
@@ -108,7 +108,7 @@ def test_raises_for_missing_required_field(self) -> None:
"name": "incomplete_job",
"version": "1.0.0",
# Missing summary
- # Missing description
+ # Missing common_job_info_provided_to_all_steps_at_runtime
"steps": [],
}
@@ -121,7 +121,7 @@ def test_raises_for_invalid_job_name(self) -> None:
"name": "Invalid-Job-Name", # Dashes not allowed
"version": "1.0.0",
"summary": "Invalid name test",
- "description": "Invalid name",
+ "common_job_info_provided_to_all_steps_at_runtime": "Invalid name",
"steps": [
{
"id": "step1",
@@ -145,7 +145,7 @@ def test_raises_for_invalid_version(self) -> None:
"name": "job",
"version": "1.0", # Not semver
"summary": "Invalid version test",
- "description": "Job",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job",
"steps": [
{
"id": "step1",
@@ -169,7 +169,7 @@ def test_raises_for_empty_steps(self) -> None:
"name": "job",
"version": "1.0.0",
"summary": "Empty steps test",
- "description": "Job with no steps",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job with no steps",
"steps": [],
}
@@ -182,7 +182,7 @@ def test_raises_for_step_missing_outputs(self) -> None:
"name": "job",
"version": "1.0.0",
"summary": "Missing outputs test",
- "description": "Job",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job",
"steps": [
{
"id": "step1",
@@ -203,7 +203,7 @@ def test_raises_for_invalid_input_format(self) -> None:
"name": "job",
"version": "1.0.0",
"summary": "Invalid input format test",
- "description": "Job",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job",
"steps": [
{
"id": "step1",
@@ -243,7 +243,7 @@ def test_raises_for_step_missing_reviews(self) -> None:
"name": "job",
"version": "1.0.0",
"summary": "Missing reviews test",
- "description": "Job",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job",
"steps": [
{
"id": "step1",
@@ -267,7 +267,7 @@ def test_validates_job_with_reviews(self) -> None:
"name": "job_with_reviews",
"version": "1.0.0",
"summary": "Job with reviews",
- "description": "Job",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job",
"steps": [
{
"id": "step1",
@@ -304,7 +304,7 @@ def test_raises_for_review_missing_run_each(self) -> None:
"name": "job",
"version": "1.0.0",
"summary": "Test",
- "description": "Job",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job",
"steps": [
{
"id": "step1",
@@ -333,7 +333,7 @@ def test_raises_for_review_empty_criteria(self) -> None:
"name": "job",
"version": "1.0.0",
"summary": "Test",
- "description": "Job",
+ "common_job_info_provided_to_all_steps_at_runtime": "Job",
"steps": [
{
"id": "step1",