diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml
new file mode 100644
index 00000000..68c78a32
--- /dev/null
+++ b/.github/workflows/validate.yml
@@ -0,0 +1,85 @@
+name: Validate
+
+on:
+ pull_request:
+ branches: ["*"]
+
+permissions:
+ contents: write
+
+jobs:
+ format:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ ref: ${{ github.head_ref }}
+ token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+ with:
+ version: "latest"
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install dependencies
+ run: uv sync --extra dev
+
+ - name: Run ruff formatting
+ run: uv run ruff format src/ tests/
+
+ - name: Run ruff linting with auto-fix
+ run: uv run ruff check --fix src/ tests/
+
+ - name: Re-run formatting after auto-fix
+ run: uv run ruff format src/ tests/
+
+ - name: Verify all issues are fixed
+ run: |
+ uv run ruff check src/ tests/
+ uv run ruff format --check src/ tests/
+
+ - name: Check for changes
+ id: check_changes
+ run: |
+ if [[ -n "$(git status --porcelain)" ]]; then
+ echo "has_changes=true" >> "$GITHUB_OUTPUT"
+ else
+ echo "has_changes=false" >> "$GITHUB_OUTPUT"
+ fi
+
+ - name: Commit and push formatting changes
+ if: steps.check_changes.outputs.has_changes == 'true'
+ run: |
+ git config --local user.email "github-actions[bot]@users.noreply.github.com"
+ git config --local user.name "github-actions[bot]"
+ git add -A
+ git commit -m "style: auto-format code with ruff"
+ git push
+
+ tests:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+ with:
+ version: "latest"
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install dependencies
+ run: uv sync --extra dev
+
+ - name: Run tests
+ run: uv run pytest tests/ -v
diff --git a/src/deepwork/__init__.py b/src/deepwork/__init__.py
index 9e3fc9c1..38f6acdf 100644
--- a/src/deepwork/__init__.py
+++ b/src/deepwork/__init__.py
@@ -8,6 +8,7 @@
"__author__",
]
+
# Lazy imports to avoid circular dependencies and missing modules during development
def __getattr__(name: str) -> object:
"""Lazy import for core modules."""
@@ -19,5 +20,6 @@ def __getattr__(name: str) -> object:
StepInput,
parse_job_definition,
)
+
return locals()[name]
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
diff --git a/src/deepwork/cli/install.py b/src/deepwork/cli/install.py
index d710bf8a..030f5668 100644
--- a/src/deepwork/cli/install.py
+++ b/src/deepwork/cli/install.py
@@ -20,9 +20,7 @@ class InstallError(Exception):
pass
-def _inject_standard_job(
- job_name: str, jobs_dir: Path, project_path: Path
-) -> None:
+def _inject_standard_job(job_name: str, jobs_dir: Path, project_path: Path) -> None:
"""
Inject a standard job definition into the project.
@@ -54,8 +52,7 @@ def _inject_standard_job(
shutil.copytree(standard_jobs_dir, target_dir)
console.print(
- f" [green]✓[/green] Installed {job_name} "
- f"({target_dir.relative_to(project_path)})"
+ f" [green]✓[/green] Installed {job_name} ({target_dir.relative_to(project_path)})"
)
except Exception as e:
raise InstallError(f"Failed to install {job_name}: {e}") from e
@@ -250,9 +247,7 @@ def _install_deepwork(platform_name: str | None, project_path: Path) -> None:
# Add platform if not already present
if platform_to_add not in config_data["platforms"]:
config_data["platforms"].append(platform_to_add)
- console.print(
- f" [green]✓[/green] Added {platform_config.display_name} to platforms"
- )
+ console.print(f" [green]✓[/green] Added {platform_config.display_name} to platforms")
else:
console.print(f" [dim]•[/dim] {platform_config.display_name} already configured")
diff --git a/src/deepwork/cli/sync.py b/src/deepwork/cli/sync.py
index 126b9480..c895819e 100644
--- a/src/deepwork/cli/sync.py
+++ b/src/deepwork/cli/sync.py
@@ -137,9 +137,7 @@ def sync_commands(project_path: Path) -> None:
if job_hooks_list:
console.print(" [dim]•[/dim] Syncing hooks...")
try:
- hooks_count = sync_hooks_to_platform(
- project_path, platform_config, job_hooks_list
- )
+ hooks_count = sync_hooks_to_platform(project_path, platform_config, job_hooks_list)
stats["hooks"] += hooks_count
if hooks_count > 0:
console.print(f" [green]✓[/green] Synced {hooks_count} hook(s)")
diff --git a/src/deepwork/core/generator.py b/src/deepwork/core/generator.py
index 16a45e27..616dad78 100644
--- a/src/deepwork/core/generator.py
+++ b/src/deepwork/core/generator.py
@@ -100,9 +100,7 @@ def _build_step_context(
instructions_file = job.job_dir / step.instructions_file
instructions_content = safe_read(instructions_file)
if instructions_content is None:
- raise GeneratorError(
- f"Step instructions file not found: {instructions_file}"
- )
+ raise GeneratorError(f"Step instructions file not found: {instructions_file}")
# Separate user inputs and file inputs
user_inputs = [
@@ -142,9 +140,7 @@ def _build_step_context(
prompt_file_path = job.job_dir / hook.prompt_file
prompt_content = safe_read(prompt_file_path)
if prompt_content is None:
- raise GeneratorError(
- f"Stop hook prompt file not found: {prompt_file_path}"
- )
+ raise GeneratorError(f"Stop hook prompt file not found: {prompt_file_path}")
hook_ctx["content"] = prompt_content
elif hook.is_script():
hook_ctx["type"] = "script"
@@ -203,9 +199,7 @@ def generate_step_command(
# Find step index
try:
- step_index = next(
- i for i, s in enumerate(job.steps) if s.id == step.id
- )
+ step_index = next(i for i, s in enumerate(job.steps) if s.id == step.id)
except StopIteration as e:
raise GeneratorError(f"Step '{step.id}' not found in job '{job.name}'") from e
@@ -262,4 +256,3 @@ def generate_all_commands(
command_paths.append(command_path)
return command_paths
-
diff --git a/src/deepwork/core/hooks_syncer.py b/src/deepwork/core/hooks_syncer.py
index e11bd366..13e01954 100644
--- a/src/deepwork/core/hooks_syncer.py
+++ b/src/deepwork/core/hooks_syncer.py
@@ -8,7 +8,6 @@
import yaml
from deepwork.core.detector import PlatformConfig
-from deepwork.utils.yaml_utils import load_yaml
class HooksSyncError(Exception):
diff --git a/src/deepwork/core/parser.py b/src/deepwork/core/parser.py
index 640ec92b..babf16fc 100644
--- a/src/deepwork/core/parser.py
+++ b/src/deepwork/core/parser.py
@@ -155,9 +155,7 @@ def validate_dependencies(self) -> None:
for step in self.steps:
for dep_id in step.dependencies:
if dep_id not in step_ids:
- raise ParseError(
- f"Step '{step.id}' depends on non-existent step '{dep_id}'"
- )
+ raise ParseError(f"Step '{step.id}' depends on non-existent step '{dep_id}'")
# Check for circular dependencies using topological sort
visited = set()
diff --git a/src/deepwork/hooks/evaluate_policies.py b/src/deepwork/hooks/evaluate_policies.py
index e5af4950..341b6d16 100644
--- a/src/deepwork/hooks/evaluate_policies.py
+++ b/src/deepwork/hooks/evaluate_policies.py
@@ -63,7 +63,7 @@ def format_policy_message(policies: list) -> str:
lines = ["## DeepWork Policies Triggered", ""]
lines.append(
"Comply with the following policies. "
- "To mark a policy as addressed, include `addressed` "
+ "To mark a policy as addressed, include `addressed` "
"in your response."
)
lines.append("")
diff --git a/src/deepwork/utils/fs.py b/src/deepwork/utils/fs.py
index d775f0cc..f650516a 100644
--- a/src/deepwork/utils/fs.py
+++ b/src/deepwork/utils/fs.py
@@ -54,9 +54,7 @@ def safe_read(path: Path | str) -> str | None:
return path_obj.read_text(encoding="utf-8")
-def copy_dir(
- src: Path | str, dst: Path | str, ignore_patterns: list[str] | None = None
-) -> None:
+def copy_dir(src: Path | str, dst: Path | str, ignore_patterns: list[str] | None = None) -> None:
"""
Recursively copy directory, optionally ignoring patterns.
diff --git a/tests/integration/test_full_workflow.py b/tests/integration/test_full_workflow.py
index 17c830ff..7f2a5708 100644
--- a/tests/integration/test_full_workflow.py
+++ b/tests/integration/test_full_workflow.py
@@ -10,9 +10,7 @@
class TestJobWorkflow:
"""Integration tests for complete job workflow."""
- def test_parse_and_generate_workflow(
- self, fixtures_dir: Path, temp_dir: Path
- ) -> None:
+ def test_parse_and_generate_workflow(self, fixtures_dir: Path, temp_dir: Path) -> None:
"""Test complete workflow: parse job → generate commands."""
# Step 1: Parse job definition
job_dir = fixtures_dir / "jobs" / "complex_job"
@@ -40,7 +38,7 @@ def test_parse_and_generate_workflow(
assert f"# {job.name}.{job.steps[i].id}" in content
# Check step numbers
- assert f"Step {i+1} of 4" in content
+ assert f"Step {i + 1} of 4" in content
def test_simple_job_workflow(self, fixtures_dir: Path, temp_dir: Path) -> None:
"""Test workflow with simple single-step job."""
@@ -68,9 +66,7 @@ def test_simple_job_workflow(self, fixtures_dir: Path, temp_dir: Path) -> None:
assert "input_param" in content
assert "Command Complete" in content # Standalone completion message
- def test_command_generation_with_dependencies(
- self, fixtures_dir: Path, temp_dir: Path
- ) -> None:
+ def test_command_generation_with_dependencies(self, fixtures_dir: Path, temp_dir: Path) -> None:
"""Test that generated commands properly handle dependencies."""
job_dir = fixtures_dir / "jobs" / "complex_job"
job = parse_job_definition(job_dir)
@@ -99,9 +95,7 @@ def test_command_generation_with_dependencies(
assert "## Workflow Complete" in step4_content
assert "## Next Step" not in step4_content
- def test_command_generation_with_file_inputs(
- self, fixtures_dir: Path, temp_dir: Path
- ) -> None:
+ def test_command_generation_with_file_inputs(self, fixtures_dir: Path, temp_dir: Path) -> None:
"""Test that generated commands properly handle file inputs."""
job_dir = fixtures_dir / "jobs" / "complex_job"
job = parse_job_definition(job_dir)
@@ -125,9 +119,7 @@ def test_command_generation_with_file_inputs(
assert "primary_research.md" in step4_content
assert "secondary_research.md" in step4_content
- def test_command_generation_with_user_inputs(
- self, fixtures_dir: Path, temp_dir: Path
- ) -> None:
+ def test_command_generation_with_user_inputs(self, fixtures_dir: Path, temp_dir: Path) -> None:
"""Test that generated commands properly handle user parameter inputs."""
job_dir = fixtures_dir / "jobs" / "complex_job"
job = parse_job_definition(job_dir)
diff --git a/tests/integration/test_install_flow.py b/tests/integration/test_install_flow.py
index 12096d24..ac992f7b 100644
--- a/tests/integration/test_install_flow.py
+++ b/tests/integration/test_install_flow.py
@@ -16,8 +16,9 @@ def test_install_with_claude(self, mock_claude_project: Path) -> None:
runner = CliRunner()
result = runner.invoke(
- cli, ["install", "--platform", "claude", "--path", str(mock_claude_project)],
- catch_exceptions=False
+ cli,
+ ["install", "--platform", "claude", "--path", str(mock_claude_project)],
+ catch_exceptions=False,
)
assert result.exit_code == 0
@@ -54,8 +55,7 @@ def test_install_with_auto_detect(self, mock_claude_project: Path) -> None:
runner = CliRunner()
result = runner.invoke(
- cli, ["install", "--path", str(mock_claude_project)],
- catch_exceptions=False
+ cli, ["install", "--path", str(mock_claude_project)], catch_exceptions=False
)
assert result.exit_code == 0
@@ -101,9 +101,7 @@ def test_install_fails_with_multiple_platforms(self, temp_dir: Path) -> None:
assert result.exit_code != 0
assert "Multiple AI platforms detected" in result.output
- def test_install_with_specified_platform_when_missing(
- self, mock_git_repo: Path
- ) -> None:
+ def test_install_with_specified_platform_when_missing(self, mock_git_repo: Path) -> None:
"""Test that install fails when specified platform is not present."""
runner = CliRunner()
@@ -121,15 +119,17 @@ def test_install_is_idempotent(self, mock_claude_project: Path) -> None:
# First install
result1 = runner.invoke(
- cli, ["install", "--platform", "claude", "--path", str(mock_claude_project)],
- catch_exceptions=False
+ cli,
+ ["install", "--platform", "claude", "--path", str(mock_claude_project)],
+ catch_exceptions=False,
)
assert result1.exit_code == 0
# Second install
result2 = runner.invoke(
- cli, ["install", "--platform", "claude", "--path", str(mock_claude_project)],
- catch_exceptions=False
+ cli,
+ ["install", "--platform", "claude", "--path", str(mock_claude_project)],
+ catch_exceptions=False,
)
assert result2.exit_code == 0
diff --git a/tests/unit/test_evaluate_policies.py b/tests/unit/test_evaluate_policies.py
index dfda87fd..99b88b23 100644
--- a/tests/unit/test_evaluate_policies.py
+++ b/tests/unit/test_evaluate_policies.py
@@ -1,9 +1,7 @@
"""Tests for the hooks evaluate_policies module."""
-import pytest
-
-from deepwork.hooks.evaluate_policies import extract_promise_tags, format_policy_message
from deepwork.core.policy_parser import Policy
+from deepwork.hooks.evaluate_policies import extract_promise_tags, format_policy_message
class TestExtractPromiseTags:
@@ -45,11 +43,11 @@ def test_returns_empty_set_for_no_promises(self) -> None:
def test_multiline_promise_content(self) -> None:
"""Test promise tag with multiline content."""
- text = '''
+ text = """
I have addressed this by:
1. Updating the docs
2. Running tests
- '''
+ """
result = extract_promise_tags(text)
assert result == {"Complex Policy"}
diff --git a/tests/unit/test_generator.py b/tests/unit/test_generator.py
index 5774f10d..92f98a90 100644
--- a/tests/unit/test_generator.py
+++ b/tests/unit/test_generator.py
@@ -5,7 +5,7 @@
import pytest
from deepwork.core.detector import PLATFORMS
-from deepwork.core.generator import GeneratorError, CommandGenerator
+from deepwork.core.generator import CommandGenerator, GeneratorError
from deepwork.core.parser import parse_job_definition
@@ -35,9 +35,7 @@ def test_init_raises_for_missing_templates_dir(self, temp_dir: Path) -> None:
with pytest.raises(GeneratorError, match="Templates directory not found"):
CommandGenerator(nonexistent)
- def test_generate_step_command_simple_job(
- self, fixtures_dir: Path, temp_dir: Path
- ) -> None:
+ def test_generate_step_command_simple_job(self, fixtures_dir: Path, temp_dir: Path) -> None:
"""Test generating command for simple job step."""
job_dir = fixtures_dir / "jobs" / "simple_job"
job = parse_job_definition(job_dir)
@@ -45,9 +43,7 @@ def test_generate_step_command_simple_job(
generator = CommandGenerator()
platform = PLATFORMS["claude"]
- command_path = generator.generate_step_command(
- job, job.steps[0], platform, temp_dir
- )
+ command_path = generator.generate_step_command(job, job.steps[0], platform, temp_dir)
assert command_path.exists()
assert command_path.name == "simple_job.single_step.md"
@@ -69,9 +65,7 @@ def test_generate_step_command_complex_job_first_step(
generator = CommandGenerator()
platform = PLATFORMS["claude"]
- command_path = generator.generate_step_command(
- job, job.steps[0], platform, temp_dir
- )
+ command_path = generator.generate_step_command(job, job.steps[0], platform, temp_dir)
content = command_path.read_text()
assert "# competitive_research.identify_competitors" in content
@@ -94,9 +88,7 @@ def test_generate_step_command_complex_job_middle_step(
platform = PLATFORMS["claude"]
# Generate primary_research (step 2)
- command_path = generator.generate_step_command(
- job, job.steps[1], platform, temp_dir
- )
+ command_path = generator.generate_step_command(job, job.steps[1], platform, temp_dir)
content = command_path.read_text()
assert "# competitive_research.primary_research" in content
@@ -121,9 +113,7 @@ def test_generate_step_command_complex_job_final_step(
platform = PLATFORMS["claude"]
# Generate comparative_report (step 4)
- command_path = generator.generate_step_command(
- job, job.steps[3], platform, temp_dir
- )
+ command_path = generator.generate_step_command(job, job.steps[3], platform, temp_dir)
content = command_path.read_text()
assert "# competitive_research.comparative_report" in content
@@ -230,9 +220,7 @@ def test_generate_step_command_different_platform(
generator = CommandGenerator()
platform = PLATFORMS["gemini"]
- command_path = generator.generate_step_command(
- job, job.steps[0], platform, temp_dir
- )
+ command_path = generator.generate_step_command(job, job.steps[0], platform, temp_dir)
# Gemini uses same filename format
assert command_path.name == "simple_job.single_step.md"
diff --git a/tests/unit/test_hooks_syncer.py b/tests/unit/test_hooks_syncer.py
index 2750eefb..5a8d4ea0 100644
--- a/tests/unit/test_hooks_syncer.py
+++ b/tests/unit/test_hooks_syncer.py
@@ -3,17 +3,14 @@
import json
from pathlib import Path
-import pytest
-
+from deepwork.core.detector import PlatformConfig
from deepwork.core.hooks_syncer import (
HookEntry,
- HooksSyncError,
JobHooks,
collect_job_hooks,
merge_hooks_for_platform,
sync_hooks_to_claude,
)
-from deepwork.core.detector import PlatformConfig
class TestHookEntry:
@@ -280,9 +277,7 @@ def test_does_not_duplicate_existing_hooks(self, temp_dir: Path) -> None:
# Settings with existing hook
existing_settings = {
"hooks": {
- "Stop": [
- {"matcher": "", "hooks": [{"type": "command", "command": "hook.sh"}]}
- ]
+ "Stop": [{"matcher": "", "hooks": [{"type": "command", "command": "hook.sh"}]}]
},
}
settings_file = claude_dir / "settings.json"
diff --git a/tests/unit/test_policy_parser.py b/tests/unit/test_policy_parser.py
index 8e79cfca..031c506e 100644
--- a/tests/unit/test_policy_parser.py
+++ b/tests/unit/test_policy_parser.py
@@ -210,10 +210,7 @@ def test_multiple_safety_patterns(self) -> None:
# Should not fire if any safety file is changed
assert evaluate_policy(policy, ["src/auth/login.py", "SECURITY.md"]) is False
- assert (
- evaluate_policy(policy, ["src/auth/login.py", "docs/security_review.md"])
- is False
- )
+ assert evaluate_policy(policy, ["src/auth/login.py", "docs/security_review.md"]) is False
# Should fire if no safety files changed
assert evaluate_policy(policy, ["src/auth/login.py"]) is True
diff --git a/tests/unit/test_stop_hooks.py b/tests/unit/test_stop_hooks.py
index bc84b406..cd0fcc77 100644
--- a/tests/unit/test_stop_hooks.py
+++ b/tests/unit/test_stop_hooks.py
@@ -1,7 +1,6 @@
"""Tests for stop hook functionality."""
from pathlib import Path
-from unittest.mock import patch
import pytest
@@ -388,9 +387,7 @@ def test_build_context_with_prompt_hook(
self, generator: CommandGenerator, job_with_hooks: JobDefinition
) -> None:
"""Test context building includes prompt stop hook."""
- context = generator._build_step_context(
- job_with_hooks, job_with_hooks.steps[0], 0
- )
+ context = generator._build_step_context(job_with_hooks, job_with_hooks.steps[0], 0)
assert "stop_hooks" in context
assert len(context["stop_hooks"]) == 1
assert context["stop_hooks"][0]["type"] == "prompt"
@@ -451,9 +448,7 @@ def test_build_context_with_missing_prompt_file(
with pytest.raises(GeneratorError, match="prompt file not found"):
generator._build_step_context(job, job.steps[0], 0)
- def test_build_context_no_hooks(
- self, generator: CommandGenerator, tmp_path: Path
- ) -> None:
+ def test_build_context_no_hooks(self, generator: CommandGenerator, tmp_path: Path) -> None:
"""Test context with no stop hooks."""
job_dir = tmp_path / "test_job"
job_dir.mkdir()