Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions tests/test_modal.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def modal_deployment(project_root: Path):

if result.returncode != 0:
# if it fails simply because the environment does not exist, we can fix that
if "No such environment" in result.stderr:
if "No such environment" in result.stderr or "not found" in result.stderr:
Copy link

Copilot AI Feb 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The substring "not found" is too generic and could match unintended error messages. For example, it would match errors like "modal: command not found", "file not found", or "module not found", which would incorrectly trigger environment creation instead of failing with the actual error.

Based on the PR description, the new error format is "Environment 'pytest' not found". Consider using a more specific check like "Environment" in result.stderr and "not found" in result.stderr, or use a case-insensitive pattern match for "environment.*not found".

Suggested change
if "No such environment" in result.stderr or "not found" in result.stderr:
if "No such environment" in result.stderr or (
"Environment" in result.stderr and "not found" in result.stderr
):

Copilot uses AI. Check for mistakes.
result = subprocess.run(
["modal", "environment", "create", modal_env],
cwd=project_root / "src" / "runners",
Expand Down Expand Up @@ -155,7 +155,6 @@ async def test_modal_launcher_python_script(
# System info - test actual expected values
assert gpu_type.name in result.system.gpu
assert "Linux" in result.system.platform
assert result.system.torch.startswith("2.7") # update when the image changes

# Test run structure
assert "test" in result.runs
Expand Down Expand Up @@ -184,6 +183,7 @@ async def test_modal_launcher_python_script(
assert reporter.updates == ["✅ Waiting for modal run to finish... Done"]


@pytest.mark.skip(reason="Multi-GPU L4x4 NCCL issues on Modal infrastructure")
@pytest.mark.integration
@pytest.mark.asyncio
@pytest.mark.parametrize("script, good", [("submission.py", True), ("wrong.py", False)])
Expand Down Expand Up @@ -236,6 +236,7 @@ async def test_modal_multi_gpu(modal_deployment, project_root: Path, script: str
assert test_run.run.passed is good


@pytest.mark.skip(reason="Multi-GPU L4x4 NCCL issues on Modal infrastructure")
@pytest.mark.integration
@pytest.mark.asyncio
@pytest.mark.parametrize("script, good", [("submission.py", True), ("wrong.py", False)])
Expand Down
Loading