Commit aad30d3

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Dec 17, 2024
1 parent 5bb78e4 commit aad30d3
Showing 6 changed files with 6 additions and 4 deletions.
thunder/tests/distributed/helper.py: 5 changes (1 addition, 4 deletions)
@@ -125,6 +125,7 @@ def forward(self, x):

if torch.distributed.is_available():
from torch.testing._internal import common_distributed, common_utils

# note(crcrpar): How to write a test with `DDP`
# Just add a method to :class:`CompileDDPTest`. The class is responsible for
# - calling `torch.distributed.init_process_group` with NCCL backend
@@ -201,7 +202,6 @@ def _run(cls, rank, test_name, file_name, pipe, *, fake_pg=False):
torch.distributed.destroy_process_group()
sys.exit(0)


# Configures PyTorch's default process group, must be called at the start of each
# distributed process
def init_per_process_distributed(
@@ -225,7 +225,6 @@ def init_per_process_distributed(
# so we want to pass the ProcessGroup explicitly
return torch.distributed.distributed_c10d._get_default_group()


# Wraps a function so that it becomes one process of several executing the test
# See test_native_ddp and its helper _test_native_ddp_helper below for an example
# of how to use this wrapper.
@@ -293,7 +292,6 @@ def error_callback(ex):

return test_fn


# Creates a dataloader for a process
# If sample_seed is specified then the dataloader will load tensors with the same values
# on each process.
@@ -325,7 +323,6 @@ def to_device(tensors: list[torch.Tensor]) -> list[torch.Tensor]:

return dataloader


def run_test_no_sync_grad_accumulation(
test_case: DistributedParallelTestCase,
get_model_and_optimizer: Callable[[torch.device], tuple[torch.nn.Module, torch.optim.Optimizer]],
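The hunks above are formatting-only, but for orientation, here is a minimal sketch of the `init_per_process_distributed` pattern the helper's comments describe: configure PyTorch's default process group once per process, then return an explicit `ProcessGroup` handle. The parameter names and the hard-coded NCCL backend are assumptions for illustration, not the repository's actual signature.

```python
# Minimal sketch of the pattern described in the helper's comments; the
# parameter names and the NCCL backend choice are assumptions, not thunder's code.
import torch
import torch.distributed


def init_per_process_distributed(init_method: str, world_size: int, rank: int):
    # Configure PyTorch's default process group; must be called at the start
    # of each distributed process.
    torch.distributed.init_process_group(
        backend="nccl", init_method=init_method, world_size=world_size, rank=rank
    )
    # Collectives in the tests take an explicit ProcessGroup, so return the
    # default group rather than relying on implicit global state.
    return torch.distributed.distributed_c10d._get_default_group()
```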
thunder/tests/distributed/test_checkpoint.py: 1 change (1 addition, 0 deletions)
@@ -5,6 +5,7 @@

import pytest
import torch

if not torch.distributed.is_available():
pytest.skip(allow_module_level=True)
from torch.distributed import distributed_c10d as c10d
thunder/tests/distributed/test_ddp.py: 1 change (1 addition, 0 deletions)
@@ -6,6 +6,7 @@
import pytest
import torch
import torch.distributed as tdist

if not tdist.is_available():
pytest.skip(allow_module_level=True)
import torch.nn as nn
thunder/tests/distributed/test_fsdp.py: 1 change (1 addition, 0 deletions)
@@ -7,6 +7,7 @@
import pytest
import torch
import torch.distributed as tdist

if not tdist.is_available():
pytest.skip(allow_module_level=True)
import torch.nn as nn
thunder/tests/distributed/test_ops.py: 1 change (1 addition, 0 deletions)
@@ -4,6 +4,7 @@

import pytest
import torch

if not torch.distributed.is_available():
pytest.skip(allow_module_level=True)
from torch.testing import make_tensor
thunder/tests/distributed/test_tensor_parallel.py: 1 change (1 addition, 0 deletions)
@@ -3,6 +3,7 @@
import pytest
import torch
import torch.nn as nn

if not torch.distributed.is_available():
pytest.skip(allow_module_level=True)

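Each of the one-line additions above is the same fix: a blank line between the module imports and the module-level availability guard. A minimal standalone sketch of that guard pattern, assuming nothing beyond what the hunks show (the rest of each test module is omitted):

```python
# Module-level guard used by the distributed test modules: skip collection of
# the whole file when torch.distributed is not available. The blank line
# separating the imports from the guard is what the pre-commit hook inserts.
import pytest
import torch
import torch.distributed as tdist

if not tdist.is_available():
    # allow_module_level=True lets pytest skip the entire module at import time.
    pytest.skip(allow_module_level=True)

# ...the remaining imports and tests run only when the guard passes.
```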
