1 change: 1 addition & 0 deletions .github/scripts/unittest-linux/run_test.sh
@@ -27,6 +27,7 @@ fi
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_unidecode=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_inflect=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_pytorch_lightning=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MULTIGPU_CUDA=true
cd test
pytest torchaudio_unittest -k "not torchscript and not fairseq and not demucs ${PYTEST_K_EXTRA}"
)
2 changes: 2 additions & 0 deletions .github/workflows/unittest-linux-cpu.yml
@@ -92,5 +92,7 @@ jobs:
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX_DECODER=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX_ENCODER=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_FFMPEG=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MULTIGPU_CUDA=true

pytest test/torchaudio_unittest -k "not torchscript and not fairseq and not demucs" -x
echo "::endgroup::"
2 changes: 2 additions & 0 deletions .github/workflows/unittest-linux-gpu.yml
@@ -44,6 +44,8 @@ jobs:
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX_ENCODER=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_FFMPEG=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_demucs=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MULTIGPU_CUDA=true

export CUBLAS_WORKSPACE_CONFIG=:16:8

# Set UPLOAD_CHANNEL
1 change: 1 addition & 0 deletions .github/workflows/unittest-macos-cpu.yml
@@ -89,6 +89,7 @@ jobs:
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_unidecode=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_inflect=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_pytorch_lightning=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MULTIGPU_CUDA=true

pytest test/torchaudio_unittest -k "not torchscript and not fairseq and not demucs and not librosa" -x
echo "::endgroup::"
1 change: 1 addition & 0 deletions .github/workflows/unittest-windows-cpu.yml
@@ -54,6 +54,7 @@ jobs:
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_inflect=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_pytorch_lightning=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MULTIGPU_CUDA=true

.github/scripts/unittest-windows/setup_env.sh
.github/scripts/unittest-windows/install.sh
1 change: 1 addition & 0 deletions .github/workflows/unittest-windows-gpu.yml
@@ -55,6 +55,7 @@ jobs:
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_inflect=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_pytorch_lightning=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece=true
export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MULTIGPU_CUDA=true

.github/scripts/unittest-windows/setup_env.sh
.github/scripts/unittest-windows/install.sh
1 change: 1 addition & 0 deletions src/libtorchaudio/forced_align/gpu/compute.cu
@@ -122,6 +122,7 @@ void forced_align_impl(
const int64_t blank,
Tensor& paths) {
auto device_index = logProbs.get_device_index();
const torch::stable::accelerator::DeviceGuard device_guard(device_index);
auto defaultStream = libtorchaudio::cuda::getCurrentCUDAStream(device_index);
auto cpuDataTranferStream = libtorchaudio::cuda::getStreamFromPool(false, device_index);
const scalar_t kNegInfinity = -std::numeric_limits<scalar_t>::infinity();
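
For context on the new DeviceGuard line: CUDA kernel launches go to whatever device is current for the process, so when logProbs lives on a non-default GPU the guard switches the current device to match before the streams and kernels in this function are used. A rough Python-side illustration of the scenario this covers (not part of the PR; the shapes and values are placeholders, and the forced_align call should be checked against the torchaudio.functional documentation):

import torch
import torchaudio.functional as F

if torch.cuda.is_available() and torch.cuda.device_count() > 1:
    device = torch.device("cuda:1")  # a non-default GPU
    # Batch of 1, 100 frames, 32 classes; illustrative values only.
    log_probs = torch.randn(1, 100, 32, device=device).log_softmax(dim=-1)
    targets = torch.randint(1, 32, (1, 10), dtype=torch.int32, device=device)
    # With the device guard in place, the CUDA work is issued on cuda:1,
    # matching where log_probs and targets live, instead of the default cuda:0.
    paths, scores = F.forced_align(log_probs, targets, blank=0)
    print(paths.device)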
2 changes: 2 additions & 0 deletions test/torchaudio_unittest/common_utils/__init__.py
@@ -18,6 +18,7 @@
skipIfNoQengine,
skipIfPy310,
skipIfRocm,
skipIfSingleCuda,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
@@ -65,6 +66,7 @@ def inject_request(self, request):
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"skipIfSingleCuda",
"disabledInCI",
"get_wav_data",
"normalize_wav",
9 changes: 9 additions & 0 deletions test/torchaudio_unittest/common_utils/case_utils.py
@@ -249,6 +249,15 @@ def skipIfNoModule(module, display_name=None):
reason="Tests are failing on CI consistently. Disabled while investigating.",
key="TEMPORARY_DISABLED",
)
skipIfSingleCuda = _skipIf(
not (torch.cuda.is_available() and torch.cuda.device_count() > 1),
reason=(
"CUDA is not available."
if not torch.cuda.is_available()
else f"Not a multi-GPU platform (device count is {torch.cuda.device_count()})."
),
key="NO_MULTIGPU_CUDA",
)


def skipIfNoHWAccel(name):
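
The key="NO_MULTIGPU_CUDA" ties this decorator to the TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MULTIGPU_CUDA variable exported in the CI changes above. The _skipIf helper itself is not shown in this diff; the following is only a hypothetical sketch of how such an env-var gate can behave (the name _skip_if_sketch and the handling of a disallowed skip are assumptions, not torchaudio's actual implementation):

import os
import unittest

import torch


def _skip_if_sketch(should_skip, reason, key):
    # Feature is available: leave the test untouched.
    if not should_skip:
        return lambda test_item: test_item
    # CI opts in to skipping via TORCHAUDIO_TEST_ALLOW_SKIP_IF_<key>=true.
    if os.environ.get(f"TORCHAUDIO_TEST_ALLOW_SKIP_IF_{key}", "false") == "true":
        return unittest.skip(reason)
    # Otherwise surface the skip loudly so a mis-provisioned runner is noticed
    # (the real helper may turn this into a failure instead of a skip).
    return unittest.skip(f"UNEXPECTED SKIP, set TORCHAUDIO_TEST_ALLOW_SKIP_IF_{key}=true to allow: {reason}")


# Mirrors the skipIfSingleCuda definition above, using the sketch helper.
skip_if_single_cuda_sketch = _skip_if_sketch(
    not (torch.cuda.is_available() and torch.cuda.device_count() > 1),
    reason="Not a multi-GPU platform.",
    key="NO_MULTIGPU_CUDA",
)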
@@ -1,7 +1,7 @@
import unittest

import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda, skipIfSingleCuda

from .functional_impl import Functional, FunctionalCUDAOnly

@@ -28,6 +28,12 @@ class TestFunctionalCUDAOnlyFloat32(FunctionalCUDAOnly, PytorchTestCase):
device = torch.device("cuda")


@skipIfSingleCuda
class TestFunctionalMultiGPUCUDAOnlyFloat32(FunctionalCUDAOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda:1")


@skipIfNoCuda
class TestFunctionalCUDAOnlyFloat64(FunctionalCUDAOnly, PytorchTestCase):
dtype = torch.float64
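
To exercise just the new multi-GPU class locally, something along these lines should work (illustrative; the -k pattern assumes the TestFunctionalMultiGPUCUDAOnlyFloat32 class name added above, and the test path is the one used by the CI workflows):

import os

import pytest

# Mirror the CI opt-in so single-GPU machines skip the test rather than fail.
os.environ["TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MULTIGPU_CUDA"] = "true"
pytest.main(["test/torchaudio_unittest", "-k", "MultiGPU", "-x"])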