diff --git a/.github/workflows/code_quality.yml b/.github/workflows/code_quality.yml
index 8635a95e..25fa3839 100644
--- a/.github/workflows/code_quality.yml
+++ b/.github/workflows/code_quality.yml
@@ -17,6 +17,7 @@ jobs:
fail-fast: false
matrix:
tox_env:
+ - format
- lint
- types
@@ -39,27 +40,6 @@ jobs:
- name: Run check for tox env "${{ matrix.tox_env }}"
run: tox -e ${{ matrix.tox_env }}
- black:
- name: Code formatting
- runs-on: ubuntu-latest
-
- steps:
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: "3.12"
-
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Install black
- run: pip install black
-
- - name: Run black
- run: black --check --diff .
-
docs:
name: Documentation
runs-on: ubuntu-latest
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5153dbb9..2a836312 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -21,7 +21,7 @@ repos:
rev: v0.6.7
hooks:
- id: ruff
- # - id: ruff-format
+ - id: ruff-format
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.10.0
diff --git a/README.md b/README.md
index 0763c9fc..999b970c 100644
--- a/README.md
+++ b/README.md
@@ -52,8 +52,8 @@ at [OpenAI](https://openai.com/) ([link](https://jack-clark.net/2020/03/17/)).
Code
-
-
+
+
diff --git a/docs/examples/plot_3d_to_2d.py b/docs/examples/plot_3d_to_2d.py
index fb26c78e..89bf9a4b 100644
--- a/docs/examples/plot_3d_to_2d.py
+++ b/docs/examples/plot_3d_to_2d.py
@@ -8,6 +8,7 @@
import matplotlib.pyplot as plt
import torch
+
import torchio as tio
torch.manual_seed(0)
diff --git a/docs/examples/plot_custom_z_spacing.py b/docs/examples/plot_custom_z_spacing.py
index e73ad25e..7afc90b8 100644
--- a/docs/examples/plot_custom_z_spacing.py
+++ b/docs/examples/plot_custom_z_spacing.py
@@ -10,6 +10,7 @@
"""
import torch
+
import torchio as tio
diff --git a/docs/examples/plot_history.py b/docs/examples/plot_history.py
index 3af4424a..4281b1ba 100644
--- a/docs/examples/plot_history.py
+++ b/docs/examples/plot_history.py
@@ -12,6 +12,7 @@
import matplotlib.pyplot as plt
import torch
+
import torchio as tio
torch.manual_seed(0)
@@ -38,9 +39,7 @@
pprint.pprint(transformed.history) # noqa: T203
print('\nComposed transform to reproduce history:') # noqa: T201
print(transformed.get_composed_history()) # noqa: T201
-print(
- '\nComposed transform to invert applied transforms when possible:'
-) # noqa: T201, B950
+print('\nComposed transform to invert applied transforms when possible:')
print(transformed.get_inverse_transform(ignore_intensity=False)) # noqa: T201
loader = tio.SubjectsLoader(
diff --git a/docs/examples/plot_include_exclude.py b/docs/examples/plot_include_exclude.py
index c7356049..500ccd7f 100644
--- a/docs/examples/plot_include_exclude.py
+++ b/docs/examples/plot_include_exclude.py
@@ -7,8 +7,8 @@
"""
import torch
-import torchio as tio
+import torchio as tio
torch.manual_seed(0)
diff --git a/docs/examples/plot_video.py b/docs/examples/plot_video.py
index 0ffd8017..48438e3b 100644
--- a/docs/examples/plot_video.py
+++ b/docs/examples/plot_video.py
@@ -12,9 +12,10 @@
import matplotlib.pyplot as plt
import numpy as np
import torch
-import torchio as tio
from PIL import Image
+import torchio as tio
+
def read_clip(path, undersample=4):
"""Read a GIF a return an array of shape (C, W, H, T)."""
@@ -49,7 +50,7 @@ def get_frame(image, i):
)
-# Source: https://thehigherlearning.wordpress.com/2014/06/25/watching-a-cell-divide-under-an-electron-microscope-is-mesmerizing-gif/ # noqa: B950
+# Source: https://thehigherlearning.wordpress.com/2014/06/25/watching-a-cell-divide-under-an-electron-microscope-is-mesmerizing-gif/
array, delay = read_clip('nBTu3oi.gif')
plt.imshow(array[..., 0].transpose(1, 2, 0))
plt.plot()
diff --git a/docs/source/README.rst b/docs/source/README.rst
index 4f4164f1..090e5a4f 100644
--- a/docs/source/README.rst
+++ b/docs/source/README.rst
@@ -3,7 +3,7 @@ TorchIO
#######
|PyPI-downloads| |PyPI-version| |Conda-version| |Google-Colab-notebook|
-|Docs-status| |Tests-status| |Black|
+|Docs-status| |Tests-status| |Ruff|
|Coverage-codecov| |Code-Quality| |Code-Maintainability| |pre-commit|
|Slack| |Twitter| |Twitter-commits| |YouTube|
@@ -89,9 +89,9 @@ If you found a bug or have a feature request, please open an issue:
:target: https://github.com/fepegar/torchio/actions/workflows/tests.yml
:alt: Tests status
-.. |Black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: black
+.. |Ruff| image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
+ :target: https://docs.astral.sh/ruff/
+ :alt: Code style: Ruff
.. |Coverage-codecov| image:: https://codecov.io/gh/fepegar/torchio/branch/main/graphs/badge.svg
:target: https://codecov.io/github/fepegar/torchio
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f34df5e4..f79c3c9a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -109,7 +109,7 @@
# further. For a list of options available for each theme, see the
# documentation.
#
-url = 'https://www.journals.elsevier.com/computer-methods-and-programs-in-biomedicine/most-downloaded-articles' # noqa: B950
+url = 'https://www.journals.elsevier.com/computer-methods-and-programs-in-biomedicine/most-downloaded-articles'
text = 'CMPB'
html_href = f'<a href="{url}">{text}</a>'
message = f'TorchIO becomes one of the most downloaded articles from {html_href}!'
@@ -221,9 +221,7 @@
epub_exclude_files = ['search.html']
# CopyButton configuration
-copybutton_prompt_text = (
- r'>>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: ' # noqa: B950,FS003
-)
+copybutton_prompt_text = r'>>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: '
copybutton_prompt_is_regexp = True
# def setup(app):
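
For reference, the consolidated copybutton_prompt_text value above is the regular
expression sphinx-copybutton uses to recognize and strip interpreter prompts when
a reader copies a snippet. A minimal sketch of what the pattern matches, using
only the standard re module (illustrative only, not part of this change):

    import re

    prompt_re = re.compile(r'>>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: ')

    # Python REPL, shell, and IPython prompts are all removed before copying.
    for line in ['>>> import torchio', '$ pip install torchio', 'In [1]: x = 1']:
        print(prompt_re.sub('', line, count=1))
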
diff --git a/pyproject.toml b/pyproject.toml
index 80ad5dc4..35a5d57d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,7 +53,6 @@ csv = [
"pandas",
]
dev = [
- "black",
"bump2version",
"coverage",
"mypy",
@@ -98,11 +97,6 @@ Source = "https://github.com/fepegar/torchio"
Documentation = "http://torchio.rtfd.io"
"Release notes" = "https://github.com/fepegar/torchio/releases"
-[tool.black]
-preview = false
-skip-string-normalization = true
-target-version = ['py311']
-
[tool.mypy]
pretty = true
@@ -127,7 +121,6 @@ markers = [
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
"serial",
]
-
filterwarnings = [
# Ignore SimpleITK Swig warnings
"ignore:builtin type .* has no __module__ attribute",
@@ -135,3 +128,10 @@ filterwarnings = [
# Raised by SimpleITK on CI
"ignore:invalid escape sequence",
]
+
+[tool.ruff]
+format.quote-style = 'single'
+lint.select = ["F", "I"]
+
+[tool.ruff.lint.isort]
+force-single-line = true
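
The [tool.ruff] table above takes over from the removed [tool.black] section:
single quotes are preserved through the formatter's quote-style option, and only
the Pyflakes (F) and isort (I) rule families are selected for linting. A rough
sketch of how the same settings can be exercised locally from Python, assuming
ruff is installed (the project itself runs this through tox and pre-commit):

    import subprocess

    # Check formatting only; quote-style is picked up from pyproject.toml.
    subprocess.run(['ruff', 'format', '--check', '.'], check=True)

    # Lint with the selected rule families: F (Pyflakes) and I (isort).
    subprocess.run(['ruff', 'check', '--select', 'F,I', '.'], check=True)
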
diff --git a/src/torchio/__init__.py b/src/torchio/__init__.py
index 85de897c..ad9a7c22 100644
--- a/src/torchio/__init__.py
+++ b/src/torchio/__init__.py
@@ -5,29 +5,26 @@
__version__ = '0.20.0'
+from . import datasets
+from . import reference
from . import utils
from .constants import * # noqa: F401, F403
+from .data import GridAggregator
+from .data import GridSampler
+from .data import Image
+from .data import LabelMap
+from .data import LabelSampler
+from .data import Queue
+from .data import ScalarImage
+from .data import Subject
+from .data import SubjectsDataset
+from .data import SubjectsLoader
+from .data import UniformSampler
+from .data import WeightedSampler
+from .data import inference
+from .data import io
+from .data import sampler
from .transforms import * # noqa: F401, F403
-from .data import (
- io,
- sampler,
- inference,
- SubjectsDataset,
- SubjectsLoader,
- Image,
- ScalarImage,
- LabelMap,
- Queue,
- Subject,
- WeightedSampler,
- UniformSampler,
- LabelSampler,
- GridSampler,
- GridAggregator,
-)
-from . import datasets
-from . import reference
-
__all__ = [
'utils',
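
The one-import-per-line layout above is what the force-single-line isort option
added in pyproject.toml enforces; ruff expands grouped imports automatically. A
tiny runnable illustration with standard-library modules (hypothetical example,
not taken from the codebase):

    # Grouped form that `ruff check --select I --fix` rewrites
    # when force-single-line = true:
    #     from collections import OrderedDict, defaultdict
    # Single-line form it produces instead:
    from collections import OrderedDict
    from collections import defaultdict

    print(OrderedDict(a=1), defaultdict(list))
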
diff --git a/src/torchio/cli/apply_transform.py b/src/torchio/cli/apply_transform.py
index d54fd47c..91ef070b 100644
--- a/src/torchio/cli/apply_transform.py
+++ b/src/torchio/cli/apply_transform.py
@@ -3,8 +3,9 @@
from pathlib import Path
import typer
-from rich.progress import Progress, SpinnerColumn, TextColumn
-
+from rich.progress import Progress
+from rich.progress import SpinnerColumn
+from rich.progress import TextColumn
app = typer.Typer()
@@ -61,9 +62,10 @@ def main(
Example:
$ tiotr input.nrrd RandomMotion output.nii "degrees=(-5,15) num_transforms=3" -v
- """ # noqa: B950
+ """
# Imports are placed here so that the tool loads faster if not being run
import torch
+
import torchio.transforms as transforms
from torchio.utils import apply_transform_to_file
@@ -79,7 +81,7 @@ def main(
torch.manual_seed(seed)
with Progress(
SpinnerColumn(),
- TextColumn('[progress.description]{task.description}'), # noqa: FS003
+ TextColumn('[progress.description]{task.description}'),
transient=True,
disable=not show_progress,
) as progress:
diff --git a/src/torchio/cli/print_info.py b/src/torchio/cli/print_info.py
index 44f1a9d9..87fef5b6 100644
--- a/src/torchio/cli/print_info.py
+++ b/src/torchio/cli/print_info.py
@@ -3,7 +3,6 @@
import typer
-
app = typer.Typer()
diff --git a/src/torchio/data/__init__.py b/src/torchio/data/__init__.py
index 7ebc6724..d3250c98 100644
--- a/src/torchio/data/__init__.py
+++ b/src/torchio/data/__init__.py
@@ -12,7 +12,6 @@
from .sampler import WeightedSampler
from .subject import Subject
-
__all__ = [
'Queue',
'Subject',
diff --git a/src/torchio/data/dataset.py b/src/torchio/data/dataset.py
index d1bdfc23..4b7c1b22 100644
--- a/src/torchio/data/dataset.py
+++ b/src/torchio/data/dataset.py
@@ -62,7 +62,7 @@ class SubjectsDataset(Dataset):
.. tip:: To quickly iterate over the subjects without loading the images,
use :meth:`dry_iter()`.
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/data/image.py b/src/torchio/data/image.py
index 31fbb197..e06ce607 100644
--- a/src/torchio/data/image.py
+++ b/src/torchio/data/image.py
@@ -34,9 +34,9 @@
from ..typing import TypeTripletFloat
from ..typing import TypeTripletInt
from ..utils import get_stem
-from ..utils import to_tuple
from ..utils import guess_external_viewer
from ..utils import is_iterable
+from ..utils import to_tuple
from .io import check_uint_to_int
from .io import ensure_4d
from .io import get_rotation_and_spacing_from_affine
@@ -48,7 +48,6 @@
from .io import sitk_to_nib
from .io import write_image
-
PROTECTED_KEYS = DATA, AFFINE, TYPE, PATH, STEM
TypeBound = Tuple[float, float]
TypeBounds = Tuple[TypeBound, TypeBound, TypeBound]
@@ -128,7 +127,7 @@ class Image(dict):
.. _FSL docs: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained
.. _SimpleITK docs: https://simpleitk.readthedocs.io/en/master/fundamentalConcepts.html
.. _Graham Wideman's website: http://www.grahamwideman.com/gw/brain/orientation/orientterms.htm
- """ # noqa: B950
+ """
def __init__(
self,
@@ -470,7 +469,7 @@ def _parse_path(
# https://github.com/fepegar/torchio/pull/838
raise TypeError('The path argument cannot be a dictionary')
elif self._is_paths_sequence(path):
- return [self._parse_single_path(p) for p in path] # type: ignore[union-attr] # noqa: B950
+ return [self._parse_single_path(p) for p in path] # type: ignore[union-attr]
else:
return self._parse_single_path(path) # type: ignore[arg-type]
@@ -655,7 +654,7 @@ def from_sitk(cls, sitk_image):
>>> sitk_image = sitk.Image((224, 224), sitk.sitkVectorFloat32, 3)
>>> tio.ScalarImage.from_sitk(sitk_image)
ScalarImage(shape: (3, 224, 224, 1); spacing: (1.00, 1.00, 1.00); orientation: LPS+; memory: 588.0 KiB; dtype: torch.FloatTensor)
- """ # noqa: B950
+ """
tensor, affine = sitk_to_nib(sitk_image)
return cls(tensor=tensor, affine=affine)
@@ -712,7 +711,7 @@ def to_gif(
eliminating unused colors. This is only useful if the palette
can be compressed to the next smaller power of 2 elements.
reverse: Reverse the temporal order of frames.
- """ # noqa: B950
+ """
from ..visualization import make_gif # avoid circular import
make_gif(
diff --git a/src/torchio/data/inference/aggregator.py b/src/torchio/data/inference/aggregator.py
index 52985cad..dae0b728 100644
--- a/src/torchio/data/inference/aggregator.py
+++ b/src/torchio/data/inference/aggregator.py
@@ -30,7 +30,7 @@ class GridAggregator:
.. note:: Adapted from NiftyNet. See `this NiftyNet tutorial
`_ for more
information about patch-based sampling.
- """ # noqa: B950
+ """
def __init__(self, sampler: GridSampler, overlap_mode: str = 'crop'):
subject = sampler.subject
diff --git a/src/torchio/data/io.py b/src/torchio/data/io.py
index 258aa00e..33de9070 100644
--- a/src/torchio/data/io.py
+++ b/src/torchio/data/io.py
@@ -20,7 +20,6 @@
from ..typing import TypeTripletFloat
from ..typing import TypeTripletInt
-
# Matrices used to switch between LPS and RAS
FLIPXY_33 = np.diag([-1, -1, 1])
FLIPXY_44 = np.diag([-1, -1, 1, 1])
diff --git a/src/torchio/data/loader.py b/src/torchio/data/loader.py
index ebc55dea..768d1936 100644
--- a/src/torchio/data/loader.py
+++ b/src/torchio/data/loader.py
@@ -7,12 +7,11 @@
import numpy as np
import torch
-from torch.utils.data import Dataset
from torch.utils.data import DataLoader
+from torch.utils.data import Dataset
from .subject import Subject
-
T = TypeVar('T')
diff --git a/src/torchio/data/queue.py b/src/torchio/data/queue.py
index 6a77c8b0..1b215180 100644
--- a/src/torchio/data/queue.py
+++ b/src/torchio/data/queue.py
@@ -9,7 +9,7 @@
from torch.utils.data import Dataset
from torch.utils.data import Sampler
-from .. import NUM_SAMPLES
+from ..constants import NUM_SAMPLES
from .dataset import SubjectsDataset
from .sampler import PatchSampler
from .subject import Subject
@@ -180,7 +180,7 @@ class Queue(Dataset):
... inputs = patches_batch['t1'][tio.DATA] # key 't1' is in subject
... targets = patches_batch['brain'][tio.DATA] # key 'brain' is in subject
... logits = model(inputs) # model being an instance of torch.nn.Module
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/data/sampler/grid.py b/src/torchio/data/sampler/grid.py
index c9a1473a..506c6337 100644
--- a/src/torchio/data/sampler/grid.py
+++ b/src/torchio/data/sampler/grid.py
@@ -94,7 +94,7 @@ def _pad(self, subject: Subject) -> Subject:
border = self.patch_overlap // 2
padding = border.repeat(2)
- pad = Pad(padding, padding_mode=self.padding_mode) # type: ignore[arg-type] # noqa: B950
+ pad = Pad(padding, padding_mode=self.padding_mode) # type: ignore[arg-type]
subject = pad(subject) # type: ignore[assignment]
return subject
@@ -110,7 +110,7 @@ def _generate_patches( # type: ignore[override]
subject = self._pad(subject)
sizes = subject.spatial_shape, self.patch_size, self.patch_overlap
self._parse_sizes(*sizes) # type: ignore[arg-type]
- locations = self._get_patches_locations(*sizes) # type: ignore[arg-type] # noqa: B950
+ locations = self._get_patches_locations(*sizes) # type: ignore[arg-type]
for location in locations:
index_ini = location[:3]
yield self.extract_patch(subject, index_ini)
diff --git a/src/torchio/data/sampler/sampler.py b/src/torchio/data/sampler/sampler.py
index 0cea35a9..1c866b11 100644
--- a/src/torchio/data/sampler/sampler.py
+++ b/src/torchio/data/sampler/sampler.py
@@ -40,7 +40,7 @@ def extract_patch(
subject: Subject,
index_ini: TypeTripletInt,
) -> Subject:
- cropped_subject = self.crop(subject, index_ini, self.patch_size) # type: ignore[arg-type] # noqa: B950
+ cropped_subject = self.crop(subject, index_ini, self.patch_size) # type: ignore[arg-type]
return cropped_subject
def crop(
diff --git a/src/torchio/data/sampler/weighted.py b/src/torchio/data/sampler/weighted.py
index 88d7af01..5f997bc0 100644
--- a/src/torchio/data/sampler/weighted.py
+++ b/src/torchio/data/sampler/weighted.py
@@ -47,7 +47,7 @@ class WeightedSampler(RandomSampler):
.. note:: Values of the probability map near the border will be set to 0 as
the center of the patch cannot be at the border (unless the patch has
size 1 or 2 along that axis).
- """ # noqa: B950
+ """
def __init__(
self,
@@ -225,7 +225,7 @@ def sample_probability_map(
>>> histogram # doctest:+SKIP
array([[ 0, 0, 3479, 3478, 17121, 7023, 3355, 3378, 0],
[ 6808, 6804, 6942, 6809, 6946, 6988, 7002, 6826, 7041]])
- """ # noqa: B950
+ """
# Get first value larger than random number ensuring the random number
# is not exactly 0 (see https://github.com/fepegar/torchio/issues/510)
random_number = max(MIN_FLOAT_32, torch.rand(1).item()) * cdf[-1]
@@ -242,7 +242,7 @@ def sample_probability_map(
message = (
'Error retrieving probability in weighted sampler.'
' Please report this issue at'
- ' https://github.com/fepegar/torchio/issues/new?labels=bug&template=bug_report.md' # noqa: B950
+ ' https://github.com/fepegar/torchio/issues/new?labels=bug&template=bug_report.md'
)
raise RuntimeError(message)
diff --git a/src/torchio/data/subject.py b/src/torchio/data/subject.py
index 767e1c60..9369d823 100644
--- a/src/torchio/data/subject.py
+++ b/src/torchio/data/subject.py
@@ -2,6 +2,7 @@
import copy
import pprint
+from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
@@ -9,7 +10,6 @@
from typing import Optional
from typing import Sequence
from typing import Tuple
-from typing import TYPE_CHECKING
import numpy as np
@@ -19,7 +19,8 @@
from .image import Image
if TYPE_CHECKING:
- from ..transforms import Transform, Compose
+ from ..transforms import Compose
+ from ..transforms import Transform
class Subject(dict):
@@ -49,7 +50,7 @@ class Subject(dict):
... 'hospital': 'Hospital Juan Negrín',
... }
>>> subject = tio.Subject(subject_dict)
- """ # noqa: B950
+ """
def __init__(self, *args, **kwargs: Dict[str, Any]):
if args:
@@ -162,8 +163,8 @@ def get_applied_transforms(
ignore_intensity: bool = False,
image_interpolation: Optional[str] = None,
) -> List[Transform]:
- from ..transforms.transform import Transform
from ..transforms.intensity_transform import IntensityTransform
+ from ..transforms.transform import Transform
name_to_transform = {cls.__name__: cls for cls in get_subclasses(Transform)}
transforms_list = []
@@ -266,7 +267,7 @@ def check_consistent_attribute(
attribute of two images being compared,
:math:`t_{abs}` is the ``absolute_tolerance`` and
:math:`t_{rel}` is the ``relative_tolerance``.
- """ # noqa: B950
+ """
message = (
f'More than one value for "{attribute}" found in subject images:\n{{}}'
)
@@ -328,7 +329,7 @@ def check_consistent_space(self) -> None:
'As described above, some images in the subject are not in the'
' same space. You probably can use the transforms ToCanonical'
' and Resample to fix this, as explained at'
- ' https://github.com/fepegar/torchio/issues/647#issuecomment-913025695' # noqa: B950
+ ' https://github.com/fepegar/torchio/issues/647#issuecomment-913025695'
)
raise RuntimeError(message) from e
diff --git a/src/torchio/datasets/__init__.py b/src/torchio/datasets/__init__.py
index 9df14235..9c209d29 100644
--- a/src/torchio/datasets/__init__.py
+++ b/src/torchio/datasets/__init__.py
@@ -1,9 +1,9 @@
from .bite import BITE3
from .episurg import EPISURG
from .fpg import FPG
+from .itk_snap import T1T2
from .itk_snap import AorticValve
from .itk_snap import BrainTumor
-from .itk_snap import T1T2
from .ixi import IXI
from .ixi import IXITiny
from .medmnist import AdrenalMNIST3D
@@ -20,7 +20,6 @@
from .rsna_spine_fracture import RSNACervicalSpineFracture
from .slicer import Slicer
-
__all__ = [
'FPG',
'Slicer',
diff --git a/src/torchio/datasets/bite.py b/src/torchio/datasets/bite.py
index 414b9e40..6a5b4b63 100644
--- a/src/torchio/datasets/bite.py
+++ b/src/torchio/datasets/bite.py
@@ -3,11 +3,11 @@
from typing import Dict
from typing import Optional
-from .. import Image
-from .. import LabelMap
-from .. import ScalarImage
-from .. import Subject
-from .. import SubjectsDataset
+from ..data import Image
+from ..data import LabelMap
+from ..data import ScalarImage
+from ..data import Subject
+from ..data import SubjectsDataset
from ..download import download_and_extract_archive
from ..transforms import Transform
from ..typing import TypePath
@@ -56,7 +56,7 @@ class BITE3(BITE):
transform: An instance of
:class:`~torchio.transforms.transform.Transform`.
download: If set to ``True``, will download the data into :attr:`root`.
- """ # noqa: B950
+ """
dirname = 'group3'
diff --git a/src/torchio/datasets/episurg.py b/src/torchio/datasets/episurg.py
index 8c4267da..98490f18 100644
--- a/src/torchio/datasets/episurg.py
+++ b/src/torchio/datasets/episurg.py
@@ -2,10 +2,10 @@
from pathlib import Path
from typing import Optional
-from .. import LabelMap
-from .. import ScalarImage
-from .. import Subject
-from .. import SubjectsDataset
+from ..data import LabelMap
+from ..data import ScalarImage
+from ..data import Subject
+from ..data import SubjectsDataset
from ..download import download_and_extract_archive
from ..transforms import Transform
from ..typing import TypePath
@@ -40,7 +40,7 @@ class EPISURG(SubjectsDataset):
to be downloaded if it is not already present.
"""
- data_url = 'https://s3-eu-west-1.amazonaws.com/pstorage-ucl-2748466690/26153588/EPISURG.zip' # noqa: B950
+ data_url = 'https://s3-eu-west-1.amazonaws.com/pstorage-ucl-2748466690/26153588/EPISURG.zip'
md5 = '5ec5831a2c6fbfdc8489ba2910a6504b'
def __init__(
diff --git a/src/torchio/datasets/fpg.py b/src/torchio/datasets/fpg.py
index 542ebf1d..3b8449c4 100644
--- a/src/torchio/datasets/fpg.py
+++ b/src/torchio/datasets/fpg.py
@@ -1,8 +1,8 @@
import urllib.parse
-from .. import DATA_REPO
-from .. import LabelMap
-from .. import ScalarImage
+from ..constants import DATA_REPO
+from ..data import LabelMap
+from ..data import ScalarImage
from ..data.io import read_matrix
from ..data.subject import _RawSubjectCopySubject
from ..download import download_url
diff --git a/src/torchio/datasets/itk_snap/__init__.py b/src/torchio/datasets/itk_snap/__init__.py
index e7b3a809..130a7a06 100644
--- a/src/torchio/datasets/itk_snap/__init__.py
+++ b/src/torchio/datasets/itk_snap/__init__.py
@@ -1,7 +1,6 @@
+from .itk_snap import T1T2
from .itk_snap import AorticValve
from .itk_snap import BrainTumor
-from .itk_snap import T1T2
-
__all__ = [
'BrainTumor',
diff --git a/src/torchio/datasets/itk_snap/itk_snap.py b/src/torchio/datasets/itk_snap/itk_snap.py
index 8360a387..4450378c 100644
--- a/src/torchio/datasets/itk_snap/itk_snap.py
+++ b/src/torchio/datasets/itk_snap/itk_snap.py
@@ -13,7 +13,7 @@ class SubjectITKSNAP(_RawSubjectCopySubject):
See `the ITK-SNAP website`_ for more information.
.. _the ITK-SNAP website: http://www.itksnap.org/pmwiki/pmwiki.php?n=Downloads.Data
- """ # noqa: B950
+ """
url_base = 'https://www.nitrc.org/frs/download.php/'
diff --git a/src/torchio/datasets/ixi.py b/src/torchio/datasets/ixi.py
index b0f26ece..511509be 100644
--- a/src/torchio/datasets/ixi.py
+++ b/src/torchio/datasets/ixi.py
@@ -18,10 +18,10 @@
from typing import Optional
from typing import Sequence
-from .. import LabelMap
-from .. import ScalarImage
-from .. import Subject
-from .. import SubjectsDataset
+from ..data import LabelMap
+from ..data import ScalarImage
+from ..data import Subject
+from ..data import SubjectsDataset
from ..download import download_and_extract_archive
from ..transforms import Transform
from ..typing import TypePath
@@ -60,9 +60,9 @@ class IXI(SubjectsDataset):
>>> print('Keys in subject:', tuple(sample_subject.keys())) # ('T1', 'T2')
>>> print('Shape of T1 data:', sample_subject['T1'].shape) # [1, 180, 268, 268]
>>> print('Shape of T2 data:', sample_subject['T2'].shape) # [1, 241, 257, 188]
- """ # noqa: B950
+ """
- base_url = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-{modality}.tar' # noqa: FS003,B950
+ base_url = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-{modality}.tar'
md5_dict = {
'T1': '34901a0593b41dd19c1a1f746eac2d58',
'T2': 'e3140d78730ecdd32ba92da48c0a9aaa',
@@ -173,7 +173,7 @@ class IXITiny(SubjectsDataset):
download: If set to ``True``, will download the data into :attr:`root`.
.. _notebook: https://github.com/fepegar/torchio/blob/main/tutorials/README.md
- """ # noqa: B950
+ """
url = 'https://www.dropbox.com/s/ogxjwjxdv5mieah/ixi_tiny.zip?dl=1'
md5 = 'bfb60f4074283d78622760230bfa1f98'
diff --git a/src/torchio/datasets/medmnist.py b/src/torchio/datasets/medmnist.py
index ccd6a765..bad491ff 100644
--- a/src/torchio/datasets/medmnist.py
+++ b/src/torchio/datasets/medmnist.py
@@ -1,9 +1,9 @@
import numpy as np
import torch
-from .. import ScalarImage
-from .. import Subject
-from .. import SubjectsDataset
+from ..data import ScalarImage
+from ..data import Subject
+from ..data import SubjectsDataset
from ..download import download_url
from ..utils import get_torchio_cache_dir
diff --git a/src/torchio/datasets/mni/__init__.py b/src/torchio/datasets/mni/__init__.py
index a3b61cdc..62db01d5 100644
--- a/src/torchio/datasets/mni/__init__.py
+++ b/src/torchio/datasets/mni/__init__.py
@@ -3,7 +3,6 @@
from .pediatric import Pediatric
from .sheep import Sheep
-
__all__ = [
'Sheep',
'Colin27',
diff --git a/src/torchio/datasets/mni/colin.py b/src/torchio/datasets/mni/colin.py
index 06812d31..825632b5 100644
--- a/src/torchio/datasets/mni/colin.py
+++ b/src/torchio/datasets/mni/colin.py
@@ -6,7 +6,6 @@
from ...utils import compress
from .mni import SubjectMNI
-
TISSUES_2008 = {
1: 'Cerebro-spinal fluid',
2: 'Gray Matter',
@@ -57,7 +56,7 @@ class Colin27(SubjectMNI):
>>> colin_2008.load()
>>> colin_2008.t1
ScalarImage(shape: (1, 362, 434, 362); spacing: (0.50, 0.50, 0.50); orientation: RAS+; memory: 217.0 MiB; type: intensity)
- """ # noqa: B950
+ """
def __init__(self, version=1998):
if version not in (1998, 2008):
diff --git a/src/torchio/datasets/mni/icbm.py b/src/torchio/datasets/mni/icbm.py
index a375ca99..8dfcd0d9 100644
--- a/src/torchio/datasets/mni/icbm.py
+++ b/src/torchio/datasets/mni/icbm.py
@@ -2,8 +2,8 @@
import torch
-from ... import LabelMap
-from ... import ScalarImage
+from ...data import LabelMap
+from ...data import ScalarImage
from ...download import download_and_extract_archive
from ...utils import compress
from ...utils import get_torchio_cache_dir
@@ -32,7 +32,7 @@ class ICBM2009CNonlinearSymmetric(SubjectMNI):
>>> icbm = tio.datasets.ICBM2009CNonlinearSymmetric(load_4d_tissues=False)
>>> icbm
ICBM2009CNonlinearSymmetric(Keys: ('t1', 'eyes', 'face', 'brain', 't2', 'pd', 'gm', 'wm', 'csf'); images: 9)
- """ # noqa: B950
+ """
def __init__(self, load_4d_tissues: bool = True):
self.name = 'mni_icbm152_nlin_sym_09c_nifti'
diff --git a/src/torchio/datasets/mni/pediatric.py b/src/torchio/datasets/mni/pediatric.py
index 84271987..0b603e03 100644
--- a/src/torchio/datasets/mni/pediatric.py
+++ b/src/torchio/datasets/mni/pediatric.py
@@ -1,12 +1,11 @@
import urllib.parse
-from ... import LabelMap
-from ... import ScalarImage
+from ...data import LabelMap
+from ...data import ScalarImage
from ...download import download_and_extract_archive
from ...utils import compress
from .mni import SubjectMNI
-
SUPPORTED_YEARS = (
(4.5, 18.5),
(4.5, 8.5),
@@ -41,7 +40,7 @@ class Pediatric(SubjectMNI):
``(13, 18.5)``.
symmetric: If ``True``, the left-right symmetric templates will be
used. Else, the asymmetric (natural) templates will be used.
- """ # noqa: B950
+ """
def __init__(self, years, symmetric=False):
self.url_dir = 'http://www.bic.mni.mcgill.ca/~vfonov/nihpd/obj1/'
diff --git a/src/torchio/datasets/rsna_miccai.py b/src/torchio/datasets/rsna_miccai.py
index a41b88c7..2e868524 100644
--- a/src/torchio/datasets/rsna_miccai.py
+++ b/src/torchio/datasets/rsna_miccai.py
@@ -6,9 +6,9 @@
from typing import Sequence
from typing import Union
-from .. import ScalarImage
-from .. import Subject
-from .. import SubjectsDataset
+from ..data import ScalarImage
+from ..data import Subject
+from ..data import SubjectsDataset
from ..typing import TypePath
@@ -50,7 +50,7 @@ class RSNAMICCAI(SubjectsDataset):
.. _RSNA-MICCAI Brain Tumor Radiogenomic Classification challenge: https://www.kaggle.com/c/rsna-miccai-brain-tumor-radiogenomic-classification
- """ # noqa: B950
+ """
id_key = 'BraTS21ID'
label_key = 'MGMT_value'
diff --git a/src/torchio/datasets/rsna_spine_fracture.py b/src/torchio/datasets/rsna_spine_fracture.py
index b99569c4..a86a2c04 100644
--- a/src/torchio/datasets/rsna_spine_fracture.py
+++ b/src/torchio/datasets/rsna_spine_fracture.py
@@ -1,19 +1,18 @@
from pathlib import Path
+from types import ModuleType
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
-from types import ModuleType
-from .. import LabelMap
-from .. import ScalarImage
-from .. import Subject
-from .. import SubjectsDataset
+from ..data import LabelMap
+from ..data import ScalarImage
+from ..data import Subject
+from ..data import SubjectsDataset
from ..typing import TypePath
from ..utils import normalize_path
-
TypeBoxes = List[Dict[str, Union[str, float, int]]]
@@ -26,7 +25,7 @@ class RSNACervicalSpineFracture(SubjectsDataset):
instantiating this class.
.. _RSNA 2022 Cervical Spine Fracture Detection: https://www.kaggle.com/competitions/rsna-2022-cervical-spine-fracture-detection/overview/evaluation
- """ # noqa: B950
+ """
UID = 'StudyInstanceUID'
diff --git a/src/torchio/datasets/slicer.py b/src/torchio/datasets/slicer.py
index 5ac39abd..a66cadc1 100644
--- a/src/torchio/datasets/slicer.py
+++ b/src/torchio/datasets/slicer.py
@@ -1,24 +1,19 @@
import urllib.parse
-from .. import ScalarImage
+from ..data import ScalarImage
from ..data.subject import _RawSubjectCopySubject
from ..download import download_url
from ..utils import get_torchio_cache_dir
-
SLICER_URL = 'https://github.com/Slicer/SlicerTestingData/releases/download/'
URLS_DICT = {
'MRHead': (
('MRHead.nrrd',),
- (
- 'SHA256/cc211f0dfd9a05ca3841ce1141b292898b2dd2d3f08286affadf823a7e58df93',
- ), # noqa: B950
+ ('SHA256/cc211f0dfd9a05ca3841ce1141b292898b2dd2d3f08286affadf823a7e58df93',),
),
'DTIBrain': (
('DTI-Brain.nrrd',),
- (
- 'SHA256/5c78d00c86ae8d968caa7a49b870ef8e1c04525b1abc53845751d8bce1f0b91a',
- ), # noqa: B950
+ ('SHA256/5c78d00c86ae8d968caa7a49b870ef8e1c04525b1abc53845751d8bce1f0b91a',),
),
'DTIVolume': (
(
@@ -26,21 +21,17 @@
'DTIVolume.nhdr',
),
(
- 'SHA256/d785837276758ddd9d21d76a3694e7fd866505a05bc305793517774c117cb38d', # noqa: B950
- 'SHA256/67564aa42c7e2eec5c3fd68afb5a910e9eab837b61da780933716a3b922e50fe', # noqa: B950
+ 'SHA256/d785837276758ddd9d21d76a3694e7fd866505a05bc305793517774c117cb38d',
+ 'SHA256/67564aa42c7e2eec5c3fd68afb5a910e9eab837b61da780933716a3b922e50fe',
),
),
'CTChest': (
('CT-chest.nrrd',),
- (
- 'SHA256/4507b664690840abb6cb9af2d919377ffc4ef75b167cb6fd0f747befdb12e38e',
- ), # noqa: B950
+ ('SHA256/4507b664690840abb6cb9af2d919377ffc4ef75b167cb6fd0f747befdb12e38e',),
),
'CTACardio': (
('CTA-cardio.nrrd',),
- (
- 'SHA256/3b0d4eb1a7d8ebb0c5a89cc0504640f76a030b4e869e33ff34c564c3d3b88ad2',
- ), # noqa: B950
+ ('SHA256/3b0d4eb1a7d8ebb0c5a89cc0504640f76a030b4e869e33ff34c564c3d3b88ad2',),
),
}
@@ -56,7 +47,7 @@ class Slicer(_RawSubjectCopySubject):
Args:
name: One of the keys in :attr:`torchio.datasets.slicer.URLS_DICT`.
- """ # noqa: B950
+ """
def __init__(self, name='MRHead'):
filenames, url_files = URLS_DICT[name]
diff --git a/src/torchio/external/due.py b/src/torchio/external/due.py
index 21fd16c9..e0e6514f 100644
--- a/src/torchio/external/due.py
+++ b/src/torchio/external/due.py
@@ -48,7 +48,11 @@ def _donothing_func(*args, **kwargs):
try:
- from duecredit import due, BibTeX, Doi, Url, Text
+ from duecredit import BibTeX
+ from duecredit import Doi
+ from duecredit import Text
+ from duecredit import Url
+ from duecredit import due
if 'due' in locals() and not hasattr(due, 'cite'):
raise RuntimeError(
diff --git a/src/torchio/reference.py b/src/torchio/reference.py
index 7082fc22..b5de0d1a 100644
--- a/src/torchio/reference.py
+++ b/src/torchio/reference.py
@@ -15,7 +15,7 @@
url = {https://www.sciencedirect.com/science/article/pii/S0169260721003102},
author = {P{\'e}rez-Garc{\'i}a, Fernando and Sparks, Rachel and Ourselin, S{\'e}bastien},
keywords = {Medical image computing, Deep learning, Data augmentation, Preprocessing},
-} """ # noqa: B950
+} """
TITLE = (
'TorchIO: a Python library for efficient loading, preprocessing,'
diff --git a/src/torchio/transforms/__init__.py b/src/torchio/transforms/__init__.py
index 087dbe5b..57e220c8 100644
--- a/src/torchio/transforms/__init__.py
+++ b/src/torchio/transforms/__init__.py
@@ -1,55 +1,64 @@
# noreorder
-from .transform import Transform
-from .fourier import FourierTransform
-from .spatial_transform import SpatialTransform
-from .intensity_transform import IntensityTransform
-from .preprocessing.label.label_transform import LabelTransform
-
-# Generic
-from .lambda_transform import Lambda
+from .augmentation.composition import Compose
# Augmentation
from .augmentation.composition import OneOf
-from .augmentation.composition import Compose
-
-from .augmentation.spatial import RandomFlip, Flip
-from .augmentation.spatial import RandomAffine, Affine
+from .augmentation.intensity import BiasField
+from .augmentation.intensity import Blur
+from .augmentation.intensity import Gamma
+from .augmentation.intensity import Ghosting
+from .augmentation.intensity import LabelsToImage
+from .augmentation.intensity import Motion
+from .augmentation.intensity import Noise
+from .augmentation.intensity import RandomBiasField
+from .augmentation.intensity import RandomBlur
+from .augmentation.intensity import RandomGamma
+from .augmentation.intensity import RandomGhosting
+from .augmentation.intensity import RandomLabelsToImage
+from .augmentation.intensity import RandomMotion
+from .augmentation.intensity import RandomNoise
+from .augmentation.intensity import RandomSpike
+from .augmentation.intensity import RandomSwap
+from .augmentation.intensity import Spike
+from .augmentation.intensity import Swap
+from .augmentation.spatial import Affine
+from .augmentation.spatial import ElasticDeformation
+from .augmentation.spatial import Flip
+from .augmentation.spatial import RandomAffine
from .augmentation.spatial import RandomAnisotropy
-from .augmentation.spatial import RandomElasticDeformation, ElasticDeformation
-
-from .augmentation.intensity import RandomSwap, Swap
-from .augmentation.intensity import RandomBlur, Blur
-from .augmentation.intensity import RandomNoise, Noise
-from .augmentation.intensity import RandomSpike, Spike
-from .augmentation.intensity import RandomGamma, Gamma
-from .augmentation.intensity import RandomMotion, Motion
-from .augmentation.intensity import RandomGhosting, Ghosting
-from .augmentation.intensity import RandomBiasField, BiasField
-from .augmentation.intensity import RandomLabelsToImage, LabelsToImage
+from .augmentation.spatial import RandomElasticDeformation
+from .augmentation.spatial import RandomFlip
+from .fourier import FourierTransform
+from .intensity_transform import IntensityTransform
-# Preprocessing
-from .preprocessing import Pad
+# Generic
+from .lambda_transform import Lambda
+from .preprocessing import Clamp
+from .preprocessing import Contour
+from .preprocessing import CopyAffine
from .preprocessing import Crop
-from .preprocessing import Resize
-from .preprocessing import Resample
from .preprocessing import CropOrPad
-from .preprocessing import CopyAffine
-from .preprocessing import ToCanonical
-from .preprocessing import ZNormalization
-from .preprocessing import RescaleIntensity
-from .preprocessing import Clamp
-from .preprocessing import Mask
from .preprocessing import EnsureShapeMultiple
from .preprocessing import HistogramStandardization
-from .preprocessing.intensity.histogram_standardization import train_histogram
+from .preprocessing import KeepLargestComponent
+from .preprocessing import Mask
from .preprocessing import OneHot
-from .preprocessing import Contour
+
+# Preprocessing
+from .preprocessing import Pad
from .preprocessing import RemapLabels
from .preprocessing import RemoveLabels
+from .preprocessing import Resample
+from .preprocessing import RescaleIntensity
+from .preprocessing import Resize
from .preprocessing import SequentialLabels
-from .preprocessing import KeepLargestComponent
-
+from .preprocessing import ToCanonical
+from .preprocessing import ZNormalization
+from .preprocessing.intensity.histogram_standardization import train_histogram
+from .preprocessing.label.label_transform import LabelTransform
+from .spatial_transform import SpatialTransform
+from .transform import Transform
__all__ = [
'Transform',
diff --git a/src/torchio/transforms/augmentation/__init__.py b/src/torchio/transforms/augmentation/__init__.py
index f72f61d6..68bf5627 100644
--- a/src/torchio/transforms/augmentation/__init__.py
+++ b/src/torchio/transforms/augmentation/__init__.py
@@ -1,6 +1,5 @@
from .random_transform import RandomTransform
-
__all__ = [
'RandomTransform',
]
diff --git a/src/torchio/transforms/augmentation/composition.py b/src/torchio/transforms/augmentation/composition.py
index fc9e83cb..7c0d89fb 100644
--- a/src/torchio/transforms/augmentation/composition.py
+++ b/src/torchio/transforms/augmentation/composition.py
@@ -8,10 +8,9 @@
import numpy as np
import torch
-from . import RandomTransform
-from .. import Transform
from ...data.subject import Subject
-
+from ..transform import Transform
+from . import RandomTransform
TypeTransformsDict = Union[Dict[Transform, float], Sequence[Transform]]
diff --git a/src/torchio/transforms/augmentation/intensity/__init__.py b/src/torchio/transforms/augmentation/intensity/__init__.py
index 574a9c96..343a8359 100644
--- a/src/torchio/transforms/augmentation/intensity/__init__.py
+++ b/src/torchio/transforms/augmentation/intensity/__init__.py
@@ -17,7 +17,6 @@
from .random_swap import RandomSwap
from .random_swap import Swap
-
__all__ = [
'RandomSwap',
'Swap',
diff --git a/src/torchio/transforms/augmentation/intensity/random_bias_field.py b/src/torchio/transforms/augmentation/intensity/random_bias_field.py
index 6a420785..f1e797c7 100644
--- a/src/torchio/transforms/augmentation/intensity/random_bias_field.py
+++ b/src/torchio/transforms/augmentation/intensity/random_bias_field.py
@@ -7,10 +7,10 @@
import numpy as np
import torch
-from .. import RandomTransform
-from ... import IntensityTransform
from ....data.subject import Subject
from ....typing import TypeData
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
class RandomBiasField(RandomTransform, IntensityTransform):
diff --git a/src/torchio/transforms/augmentation/intensity/random_blur.py b/src/torchio/transforms/augmentation/intensity/random_blur.py
index ef85874b..704def8f 100644
--- a/src/torchio/transforms/augmentation/intensity/random_blur.py
+++ b/src/torchio/transforms/augmentation/intensity/random_blur.py
@@ -7,12 +7,12 @@
import scipy.ndimage as ndi
import torch
-from .. import RandomTransform
-from ... import IntensityTransform
from ....data.subject import Subject
from ....typing import TypeData
from ....typing import TypeSextetFloat
from ....typing import TypeTripletFloat
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
class RandomBlur(RandomTransform, IntensityTransform):
diff --git a/src/torchio/transforms/augmentation/intensity/random_gamma.py b/src/torchio/transforms/augmentation/intensity/random_gamma.py
index efc86d7a..bae5a177 100644
--- a/src/torchio/transforms/augmentation/intensity/random_gamma.py
+++ b/src/torchio/transforms/augmentation/intensity/random_gamma.py
@@ -5,11 +5,11 @@
import numpy as np
import torch
-from .. import RandomTransform
-from ... import IntensityTransform
from ....data.subject import Subject
from ....typing import TypeRangeFloat
from ....utils import to_tuple
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
class RandomGamma(RandomTransform, IntensityTransform):
@@ -62,7 +62,7 @@ class RandomGamma(RandomTransform, IntensityTransform):
>>> subject = tio.datasets.FPG()
>>> transform = tio.RandomGamma(log_gamma=(-0.3, 0.3)) # gamma between 0.74 and 1.34
>>> transformed = transform(subject)
- """ # noqa: B950
+ """
def __init__(self, log_gamma: TypeRangeFloat = (-0.3, 0.3), **kwargs):
super().__init__(**kwargs)
@@ -115,7 +115,7 @@ class Gamma(IntensityTransform):
>>> subject = tio.datasets.FPG()
>>> transform = tio.Gamma(0.8)
>>> transformed = transform(subject)
- """ # noqa: B950
+ """
def __init__(self, gamma: float, **kwargs):
super().__init__(**kwargs)
diff --git a/src/torchio/transforms/augmentation/intensity/random_ghosting.py b/src/torchio/transforms/augmentation/intensity/random_ghosting.py
index 4992fdcb..2316f0b1 100644
--- a/src/torchio/transforms/augmentation/intensity/random_ghosting.py
+++ b/src/torchio/transforms/augmentation/intensity/random_ghosting.py
@@ -7,10 +7,10 @@
import numpy as np
import torch
-from .. import RandomTransform
-from ... import FourierTransform
-from ... import IntensityTransform
from ....data.subject import Subject
+from ...fourier import FourierTransform
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
class RandomGhosting(RandomTransform, IntensityTransform):
diff --git a/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py b/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py
index 5e4a0a6a..972dc6db 100644
--- a/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py
+++ b/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py
@@ -5,14 +5,14 @@
import torch
-from .. import RandomTransform
-from ... import IntensityTransform
from ....data.image import LabelMap
from ....data.image import ScalarImage
from ....data.subject import Subject
from ....typing import TypeData
from ....typing import TypeRangeFloat
from ....utils import check_sequence
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
class RandomLabelsToImage(RandomTransform, IntensityTransform):
@@ -125,7 +125,7 @@ class RandomLabelsToImage(RandomTransform, IntensityTransform):
>>> transformed = transform(subject) # subject's key 't1' has been replaced with the simulated image
.. seealso:: :class:`~torchio.transforms.preprocessing.label.remap_labels.RemapLabels`.
- """ # noqa: B950
+ """
def __init__(
self,
@@ -142,8 +142,8 @@ def __init__(
):
super().__init__(**kwargs)
self.label_key = _parse_label_key(label_key)
- self.used_labels = _parse_used_labels(used_labels) # type: ignore[arg-type] # noqa: B950
- self.mean, self.std = self.parse_mean_and_std(mean, std) # type: ignore[arg-type,assignment] # noqa: B950
+ self.used_labels = _parse_used_labels(used_labels) # type: ignore[arg-type]
+ self.mean, self.std = self.parse_mean_and_std(mean, std) # type: ignore[arg-type,assignment]
self.default_mean = self.parse_gaussian_parameter(
default_mean,
'default_mean',
@@ -254,7 +254,7 @@ def apply_transform(self, subject: Subject) -> Subject:
labels = range(label_map.shape[0])
# Raise error if mean and std are not defined for every label
- _check_mean_and_std_length(labels, self.mean, self.std) # type: ignore[arg-type] # noqa: B950
+ _check_mean_and_std_length(labels, self.mean, self.std) # type: ignore[arg-type]
for label in labels:
mean, std = self.get_params(label)
diff --git a/src/torchio/transforms/augmentation/intensity/random_motion.py b/src/torchio/transforms/augmentation/intensity/random_motion.py
index 13b3b66d..3ff81f0d 100644
--- a/src/torchio/transforms/augmentation/intensity/random_motion.py
+++ b/src/torchio/transforms/augmentation/intensity/random_motion.py
@@ -9,12 +9,12 @@
import SimpleITK as sitk
import torch
-from .. import RandomTransform
-from ... import FourierTransform
-from ... import IntensityTransform
from ....data.io import nib_to_sitk
from ....data.subject import Subject
from ....typing import TypeTripletFloat
+from ...fourier import FourierTransform
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
class RandomMotion(RandomTransform, IntensityTransform, FourierTransform):
@@ -147,9 +147,7 @@ def __init__(
degrees: Union[TypeTripletFloat, Dict[str, TypeTripletFloat]],
translation: Union[TypeTripletFloat, Dict[str, TypeTripletFloat]],
times: Union[Sequence[float], Dict[str, Sequence[float]]],
- image_interpolation: Union[
- Sequence[str], Dict[str, Sequence[str]]
- ], # noqa: B950
+ image_interpolation: Union[Sequence[str], Dict[str, Sequence[str]]],
**kwargs,
):
super().__init__(**kwargs)
diff --git a/src/torchio/transforms/augmentation/intensity/random_noise.py b/src/torchio/transforms/augmentation/intensity/random_noise.py
index a74b881b..0914810f 100644
--- a/src/torchio/transforms/augmentation/intensity/random_noise.py
+++ b/src/torchio/transforms/augmentation/intensity/random_noise.py
@@ -6,9 +6,9 @@
import torch
-from .. import RandomTransform
-from ... import IntensityTransform
from ....data.subject import Subject
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
class RandomNoise(RandomTransform, IntensityTransform):
@@ -99,8 +99,8 @@ def apply_transform(self, subject: Subject) -> Subject:
mean, std, seed = args = self.mean, self.std, self.seed
for name, image in self.get_images_dict(subject).items():
if self.arguments_are_dict():
- values = (arg[name] for arg in args) # type: ignore[index,call-overload] # noqa: B950
- mean, std, seed = values # type: ignore[assignment] # noqa: B950
+ values = (arg[name] for arg in args) # type: ignore[index,call-overload]
+ mean, std, seed = values # type: ignore[assignment]
with self._use_seed(seed):
assert isinstance(mean, float)
assert isinstance(std, float)
diff --git a/src/torchio/transforms/augmentation/intensity/random_spike.py b/src/torchio/transforms/augmentation/intensity/random_spike.py
index 5db9e29d..183f5b6e 100644
--- a/src/torchio/transforms/augmentation/intensity/random_spike.py
+++ b/src/torchio/transforms/augmentation/intensity/random_spike.py
@@ -7,10 +7,10 @@
import numpy as np
import torch
-from .. import RandomTransform
-from ... import FourierTransform
-from ... import IntensityTransform
from ....data.subject import Subject
+from ...fourier import FourierTransform
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
class RandomSpike(RandomTransform, IntensityTransform, FourierTransform):
@@ -53,7 +53,7 @@ def __init__(
intensity,
'intensity_range',
)
- self.num_spikes_range: Tuple[int, int] = self._parse_range( # type: ignore[assignment] # noqa: B950
+ self.num_spikes_range: Tuple[int, int] = self._parse_range( # type: ignore[assignment]
num_spikes,
'num_spikes',
min_constraint=0,
diff --git a/src/torchio/transforms/augmentation/intensity/random_swap.py b/src/torchio/transforms/augmentation/intensity/random_swap.py
index 3f7aae64..65d7d74d 100644
--- a/src/torchio/transforms/augmentation/intensity/random_swap.py
+++ b/src/torchio/transforms/augmentation/intensity/random_swap.py
@@ -11,13 +11,12 @@
import numpy as np
import torch
-from .. import RandomTransform
-from ... import IntensityTransform
from ....data.subject import Subject
from ....typing import TypeTripletInt
from ....typing import TypeTuple
from ....utils import to_tuple
-
+from ...intensity_transform import IntensityTransform
+from .. import RandomTransform
TypeLocations = Sequence[Tuple[TypeTripletInt, TypeTripletInt]]
TensorArray = TypeVar('TensorArray', np.ndarray, torch.Tensor)
@@ -143,7 +142,7 @@ def apply_transform(self, subject: Subject) -> Subject:
if self.invert_transform:
assert isinstance(locations, list)
locations.reverse()
- swapped = _swap(image.data, patch_size, locations) # type: ignore[arg-type] # noqa: B950
+ swapped = _swap(image.data, patch_size, locations) # type: ignore[arg-type]
image.set_data(swapped)
return subject
diff --git a/src/torchio/transforms/augmentation/random_transform.py b/src/torchio/transforms/augmentation/random_transform.py
index 86a5df79..0da6e211 100644
--- a/src/torchio/transforms/augmentation/random_transform.py
+++ b/src/torchio/transforms/augmentation/random_transform.py
@@ -4,8 +4,8 @@
import torch
-from .. import Transform
from ...typing import TypeRangeFloat
+from ..transform import Transform
class RandomTransform(Transform):
diff --git a/src/torchio/transforms/augmentation/spatial/__init__.py b/src/torchio/transforms/augmentation/spatial/__init__.py
index 119769cb..4deb7acb 100644
--- a/src/torchio/transforms/augmentation/spatial/__init__.py
+++ b/src/torchio/transforms/augmentation/spatial/__init__.py
@@ -6,7 +6,6 @@
from .random_flip import Flip
from .random_flip import RandomFlip
-
__all__ = [
'RandomFlip',
'Flip',
diff --git a/src/torchio/transforms/augmentation/spatial/random_affine.py b/src/torchio/transforms/augmentation/spatial/random_affine.py
index ea85219a..2cdcb38d 100644
--- a/src/torchio/transforms/augmentation/spatial/random_affine.py
+++ b/src/torchio/transforms/augmentation/spatial/random_affine.py
@@ -8,8 +8,6 @@
import SimpleITK as sitk
import torch
-from .. import RandomTransform
-from ... import SpatialTransform
from ....constants import INTENSITY
from ....constants import TYPE
from ....data.io import nib_to_sitk
@@ -19,7 +17,8 @@
from ....typing import TypeTripletFloat
from ....utils import get_major_sitk_version
from ....utils import to_tuple
-
+from ...spatial_transform import SpatialTransform
+from .. import RandomTransform
TypeOneToSixFloat = Union[TypeRangeFloat, TypeTripletFloat, TypeSextetFloat]
@@ -111,7 +110,7 @@ class RandomAffine(RandomTransform, SpatialTransform):
ct_transformed = transform(ct)
subject.add_image(ct_transformed, 'Transformed')
subject.plot()
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/transforms/augmentation/spatial/random_anisotropy.py b/src/torchio/transforms/augmentation/spatial/random_anisotropy.py
index 2711f415..c50fe48c 100644
--- a/src/torchio/transforms/augmentation/spatial/random_anisotropy.py
+++ b/src/torchio/transforms/augmentation/spatial/random_anisotropy.py
@@ -4,11 +4,11 @@
import torch
-from .. import RandomTransform
from ....data.subject import Subject
from ....typing import TypeRangeFloat
from ....utils import to_tuple
from ...preprocessing import Resample
+from .. import RandomTransform
class RandomAnisotropy(RandomTransform):
@@ -44,7 +44,7 @@ class RandomAnisotropy(RandomTransform):
... ) # Multiply spacing of one of the 3 axes by a factor randomly chosen in [2, 5]
>>> colin = tio.datasets.Colin27()
>>> transformed = transform(colin)
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py b/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
index b3aa6984..cafb8514 100644
--- a/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
+++ b/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
@@ -8,15 +8,14 @@
import SimpleITK as sitk
import torch
-from .. import RandomTransform
-from ... import SpatialTransform
from ....data.image import ScalarImage
from ....data.io import nib_to_sitk
from ....data.subject import Subject
from ....typing import TypeTripletFloat
from ....typing import TypeTripletInt
from ....utils import to_tuple
-
+from ...spatial_transform import SpatialTransform
+from .. import RandomTransform
SPLINE_ORDER = 3
@@ -118,7 +117,7 @@ class RandomElasticDeformation(RandomTransform, SpatialTransform):
.. [#] Technically, :math:`2 \epsilon` should be added to the
image bounds, where :math:`\epsilon = 2^{-3}` `according to ITK
source code `_.
- """ # noqa: B950
+ """
def __init__(
self,
@@ -132,9 +131,9 @@ def __init__(
super().__init__(**kwargs)
self._bspline_transformation = None
self.num_control_points = to_tuple(num_control_points, length=3)
- _parse_num_control_points(self.num_control_points) # type: ignore[arg-type] # noqa: B950
+ _parse_num_control_points(self.num_control_points) # type: ignore[arg-type]
self.max_displacement = to_tuple(max_displacement, length=3)
- _parse_max_displacement(self.max_displacement) # type: ignore[arg-type] # noqa: B950
+ _parse_max_displacement(self.max_displacement) # type: ignore[arg-type]
self.num_locked_borders = locked_borders
if locked_borders not in (0, 1, 2):
raise ValueError('locked_borders must be 0, 1, or 2')
diff --git a/src/torchio/transforms/augmentation/spatial/random_flip.py b/src/torchio/transforms/augmentation/spatial/random_flip.py
index b61237b3..89a44ff8 100644
--- a/src/torchio/transforms/augmentation/spatial/random_flip.py
+++ b/src/torchio/transforms/augmentation/spatial/random_flip.py
@@ -5,10 +5,10 @@
import numpy as np
import torch
-from .. import RandomTransform
-from ... import SpatialTransform
from ....data.subject import Subject
from ....utils import to_tuple
+from ...spatial_transform import SpatialTransform
+from .. import RandomTransform
class RandomFlip(RandomTransform, SpatialTransform):
diff --git a/src/torchio/transforms/data_parser.py b/src/torchio/transforms/data_parser.py
index 526bd748..adb9f4ec 100644
--- a/src/torchio/transforms/data_parser.py
+++ b/src/torchio/transforms/data_parser.py
@@ -15,7 +15,6 @@
from ..data.subject import Subject
from ..typing import TypeData
-
TypeTransformInput = Union[
Subject,
Image,
@@ -75,7 +74,7 @@ def get_subject(self):
'If the input is a dictionary, a value for "include" must'
' be specified when instantiating the transform. See the'
' docs for Transform:'
- ' https://torchio.readthedocs.io/transforms/transforms.html#torchio.transforms.Transform' # noqa: B950
+ ' https://torchio.readthedocs.io/transforms/transforms.html#torchio.transforms.Transform'
)
raise RuntimeError(message)
subject = self._get_subject_from_dict(
diff --git a/src/torchio/transforms/interpolation.py b/src/torchio/transforms/interpolation.py
index c3037148..58836ff4 100644
--- a/src/torchio/transforms/interpolation.py
+++ b/src/torchio/transforms/interpolation.py
@@ -1,4 +1,3 @@
-# noqa: B950
import enum
import SimpleITK as sitk
diff --git a/src/torchio/transforms/lambda_transform.py b/src/torchio/transforms/lambda_transform.py
index a93b9f06..1cabc520 100644
--- a/src/torchio/transforms/lambda_transform.py
+++ b/src/torchio/transforms/lambda_transform.py
@@ -28,7 +28,7 @@ class Lambda(Transform):
>>> def double(x):
... return 2 * x
>>> double_transform = tio.Lambda(double)
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/transforms/preprocessing/__init__.py b/src/torchio/transforms/preprocessing/__init__.py
index 18b99dfe..435e56d5 100644
--- a/src/torchio/transforms/preprocessing/__init__.py
+++ b/src/torchio/transforms/preprocessing/__init__.py
@@ -18,7 +18,6 @@
from .spatial.resize import Resize
from .spatial.to_canonical import ToCanonical
-
__all__ = [
'Pad',
'Crop',
diff --git a/src/torchio/transforms/preprocessing/intensity/__init__.py b/src/torchio/transforms/preprocessing/intensity/__init__.py
index 98910c3d..763a1847 100644
--- a/src/torchio/transforms/preprocessing/intensity/__init__.py
+++ b/src/torchio/transforms/preprocessing/intensity/__init__.py
@@ -1,6 +1,5 @@
from .normalization_transform import NormalizationTransform
-
__all__ = [
'NormalizationTransform',
]
diff --git a/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py b/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py
index bdd93c3c..a6e12a8f 100644
--- a/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py
+++ b/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py
@@ -7,8 +7,8 @@
from typing import Tuple
from typing import Union
-import torch
import numpy as np
+import torch
from tqdm.auto import tqdm
from ....data.io import read_image
@@ -51,7 +51,7 @@ class HistogramStandardization(NormalizationTransform):
>>> transform = tio.HistogramStandardization(landmarks)
>>> torch.save(landmarks, 'path_to_landmarks.pth')
>>> transform = tio.HistogramStandardization('path_to_landmarks.pth')
- """ # noqa: B950
+ """
def __init__(
self,
@@ -161,12 +161,12 @@ def train(
... }
>>>
>>> transform = HistogramStandardization(landmarks_dict)
- """ # noqa: B950
+ """
is_masks_list = isinstance(mask_path, Sequence)
- if is_masks_list and len(mask_path) != len(images_paths): # type: ignore[arg-type] # noqa: B950
+ if is_masks_list and len(mask_path) != len(images_paths): # type: ignore[arg-type]
message = (
- f'Different number of images ({len(images_paths)})' # type: ignore[arg-type] # noqa: B950
- f' and mask ({len(mask_path)}) paths found' # type: ignore[arg-type] # noqa: B950
+ f'Different number of images ({len(images_paths)})' # type: ignore[arg-type]
+ f' and mask ({len(mask_path)}) paths found' # type: ignore[arg-type]
)
raise ValueError(message)
quantiles_cutoff = DEFAULT_CUTOFF if cutoff is None else cutoff
diff --git a/src/torchio/transforms/preprocessing/intensity/mask.py b/src/torchio/transforms/preprocessing/intensity/mask.py
index 1458f853..4b8aaec7 100644
--- a/src/torchio/transforms/preprocessing/intensity/mask.py
+++ b/src/torchio/transforms/preprocessing/intensity/mask.py
@@ -4,10 +4,10 @@
import torch
-from ... import IntensityTransform
from ....data.image import ScalarImage
from ....data.subject import Subject
from ....transforms.transform import TypeMaskingMethod
+from ...intensity_transform import IntensityTransform
class Mask(IntensityTransform):
@@ -45,7 +45,7 @@ class Mask(IntensityTransform):
masked = mask(subject)
subject.add_image(masked.t1, 'Masked')
subject.plot()
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/transforms/preprocessing/intensity/normalization_transform.py b/src/torchio/transforms/preprocessing/intensity/normalization_transform.py
index cc79750b..7dfa5035 100644
--- a/src/torchio/transforms/preprocessing/intensity/normalization_transform.py
+++ b/src/torchio/transforms/preprocessing/intensity/normalization_transform.py
@@ -1,8 +1,8 @@
import torch
-from ... import IntensityTransform
from ....data.subject import Subject
from ....transforms.transform import TypeMaskingMethod
+from ...intensity_transform import IntensityTransform
class NormalizationTransform(IntensityTransform):
@@ -34,7 +34,7 @@ class NormalizationTransform(IntensityTransform):
>>> transformed = transform(subject) # use only values within the brain
>>> transform = tio.ZNormalization(masking_method=lambda x: x > x.mean())
>>> transformed = transform(subject) # use values above the image mean
- """ # noqa: B950
+ """
def __init__(self, masking_method: TypeMaskingMethod = None, **kwargs):
super().__init__(**kwargs)
diff --git a/src/torchio/transforms/preprocessing/intensity/rescale.py b/src/torchio/transforms/preprocessing/intensity/rescale.py
index cdbdaee3..49db9bec 100644
--- a/src/torchio/transforms/preprocessing/intensity/rescale.py
+++ b/src/torchio/transforms/preprocessing/intensity/rescale.py
@@ -42,7 +42,7 @@ class RescaleIntensity(NormalizationTransform):
.. _this scikit-image example: https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_equalize.html#sphx-glr-auto-examples-color-exposure-plot-equalize-py
.. _nn-UNet paper: https://arxiv.org/abs/1809.10486
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/transforms/preprocessing/label/keep_largest_component.py b/src/torchio/transforms/preprocessing/label/keep_largest_component.py
index d5fd76a7..fb17c2f8 100644
--- a/src/torchio/transforms/preprocessing/label/keep_largest_component.py
+++ b/src/torchio/transforms/preprocessing/label/keep_largest_component.py
@@ -16,7 +16,7 @@ class KeepLargestComponent(LabelTransform):
extending this transform, please `open a new issue`_.
.. _open a new issue: https://github.com/fepegar/torchio/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=Improve%20KeepLargestComponent%20transform
- """ # noqa: B950
+ """
def apply_transform(self, subject: Subject) -> Subject:
for image in self.get_images(subject):
diff --git a/src/torchio/transforms/preprocessing/label/remap_labels.py b/src/torchio/transforms/preprocessing/label/remap_labels.py
index 99b79ca4..3ea68c36 100644
--- a/src/torchio/transforms/preprocessing/label/remap_labels.py
+++ b/src/torchio/transforms/preprocessing/label/remap_labels.py
@@ -132,7 +132,7 @@ class RemapLabels(LabelTransform):
>>> transformed = transform(subject)
>>> # Apply the inverse on the right side only. The labels are correctly split into left/right.
>>> inverse_transformed = transformed.apply_inverse_transform()
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/transforms/preprocessing/spatial/bounds_transform.py b/src/torchio/transforms/preprocessing/spatial/bounds_transform.py
index 8f49fc9b..b65c37be 100644
--- a/src/torchio/transforms/preprocessing/spatial/bounds_transform.py
+++ b/src/torchio/transforms/preprocessing/spatial/bounds_transform.py
@@ -1,5 +1,5 @@
-from ... import SpatialTransform
from ....transforms.transform import TypeBounds
+from ...spatial_transform import SpatialTransform
class BoundsTransform(SpatialTransform):
diff --git a/src/torchio/transforms/preprocessing/spatial/copy_affine.py b/src/torchio/transforms/preprocessing/spatial/copy_affine.py
index 39861fae..83fc809a 100644
--- a/src/torchio/transforms/preprocessing/spatial/copy_affine.py
+++ b/src/torchio/transforms/preprocessing/spatial/copy_affine.py
@@ -1,7 +1,7 @@
import copy
-from ... import SpatialTransform
from ....data.subject import Subject
+from ...spatial_transform import SpatialTransform
class CopyAffine(SpatialTransform):
@@ -60,7 +60,7 @@ class CopyAffine(SpatialTransform):
* https://github.com/fepegar/torchio/issues/430
* https://github.com/fepegar/torchio/issues/382
* https://github.com/fepegar/torchio/pull/592
- """ # noqa: B950
+ """
def __init__(self, target: str, **kwargs):
super().__init__(**kwargs)
diff --git a/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py b/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py
index 8ca20be0..49e68edb 100644
--- a/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py
+++ b/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py
@@ -6,9 +6,9 @@
import numpy as np
-from ... import SpatialTransform
from ....data.subject import Subject
from ....utils import parse_spatial_shape
+from ...spatial_transform import SpatialTransform
from ...transform import TypeSixBounds
from ...transform import TypeTripletInt
from .crop import Crop
@@ -69,7 +69,7 @@ class CropOrPad(SpatialTransform):
t1_pad_crop = crop_pad(t1)
subject = tio.Subject(t1=t1, crop_pad=t1_pad_crop)
subject.plot()
- """ # noqa: B950
+ """
def __init__(
self,
@@ -120,7 +120,7 @@ def _bbox_mask(mask_volume: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
Args:
mask_volume: 3D NumPy array.
- """ # noqa: B950
+ """
i_any = np.any(mask_volume, axis=(1, 2))
j_any = np.any(mask_volume, axis=(0, 2))
k_any = np.any(mask_volume, axis=(0, 1))
@@ -150,7 +150,7 @@ def _get_six_bounds_parameters(
>>> p = np.array((4, 0, 7))
>>> CropOrPad._get_six_bounds_parameters(p)
(2, 2, 0, 0, 4, 3)
- """ # noqa: B950
+ """
parameters = parameters / 2
result = []
for number in parameters:
diff --git a/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py b/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py
index c5bad843..b625686e 100644
--- a/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py
+++ b/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py
@@ -5,10 +5,10 @@
import numpy as np
-from ... import SpatialTransform
from ....data.subject import Subject
from ....typing import TypeTripletInt
from ....utils import to_tuple
+from ...spatial_transform import SpatialTransform
from .crop_or_pad import CropOrPad
@@ -112,7 +112,7 @@ class EnsureShapeMultiple(SpatialTransform):
>>> transformed = transform(image_2d)
>>> transformed.shape
torch.Size([1, 176, 216, 1])
- """ # noqa: B950
+ """
def __init__(
self,
@@ -129,7 +129,7 @@ def __init__(
def apply_transform(self, subject: Subject) -> Subject:
source_shape = np.array(subject.spatial_shape, np.uint16)
- function: Callable = np.floor if self.method == 'crop' else np.ceil # type: ignore[assignment] # noqa: B950
+ function: Callable = np.floor if self.method == 'crop' else np.ceil # type: ignore[assignment]
integer_ratio = function(source_shape / self.target_multiple)
target_shape = integer_ratio * self.target_multiple
target_shape = np.maximum(target_shape, 1)
diff --git a/src/torchio/transforms/preprocessing/spatial/pad.py b/src/torchio/transforms/preprocessing/spatial/pad.py
index 8713de8f..deaaae5f 100644
--- a/src/torchio/transforms/preprocessing/spatial/pad.py
+++ b/src/torchio/transforms/preprocessing/spatial/pad.py
@@ -40,7 +40,7 @@ class Pad(BoundsTransform):
:class:`~torchio.transforms.CropOrPad` instead.
.. _NumPy docs: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
- """ # noqa: B950
+ """
PADDING_MODES = (
'empty',
@@ -101,7 +101,7 @@ def apply_transform(self, subject: Subject) -> Subject:
kwargs = {'mode': self.padding_mode}
pad_params = self.bounds_parameters
paddings = (0, 0), pad_params[:2], pad_params[2:4], pad_params[4:]
- padded = np.pad(image.data, paddings, **kwargs) # type: ignore[call-overload] # noqa: B950
+ padded = np.pad(image.data, paddings, **kwargs) # type: ignore[call-overload]
image.set_data(torch.as_tensor(padded))
image.affine = new_affine
return subject
diff --git a/src/torchio/transforms/preprocessing/spatial/resample.py b/src/torchio/transforms/preprocessing/spatial/resample.py
index d9a91de2..7b5b4614 100644
--- a/src/torchio/transforms/preprocessing/spatial/resample.py
+++ b/src/torchio/transforms/preprocessing/spatial/resample.py
@@ -10,7 +10,6 @@
import SimpleITK as sitk
import torch
-from ... import SpatialTransform
from ....data.image import Image
from ....data.image import ScalarImage
from ....data.io import get_sitk_metadata_from_ras_affine
@@ -18,7 +17,7 @@
from ....data.subject import Subject
from ....typing import TypePath
from ....typing import TypeTripletFloat
-
+from ...spatial_transform import SpatialTransform
TypeSpacing = Union[float, Tuple[float, float, float]]
@@ -76,7 +75,7 @@ class Resample(SpatialTransform):
t1_resampled = resample(subject.t1)
subject.add_image(t1_resampled, 'Downsampled')
subject.plot()
- """ # noqa: B950
+ """
def __init__(
self,
diff --git a/src/torchio/transforms/preprocessing/spatial/resize.py b/src/torchio/transforms/preprocessing/spatial/resize.py
index 0087713a..d8e45016 100644
--- a/src/torchio/transforms/preprocessing/spatial/resize.py
+++ b/src/torchio/transforms/preprocessing/spatial/resize.py
@@ -2,10 +2,10 @@
import numpy as np
-from ... import SpatialTransform
from ....data.subject import Subject
from ....typing import TypeSpatialShape
from ....utils import to_tuple
+from ...spatial_transform import SpatialTransform
from .crop_or_pad import CropOrPad
from .resample import Resample
diff --git a/src/torchio/transforms/preprocessing/spatial/to_canonical.py b/src/torchio/transforms/preprocessing/spatial/to_canonical.py
index 46b61947..ee53814b 100644
--- a/src/torchio/transforms/preprocessing/spatial/to_canonical.py
+++ b/src/torchio/transforms/preprocessing/spatial/to_canonical.py
@@ -2,8 +2,8 @@
import numpy as np
import torch
-from ... import SpatialTransform
from ....data.subject import Subject
+from ...spatial_transform import SpatialTransform
class ToCanonical(SpatialTransform):
@@ -26,7 +26,7 @@ class ToCanonical(SpatialTransform):
:meth:`nibabel.as_closest_canonical`.
.. _NiBabel docs about image orientation: https://nipy.org/nibabel/image_orientation.html
- """ # noqa: B950
+ """
def apply_transform(self, subject: Subject) -> Subject:
for image in subject.get_images(intensity_only=False):
diff --git a/src/torchio/transforms/transform.py b/src/torchio/transforms/transform.py
index 26f52244..e6cf4139 100644
--- a/src/torchio/transforms/transform.py
+++ b/src/torchio/transforms/transform.py
@@ -26,12 +26,12 @@
from ..typing import TypeKeys
from ..typing import TypeNumber
from ..typing import TypeTripletInt
-from ..utils import to_tuple
from ..utils import is_iterable
+from ..utils import to_tuple
from .data_parser import DataParser
from .data_parser import TypeTransformInput
-from .interpolation import get_sitk_interpolator
from .interpolation import Interpolation
+from .interpolation import get_sitk_interpolator
TypeSixBounds = Tuple[int, int, int, int, int, int]
TypeBounds = Union[
@@ -201,9 +201,13 @@ def apply_transform(self, subject: Subject) -> Subject:
raise NotImplementedError
def add_transform_to_subject_history(self, subject):
+ from . import Compose
+ from . import CropOrPad
+ from . import EnsureShapeMultiple
+ from . import OneOf
from .augmentation import RandomTransform
- from . import Compose, OneOf, CropOrPad, EnsureShapeMultiple
- from .preprocessing import SequentialLabels, Resize
+ from .preprocessing import Resize
+ from .preprocessing import SequentialLabels
call_others = (
RandomTransform,
@@ -448,9 +452,9 @@ def parse_bounds(bounds_parameters: TypeBounds) -> Optional[TypeSixBounds]:
if bounds_parameters is None:
return None
try:
- bounds_parameters = tuple(bounds_parameters) # type: ignore[assignment,arg-type] # noqa: B950
+ bounds_parameters = tuple(bounds_parameters) # type: ignore[assignment,arg-type]
except TypeError:
- bounds_parameters = (bounds_parameters,) # type: ignore[assignment] # noqa: B950
+ bounds_parameters = (bounds_parameters,) # type: ignore[assignment]
# Check that numbers are integers
for number in bounds_parameters: # type: ignore[union-attr]
@@ -460,7 +464,7 @@ def parse_bounds(bounds_parameters: TypeBounds) -> Optional[TypeSixBounds]:
f' not "{bounds_parameters}" of type {type(number)}'
)
raise ValueError(message)
- bounds_parameters_tuple = tuple(int(n) for n in bounds_parameters) # type: ignore[assignment,union-attr] # noqa: B950
+ bounds_parameters_tuple = tuple(int(n) for n in bounds_parameters) # type: ignore[assignment,union-attr]
bounds_parameters_length = len(bounds_parameters_tuple)
if bounds_parameters_length == 6:
return bounds_parameters_tuple # type: ignore[return-value]
@@ -511,7 +515,7 @@ def get_mask_from_masking_method(
tensor,
)
elif type(masking_method) in (tuple, list, int):
- return self.get_mask_from_bounds(masking_method, tensor) # type: ignore[arg-type] # noqa: B950
+ return self.get_mask_from_bounds(masking_method, tensor) # type: ignore[arg-type]
first_anat_axes = tuple(s[0] for s in ANATOMICAL_AXES)
message = (
'Masking method must be one of:\n'
diff --git a/src/torchio/typing.py b/src/torchio/typing.py
index 2c1022c6..78898fad 100644
--- a/src/torchio/typing.py
+++ b/src/torchio/typing.py
@@ -8,7 +8,6 @@
import numpy as np
import torch
-
# For typing hints
TypePath = Union[str, Path]
TypeNumber = Union[int, float]
diff --git a/src/torchio/utils.py b/src/torchio/utils.py
index 6f7eecc4..a63e094c 100644
--- a/src/torchio/utils.py
+++ b/src/torchio/utils.py
@@ -16,10 +16,10 @@
from typing import Tuple
from typing import Union
-import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
+from nibabel.nifti1 import Nifti1Image
from torch.utils.data import DataLoader
from torch.utils.data._utils.collate import default_collate
from tqdm.auto import trange
@@ -88,7 +88,9 @@ def create_dummy_dataset(
force: bool = False,
verbose: bool = False,
):
- from .data import ScalarImage, LabelMap, Subject
+ from .data import LabelMap
+ from .data import ScalarImage
+ from .data import Subject
output_dir = tempfile.gettempdir() if directory is None else directory
output_dir = Path(output_dir)
@@ -128,11 +130,11 @@ def create_dummy_dataset(
image *= 255
image_path = images_dir / f'image_{i}{suffix}'
- nii = nib.Nifti1Image(image.astype(np.uint8), affine)
+ nii = Nifti1Image(image.astype(np.uint8), affine)
nii.to_filename(str(image_path))
label_path = labels_dir / f'label_{i}{suffix}'
- nii = nib.Nifti1Image(label.astype(np.uint8), affine)
+ nii = Nifti1Image(label.astype(np.uint8), affine)
nii.to_filename(str(label_path))
subject = Subject(
@@ -272,7 +274,9 @@ def get_subjects_from_batch(batch: Dict) -> List:
batch: Dictionary generated by a :class:`tio.SubjectsLoader`
extracting data from a :class:`torchio.SubjectsDataset`.
"""
- from .data import ScalarImage, LabelMap, Subject
+ from .data import LabelMap
+ from .data import ScalarImage
+ from .data import Subject
subjects = []
image_names, batch_size = get_batch_images_and_size(batch)
@@ -352,7 +356,7 @@ def guess_external_viewer() -> Optional[Path]:
itk = 'ITK-SNAP'
slicer = 'Slicer'
if platform == 'darwin':
- app_path = '/Applications/{}.app/Contents/MacOS/{}' # noqa: FS003
+ app_path = '/Applications/{}.app/Contents/MacOS/{}'
itk_snap_path = Path(app_path.format(2 * (itk,)))
if itk_snap_path.is_file():
return itk_snap_path
diff --git a/src/torchio/visualization.py b/src/torchio/visualization.py
index 40234e93..ce6d8240 100644
--- a/src/torchio/visualization.py
+++ b/src/torchio/visualization.py
@@ -169,7 +169,7 @@ def get_num_bins(x: np.ndarray) -> int:
Args:
x: Input values.
- """ # noqa: B950
+ """
# Freedman–Diaconis number of bins
q25, q75 = np.percentile(x, [25, 75])
bin_width = 2 * (q75 - q25) * len(x) ** (-1 / 3)
@@ -219,7 +219,7 @@ def make_gif(
message = 'Please install Pillow to use Image.to_gif(): pip install Pillow'
raise RuntimeError(message) from e
transform = RescaleIntensity((0, 255))
- tensor = transform(tensor) if rescale else tensor # type: ignore[assignment] # noqa: B950
+ tensor = transform(tensor) if rescale else tensor # type: ignore[assignment]
single_channel = len(tensor) == 1
# Move channels dimension to the end and bring selected axis to 0
diff --git a/tests/data/inference/test_aggregator.py b/tests/data/inference/test_aggregator.py
index b1523ff8..a024b3c4 100644
--- a/tests/data/inference/test_aggregator.py
+++ b/tests/data/inference/test_aggregator.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
@@ -118,7 +119,7 @@ def test_patch_crop_issue_padding(self):
self.run_patch_crop_issue(padding_mode='constant')
def test_bad_aggregator_shape(self):
- # https://github.com/microsoft/InnerEye-DeepLearning/pull/677/checks?check_run_id=5395915817 # noqa: B950
+ # https://github.com/microsoft/InnerEye-DeepLearning/pull/677/checks?check_run_id=5395915817
tensor = torch.ones(1, 40, 40, 40)
image_name = 'img'
subject = tio.Subject({image_name: tio.ScalarImage(tensor=tensor)})
diff --git a/tests/data/inference/test_grid_sampler.py b/tests/data/inference/test_grid_sampler.py
index 5cfa0306..6e7d1529 100644
--- a/tests/data/inference/test_grid_sampler.py
+++ b/tests/data/inference/test_grid_sampler.py
@@ -1,9 +1,10 @@
#!/usr/bin/env python
from copy import copy
-import torchio as tio
import pytest
+import torchio as tio
+
from ...utils import TorchioTestCase
diff --git a/tests/data/sampler/test_label_sampler.py b/tests/data/sampler/test_label_sampler.py
index 9ebdca28..0fab9922 100644
--- a/tests/data/sampler/test_label_sampler.py
+++ b/tests/data/sampler/test_label_sampler.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/data/sampler/test_patch_sampler.py b/tests/data/sampler/test_patch_sampler.py
index 5a46dd92..49bf61e1 100644
--- a/tests/data/sampler/test_patch_sampler.py
+++ b/tests/data/sampler/test_patch_sampler.py
@@ -1,4 +1,5 @@
import pytest
+
from torchio.data import PatchSampler
from ...utils import TorchioTestCase
diff --git a/tests/data/sampler/test_random_sampler.py b/tests/data/sampler/test_random_sampler.py
index 4b04ff6c..3c9e0fb5 100644
--- a/tests/data/sampler/test_random_sampler.py
+++ b/tests/data/sampler/test_random_sampler.py
@@ -1,4 +1,5 @@
import pytest
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/data/sampler/test_uniform_sampler.py b/tests/data/sampler/test_uniform_sampler.py
index cf73dbc1..6fe202c1 100644
--- a/tests/data/sampler/test_uniform_sampler.py
+++ b/tests/data/sampler/test_uniform_sampler.py
@@ -1,4 +1,5 @@
import torch
+
import torchio
from torchio.data import UniformSampler
diff --git a/tests/data/sampler/test_weighted_sampler.py b/tests/data/sampler/test_weighted_sampler.py
index 6fd3e8c6..20c53e00 100644
--- a/tests/data/sampler/test_weighted_sampler.py
+++ b/tests/data/sampler/test_weighted_sampler.py
@@ -1,4 +1,5 @@
import torch
+
import torchio as tio
from torchio.data import WeightedSampler
diff --git a/tests/data/test_image.py b/tests/data/test_image.py
index b467132f..d4a00dd7 100644
--- a/tests/data/test_image.py
+++ b/tests/data/test_image.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
"""Tests for Image."""
+
import copy
import sys
import tempfile
@@ -8,6 +9,7 @@
import numpy as np
import pytest
import torch
+
import torchio as tio
from ..utils import TorchioTestCase
diff --git a/tests/data/test_io.py b/tests/data/test_io.py
index 0624f00f..d3c63429 100644
--- a/tests/data/test_io.py
+++ b/tests/data/test_io.py
@@ -6,7 +6,8 @@
import SimpleITK as sitk
import torch
-from torchio.data import ScalarImage, io
+from torchio.data import ScalarImage
+from torchio.data import io
from ..utils import TorchioTestCase
diff --git a/tests/data/test_queue.py b/tests/data/test_queue.py
index 1e6ae49d..1b8b98e7 100644
--- a/tests/data/test_queue.py
+++ b/tests/data/test_queue.py
@@ -2,8 +2,9 @@
import pytest
import torch
-import torchio as tio
from parameterized import parameterized
+
+import torchio as tio
from torchio.data import UniformSampler
from torchio.utils import create_dummy_dataset
diff --git a/tests/data/test_subject.py b/tests/data/test_subject.py
index 8a9cb747..49a051fb 100644
--- a/tests/data/test_subject.py
+++ b/tests/data/test_subject.py
@@ -5,6 +5,7 @@
import numpy as np
import pytest
import torch
+
import torchio as tio
from ..utils import TorchioTestCase
@@ -115,7 +116,7 @@ def test_same_space(self):
-5.54619071e-01,
-1.57071802e-02,
2.28515778e02,
- ], # noqa: B950
+ ],
[0.00000000e00, 0.00000000e00, 0.00000000e00, 1.00000000e00],
]
)
diff --git a/tests/data/test_subjects_dataset.py b/tests/data/test_subjects_dataset.py
index 9187238b..13c2247f 100644
--- a/tests/data/test_subjects_dataset.py
+++ b/tests/data/test_subjects_dataset.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ..utils import TorchioTestCase
diff --git a/tests/datasets/test_ixi.py b/tests/datasets/test_ixi.py
index 427c51cd..e4b8e992 100644
--- a/tests/datasets/test_ixi.py
+++ b/tests/datasets/test_ixi.py
@@ -1,4 +1,5 @@
import pytest
+
import torchio as tio
from ..utils import TorchioTestCase
diff --git a/tests/datasets/test_medmnist.py b/tests/datasets/test_medmnist.py
index 680eec49..c9872bae 100644
--- a/tests/datasets/test_medmnist.py
+++ b/tests/datasets/test_medmnist.py
@@ -1,6 +1,7 @@
import os
import pytest
+
import torchio as tio
from torchio.datasets.medmnist import AdrenalMNIST3D
from torchio.datasets.medmnist import FractureMNIST3D
@@ -9,7 +10,6 @@
from torchio.datasets.medmnist import SynapseMNIST3D
from torchio.datasets.medmnist import VesselMNIST3D
-
classes = (
OrganMNIST3D,
NoduleMNIST3D,
diff --git a/tests/test_cli.py b/tests/test_cli.py
index f9fb123b..c619015f 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,12 +1,13 @@
#!/usr/bin/env python
"""Tests for CLI tool package."""
+
from typer.testing import CliRunner
+
from torchio.cli import apply_transform
from torchio.cli import print_info
from .utils import TorchioTestCase
-
runner = CliRunner()
@@ -46,4 +47,4 @@ def test_cli_hd(self):
' dtype: torch.DoubleTensor;'
' memory: 46.9 KiB'
')\n'
- ) # noqa: B950
+ )
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 6070896a..0215ef32 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -2,6 +2,7 @@
import pytest
import torch
+
import torchio as tio
from .utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_oneof.py b/tests/transforms/augmentation/test_oneof.py
index 49ea2d5f..fb3776b3 100644
--- a/tests/transforms/augmentation/test_oneof.py
+++ b/tests/transforms/augmentation/test_oneof.py
@@ -1,4 +1,5 @@
import pytest
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_affine.py b/tests/transforms/augmentation/test_random_affine.py
index b68bd465..fdb8c3fb 100644
--- a/tests/transforms/augmentation/test_random_affine.py
+++ b/tests/transforms/augmentation/test_random_affine.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_anisotropy.py b/tests/transforms/augmentation/test_random_anisotropy.py
index e91448d8..2730a093 100644
--- a/tests/transforms/augmentation/test_random_anisotropy.py
+++ b/tests/transforms/augmentation/test_random_anisotropy.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
from torchio import RandomAnisotropy
from torchio import ScalarImage
diff --git a/tests/transforms/augmentation/test_random_bias_field.py b/tests/transforms/augmentation/test_random_bias_field.py
index 473c856c..d9b35173 100644
--- a/tests/transforms/augmentation/test_random_bias_field.py
+++ b/tests/transforms/augmentation/test_random_bias_field.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_blur.py b/tests/transforms/augmentation/test_random_blur.py
index 2246a7b6..b25c08bd 100644
--- a/tests/transforms/augmentation/test_random_blur.py
+++ b/tests/transforms/augmentation/test_random_blur.py
@@ -1,4 +1,5 @@
import pytest
+
from torchio import RandomBlur
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_elastic_deformation.py b/tests/transforms/augmentation/test_random_elastic_deformation.py
index 6697b48f..534cba17 100644
--- a/tests/transforms/augmentation/test_random_elastic_deformation.py
+++ b/tests/transforms/augmentation/test_random_elastic_deformation.py
@@ -1,4 +1,5 @@
import pytest
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_flip.py b/tests/transforms/augmentation/test_random_flip.py
index 79bc52f9..1e36968d 100644
--- a/tests/transforms/augmentation/test_random_flip.py
+++ b/tests/transforms/augmentation/test_random_flip.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_gamma.py b/tests/transforms/augmentation/test_random_gamma.py
index 7dba2598..b6bd3bdc 100644
--- a/tests/transforms/augmentation/test_random_gamma.py
+++ b/tests/transforms/augmentation/test_random_gamma.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
from torchio import RandomGamma
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_ghosting.py b/tests/transforms/augmentation/test_random_ghosting.py
index 944aafe4..563c2f50 100644
--- a/tests/transforms/augmentation/test_random_ghosting.py
+++ b/tests/transforms/augmentation/test_random_ghosting.py
@@ -1,4 +1,5 @@
import pytest
+
from torchio import RandomGhosting
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_labels_to_image.py b/tests/transforms/augmentation/test_random_labels_to_image.py
index 7afd9809..4b192d9b 100644
--- a/tests/transforms/augmentation/test_random_labels_to_image.py
+++ b/tests/transforms/augmentation/test_random_labels_to_image.py
@@ -1,4 +1,5 @@
import pytest
+
from torchio.transforms import RandomLabelsToImage
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_motion.py b/tests/transforms/augmentation/test_random_motion.py
index 69678434..27bc1756 100644
--- a/tests/transforms/augmentation/test_random_motion.py
+++ b/tests/transforms/augmentation/test_random_motion.py
@@ -1,4 +1,5 @@
import pytest
+
from torchio import RandomMotion
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_noise.py b/tests/transforms/augmentation/test_random_noise.py
index 8cddaad0..f18226f2 100644
--- a/tests/transforms/augmentation/test_random_noise.py
+++ b/tests/transforms/augmentation/test_random_noise.py
@@ -1,4 +1,5 @@
import pytest
+
from torchio import RandomNoise
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_spike.py b/tests/transforms/augmentation/test_random_spike.py
index 10164417..27c0cbd3 100644
--- a/tests/transforms/augmentation/test_random_spike.py
+++ b/tests/transforms/augmentation/test_random_spike.py
@@ -1,4 +1,5 @@
import pytest
+
from torchio import RandomSpike
from ...utils import TorchioTestCase
diff --git a/tests/transforms/augmentation/test_random_swap.py b/tests/transforms/augmentation/test_random_swap.py
index 0be44e15..1aa70060 100644
--- a/tests/transforms/augmentation/test_random_swap.py
+++ b/tests/transforms/augmentation/test_random_swap.py
@@ -1,4 +1,5 @@
import pytest
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/label/test_remap_labels.py b/tests/transforms/label/test_remap_labels.py
index ed12a001..458e8e2a 100644
--- a/tests/transforms/label/test_remap_labels.py
+++ b/tests/transforms/label/test_remap_labels.py
@@ -1,4 +1,5 @@
import pytest
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/label/test_sequential_labels.py b/tests/transforms/label/test_sequential_labels.py
index 12e2c548..d06771e9 100644
--- a/tests/transforms/label/test_sequential_labels.py
+++ b/tests/transforms/label/test_sequential_labels.py
@@ -1,4 +1,5 @@
import pytest
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_clamp.py b/tests/transforms/preprocessing/test_clamp.py
index f58c88f3..73225167 100644
--- a/tests/transforms/preprocessing/test_clamp.py
+++ b/tests/transforms/preprocessing/test_clamp.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_contour.py b/tests/transforms/preprocessing/test_contour.py
index 342dece6..ae13459f 100644
--- a/tests/transforms/preprocessing/test_contour.py
+++ b/tests/transforms/preprocessing/test_contour.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_copy_affine.py b/tests/transforms/preprocessing/test_copy_affine.py
index 611da6ed..6c11f623 100644
--- a/tests/transforms/preprocessing/test_copy_affine.py
+++ b/tests/transforms/preprocessing/test_copy_affine.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_crop.py b/tests/transforms/preprocessing/test_crop.py
index e1d5194d..9b60cb1a 100644
--- a/tests/transforms/preprocessing/test_crop.py
+++ b/tests/transforms/preprocessing/test_crop.py
@@ -1,4 +1,5 @@
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_crop_pad.py b/tests/transforms/preprocessing/test_crop_pad.py
index 90b1f1e7..bed573fb 100644
--- a/tests/transforms/preprocessing/test_crop_pad.py
+++ b/tests/transforms/preprocessing/test_crop_pad.py
@@ -1,5 +1,6 @@
import numpy as np
import pytest
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_ensure_shape_multiple.py b/tests/transforms/preprocessing/test_ensure_shape_multiple.py
index f835585e..2515ec04 100644
--- a/tests/transforms/preprocessing/test_ensure_shape_multiple.py
+++ b/tests/transforms/preprocessing/test_ensure_shape_multiple.py
@@ -1,4 +1,5 @@
import pytest
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_histogram_standardization.py b/tests/transforms/preprocessing/test_histogram_standardization.py
index 06a9da72..425731d3 100644
--- a/tests/transforms/preprocessing/test_histogram_standardization.py
+++ b/tests/transforms/preprocessing/test_histogram_standardization.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
import torch
+
from torchio import LabelMap
from torchio import ScalarImage
from torchio import Subject
diff --git a/tests/transforms/preprocessing/test_keep_largest.py b/tests/transforms/preprocessing/test_keep_largest.py
index 30097768..4a86af15 100644
--- a/tests/transforms/preprocessing/test_keep_largest.py
+++ b/tests/transforms/preprocessing/test_keep_largest.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_mask.py b/tests/transforms/preprocessing/test_mask.py
index 23f8004d..d7faa22a 100644
--- a/tests/transforms/preprocessing/test_mask.py
+++ b/tests/transforms/preprocessing/test_mask.py
@@ -1,7 +1,8 @@
import pytest
-import torchio as tio
import torch
+import torchio as tio
+
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_one_hot.py b/tests/transforms/preprocessing/test_one_hot.py
index 01d06f59..70fa08e1 100644
--- a/tests/transforms/preprocessing/test_one_hot.py
+++ b/tests/transforms/preprocessing/test_one_hot.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_pad.py b/tests/transforms/preprocessing/test_pad.py
index a0777367..2991dba0 100644
--- a/tests/transforms/preprocessing/test_pad.py
+++ b/tests/transforms/preprocessing/test_pad.py
@@ -1,5 +1,6 @@
import SimpleITK as sitk
import torch
+
import torchio as tio
from torchio.data.io import sitk_to_nib
diff --git a/tests/transforms/preprocessing/test_resample.py b/tests/transforms/preprocessing/test_resample.py
index fd8eca6f..0d076015 100644
--- a/tests/transforms/preprocessing/test_resample.py
+++ b/tests/transforms/preprocessing/test_resample.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_rescale.py b/tests/transforms/preprocessing/test_rescale.py
index b9d205a8..02f2b670 100644
--- a/tests/transforms/preprocessing/test_rescale.py
+++ b/tests/transforms/preprocessing/test_rescale.py
@@ -3,6 +3,7 @@
import numpy as np
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_resize.py b/tests/transforms/preprocessing/test_resize.py
index 61e865ae..3774b8ca 100644
--- a/tests/transforms/preprocessing/test_resize.py
+++ b/tests/transforms/preprocessing/test_resize.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_to_canonical.py b/tests/transforms/preprocessing/test_to_canonical.py
index 73da5649..a32be9bb 100644
--- a/tests/transforms/preprocessing/test_to_canonical.py
+++ b/tests/transforms/preprocessing/test_to_canonical.py
@@ -1,5 +1,6 @@
import numpy as np
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/preprocessing/test_z_normalization.py b/tests/transforms/preprocessing/test_z_normalization.py
index 3878e5fc..0a0bf010 100644
--- a/tests/transforms/preprocessing/test_z_normalization.py
+++ b/tests/transforms/preprocessing/test_z_normalization.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
import torchio as tio
from ...utils import TorchioTestCase
diff --git a/tests/transforms/test_invertibility.py b/tests/transforms/test_invertibility.py
index 4d962d85..bda8b3a4 100644
--- a/tests/transforms/test_invertibility.py
+++ b/tests/transforms/test_invertibility.py
@@ -2,6 +2,7 @@
import warnings
import torch
+
import torchio as tio
from ..utils import TorchioTestCase
diff --git a/tests/transforms/test_lambda_transform.py b/tests/transforms/test_lambda_transform.py
index f0c2797d..2c2d401c 100644
--- a/tests/transforms/test_lambda_transform.py
+++ b/tests/transforms/test_lambda_transform.py
@@ -1,5 +1,6 @@
import pytest
import torch
+
from torchio import LABEL
from torchio.transforms import Lambda
diff --git a/tests/transforms/test_transforms.py b/tests/transforms/test_transforms.py
index 4c424c0e..93b5f4b0 100644
--- a/tests/transforms/test_transforms.py
+++ b/tests/transforms/test_transforms.py
@@ -5,6 +5,7 @@
import pytest
import SimpleITK as sitk
import torch
+
import torchio as tio
from ..utils import TorchioTestCase
diff --git a/tests/utils.py b/tests/utils.py
index fe50dbdc..989c8c2e 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -12,6 +12,7 @@
import numpy as np
import pytest
import torch
+
import torchio as tio
diff --git a/tox.ini b/tox.ini
index 486bcb00..d3d5ad31 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,6 +23,12 @@ skip_install = True
deps = ruff
commands = ruff check
+[testenv:format]
+description = Run code formatter
+skip_install = True
+deps = ruff
+commands = ruff format --diff
+
[testenv:types]
deps =
mypy
diff --git a/tutorials/example_heteromodal.py b/tutorials/example_heteromodal.py
index 4a3fa06a..f3967b50 100644
--- a/tutorials/example_heteromodal.py
+++ b/tutorials/example_heteromodal.py
@@ -8,6 +8,7 @@
import logging
import torch.nn as nn
+
import torchio as tio
from torchio import LabelMap
from torchio import Queue