From b99e8502d8d87e6e121d7818dee0badc23771718 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fernando=20P=C3=A9rez-Garc=C3=ADa?= Date: Sun, 22 Sep 2024 22:30:07 +0100 Subject: [PATCH] Use Ruff for formatting (#1204) * Use Ruff for formatting * Remove Black --- .github/workflows/code_quality.yml | 22 +---- .pre-commit-config.yaml | 2 +- README.md | 4 +- docs/examples/plot_3d_to_2d.py | 1 + docs/examples/plot_custom_z_spacing.py | 1 + docs/examples/plot_history.py | 5 +- docs/examples/plot_include_exclude.py | 2 +- docs/examples/plot_video.py | 5 +- docs/source/README.rst | 8 +- docs/source/conf.py | 6 +- pyproject.toml | 14 ++-- src/torchio/__init__.py | 37 ++++----- src/torchio/cli/apply_transform.py | 10 ++- src/torchio/cli/print_info.py | 1 - src/torchio/data/__init__.py | 1 - src/torchio/data/dataset.py | 2 +- src/torchio/data/image.py | 11 ++- src/torchio/data/inference/aggregator.py | 2 +- src/torchio/data/io.py | 1 - src/torchio/data/loader.py | 3 +- src/torchio/data/queue.py | 4 +- src/torchio/data/sampler/grid.py | 4 +- src/torchio/data/sampler/sampler.py | 2 +- src/torchio/data/sampler/weighted.py | 6 +- src/torchio/data/subject.py | 13 +-- src/torchio/datasets/__init__.py | 3 +- src/torchio/datasets/bite.py | 12 +-- src/torchio/datasets/episurg.py | 10 +-- src/torchio/datasets/fpg.py | 6 +- src/torchio/datasets/itk_snap/__init__.py | 3 +- src/torchio/datasets/itk_snap/itk_snap.py | 2 +- src/torchio/datasets/ixi.py | 14 ++-- src/torchio/datasets/medmnist.py | 6 +- src/torchio/datasets/mni/__init__.py | 1 - src/torchio/datasets/mni/colin.py | 3 +- src/torchio/datasets/mni/icbm.py | 6 +- src/torchio/datasets/mni/pediatric.py | 7 +- src/torchio/datasets/rsna_miccai.py | 8 +- src/torchio/datasets/rsna_spine_fracture.py | 13 ++- src/torchio/datasets/slicer.py | 25 ++---- src/torchio/external/due.py | 6 +- src/torchio/reference.py | 2 +- src/torchio/transforms/__init__.py | 83 ++++++++++--------- .../transforms/augmentation/__init__.py | 1 - .../transforms/augmentation/composition.py | 5 +- .../augmentation/intensity/__init__.py | 1 - .../intensity/random_bias_field.py | 4 +- .../augmentation/intensity/random_blur.py | 4 +- .../augmentation/intensity/random_gamma.py | 8 +- .../augmentation/intensity/random_ghosting.py | 6 +- .../intensity/random_labels_to_image.py | 12 +-- .../augmentation/intensity/random_motion.py | 10 +-- .../augmentation/intensity/random_noise.py | 8 +- .../augmentation/intensity/random_spike.py | 8 +- .../augmentation/intensity/random_swap.py | 7 +- .../augmentation/random_transform.py | 2 +- .../augmentation/spatial/__init__.py | 1 - .../augmentation/spatial/random_affine.py | 7 +- .../augmentation/spatial/random_anisotropy.py | 4 +- .../spatial/random_elastic_deformation.py | 11 ++- .../augmentation/spatial/random_flip.py | 4 +- src/torchio/transforms/data_parser.py | 3 +- src/torchio/transforms/interpolation.py | 1 - src/torchio/transforms/lambda_transform.py | 2 +- .../transforms/preprocessing/__init__.py | 1 - .../preprocessing/intensity/__init__.py | 1 - .../intensity/histogram_standardization.py | 12 +-- .../preprocessing/intensity/mask.py | 4 +- .../intensity/normalization_transform.py | 4 +- .../preprocessing/intensity/rescale.py | 2 +- .../label/keep_largest_component.py | 2 +- .../preprocessing/label/remap_labels.py | 2 +- .../preprocessing/spatial/bounds_transform.py | 2 +- .../preprocessing/spatial/copy_affine.py | 4 +- .../preprocessing/spatial/crop_or_pad.py | 8 +- .../spatial/ensure_shape_multiple.py | 6 +- .../transforms/preprocessing/spatial/pad.py | 4 
+- .../preprocessing/spatial/resample.py | 5 +- .../preprocessing/spatial/resize.py | 2 +- .../preprocessing/spatial/to_canonical.py | 4 +- src/torchio/transforms/transform.py | 20 +++-- src/torchio/typing.py | 1 - src/torchio/utils.py | 16 ++-- src/torchio/visualization.py | 4 +- tests/data/inference/test_aggregator.py | 3 +- tests/data/inference/test_grid_sampler.py | 3 +- tests/data/sampler/test_label_sampler.py | 1 + tests/data/sampler/test_patch_sampler.py | 1 + tests/data/sampler/test_random_sampler.py | 1 + tests/data/sampler/test_uniform_sampler.py | 1 + tests/data/sampler/test_weighted_sampler.py | 1 + tests/data/test_image.py | 2 + tests/data/test_io.py | 3 +- tests/data/test_queue.py | 3 +- tests/data/test_subject.py | 3 +- tests/data/test_subjects_dataset.py | 1 + tests/datasets/test_ixi.py | 1 + tests/datasets/test_medmnist.py | 2 +- tests/test_cli.py | 5 +- tests/test_utils.py | 1 + tests/transforms/augmentation/test_oneof.py | 1 + .../augmentation/test_random_affine.py | 1 + .../augmentation/test_random_anisotropy.py | 1 + .../augmentation/test_random_bias_field.py | 1 + .../augmentation/test_random_blur.py | 1 + .../test_random_elastic_deformation.py | 1 + .../augmentation/test_random_flip.py | 1 + .../augmentation/test_random_gamma.py | 1 + .../augmentation/test_random_ghosting.py | 1 + .../test_random_labels_to_image.py | 1 + .../augmentation/test_random_motion.py | 1 + .../augmentation/test_random_noise.py | 1 + .../augmentation/test_random_spike.py | 1 + .../augmentation/test_random_swap.py | 1 + tests/transforms/label/test_remap_labels.py | 1 + .../label/test_sequential_labels.py | 1 + tests/transforms/preprocessing/test_clamp.py | 1 + .../transforms/preprocessing/test_contour.py | 1 + .../preprocessing/test_copy_affine.py | 1 + tests/transforms/preprocessing/test_crop.py | 1 + .../transforms/preprocessing/test_crop_pad.py | 1 + .../test_ensure_shape_multiple.py | 1 + .../test_histogram_standardization.py | 1 + .../preprocessing/test_keep_largest.py | 1 + tests/transforms/preprocessing/test_mask.py | 3 +- .../transforms/preprocessing/test_one_hot.py | 1 + tests/transforms/preprocessing/test_pad.py | 1 + .../transforms/preprocessing/test_resample.py | 1 + .../transforms/preprocessing/test_rescale.py | 1 + tests/transforms/preprocessing/test_resize.py | 1 + .../preprocessing/test_to_canonical.py | 1 + .../preprocessing/test_z_normalization.py | 1 + tests/transforms/test_invertibility.py | 1 + tests/transforms/test_lambda_transform.py | 1 + tests/transforms/test_transforms.py | 1 + tests/utils.py | 1 + tox.ini | 6 ++ tutorials/example_heteromodal.py | 1 + 138 files changed, 346 insertions(+), 321 deletions(-) diff --git a/.github/workflows/code_quality.yml b/.github/workflows/code_quality.yml index 8635a95e4..25fa3839d 100644 --- a/.github/workflows/code_quality.yml +++ b/.github/workflows/code_quality.yml @@ -17,6 +17,7 @@ jobs: fail-fast: false matrix: tox_env: + - format - lint - types @@ -39,27 +40,6 @@ jobs: - name: Run check for tox env "${{ matrix.tox_env }}" run: tox -e ${{ matrix.tox_env }} - black: - name: Code formatting - runs-on: ubuntu-latest - - steps: - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Install black - run: pip install black - - - name: Run black - run: black --check --diff . 
- docs: name: Documentation runs-on: ubuntu-latest diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5153dbb90..2a8363128 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,7 @@ repos: rev: v0.6.7 hooks: - id: ruff - # - id: ruff-format + - id: ruff-format - repo: https://github.com/pre-commit/pygrep-hooks rev: v1.10.0 diff --git a/README.md b/README.md index 0763c9fcb..999b970cd 100644 --- a/README.md +++ b/README.md @@ -52,8 +52,8 @@ at [OpenAI](https://openai.com/) ([link](https://jack-clark.net/2020/03/17/)). Code - - Code quality + + Code style Code quality diff --git a/docs/examples/plot_3d_to_2d.py b/docs/examples/plot_3d_to_2d.py index fb26c78e5..89bf9a4bc 100644 --- a/docs/examples/plot_3d_to_2d.py +++ b/docs/examples/plot_3d_to_2d.py @@ -8,6 +8,7 @@ import matplotlib.pyplot as plt import torch + import torchio as tio torch.manual_seed(0) diff --git a/docs/examples/plot_custom_z_spacing.py b/docs/examples/plot_custom_z_spacing.py index e73ad25e6..7afc90b8a 100644 --- a/docs/examples/plot_custom_z_spacing.py +++ b/docs/examples/plot_custom_z_spacing.py @@ -10,6 +10,7 @@ """ import torch + import torchio as tio diff --git a/docs/examples/plot_history.py b/docs/examples/plot_history.py index 3af4424aa..4281b1bad 100644 --- a/docs/examples/plot_history.py +++ b/docs/examples/plot_history.py @@ -12,6 +12,7 @@ import matplotlib.pyplot as plt import torch + import torchio as tio torch.manual_seed(0) @@ -38,9 +39,7 @@ pprint.pprint(transformed.history) # noqa: T203 print('\nComposed transform to reproduce history:') # noqa: T201 print(transformed.get_composed_history()) # noqa: T201 -print( - '\nComposed transform to invert applied transforms when possible:' -) # noqa: T201, B950 +print('\nComposed transform to invert applied transforms when possible:') print(transformed.get_inverse_transform(ignore_intensity=False)) # noqa: T201 loader = tio.SubjectsLoader( diff --git a/docs/examples/plot_include_exclude.py b/docs/examples/plot_include_exclude.py index c73560491..500ccd7f8 100644 --- a/docs/examples/plot_include_exclude.py +++ b/docs/examples/plot_include_exclude.py @@ -7,8 +7,8 @@ """ import torch -import torchio as tio +import torchio as tio torch.manual_seed(0) diff --git a/docs/examples/plot_video.py b/docs/examples/plot_video.py index 0ffd8017b..48438e3be 100644 --- a/docs/examples/plot_video.py +++ b/docs/examples/plot_video.py @@ -12,9 +12,10 @@ import matplotlib.pyplot as plt import numpy as np import torch -import torchio as tio from PIL import Image +import torchio as tio + def read_clip(path, undersample=4): """Read a GIF a return an array of shape (C, W, H, T).""" @@ -49,7 +50,7 @@ def get_frame(image, i): ) -# Source: https://thehigherlearning.wordpress.com/2014/06/25/watching-a-cell-divide-under-an-electron-microscope-is-mesmerizing-gif/ # noqa: B950 +# Source: https://thehigherlearning.wordpress.com/2014/06/25/watching-a-cell-divide-under-an-electron-microscope-is-mesmerizing-gif/ array, delay = read_clip('nBTu3oi.gif') plt.imshow(array[..., 0].transpose(1, 2, 0)) plt.plot() diff --git a/docs/source/README.rst b/docs/source/README.rst index 4f4164f15..090e5a4f5 100644 --- a/docs/source/README.rst +++ b/docs/source/README.rst @@ -3,7 +3,7 @@ TorchIO ####### |PyPI-downloads| |PyPI-version| |Conda-version| |Google-Colab-notebook| -|Docs-status| |Tests-status| |Black| +|Docs-status| |Tests-status| |Ruff| |Coverage-codecov| |Code-Quality| |Code-Maintainability| |pre-commit| |Slack| |Twitter| |Twitter-commits| |YouTube| @@ -89,9 
+89,9 @@ If you found a bug or have a feature request, please open an issue: :target: https://github.com/fepegar/torchio/actions/workflows/tests.yml :alt: Tests status -.. |Black| image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black - :alt: Code style: black +.. |Ruff| image:: https://camo.githubusercontent.com/bb88127790fb054cba2caf3f3be2569c1b97bb45a44b47b52d738f8781a8ede4/68747470733a2f2f696d672e736869656c64732e696f2f656e64706f696e743f75726c3d68747470733a2f2f7261772e67697468756275736572636f6e74656e742e636f6d2f636861726c6965726d617273682f727566662f6d61696e2f6173736574732f62616467652f76312e6a736f6e + :target: https://docs.astral.sh/ruff/ + :alt: Code style: Ruff .. |Coverage-codecov| image:: https://codecov.io/gh/fepegar/torchio/branch/main/graphs/badge.svg :target: https://codecov.io/github/fepegar/torchio diff --git a/docs/source/conf.py b/docs/source/conf.py index f34df5e4a..f79c3c9a2 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -109,7 +109,7 @@ # further. For a list of options available for each theme, see the # documentation. # -url = 'https://www.journals.elsevier.com/computer-methods-and-programs-in-biomedicine/most-downloaded-articles' # noqa: B950 +url = 'https://www.journals.elsevier.com/computer-methods-and-programs-in-biomedicine/most-downloaded-articles' text = 'CMPB' html_href = f'{text}' message = f'TorchIO becomes one of the most downloaded articles from {html_href}!' @@ -221,9 +221,7 @@ epub_exclude_files = ['search.html'] # CopyButton configuration -copybutton_prompt_text = ( - r'>>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: ' # noqa: B950,FS003 -) +copybutton_prompt_text = r'>>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: ' copybutton_prompt_is_regexp = True # def setup(app): diff --git a/pyproject.toml b/pyproject.toml index 80ad5dc4d..35a5d57db 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,6 @@ csv = [ "pandas", ] dev = [ - "black", "bump2version", "coverage", "mypy", @@ -98,11 +97,6 @@ Source = "https://github.com/fepegar/torchio" Documentation = "http://torchio.rtfd.io" "Release notes" = "https://github.com/fepegar/torchio/releases" -[tool.black] -preview = false -skip-string-normalization = true -target-version = ['py311'] - [tool.mypy] pretty = true @@ -127,7 +121,6 @@ markers = [ "slow: marks tests as slow (deselect with '-m \"not slow\"')", "serial", ] - filterwarnings = [ # Ignore SimpleITK Swig warnings "ignore:builtin type .* has no __module__ attribute", @@ -135,3 +128,10 @@ filterwarnings = [ # Raised by SimpleITK on CI "ignore:invalid escape sequence", ] + +[tool.ruff] +format.quote-style = 'single' +lint.select = ["F", "I"] + +[tool.ruff.lint.isort] +force-single-line = true diff --git a/src/torchio/__init__.py b/src/torchio/__init__.py index 85de897c9..ad9a7c22d 100644 --- a/src/torchio/__init__.py +++ b/src/torchio/__init__.py @@ -5,29 +5,26 @@ __version__ = '0.20.0' +from . import datasets +from . import reference from . 
import utils from .constants import * # noqa: F401, F403 +from .data import GridAggregator +from .data import GridSampler +from .data import Image +from .data import LabelMap +from .data import LabelSampler +from .data import Queue +from .data import ScalarImage +from .data import Subject +from .data import SubjectsDataset +from .data import SubjectsLoader +from .data import UniformSampler +from .data import WeightedSampler +from .data import inference +from .data import io +from .data import sampler from .transforms import * # noqa: F401, F403 -from .data import ( - io, - sampler, - inference, - SubjectsDataset, - SubjectsLoader, - Image, - ScalarImage, - LabelMap, - Queue, - Subject, - WeightedSampler, - UniformSampler, - LabelSampler, - GridSampler, - GridAggregator, -) -from . import datasets -from . import reference - __all__ = [ 'utils', diff --git a/src/torchio/cli/apply_transform.py b/src/torchio/cli/apply_transform.py index d54fd47ce..91ef070b5 100644 --- a/src/torchio/cli/apply_transform.py +++ b/src/torchio/cli/apply_transform.py @@ -3,8 +3,9 @@ from pathlib import Path import typer -from rich.progress import Progress, SpinnerColumn, TextColumn - +from rich.progress import Progress +from rich.progress import SpinnerColumn +from rich.progress import TextColumn app = typer.Typer() @@ -61,9 +62,10 @@ def main( Example: $ tiotr input.nrrd RandomMotion output.nii "degrees=(-5,15) num_transforms=3" -v - """ # noqa: B950 + """ # Imports are placed here so that the tool loads faster if not being run import torch + import torchio.transforms as transforms from torchio.utils import apply_transform_to_file @@ -79,7 +81,7 @@ def main( torch.manual_seed(seed) with Progress( SpinnerColumn(), - TextColumn('[progress.description]{task.description}'), # noqa: FS003 + TextColumn('[progress.description]{task.description}'), transient=True, disable=not show_progress, ) as progress: diff --git a/src/torchio/cli/print_info.py b/src/torchio/cli/print_info.py index 44f1a9d98..87fef5b62 100644 --- a/src/torchio/cli/print_info.py +++ b/src/torchio/cli/print_info.py @@ -3,7 +3,6 @@ import typer - app = typer.Typer() diff --git a/src/torchio/data/__init__.py b/src/torchio/data/__init__.py index 7ebc6724e..d3250c983 100644 --- a/src/torchio/data/__init__.py +++ b/src/torchio/data/__init__.py @@ -12,7 +12,6 @@ from .sampler import WeightedSampler from .subject import Subject - __all__ = [ 'Queue', 'Subject', diff --git a/src/torchio/data/dataset.py b/src/torchio/data/dataset.py index d1bdfc23e..4b7c1b223 100644 --- a/src/torchio/data/dataset.py +++ b/src/torchio/data/dataset.py @@ -62,7 +62,7 @@ class SubjectsDataset(Dataset): .. tip:: To quickly iterate over the subjects without loading the images, use :meth:`dry_iter()`. 
- """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/data/image.py b/src/torchio/data/image.py index 31fbb197c..e06ce6077 100644 --- a/src/torchio/data/image.py +++ b/src/torchio/data/image.py @@ -34,9 +34,9 @@ from ..typing import TypeTripletFloat from ..typing import TypeTripletInt from ..utils import get_stem -from ..utils import to_tuple from ..utils import guess_external_viewer from ..utils import is_iterable +from ..utils import to_tuple from .io import check_uint_to_int from .io import ensure_4d from .io import get_rotation_and_spacing_from_affine @@ -48,7 +48,6 @@ from .io import sitk_to_nib from .io import write_image - PROTECTED_KEYS = DATA, AFFINE, TYPE, PATH, STEM TypeBound = Tuple[float, float] TypeBounds = Tuple[TypeBound, TypeBound, TypeBound] @@ -128,7 +127,7 @@ class Image(dict): .. _FSL docs: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained .. _SimpleITK docs: https://simpleitk.readthedocs.io/en/master/fundamentalConcepts.html .. _Graham Wideman's website: http://www.grahamwideman.com/gw/brain/orientation/orientterms.htm - """ # noqa: B950 + """ def __init__( self, @@ -470,7 +469,7 @@ def _parse_path( # https://github.com/fepegar/torchio/pull/838 raise TypeError('The path argument cannot be a dictionary') elif self._is_paths_sequence(path): - return [self._parse_single_path(p) for p in path] # type: ignore[union-attr] # noqa: B950 + return [self._parse_single_path(p) for p in path] # type: ignore[union-attr] else: return self._parse_single_path(path) # type: ignore[arg-type] @@ -655,7 +654,7 @@ def from_sitk(cls, sitk_image): >>> sitk_image = sitk.Image((224, 224), sitk.sitkVectorFloat32, 3) >>> tio.ScalarImage.from_sitk(sitk_image) ScalarImage(shape: (3, 224, 224, 1); spacing: (1.00, 1.00, 1.00); orientation: LPS+; memory: 588.0 KiB; dtype: torch.FloatTensor) - """ # noqa: B950 + """ tensor, affine = sitk_to_nib(sitk_image) return cls(tensor=tensor, affine=affine) @@ -712,7 +711,7 @@ def to_gif( eliminating unused colors. This is only useful if the palette can be compressed to the next smaller power of 2 elements. reverse: Reverse the temporal order of frames. - """ # noqa: B950 + """ from ..visualization import make_gif # avoid circular import make_gif( diff --git a/src/torchio/data/inference/aggregator.py b/src/torchio/data/inference/aggregator.py index 52985cad7..dae0b7283 100644 --- a/src/torchio/data/inference/aggregator.py +++ b/src/torchio/data/inference/aggregator.py @@ -30,7 +30,7 @@ class GridAggregator: .. note:: Adapted from NiftyNet. See `this NiftyNet tutorial `_ for more information about patch-based sampling. 
- """ # noqa: B950 + """ def __init__(self, sampler: GridSampler, overlap_mode: str = 'crop'): subject = sampler.subject diff --git a/src/torchio/data/io.py b/src/torchio/data/io.py index 258aa00e6..33de90706 100644 --- a/src/torchio/data/io.py +++ b/src/torchio/data/io.py @@ -20,7 +20,6 @@ from ..typing import TypeTripletFloat from ..typing import TypeTripletInt - # Matrices used to switch between LPS and RAS FLIPXY_33 = np.diag([-1, -1, 1]) FLIPXY_44 = np.diag([-1, -1, 1, 1]) diff --git a/src/torchio/data/loader.py b/src/torchio/data/loader.py index ebc55deac..768d1936a 100644 --- a/src/torchio/data/loader.py +++ b/src/torchio/data/loader.py @@ -7,12 +7,11 @@ import numpy as np import torch -from torch.utils.data import Dataset from torch.utils.data import DataLoader +from torch.utils.data import Dataset from .subject import Subject - T = TypeVar('T') diff --git a/src/torchio/data/queue.py b/src/torchio/data/queue.py index 6a77c8b0c..1b2151805 100644 --- a/src/torchio/data/queue.py +++ b/src/torchio/data/queue.py @@ -9,7 +9,7 @@ from torch.utils.data import Dataset from torch.utils.data import Sampler -from .. import NUM_SAMPLES +from ..constants import NUM_SAMPLES from .dataset import SubjectsDataset from .sampler import PatchSampler from .subject import Subject @@ -180,7 +180,7 @@ class Queue(Dataset): ... inputs = patches_batch['t1'][tio.DATA] # key 't1' is in subject ... targets = patches_batch['brain'][tio.DATA] # key 'brain' is in subject ... logits = model(inputs) # model being an instance of torch.nn.Module - """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/data/sampler/grid.py b/src/torchio/data/sampler/grid.py index c9a1473a7..506c63378 100644 --- a/src/torchio/data/sampler/grid.py +++ b/src/torchio/data/sampler/grid.py @@ -94,7 +94,7 @@ def _pad(self, subject: Subject) -> Subject: border = self.patch_overlap // 2 padding = border.repeat(2) - pad = Pad(padding, padding_mode=self.padding_mode) # type: ignore[arg-type] # noqa: B950 + pad = Pad(padding, padding_mode=self.padding_mode) # type: ignore[arg-type] subject = pad(subject) # type: ignore[assignment] return subject @@ -110,7 +110,7 @@ def _generate_patches( # type: ignore[override] subject = self._pad(subject) sizes = subject.spatial_shape, self.patch_size, self.patch_overlap self._parse_sizes(*sizes) # type: ignore[arg-type] - locations = self._get_patches_locations(*sizes) # type: ignore[arg-type] # noqa: B950 + locations = self._get_patches_locations(*sizes) # type: ignore[arg-type] for location in locations: index_ini = location[:3] yield self.extract_patch(subject, index_ini) diff --git a/src/torchio/data/sampler/sampler.py b/src/torchio/data/sampler/sampler.py index 0cea35a90..1c866b11a 100644 --- a/src/torchio/data/sampler/sampler.py +++ b/src/torchio/data/sampler/sampler.py @@ -40,7 +40,7 @@ def extract_patch( subject: Subject, index_ini: TypeTripletInt, ) -> Subject: - cropped_subject = self.crop(subject, index_ini, self.patch_size) # type: ignore[arg-type] # noqa: B950 + cropped_subject = self.crop(subject, index_ini, self.patch_size) # type: ignore[arg-type] return cropped_subject def crop( diff --git a/src/torchio/data/sampler/weighted.py b/src/torchio/data/sampler/weighted.py index 88d7af019..5f997bc01 100644 --- a/src/torchio/data/sampler/weighted.py +++ b/src/torchio/data/sampler/weighted.py @@ -47,7 +47,7 @@ class WeightedSampler(RandomSampler): .. 
note:: Values of the probability map near the border will be set to 0 as the center of the patch cannot be at the border (unless the patch has size 1 or 2 along that axis). - """ # noqa: B950 + """ def __init__( self, @@ -225,7 +225,7 @@ def sample_probability_map( >>> histogram # doctest:+SKIP array([[ 0, 0, 3479, 3478, 17121, 7023, 3355, 3378, 0], [ 6808, 6804, 6942, 6809, 6946, 6988, 7002, 6826, 7041]]) - """ # noqa: B950 + """ # Get first value larger than random number ensuring the random number # is not exactly 0 (see https://github.com/fepegar/torchio/issues/510) random_number = max(MIN_FLOAT_32, torch.rand(1).item()) * cdf[-1] @@ -242,7 +242,7 @@ def sample_probability_map( message = ( 'Error retrieving probability in weighted sampler.' ' Please report this issue at' - ' https://github.com/fepegar/torchio/issues/new?labels=bug&template=bug_report.md' # noqa: B950 + ' https://github.com/fepegar/torchio/issues/new?labels=bug&template=bug_report.md' ) raise RuntimeError(message) diff --git a/src/torchio/data/subject.py b/src/torchio/data/subject.py index 767e1c606..9369d823f 100644 --- a/src/torchio/data/subject.py +++ b/src/torchio/data/subject.py @@ -2,6 +2,7 @@ import copy import pprint +from typing import TYPE_CHECKING from typing import Any from typing import Callable from typing import Dict @@ -9,7 +10,6 @@ from typing import Optional from typing import Sequence from typing import Tuple -from typing import TYPE_CHECKING import numpy as np @@ -19,7 +19,8 @@ from .image import Image if TYPE_CHECKING: - from ..transforms import Transform, Compose + from ..transforms import Compose + from ..transforms import Transform class Subject(dict): @@ -49,7 +50,7 @@ class Subject(dict): ... 'hospital': 'Hospital Juan Negrín', ... } >>> subject = tio.Subject(subject_dict) - """ # noqa: B950 + """ def __init__(self, *args, **kwargs: Dict[str, Any]): if args: @@ -162,8 +163,8 @@ def get_applied_transforms( ignore_intensity: bool = False, image_interpolation: Optional[str] = None, ) -> List[Transform]: - from ..transforms.transform import Transform from ..transforms.intensity_transform import IntensityTransform + from ..transforms.transform import Transform name_to_transform = {cls.__name__: cls for cls in get_subclasses(Transform)} transforms_list = [] @@ -266,7 +267,7 @@ def check_consistent_attribute( attribute of two images being compared, :math:`t_{abs}` is the ``absolute_tolerance`` and :math:`t_{rel}` is the ``relative_tolerance``. - """ # noqa: B950 + """ message = ( f'More than one value for "{attribute}" found in subject images:\n{{}}' ) @@ -328,7 +329,7 @@ def check_consistent_space(self) -> None: 'As described above, some images in the subject are not in the' ' same space. 
You probably can use the transforms ToCanonical' ' and Resample to fix this, as explained at' - ' https://github.com/fepegar/torchio/issues/647#issuecomment-913025695' # noqa: B950 + ' https://github.com/fepegar/torchio/issues/647#issuecomment-913025695' ) raise RuntimeError(message) from e diff --git a/src/torchio/datasets/__init__.py b/src/torchio/datasets/__init__.py index 9df142352..9c209d290 100644 --- a/src/torchio/datasets/__init__.py +++ b/src/torchio/datasets/__init__.py @@ -1,9 +1,9 @@ from .bite import BITE3 from .episurg import EPISURG from .fpg import FPG +from .itk_snap import T1T2 from .itk_snap import AorticValve from .itk_snap import BrainTumor -from .itk_snap import T1T2 from .ixi import IXI from .ixi import IXITiny from .medmnist import AdrenalMNIST3D @@ -20,7 +20,6 @@ from .rsna_spine_fracture import RSNACervicalSpineFracture from .slicer import Slicer - __all__ = [ 'FPG', 'Slicer', diff --git a/src/torchio/datasets/bite.py b/src/torchio/datasets/bite.py index 414b9e40f..6a5b4b637 100644 --- a/src/torchio/datasets/bite.py +++ b/src/torchio/datasets/bite.py @@ -3,11 +3,11 @@ from typing import Dict from typing import Optional -from .. import Image -from .. import LabelMap -from .. import ScalarImage -from .. import Subject -from .. import SubjectsDataset +from ..data import Image +from ..data import LabelMap +from ..data import ScalarImage +from ..data import Subject +from ..data import SubjectsDataset from ..download import download_and_extract_archive from ..transforms import Transform from ..typing import TypePath @@ -56,7 +56,7 @@ class BITE3(BITE): transform: An instance of :class:`~torchio.transforms.transform.Transform`. download: If set to ``True``, will download the data into :attr:`root`. - """ # noqa: B950 + """ dirname = 'group3' diff --git a/src/torchio/datasets/episurg.py b/src/torchio/datasets/episurg.py index 8c4267da1..98490f18a 100644 --- a/src/torchio/datasets/episurg.py +++ b/src/torchio/datasets/episurg.py @@ -2,10 +2,10 @@ from pathlib import Path from typing import Optional -from .. import LabelMap -from .. import ScalarImage -from .. import Subject -from .. import SubjectsDataset +from ..data import LabelMap +from ..data import ScalarImage +from ..data import Subject +from ..data import SubjectsDataset from ..download import download_and_extract_archive from ..transforms import Transform from ..typing import TypePath @@ -40,7 +40,7 @@ class EPISURG(SubjectsDataset): to be downloaded if it is not already present. """ - data_url = 'https://s3-eu-west-1.amazonaws.com/pstorage-ucl-2748466690/26153588/EPISURG.zip' # noqa: B950 + data_url = 'https://s3-eu-west-1.amazonaws.com/pstorage-ucl-2748466690/26153588/EPISURG.zip' md5 = '5ec5831a2c6fbfdc8489ba2910a6504b' def __init__( diff --git a/src/torchio/datasets/fpg.py b/src/torchio/datasets/fpg.py index 542ebf1d0..3b8449c43 100644 --- a/src/torchio/datasets/fpg.py +++ b/src/torchio/datasets/fpg.py @@ -1,8 +1,8 @@ import urllib.parse -from .. import DATA_REPO -from .. import LabelMap -from .. 
import ScalarImage +from ..constants import DATA_REPO +from ..data import LabelMap +from ..data import ScalarImage from ..data.io import read_matrix from ..data.subject import _RawSubjectCopySubject from ..download import download_url diff --git a/src/torchio/datasets/itk_snap/__init__.py b/src/torchio/datasets/itk_snap/__init__.py index e7b3a8096..130a7a068 100644 --- a/src/torchio/datasets/itk_snap/__init__.py +++ b/src/torchio/datasets/itk_snap/__init__.py @@ -1,7 +1,6 @@ +from .itk_snap import T1T2 from .itk_snap import AorticValve from .itk_snap import BrainTumor -from .itk_snap import T1T2 - __all__ = [ 'BrainTumor', diff --git a/src/torchio/datasets/itk_snap/itk_snap.py b/src/torchio/datasets/itk_snap/itk_snap.py index 8360a3877..4450378cb 100644 --- a/src/torchio/datasets/itk_snap/itk_snap.py +++ b/src/torchio/datasets/itk_snap/itk_snap.py @@ -13,7 +13,7 @@ class SubjectITKSNAP(_RawSubjectCopySubject): See `the ITK-SNAP website`_ for more information. .. _the ITK-SNAP website: http://www.itksnap.org/pmwiki/pmwiki.php?n=Downloads.Data - """ # noqa: B950 + """ url_base = 'https://www.nitrc.org/frs/download.php/' diff --git a/src/torchio/datasets/ixi.py b/src/torchio/datasets/ixi.py index b0f26ecea..511509be1 100644 --- a/src/torchio/datasets/ixi.py +++ b/src/torchio/datasets/ixi.py @@ -18,10 +18,10 @@ from typing import Optional from typing import Sequence -from .. import LabelMap -from .. import ScalarImage -from .. import Subject -from .. import SubjectsDataset +from ..data import LabelMap +from ..data import ScalarImage +from ..data import Subject +from ..data import SubjectsDataset from ..download import download_and_extract_archive from ..transforms import Transform from ..typing import TypePath @@ -60,9 +60,9 @@ class IXI(SubjectsDataset): >>> print('Keys in subject:', tuple(sample_subject.keys())) # ('T1', 'T2') >>> print('Shape of T1 data:', sample_subject['T1'].shape) # [1, 180, 268, 268] >>> print('Shape of T2 data:', sample_subject['T2'].shape) # [1, 241, 257, 188] - """ # noqa: B950 + """ - base_url = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-{modality}.tar' # noqa: FS003,B950 + base_url = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-{modality}.tar' md5_dict = { 'T1': '34901a0593b41dd19c1a1f746eac2d58', 'T2': 'e3140d78730ecdd32ba92da48c0a9aaa', @@ -173,7 +173,7 @@ class IXITiny(SubjectsDataset): download: If set to ``True``, will download the data into :attr:`root`. .. _notebook: https://github.com/fepegar/torchio/blob/main/tutorials/README.md - """ # noqa: B950 + """ url = 'https://www.dropbox.com/s/ogxjwjxdv5mieah/ixi_tiny.zip?dl=1' md5 = 'bfb60f4074283d78622760230bfa1f98' diff --git a/src/torchio/datasets/medmnist.py b/src/torchio/datasets/medmnist.py index ccd6a765f..bad491ffd 100644 --- a/src/torchio/datasets/medmnist.py +++ b/src/torchio/datasets/medmnist.py @@ -1,9 +1,9 @@ import numpy as np import torch -from .. import ScalarImage -from .. import Subject -from .. 
import SubjectsDataset +from ..data import ScalarImage +from ..data import Subject +from ..data import SubjectsDataset from ..download import download_url from ..utils import get_torchio_cache_dir diff --git a/src/torchio/datasets/mni/__init__.py b/src/torchio/datasets/mni/__init__.py index a3b61cdcf..62db01d5d 100644 --- a/src/torchio/datasets/mni/__init__.py +++ b/src/torchio/datasets/mni/__init__.py @@ -3,7 +3,6 @@ from .pediatric import Pediatric from .sheep import Sheep - __all__ = [ 'Sheep', 'Colin27', diff --git a/src/torchio/datasets/mni/colin.py b/src/torchio/datasets/mni/colin.py index 06812d310..825632b52 100644 --- a/src/torchio/datasets/mni/colin.py +++ b/src/torchio/datasets/mni/colin.py @@ -6,7 +6,6 @@ from ...utils import compress from .mni import SubjectMNI - TISSUES_2008 = { 1: 'Cerebro-spinal fluid', 2: 'Gray Matter', @@ -57,7 +56,7 @@ class Colin27(SubjectMNI): >>> colin_2008.load() >>> colin_2008.t1 ScalarImage(shape: (1, 362, 434, 362); spacing: (0.50, 0.50, 0.50); orientation: RAS+; memory: 217.0 MiB; type: intensity) - """ # noqa: B950 + """ def __init__(self, version=1998): if version not in (1998, 2008): diff --git a/src/torchio/datasets/mni/icbm.py b/src/torchio/datasets/mni/icbm.py index a375ca994..8dfcd0d9b 100644 --- a/src/torchio/datasets/mni/icbm.py +++ b/src/torchio/datasets/mni/icbm.py @@ -2,8 +2,8 @@ import torch -from ... import LabelMap -from ... import ScalarImage +from ...data import LabelMap +from ...data import ScalarImage from ...download import download_and_extract_archive from ...utils import compress from ...utils import get_torchio_cache_dir @@ -32,7 +32,7 @@ class ICBM2009CNonlinearSymmetric(SubjectMNI): >>> icbm = tio.datasets.ICBM2009CNonlinearSymmetric(load_4d_tissues=False) >>> icbm ICBM2009CNonlinearSymmetric(Keys: ('t1', 'eyes', 'face', 'brain', 't2', 'pd', 'gm', 'wm', 'csf'); images: 9) - """ # noqa: B950 + """ def __init__(self, load_4d_tissues: bool = True): self.name = 'mni_icbm152_nlin_sym_09c_nifti' diff --git a/src/torchio/datasets/mni/pediatric.py b/src/torchio/datasets/mni/pediatric.py index 84271987f..0b603e038 100644 --- a/src/torchio/datasets/mni/pediatric.py +++ b/src/torchio/datasets/mni/pediatric.py @@ -1,12 +1,11 @@ import urllib.parse -from ... import LabelMap -from ... import ScalarImage +from ...data import LabelMap +from ...data import ScalarImage from ...download import download_and_extract_archive from ...utils import compress from .mni import SubjectMNI - SUPPORTED_YEARS = ( (4.5, 18.5), (4.5, 8.5), @@ -41,7 +40,7 @@ class Pediatric(SubjectMNI): ``(13, 18.5)``. symmetric: If ``True``, the left-right symmetric templates will be used. Else, the asymmetric (natural) templates will be used. - """ # noqa: B950 + """ def __init__(self, years, symmetric=False): self.url_dir = 'http://www.bic.mni.mcgill.ca/~vfonov/nihpd/obj1/' diff --git a/src/torchio/datasets/rsna_miccai.py b/src/torchio/datasets/rsna_miccai.py index a41b88c74..2e868524f 100644 --- a/src/torchio/datasets/rsna_miccai.py +++ b/src/torchio/datasets/rsna_miccai.py @@ -6,9 +6,9 @@ from typing import Sequence from typing import Union -from .. import ScalarImage -from .. import Subject -from .. import SubjectsDataset +from ..data import ScalarImage +from ..data import Subject +from ..data import SubjectsDataset from ..typing import TypePath @@ -50,7 +50,7 @@ class RSNAMICCAI(SubjectsDataset): .. 
_RSNA-MICCAI Brain Tumor Radiogenomic Classification challenge: https://www.kaggle.com/c/rsna-miccai-brain-tumor-radiogenomic-classification - """ # noqa: B950 + """ id_key = 'BraTS21ID' label_key = 'MGMT_value' diff --git a/src/torchio/datasets/rsna_spine_fracture.py b/src/torchio/datasets/rsna_spine_fracture.py index b99569c40..a86a2c044 100644 --- a/src/torchio/datasets/rsna_spine_fracture.py +++ b/src/torchio/datasets/rsna_spine_fracture.py @@ -1,19 +1,18 @@ from pathlib import Path +from types import ModuleType from typing import Any from typing import Dict from typing import List from typing import Optional from typing import Union -from types import ModuleType -from .. import LabelMap -from .. import ScalarImage -from .. import Subject -from .. import SubjectsDataset +from ..data import LabelMap +from ..data import ScalarImage +from ..data import Subject +from ..data import SubjectsDataset from ..typing import TypePath from ..utils import normalize_path - TypeBoxes = List[Dict[str, Union[str, float, int]]] @@ -26,7 +25,7 @@ class RSNACervicalSpineFracture(SubjectsDataset): instantiating this class. .. _RSNA 2022 Cervical Spine Fracture Detection: https://www.kaggle.com/competitions/rsna-2022-cervical-spine-fracture-detection/overview/evaluation - """ # noqa: B950 + """ UID = 'StudyInstanceUID' diff --git a/src/torchio/datasets/slicer.py b/src/torchio/datasets/slicer.py index 5ac39abd8..a66cadc1f 100644 --- a/src/torchio/datasets/slicer.py +++ b/src/torchio/datasets/slicer.py @@ -1,24 +1,19 @@ import urllib.parse -from .. import ScalarImage +from ..data import ScalarImage from ..data.subject import _RawSubjectCopySubject from ..download import download_url from ..utils import get_torchio_cache_dir - SLICER_URL = 'https://github.com/Slicer/SlicerTestingData/releases/download/' URLS_DICT = { 'MRHead': ( ('MRHead.nrrd',), - ( - 'SHA256/cc211f0dfd9a05ca3841ce1141b292898b2dd2d3f08286affadf823a7e58df93', - ), # noqa: B950 + ('SHA256/cc211f0dfd9a05ca3841ce1141b292898b2dd2d3f08286affadf823a7e58df93',), ), 'DTIBrain': ( ('DTI-Brain.nrrd',), - ( - 'SHA256/5c78d00c86ae8d968caa7a49b870ef8e1c04525b1abc53845751d8bce1f0b91a', - ), # noqa: B950 + ('SHA256/5c78d00c86ae8d968caa7a49b870ef8e1c04525b1abc53845751d8bce1f0b91a',), ), 'DTIVolume': ( ( @@ -26,21 +21,17 @@ 'DTIVolume.nhdr', ), ( - 'SHA256/d785837276758ddd9d21d76a3694e7fd866505a05bc305793517774c117cb38d', # noqa: B950 - 'SHA256/67564aa42c7e2eec5c3fd68afb5a910e9eab837b61da780933716a3b922e50fe', # noqa: B950 + 'SHA256/d785837276758ddd9d21d76a3694e7fd866505a05bc305793517774c117cb38d', + 'SHA256/67564aa42c7e2eec5c3fd68afb5a910e9eab837b61da780933716a3b922e50fe', ), ), 'CTChest': ( ('CT-chest.nrrd',), - ( - 'SHA256/4507b664690840abb6cb9af2d919377ffc4ef75b167cb6fd0f747befdb12e38e', - ), # noqa: B950 + ('SHA256/4507b664690840abb6cb9af2d919377ffc4ef75b167cb6fd0f747befdb12e38e',), ), 'CTACardio': ( ('CTA-cardio.nrrd',), - ( - 'SHA256/3b0d4eb1a7d8ebb0c5a89cc0504640f76a030b4e869e33ff34c564c3d3b88ad2', - ), # noqa: B950 + ('SHA256/3b0d4eb1a7d8ebb0c5a89cc0504640f76a030b4e869e33ff34c564c3d3b88ad2',), ), } @@ -56,7 +47,7 @@ class Slicer(_RawSubjectCopySubject): Args: name: One of the keys in :attr:`torchio.datasets.slicer.URLS_DICT`. 
- """ # noqa: B950 + """ def __init__(self, name='MRHead'): filenames, url_files = URLS_DICT[name] diff --git a/src/torchio/external/due.py b/src/torchio/external/due.py index 21fd16c9a..e0e6514f6 100644 --- a/src/torchio/external/due.py +++ b/src/torchio/external/due.py @@ -48,7 +48,11 @@ def _donothing_func(*args, **kwargs): try: - from duecredit import due, BibTeX, Doi, Url, Text + from duecredit import BibTeX + from duecredit import Doi + from duecredit import Text + from duecredit import Url + from duecredit import due if 'due' in locals() and not hasattr(due, 'cite'): raise RuntimeError( diff --git a/src/torchio/reference.py b/src/torchio/reference.py index 7082fc223..b5de0d1a9 100644 --- a/src/torchio/reference.py +++ b/src/torchio/reference.py @@ -15,7 +15,7 @@ url = {https://www.sciencedirect.com/science/article/pii/S0169260721003102}, author = {P{\'e}rez-Garc{\'i}a, Fernando and Sparks, Rachel and Ourselin, S{\'e}bastien}, keywords = {Medical image computing, Deep learning, Data augmentation, Preprocessing}, -} """ # noqa: B950 +} """ TITLE = ( 'TorchIO: a Python library for efficient loading, preprocessing,' diff --git a/src/torchio/transforms/__init__.py b/src/torchio/transforms/__init__.py index 087dbe5b0..57e220c8d 100644 --- a/src/torchio/transforms/__init__.py +++ b/src/torchio/transforms/__init__.py @@ -1,55 +1,64 @@ # noreorder -from .transform import Transform -from .fourier import FourierTransform -from .spatial_transform import SpatialTransform -from .intensity_transform import IntensityTransform -from .preprocessing.label.label_transform import LabelTransform - -# Generic -from .lambda_transform import Lambda +from .augmentation.composition import Compose # Augmentation from .augmentation.composition import OneOf -from .augmentation.composition import Compose - -from .augmentation.spatial import RandomFlip, Flip -from .augmentation.spatial import RandomAffine, Affine +from .augmentation.intensity import BiasField +from .augmentation.intensity import Blur +from .augmentation.intensity import Gamma +from .augmentation.intensity import Ghosting +from .augmentation.intensity import LabelsToImage +from .augmentation.intensity import Motion +from .augmentation.intensity import Noise +from .augmentation.intensity import RandomBiasField +from .augmentation.intensity import RandomBlur +from .augmentation.intensity import RandomGamma +from .augmentation.intensity import RandomGhosting +from .augmentation.intensity import RandomLabelsToImage +from .augmentation.intensity import RandomMotion +from .augmentation.intensity import RandomNoise +from .augmentation.intensity import RandomSpike +from .augmentation.intensity import RandomSwap +from .augmentation.intensity import Spike +from .augmentation.intensity import Swap +from .augmentation.spatial import Affine +from .augmentation.spatial import ElasticDeformation +from .augmentation.spatial import Flip +from .augmentation.spatial import RandomAffine from .augmentation.spatial import RandomAnisotropy -from .augmentation.spatial import RandomElasticDeformation, ElasticDeformation - -from .augmentation.intensity import RandomSwap, Swap -from .augmentation.intensity import RandomBlur, Blur -from .augmentation.intensity import RandomNoise, Noise -from .augmentation.intensity import RandomSpike, Spike -from .augmentation.intensity import RandomGamma, Gamma -from .augmentation.intensity import RandomMotion, Motion -from .augmentation.intensity import RandomGhosting, Ghosting -from .augmentation.intensity import RandomBiasField, BiasField 
-from .augmentation.intensity import RandomLabelsToImage, LabelsToImage +from .augmentation.spatial import RandomElasticDeformation +from .augmentation.spatial import RandomFlip +from .fourier import FourierTransform +from .intensity_transform import IntensityTransform -# Preprocessing -from .preprocessing import Pad +# Generic +from .lambda_transform import Lambda +from .preprocessing import Clamp +from .preprocessing import Contour +from .preprocessing import CopyAffine from .preprocessing import Crop -from .preprocessing import Resize -from .preprocessing import Resample from .preprocessing import CropOrPad -from .preprocessing import CopyAffine -from .preprocessing import ToCanonical -from .preprocessing import ZNormalization -from .preprocessing import RescaleIntensity -from .preprocessing import Clamp -from .preprocessing import Mask from .preprocessing import EnsureShapeMultiple from .preprocessing import HistogramStandardization -from .preprocessing.intensity.histogram_standardization import train_histogram +from .preprocessing import KeepLargestComponent +from .preprocessing import Mask from .preprocessing import OneHot -from .preprocessing import Contour + +# Preprocessing +from .preprocessing import Pad from .preprocessing import RemapLabels from .preprocessing import RemoveLabels +from .preprocessing import Resample +from .preprocessing import RescaleIntensity +from .preprocessing import Resize from .preprocessing import SequentialLabels -from .preprocessing import KeepLargestComponent - +from .preprocessing import ToCanonical +from .preprocessing import ZNormalization +from .preprocessing.intensity.histogram_standardization import train_histogram +from .preprocessing.label.label_transform import LabelTransform +from .spatial_transform import SpatialTransform +from .transform import Transform __all__ = [ 'Transform', diff --git a/src/torchio/transforms/augmentation/__init__.py b/src/torchio/transforms/augmentation/__init__.py index f72f61d6f..68bf56270 100644 --- a/src/torchio/transforms/augmentation/__init__.py +++ b/src/torchio/transforms/augmentation/__init__.py @@ -1,6 +1,5 @@ from .random_transform import RandomTransform - __all__ = [ 'RandomTransform', ] diff --git a/src/torchio/transforms/augmentation/composition.py b/src/torchio/transforms/augmentation/composition.py index fc9e83cb0..7c0d89fbe 100644 --- a/src/torchio/transforms/augmentation/composition.py +++ b/src/torchio/transforms/augmentation/composition.py @@ -8,10 +8,9 @@ import numpy as np import torch -from . import RandomTransform -from .. import Transform from ...data.subject import Subject - +from ..transform import Transform +from . 
import RandomTransform TypeTransformsDict = Union[Dict[Transform, float], Sequence[Transform]] diff --git a/src/torchio/transforms/augmentation/intensity/__init__.py b/src/torchio/transforms/augmentation/intensity/__init__.py index 574a9c963..343a83591 100644 --- a/src/torchio/transforms/augmentation/intensity/__init__.py +++ b/src/torchio/transforms/augmentation/intensity/__init__.py @@ -17,7 +17,6 @@ from .random_swap import RandomSwap from .random_swap import Swap - __all__ = [ 'RandomSwap', 'Swap', diff --git a/src/torchio/transforms/augmentation/intensity/random_bias_field.py b/src/torchio/transforms/augmentation/intensity/random_bias_field.py index 6a420785d..f1e797c71 100644 --- a/src/torchio/transforms/augmentation/intensity/random_bias_field.py +++ b/src/torchio/transforms/augmentation/intensity/random_bias_field.py @@ -7,10 +7,10 @@ import numpy as np import torch -from .. import RandomTransform -from ... import IntensityTransform from ....data.subject import Subject from ....typing import TypeData +from ...intensity_transform import IntensityTransform +from .. import RandomTransform class RandomBiasField(RandomTransform, IntensityTransform): diff --git a/src/torchio/transforms/augmentation/intensity/random_blur.py b/src/torchio/transforms/augmentation/intensity/random_blur.py index ef85874b3..704def8f6 100644 --- a/src/torchio/transforms/augmentation/intensity/random_blur.py +++ b/src/torchio/transforms/augmentation/intensity/random_blur.py @@ -7,12 +7,12 @@ import scipy.ndimage as ndi import torch -from .. import RandomTransform -from ... import IntensityTransform from ....data.subject import Subject from ....typing import TypeData from ....typing import TypeSextetFloat from ....typing import TypeTripletFloat +from ...intensity_transform import IntensityTransform +from .. import RandomTransform class RandomBlur(RandomTransform, IntensityTransform): diff --git a/src/torchio/transforms/augmentation/intensity/random_gamma.py b/src/torchio/transforms/augmentation/intensity/random_gamma.py index efc86d7a4..bae5a1777 100644 --- a/src/torchio/transforms/augmentation/intensity/random_gamma.py +++ b/src/torchio/transforms/augmentation/intensity/random_gamma.py @@ -5,11 +5,11 @@ import numpy as np import torch -from .. import RandomTransform -from ... import IntensityTransform from ....data.subject import Subject from ....typing import TypeRangeFloat from ....utils import to_tuple +from ...intensity_transform import IntensityTransform +from .. import RandomTransform class RandomGamma(RandomTransform, IntensityTransform): @@ -62,7 +62,7 @@ class RandomGamma(RandomTransform, IntensityTransform): >>> subject = tio.datasets.FPG() >>> transform = tio.RandomGamma(log_gamma=(-0.3, 0.3)) # gamma between 0.74 and 1.34 >>> transformed = transform(subject) - """ # noqa: B950 + """ def __init__(self, log_gamma: TypeRangeFloat = (-0.3, 0.3), **kwargs): super().__init__(**kwargs) @@ -115,7 +115,7 @@ class Gamma(IntensityTransform): >>> subject = tio.datasets.FPG() >>> transform = tio.Gamma(0.8) >>> transformed = transform(subject) - """ # noqa: B950 + """ def __init__(self, gamma: float, **kwargs): super().__init__(**kwargs) diff --git a/src/torchio/transforms/augmentation/intensity/random_ghosting.py b/src/torchio/transforms/augmentation/intensity/random_ghosting.py index 4992fdcbe..2316f0b15 100644 --- a/src/torchio/transforms/augmentation/intensity/random_ghosting.py +++ b/src/torchio/transforms/augmentation/intensity/random_ghosting.py @@ -7,10 +7,10 @@ import numpy as np import torch -from .. 
import RandomTransform -from ... import FourierTransform -from ... import IntensityTransform from ....data.subject import Subject +from ...fourier import FourierTransform +from ...intensity_transform import IntensityTransform +from .. import RandomTransform class RandomGhosting(RandomTransform, IntensityTransform): diff --git a/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py b/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py index 5e4a0a6a0..972dc6db5 100644 --- a/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py +++ b/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py @@ -5,14 +5,14 @@ import torch -from .. import RandomTransform -from ... import IntensityTransform from ....data.image import LabelMap from ....data.image import ScalarImage from ....data.subject import Subject from ....typing import TypeData from ....typing import TypeRangeFloat from ....utils import check_sequence +from ...intensity_transform import IntensityTransform +from .. import RandomTransform class RandomLabelsToImage(RandomTransform, IntensityTransform): @@ -125,7 +125,7 @@ class RandomLabelsToImage(RandomTransform, IntensityTransform): >>> transformed = transform(subject) # subject's key 't1' has been replaced with the simulated image .. seealso:: :class:`~torchio.transforms.preprocessing.label.remap_labels.RemapLabels`. - """ # noqa: B950 + """ def __init__( self, @@ -142,8 +142,8 @@ def __init__( ): super().__init__(**kwargs) self.label_key = _parse_label_key(label_key) - self.used_labels = _parse_used_labels(used_labels) # type: ignore[arg-type] # noqa: B950 - self.mean, self.std = self.parse_mean_and_std(mean, std) # type: ignore[arg-type,assignment] # noqa: B950 + self.used_labels = _parse_used_labels(used_labels) # type: ignore[arg-type] + self.mean, self.std = self.parse_mean_and_std(mean, std) # type: ignore[arg-type,assignment] self.default_mean = self.parse_gaussian_parameter( default_mean, 'default_mean', @@ -254,7 +254,7 @@ def apply_transform(self, subject: Subject) -> Subject: labels = range(label_map.shape[0]) # Raise error if mean and std are not defined for every label - _check_mean_and_std_length(labels, self.mean, self.std) # type: ignore[arg-type] # noqa: B950 + _check_mean_and_std_length(labels, self.mean, self.std) # type: ignore[arg-type] for label in labels: mean, std = self.get_params(label) diff --git a/src/torchio/transforms/augmentation/intensity/random_motion.py b/src/torchio/transforms/augmentation/intensity/random_motion.py index 13b3b66d7..3ff81f0d7 100644 --- a/src/torchio/transforms/augmentation/intensity/random_motion.py +++ b/src/torchio/transforms/augmentation/intensity/random_motion.py @@ -9,12 +9,12 @@ import SimpleITK as sitk import torch -from .. import RandomTransform -from ... import FourierTransform -from ... import IntensityTransform from ....data.io import nib_to_sitk from ....data.subject import Subject from ....typing import TypeTripletFloat +from ...fourier import FourierTransform +from ...intensity_transform import IntensityTransform +from .. 
import RandomTransform class RandomMotion(RandomTransform, IntensityTransform, FourierTransform): @@ -147,9 +147,7 @@ def __init__( degrees: Union[TypeTripletFloat, Dict[str, TypeTripletFloat]], translation: Union[TypeTripletFloat, Dict[str, TypeTripletFloat]], times: Union[Sequence[float], Dict[str, Sequence[float]]], - image_interpolation: Union[ - Sequence[str], Dict[str, Sequence[str]] - ], # noqa: B950 + image_interpolation: Union[Sequence[str], Dict[str, Sequence[str]]], **kwargs, ): super().__init__(**kwargs) diff --git a/src/torchio/transforms/augmentation/intensity/random_noise.py b/src/torchio/transforms/augmentation/intensity/random_noise.py index a74b881b7..0914810fb 100644 --- a/src/torchio/transforms/augmentation/intensity/random_noise.py +++ b/src/torchio/transforms/augmentation/intensity/random_noise.py @@ -6,9 +6,9 @@ import torch -from .. import RandomTransform -from ... import IntensityTransform from ....data.subject import Subject +from ...intensity_transform import IntensityTransform +from .. import RandomTransform class RandomNoise(RandomTransform, IntensityTransform): @@ -99,8 +99,8 @@ def apply_transform(self, subject: Subject) -> Subject: mean, std, seed = args = self.mean, self.std, self.seed for name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): - values = (arg[name] for arg in args) # type: ignore[index,call-overload] # noqa: B950 - mean, std, seed = values # type: ignore[assignment] # noqa: B950 + values = (arg[name] for arg in args) # type: ignore[index,call-overload] + mean, std, seed = values # type: ignore[assignment] with self._use_seed(seed): assert isinstance(mean, float) assert isinstance(std, float) diff --git a/src/torchio/transforms/augmentation/intensity/random_spike.py b/src/torchio/transforms/augmentation/intensity/random_spike.py index 5db9e29d9..183f5b6e0 100644 --- a/src/torchio/transforms/augmentation/intensity/random_spike.py +++ b/src/torchio/transforms/augmentation/intensity/random_spike.py @@ -7,10 +7,10 @@ import numpy as np import torch -from .. import RandomTransform -from ... import FourierTransform -from ... import IntensityTransform from ....data.subject import Subject +from ...fourier import FourierTransform +from ...intensity_transform import IntensityTransform +from .. import RandomTransform class RandomSpike(RandomTransform, IntensityTransform, FourierTransform): @@ -53,7 +53,7 @@ def __init__( intensity, 'intensity_range', ) - self.num_spikes_range: Tuple[int, int] = self._parse_range( # type: ignore[assignment] # noqa: B950 + self.num_spikes_range: Tuple[int, int] = self._parse_range( # type: ignore[assignment] num_spikes, 'num_spikes', min_constraint=0, diff --git a/src/torchio/transforms/augmentation/intensity/random_swap.py b/src/torchio/transforms/augmentation/intensity/random_swap.py index 3f7aae64c..65d7d74db 100644 --- a/src/torchio/transforms/augmentation/intensity/random_swap.py +++ b/src/torchio/transforms/augmentation/intensity/random_swap.py @@ -11,13 +11,12 @@ import numpy as np import torch -from .. import RandomTransform -from ... import IntensityTransform from ....data.subject import Subject from ....typing import TypeTripletInt from ....typing import TypeTuple from ....utils import to_tuple - +from ...intensity_transform import IntensityTransform +from .. 
import RandomTransform TypeLocations = Sequence[Tuple[TypeTripletInt, TypeTripletInt]] TensorArray = TypeVar('TensorArray', np.ndarray, torch.Tensor) @@ -143,7 +142,7 @@ def apply_transform(self, subject: Subject) -> Subject: if self.invert_transform: assert isinstance(locations, list) locations.reverse() - swapped = _swap(image.data, patch_size, locations) # type: ignore[arg-type] # noqa: B950 + swapped = _swap(image.data, patch_size, locations) # type: ignore[arg-type] image.set_data(swapped) return subject diff --git a/src/torchio/transforms/augmentation/random_transform.py b/src/torchio/transforms/augmentation/random_transform.py index 86a5df79f..0da6e211b 100644 --- a/src/torchio/transforms/augmentation/random_transform.py +++ b/src/torchio/transforms/augmentation/random_transform.py @@ -4,8 +4,8 @@ import torch -from .. import Transform from ...typing import TypeRangeFloat +from ..transform import Transform class RandomTransform(Transform): diff --git a/src/torchio/transforms/augmentation/spatial/__init__.py b/src/torchio/transforms/augmentation/spatial/__init__.py index 119769cb4..4deb7acb3 100644 --- a/src/torchio/transforms/augmentation/spatial/__init__.py +++ b/src/torchio/transforms/augmentation/spatial/__init__.py @@ -6,7 +6,6 @@ from .random_flip import Flip from .random_flip import RandomFlip - __all__ = [ 'RandomFlip', 'Flip', diff --git a/src/torchio/transforms/augmentation/spatial/random_affine.py b/src/torchio/transforms/augmentation/spatial/random_affine.py index ea85219ad..2cdcb38d5 100644 --- a/src/torchio/transforms/augmentation/spatial/random_affine.py +++ b/src/torchio/transforms/augmentation/spatial/random_affine.py @@ -8,8 +8,6 @@ import SimpleITK as sitk import torch -from .. import RandomTransform -from ... import SpatialTransform from ....constants import INTENSITY from ....constants import TYPE from ....data.io import nib_to_sitk @@ -19,7 +17,8 @@ from ....typing import TypeTripletFloat from ....utils import get_major_sitk_version from ....utils import to_tuple - +from ...spatial_transform import SpatialTransform +from .. import RandomTransform TypeOneToSixFloat = Union[TypeRangeFloat, TypeTripletFloat, TypeSextetFloat] @@ -111,7 +110,7 @@ class RandomAffine(RandomTransform, SpatialTransform): ct_transformed = transform(ct) subject.add_image(ct_transformed, 'Transformed') subject.plot() - """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/transforms/augmentation/spatial/random_anisotropy.py b/src/torchio/transforms/augmentation/spatial/random_anisotropy.py index 2711f415c..c50fe48ce 100644 --- a/src/torchio/transforms/augmentation/spatial/random_anisotropy.py +++ b/src/torchio/transforms/augmentation/spatial/random_anisotropy.py @@ -4,11 +4,11 @@ import torch -from .. import RandomTransform from ....data.subject import Subject from ....typing import TypeRangeFloat from ....utils import to_tuple from ...preprocessing import Resample +from .. import RandomTransform class RandomAnisotropy(RandomTransform): @@ -44,7 +44,7 @@ class RandomAnisotropy(RandomTransform): ... 
) # Multiply spacing of one of the 3 axes by a factor randomly chosen in [2, 5] >>> colin = tio.datasets.Colin27() >>> transformed = transform(colin) - """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py b/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py index b3aa69840..cafb8514c 100644 --- a/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py +++ b/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py @@ -8,15 +8,14 @@ import SimpleITK as sitk import torch -from .. import RandomTransform -from ... import SpatialTransform from ....data.image import ScalarImage from ....data.io import nib_to_sitk from ....data.subject import Subject from ....typing import TypeTripletFloat from ....typing import TypeTripletInt from ....utils import to_tuple - +from ...spatial_transform import SpatialTransform +from .. import RandomTransform SPLINE_ORDER = 3 @@ -118,7 +117,7 @@ class RandomElasticDeformation(RandomTransform, SpatialTransform): .. [#] Technically, :math:`2 \epsilon` should be added to the image bounds, where :math:`\epsilon = 2^{-3}` `according to ITK source code `_. - """ # noqa: B950 + """ def __init__( self, @@ -132,9 +131,9 @@ def __init__( super().__init__(**kwargs) self._bspline_transformation = None self.num_control_points = to_tuple(num_control_points, length=3) - _parse_num_control_points(self.num_control_points) # type: ignore[arg-type] # noqa: B950 + _parse_num_control_points(self.num_control_points) # type: ignore[arg-type] self.max_displacement = to_tuple(max_displacement, length=3) - _parse_max_displacement(self.max_displacement) # type: ignore[arg-type] # noqa: B950 + _parse_max_displacement(self.max_displacement) # type: ignore[arg-type] self.num_locked_borders = locked_borders if locked_borders not in (0, 1, 2): raise ValueError('locked_borders must be 0, 1, or 2') diff --git a/src/torchio/transforms/augmentation/spatial/random_flip.py b/src/torchio/transforms/augmentation/spatial/random_flip.py index b61237b30..89a44ff8c 100644 --- a/src/torchio/transforms/augmentation/spatial/random_flip.py +++ b/src/torchio/transforms/augmentation/spatial/random_flip.py @@ -5,10 +5,10 @@ import numpy as np import torch -from .. import RandomTransform -from ... import SpatialTransform from ....data.subject import Subject from ....utils import to_tuple +from ...spatial_transform import SpatialTransform +from .. import RandomTransform class RandomFlip(RandomTransform, SpatialTransform): diff --git a/src/torchio/transforms/data_parser.py b/src/torchio/transforms/data_parser.py index 526bd7488..adb9f4ec6 100644 --- a/src/torchio/transforms/data_parser.py +++ b/src/torchio/transforms/data_parser.py @@ -15,7 +15,6 @@ from ..data.subject import Subject from ..typing import TypeData - TypeTransformInput = Union[ Subject, Image, @@ -75,7 +74,7 @@ def get_subject(self): 'If the input is a dictionary, a value for "include" must' ' be specified when instantiating the transform. 
See the' ' docs for Transform:' - ' https://torchio.readthedocs.io/transforms/transforms.html#torchio.transforms.Transform' # noqa: B950 + ' https://torchio.readthedocs.io/transforms/transforms.html#torchio.transforms.Transform' ) raise RuntimeError(message) subject = self._get_subject_from_dict( diff --git a/src/torchio/transforms/interpolation.py b/src/torchio/transforms/interpolation.py index c3037148f..58836ff45 100644 --- a/src/torchio/transforms/interpolation.py +++ b/src/torchio/transforms/interpolation.py @@ -1,4 +1,3 @@ -# noqa: B950 import enum import SimpleITK as sitk diff --git a/src/torchio/transforms/lambda_transform.py b/src/torchio/transforms/lambda_transform.py index a93b9f067..1cabc520b 100644 --- a/src/torchio/transforms/lambda_transform.py +++ b/src/torchio/transforms/lambda_transform.py @@ -28,7 +28,7 @@ class Lambda(Transform): >>> def double(x): ... return 2 * x >>> double_transform = tio.Lambda(double) - """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/transforms/preprocessing/__init__.py b/src/torchio/transforms/preprocessing/__init__.py index 18b99dfed..435e56d57 100644 --- a/src/torchio/transforms/preprocessing/__init__.py +++ b/src/torchio/transforms/preprocessing/__init__.py @@ -18,7 +18,6 @@ from .spatial.resize import Resize from .spatial.to_canonical import ToCanonical - __all__ = [ 'Pad', 'Crop', diff --git a/src/torchio/transforms/preprocessing/intensity/__init__.py b/src/torchio/transforms/preprocessing/intensity/__init__.py index 98910c3d2..763a18477 100644 --- a/src/torchio/transforms/preprocessing/intensity/__init__.py +++ b/src/torchio/transforms/preprocessing/intensity/__init__.py @@ -1,6 +1,5 @@ from .normalization_transform import NormalizationTransform - __all__ = [ 'NormalizationTransform', ] diff --git a/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py b/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py index bdd93c3c0..a6e12a8f5 100644 --- a/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py +++ b/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py @@ -7,8 +7,8 @@ from typing import Tuple from typing import Union -import torch import numpy as np +import torch from tqdm.auto import tqdm from ....data.io import read_image @@ -51,7 +51,7 @@ class HistogramStandardization(NormalizationTransform): >>> transform = tio.HistogramStandardization(landmarks) >>> torch.save(landmarks, 'path_to_landmarks.pth') >>> transform = tio.HistogramStandardization('path_to_landmarks.pth') - """ # noqa: B950 + """ def __init__( self, @@ -161,12 +161,12 @@ def train( ... 
} >>> >>> transform = HistogramStandardization(landmarks_dict) - """ # noqa: B950 + """ is_masks_list = isinstance(mask_path, Sequence) - if is_masks_list and len(mask_path) != len(images_paths): # type: ignore[arg-type] # noqa: B950 + if is_masks_list and len(mask_path) != len(images_paths): # type: ignore[arg-type] message = ( - f'Different number of images ({len(images_paths)})' # type: ignore[arg-type] # noqa: B950 - f' and mask ({len(mask_path)}) paths found' # type: ignore[arg-type] # noqa: B950 + f'Different number of images ({len(images_paths)})' # type: ignore[arg-type] + f' and mask ({len(mask_path)}) paths found' # type: ignore[arg-type] ) raise ValueError(message) quantiles_cutoff = DEFAULT_CUTOFF if cutoff is None else cutoff diff --git a/src/torchio/transforms/preprocessing/intensity/mask.py b/src/torchio/transforms/preprocessing/intensity/mask.py index 1458f8539..4b8aaec74 100644 --- a/src/torchio/transforms/preprocessing/intensity/mask.py +++ b/src/torchio/transforms/preprocessing/intensity/mask.py @@ -4,10 +4,10 @@ import torch -from ... import IntensityTransform from ....data.image import ScalarImage from ....data.subject import Subject from ....transforms.transform import TypeMaskingMethod +from ...intensity_transform import IntensityTransform class Mask(IntensityTransform): @@ -45,7 +45,7 @@ class Mask(IntensityTransform): masked = mask(subject) subject.add_image(masked.t1, 'Masked') subject.plot() - """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/transforms/preprocessing/intensity/normalization_transform.py b/src/torchio/transforms/preprocessing/intensity/normalization_transform.py index cc79750b5..7dfa5035b 100644 --- a/src/torchio/transforms/preprocessing/intensity/normalization_transform.py +++ b/src/torchio/transforms/preprocessing/intensity/normalization_transform.py @@ -1,8 +1,8 @@ import torch -from ... import IntensityTransform from ....data.subject import Subject from ....transforms.transform import TypeMaskingMethod +from ...intensity_transform import IntensityTransform class NormalizationTransform(IntensityTransform): @@ -34,7 +34,7 @@ class NormalizationTransform(IntensityTransform): >>> transformed = transform(subject) # use only values within the brain >>> transform = tio.ZNormalization(masking_method=lambda x: x > x.mean()) >>> transformed = transform(subject) # use values above the image mean - """ # noqa: B950 + """ def __init__(self, masking_method: TypeMaskingMethod = None, **kwargs): super().__init__(**kwargs) diff --git a/src/torchio/transforms/preprocessing/intensity/rescale.py b/src/torchio/transforms/preprocessing/intensity/rescale.py index cdbdaee30..49db9bec6 100644 --- a/src/torchio/transforms/preprocessing/intensity/rescale.py +++ b/src/torchio/transforms/preprocessing/intensity/rescale.py @@ -42,7 +42,7 @@ class RescaleIntensity(NormalizationTransform): .. _this scikit-image example: https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_equalize.html#sphx-glr-auto-examples-color-exposure-plot-equalize-py .. 
_nn-UNet paper: https://arxiv.org/abs/1809.10486 - """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/transforms/preprocessing/label/keep_largest_component.py b/src/torchio/transforms/preprocessing/label/keep_largest_component.py index d5fd76a7f..fb17c2f82 100644 --- a/src/torchio/transforms/preprocessing/label/keep_largest_component.py +++ b/src/torchio/transforms/preprocessing/label/keep_largest_component.py @@ -16,7 +16,7 @@ class KeepLargestComponent(LabelTransform): extending this transform, please `open a new issue`_. .. _open a new issue: https://github.com/fepegar/torchio/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=Improve%20KeepLargestComponent%20transform - """ # noqa: B950 + """ def apply_transform(self, subject: Subject) -> Subject: for image in self.get_images(subject): diff --git a/src/torchio/transforms/preprocessing/label/remap_labels.py b/src/torchio/transforms/preprocessing/label/remap_labels.py index 99b79ca4e..3ea68c36a 100644 --- a/src/torchio/transforms/preprocessing/label/remap_labels.py +++ b/src/torchio/transforms/preprocessing/label/remap_labels.py @@ -132,7 +132,7 @@ class RemapLabels(LabelTransform): >>> transformed = transform(subject) >>> # Apply the inverse on the right side only. The labels are correctly split into left/right. >>> inverse_transformed = transformed.apply_inverse_transform() - """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/transforms/preprocessing/spatial/bounds_transform.py b/src/torchio/transforms/preprocessing/spatial/bounds_transform.py index 8f49fc9bc..b65c37bea 100644 --- a/src/torchio/transforms/preprocessing/spatial/bounds_transform.py +++ b/src/torchio/transforms/preprocessing/spatial/bounds_transform.py @@ -1,5 +1,5 @@ -from ... import SpatialTransform from ....transforms.transform import TypeBounds +from ...spatial_transform import SpatialTransform class BoundsTransform(SpatialTransform): diff --git a/src/torchio/transforms/preprocessing/spatial/copy_affine.py b/src/torchio/transforms/preprocessing/spatial/copy_affine.py index 39861fae8..83fc809a6 100644 --- a/src/torchio/transforms/preprocessing/spatial/copy_affine.py +++ b/src/torchio/transforms/preprocessing/spatial/copy_affine.py @@ -1,7 +1,7 @@ import copy -from ... import SpatialTransform from ....data.subject import Subject +from ...spatial_transform import SpatialTransform class CopyAffine(SpatialTransform): @@ -60,7 +60,7 @@ class CopyAffine(SpatialTransform): * https://github.com/fepegar/torchio/issues/430 * https://github.com/fepegar/torchio/issues/382 * https://github.com/fepegar/torchio/pull/592 - """ # noqa: B950 + """ def __init__(self, target: str, **kwargs): super().__init__(**kwargs) diff --git a/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py b/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py index 8ca20be05..49e68edb7 100644 --- a/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py +++ b/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py @@ -6,9 +6,9 @@ import numpy as np -from ... 
import SpatialTransform from ....data.subject import Subject from ....utils import parse_spatial_shape +from ...spatial_transform import SpatialTransform from ...transform import TypeSixBounds from ...transform import TypeTripletInt from .crop import Crop @@ -69,7 +69,7 @@ class CropOrPad(SpatialTransform): t1_pad_crop = crop_pad(t1) subject = tio.Subject(t1=t1, crop_pad=t1_pad_crop) subject.plot() - """ # noqa: B950 + """ def __init__( self, @@ -120,7 +120,7 @@ def _bbox_mask(mask_volume: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: Args: mask_volume: 3D NumPy array. - """ # noqa: B950 + """ i_any = np.any(mask_volume, axis=(1, 2)) j_any = np.any(mask_volume, axis=(0, 2)) k_any = np.any(mask_volume, axis=(0, 1)) @@ -150,7 +150,7 @@ def _get_six_bounds_parameters( >>> p = np.array((4, 0, 7)) >>> CropOrPad._get_six_bounds_parameters(p) (2, 2, 0, 0, 4, 3) - """ # noqa: B950 + """ parameters = parameters / 2 result = [] for number in parameters: diff --git a/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py b/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py index c5bad843c..b625686e0 100644 --- a/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py +++ b/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py @@ -5,10 +5,10 @@ import numpy as np -from ... import SpatialTransform from ....data.subject import Subject from ....typing import TypeTripletInt from ....utils import to_tuple +from ...spatial_transform import SpatialTransform from .crop_or_pad import CropOrPad @@ -112,7 +112,7 @@ class EnsureShapeMultiple(SpatialTransform): >>> transformed = transform(image_2d) >>> transformed.shape torch.Size([1, 176, 216, 1]) - """ # noqa: B950 + """ def __init__( self, @@ -129,7 +129,7 @@ def __init__( def apply_transform(self, subject: Subject) -> Subject: source_shape = np.array(subject.spatial_shape, np.uint16) - function: Callable = np.floor if self.method == 'crop' else np.ceil # type: ignore[assignment] # noqa: B950 + function: Callable = np.floor if self.method == 'crop' else np.ceil # type: ignore[assignment] integer_ratio = function(source_shape / self.target_multiple) target_shape = integer_ratio * self.target_multiple target_shape = np.maximum(target_shape, 1) diff --git a/src/torchio/transforms/preprocessing/spatial/pad.py b/src/torchio/transforms/preprocessing/spatial/pad.py index 8713de8f2..deaaae5fb 100644 --- a/src/torchio/transforms/preprocessing/spatial/pad.py +++ b/src/torchio/transforms/preprocessing/spatial/pad.py @@ -40,7 +40,7 @@ class Pad(BoundsTransform): :class:`~torchio.transforms.CropOrPad` instead. .. 
_NumPy docs: https://numpy.org/doc/stable/reference/generated/numpy.pad.html - """ # noqa: B950 + """ PADDING_MODES = ( 'empty', @@ -101,7 +101,7 @@ def apply_transform(self, subject: Subject) -> Subject: kwargs = {'mode': self.padding_mode} pad_params = self.bounds_parameters paddings = (0, 0), pad_params[:2], pad_params[2:4], pad_params[4:] - padded = np.pad(image.data, paddings, **kwargs) # type: ignore[call-overload] # noqa: B950 + padded = np.pad(image.data, paddings, **kwargs) # type: ignore[call-overload] image.set_data(torch.as_tensor(padded)) image.affine = new_affine return subject diff --git a/src/torchio/transforms/preprocessing/spatial/resample.py b/src/torchio/transforms/preprocessing/spatial/resample.py index d9a91de2a..7b5b46147 100644 --- a/src/torchio/transforms/preprocessing/spatial/resample.py +++ b/src/torchio/transforms/preprocessing/spatial/resample.py @@ -10,7 +10,6 @@ import SimpleITK as sitk import torch -from ... import SpatialTransform from ....data.image import Image from ....data.image import ScalarImage from ....data.io import get_sitk_metadata_from_ras_affine @@ -18,7 +17,7 @@ from ....data.subject import Subject from ....typing import TypePath from ....typing import TypeTripletFloat - +from ...spatial_transform import SpatialTransform TypeSpacing = Union[float, Tuple[float, float, float]] @@ -76,7 +75,7 @@ class Resample(SpatialTransform): t1_resampled = resample(subject.t1) subject.add_image(t1_resampled, 'Downsampled') subject.plot() - """ # noqa: B950 + """ def __init__( self, diff --git a/src/torchio/transforms/preprocessing/spatial/resize.py b/src/torchio/transforms/preprocessing/spatial/resize.py index 0087713a4..d8e450169 100644 --- a/src/torchio/transforms/preprocessing/spatial/resize.py +++ b/src/torchio/transforms/preprocessing/spatial/resize.py @@ -2,10 +2,10 @@ import numpy as np -from ... import SpatialTransform from ....data.subject import Subject from ....typing import TypeSpatialShape from ....utils import to_tuple +from ...spatial_transform import SpatialTransform from .crop_or_pad import CropOrPad from .resample import Resample diff --git a/src/torchio/transforms/preprocessing/spatial/to_canonical.py b/src/torchio/transforms/preprocessing/spatial/to_canonical.py index 46b619473..ee53814bb 100644 --- a/src/torchio/transforms/preprocessing/spatial/to_canonical.py +++ b/src/torchio/transforms/preprocessing/spatial/to_canonical.py @@ -2,8 +2,8 @@ import numpy as np import torch -from ... import SpatialTransform from ....data.subject import Subject +from ...spatial_transform import SpatialTransform class ToCanonical(SpatialTransform): @@ -26,7 +26,7 @@ class ToCanonical(SpatialTransform): :meth:`nibabel.as_closest_canonical`. .. 
_NiBabel docs about image orientation: https://nipy.org/nibabel/image_orientation.html - """ # noqa: B950 + """ def apply_transform(self, subject: Subject) -> Subject: for image in subject.get_images(intensity_only=False): diff --git a/src/torchio/transforms/transform.py b/src/torchio/transforms/transform.py index 26f522443..e6cf41396 100644 --- a/src/torchio/transforms/transform.py +++ b/src/torchio/transforms/transform.py @@ -26,12 +26,12 @@ from ..typing import TypeKeys from ..typing import TypeNumber from ..typing import TypeTripletInt -from ..utils import to_tuple from ..utils import is_iterable +from ..utils import to_tuple from .data_parser import DataParser from .data_parser import TypeTransformInput -from .interpolation import get_sitk_interpolator from .interpolation import Interpolation +from .interpolation import get_sitk_interpolator TypeSixBounds = Tuple[int, int, int, int, int, int] TypeBounds = Union[ @@ -201,9 +201,13 @@ def apply_transform(self, subject: Subject) -> Subject: raise NotImplementedError def add_transform_to_subject_history(self, subject): + from . import Compose + from . import CropOrPad + from . import EnsureShapeMultiple + from . import OneOf from .augmentation import RandomTransform - from . import Compose, OneOf, CropOrPad, EnsureShapeMultiple - from .preprocessing import SequentialLabels, Resize + from .preprocessing import Resize + from .preprocessing import SequentialLabels call_others = ( RandomTransform, @@ -448,9 +452,9 @@ def parse_bounds(bounds_parameters: TypeBounds) -> Optional[TypeSixBounds]: if bounds_parameters is None: return None try: - bounds_parameters = tuple(bounds_parameters) # type: ignore[assignment,arg-type] # noqa: B950 + bounds_parameters = tuple(bounds_parameters) # type: ignore[assignment,arg-type] except TypeError: - bounds_parameters = (bounds_parameters,) # type: ignore[assignment] # noqa: B950 + bounds_parameters = (bounds_parameters,) # type: ignore[assignment] # Check that numbers are integers for number in bounds_parameters: # type: ignore[union-attr] @@ -460,7 +464,7 @@ def parse_bounds(bounds_parameters: TypeBounds) -> Optional[TypeSixBounds]: f' not "{bounds_parameters}" of type {type(number)}' ) raise ValueError(message) - bounds_parameters_tuple = tuple(int(n) for n in bounds_parameters) # type: ignore[assignment,union-attr] # noqa: B950 + bounds_parameters_tuple = tuple(int(n) for n in bounds_parameters) # type: ignore[assignment,union-attr] bounds_parameters_length = len(bounds_parameters_tuple) if bounds_parameters_length == 6: return bounds_parameters_tuple # type: ignore[return-value] @@ -511,7 +515,7 @@ def get_mask_from_masking_method( tensor, ) elif type(masking_method) in (tuple, list, int): - return self.get_mask_from_bounds(masking_method, tensor) # type: ignore[arg-type] # noqa: B950 + return self.get_mask_from_bounds(masking_method, tensor) # type: ignore[arg-type] first_anat_axes = tuple(s[0] for s in ANATOMICAL_AXES) message = ( 'Masking method must be one of:\n' diff --git a/src/torchio/typing.py b/src/torchio/typing.py index 2c1022c64..78898fad4 100644 --- a/src/torchio/typing.py +++ b/src/torchio/typing.py @@ -8,7 +8,6 @@ import numpy as np import torch - # For typing hints TypePath = Union[str, Path] TypeNumber = Union[int, float] diff --git a/src/torchio/utils.py b/src/torchio/utils.py index 6f7eecc4a..a63e094c4 100644 --- a/src/torchio/utils.py +++ b/src/torchio/utils.py @@ -16,10 +16,10 @@ from typing import Tuple from typing import Union -import nibabel as nib import numpy as np import 
SimpleITK as sitk import torch +from nibabel.nifti1 import Nifti1Image from torch.utils.data import DataLoader from torch.utils.data._utils.collate import default_collate from tqdm.auto import trange @@ -88,7 +88,9 @@ def create_dummy_dataset( force: bool = False, verbose: bool = False, ): - from .data import ScalarImage, LabelMap, Subject + from .data import LabelMap + from .data import ScalarImage + from .data import Subject output_dir = tempfile.gettempdir() if directory is None else directory output_dir = Path(output_dir) @@ -128,11 +130,11 @@ def create_dummy_dataset( image *= 255 image_path = images_dir / f'image_{i}{suffix}' - nii = nib.Nifti1Image(image.astype(np.uint8), affine) + nii = Nifti1Image(image.astype(np.uint8), affine) nii.to_filename(str(image_path)) label_path = labels_dir / f'label_{i}{suffix}' - nii = nib.Nifti1Image(label.astype(np.uint8), affine) + nii = Nifti1Image(label.astype(np.uint8), affine) nii.to_filename(str(label_path)) subject = Subject( @@ -272,7 +274,9 @@ def get_subjects_from_batch(batch: Dict) -> List: batch: Dictionary generated by a :class:`tio.SubjectsLoader` extracting data from a :class:`torchio.SubjectsDataset`. """ - from .data import ScalarImage, LabelMap, Subject + from .data import LabelMap + from .data import ScalarImage + from .data import Subject subjects = [] image_names, batch_size = get_batch_images_and_size(batch) @@ -352,7 +356,7 @@ def guess_external_viewer() -> Optional[Path]: itk = 'ITK-SNAP' slicer = 'Slicer' if platform == 'darwin': - app_path = '/Applications/{}.app/Contents/MacOS/{}' # noqa: FS003 + app_path = '/Applications/{}.app/Contents/MacOS/{}' itk_snap_path = Path(app_path.format(2 * (itk,))) if itk_snap_path.is_file(): return itk_snap_path diff --git a/src/torchio/visualization.py b/src/torchio/visualization.py index 40234e931..ce6d82400 100644 --- a/src/torchio/visualization.py +++ b/src/torchio/visualization.py @@ -169,7 +169,7 @@ def get_num_bins(x: np.ndarray) -> int: Args: x: Input values. 
- """ # noqa: B950 + """ # Freedman–Diaconis number of bins q25, q75 = np.percentile(x, [25, 75]) bin_width = 2 * (q75 - q25) * len(x) ** (-1 / 3) @@ -219,7 +219,7 @@ def make_gif( message = 'Please install Pillow to use Image.to_gif(): pip install Pillow' raise RuntimeError(message) from e transform = RescaleIntensity((0, 255)) - tensor = transform(tensor) if rescale else tensor # type: ignore[assignment] # noqa: B950 + tensor = transform(tensor) if rescale else tensor # type: ignore[assignment] single_channel = len(tensor) == 1 # Move channels dimension to the end and bring selected axis to 0 diff --git a/tests/data/inference/test_aggregator.py b/tests/data/inference/test_aggregator.py index b1523ff8d..a024b3c4f 100644 --- a/tests/data/inference/test_aggregator.py +++ b/tests/data/inference/test_aggregator.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase @@ -118,7 +119,7 @@ def test_patch_crop_issue_padding(self): self.run_patch_crop_issue(padding_mode='constant') def test_bad_aggregator_shape(self): - # https://github.com/microsoft/InnerEye-DeepLearning/pull/677/checks?check_run_id=5395915817 # noqa: B950 + # https://github.com/microsoft/InnerEye-DeepLearning/pull/677/checks?check_run_id=5395915817 tensor = torch.ones(1, 40, 40, 40) image_name = 'img' subject = tio.Subject({image_name: tio.ScalarImage(tensor=tensor)}) diff --git a/tests/data/inference/test_grid_sampler.py b/tests/data/inference/test_grid_sampler.py index 5cfa03063..6e7d1529a 100644 --- a/tests/data/inference/test_grid_sampler.py +++ b/tests/data/inference/test_grid_sampler.py @@ -1,9 +1,10 @@ #!/usr/bin/env python from copy import copy -import torchio as tio import pytest +import torchio as tio + from ...utils import TorchioTestCase diff --git a/tests/data/sampler/test_label_sampler.py b/tests/data/sampler/test_label_sampler.py index 9ebdca287..0fab9922e 100644 --- a/tests/data/sampler/test_label_sampler.py +++ b/tests/data/sampler/test_label_sampler.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/data/sampler/test_patch_sampler.py b/tests/data/sampler/test_patch_sampler.py index 5a46dd929..49bf61e19 100644 --- a/tests/data/sampler/test_patch_sampler.py +++ b/tests/data/sampler/test_patch_sampler.py @@ -1,4 +1,5 @@ import pytest + from torchio.data import PatchSampler from ...utils import TorchioTestCase diff --git a/tests/data/sampler/test_random_sampler.py b/tests/data/sampler/test_random_sampler.py index 4b04ff6cc..3c9e0fb57 100644 --- a/tests/data/sampler/test_random_sampler.py +++ b/tests/data/sampler/test_random_sampler.py @@ -1,4 +1,5 @@ import pytest + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/data/sampler/test_uniform_sampler.py b/tests/data/sampler/test_uniform_sampler.py index cf73dbc13..6fe202c15 100644 --- a/tests/data/sampler/test_uniform_sampler.py +++ b/tests/data/sampler/test_uniform_sampler.py @@ -1,4 +1,5 @@ import torch + import torchio from torchio.data import UniformSampler diff --git a/tests/data/sampler/test_weighted_sampler.py b/tests/data/sampler/test_weighted_sampler.py index 6fd3e8c6c..20c53e001 100644 --- a/tests/data/sampler/test_weighted_sampler.py +++ b/tests/data/sampler/test_weighted_sampler.py @@ -1,4 +1,5 @@ import torch + import torchio as tio from torchio.data import WeightedSampler diff --git a/tests/data/test_image.py b/tests/data/test_image.py index b467132fa..d4a00dd78 100644 --- a/tests/data/test_image.py +++ 
b/tests/data/test_image.py @@ -1,5 +1,6 @@ #!/usr/bin/env python """Tests for Image.""" + import copy import sys import tempfile @@ -8,6 +9,7 @@ import numpy as np import pytest import torch + import torchio as tio from ..utils import TorchioTestCase diff --git a/tests/data/test_io.py b/tests/data/test_io.py index 0624f00ff..d3c634294 100644 --- a/tests/data/test_io.py +++ b/tests/data/test_io.py @@ -6,7 +6,8 @@ import SimpleITK as sitk import torch -from torchio.data import ScalarImage, io +from torchio.data import ScalarImage +from torchio.data import io from ..utils import TorchioTestCase diff --git a/tests/data/test_queue.py b/tests/data/test_queue.py index 1e6ae49df..1b8b98e72 100644 --- a/tests/data/test_queue.py +++ b/tests/data/test_queue.py @@ -2,8 +2,9 @@ import pytest import torch -import torchio as tio from parameterized import parameterized + +import torchio as tio from torchio.data import UniformSampler from torchio.utils import create_dummy_dataset diff --git a/tests/data/test_subject.py b/tests/data/test_subject.py index 8a9cb7476..49a051fb6 100644 --- a/tests/data/test_subject.py +++ b/tests/data/test_subject.py @@ -5,6 +5,7 @@ import numpy as np import pytest import torch + import torchio as tio from ..utils import TorchioTestCase @@ -115,7 +116,7 @@ def test_same_space(self): -5.54619071e-01, -1.57071802e-02, 2.28515778e02, - ], # noqa: B950 + ], [0.00000000e00, 0.00000000e00, 0.00000000e00, 1.00000000e00], ] ) diff --git a/tests/data/test_subjects_dataset.py b/tests/data/test_subjects_dataset.py index 9187238bb..13c2247f0 100644 --- a/tests/data/test_subjects_dataset.py +++ b/tests/data/test_subjects_dataset.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ..utils import TorchioTestCase diff --git a/tests/datasets/test_ixi.py b/tests/datasets/test_ixi.py index 427c51cd5..e4b8e9922 100644 --- a/tests/datasets/test_ixi.py +++ b/tests/datasets/test_ixi.py @@ -1,4 +1,5 @@ import pytest + import torchio as tio from ..utils import TorchioTestCase diff --git a/tests/datasets/test_medmnist.py b/tests/datasets/test_medmnist.py index 680eec494..c9872baee 100644 --- a/tests/datasets/test_medmnist.py +++ b/tests/datasets/test_medmnist.py @@ -1,6 +1,7 @@ import os import pytest + import torchio as tio from torchio.datasets.medmnist import AdrenalMNIST3D from torchio.datasets.medmnist import FractureMNIST3D @@ -9,7 +10,6 @@ from torchio.datasets.medmnist import SynapseMNIST3D from torchio.datasets.medmnist import VesselMNIST3D - classes = ( OrganMNIST3D, NoduleMNIST3D, diff --git a/tests/test_cli.py b/tests/test_cli.py index f9fb123b2..c619015f6 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,12 +1,13 @@ #!/usr/bin/env python """Tests for CLI tool package.""" + from typer.testing import CliRunner + from torchio.cli import apply_transform from torchio.cli import print_info from .utils import TorchioTestCase - runner = CliRunner() @@ -46,4 +47,4 @@ def test_cli_hd(self): ' dtype: torch.DoubleTensor;' ' memory: 46.9 KiB' ')\n' - ) # noqa: B950 + ) diff --git a/tests/test_utils.py b/tests/test_utils.py index 6070896aa..0215ef322 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -2,6 +2,7 @@ import pytest import torch + import torchio as tio from .utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_oneof.py b/tests/transforms/augmentation/test_oneof.py index 49ea2d5f6..fb3776b36 100644 --- a/tests/transforms/augmentation/test_oneof.py +++ b/tests/transforms/augmentation/test_oneof.py @@ -1,4 +1,5 @@ import pytest 
+ import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_affine.py b/tests/transforms/augmentation/test_random_affine.py index b68bd4654..fdb8c3fbd 100644 --- a/tests/transforms/augmentation/test_random_affine.py +++ b/tests/transforms/augmentation/test_random_affine.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_anisotropy.py b/tests/transforms/augmentation/test_random_anisotropy.py index e91448d86..2730a0939 100644 --- a/tests/transforms/augmentation/test_random_anisotropy.py +++ b/tests/transforms/augmentation/test_random_anisotropy.py @@ -1,5 +1,6 @@ import pytest import torch + from torchio import RandomAnisotropy from torchio import ScalarImage diff --git a/tests/transforms/augmentation/test_random_bias_field.py b/tests/transforms/augmentation/test_random_bias_field.py index 473c856cb..d9b351739 100644 --- a/tests/transforms/augmentation/test_random_bias_field.py +++ b/tests/transforms/augmentation/test_random_bias_field.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_blur.py b/tests/transforms/augmentation/test_random_blur.py index 2246a7b61..b25c08bd0 100644 --- a/tests/transforms/augmentation/test_random_blur.py +++ b/tests/transforms/augmentation/test_random_blur.py @@ -1,4 +1,5 @@ import pytest + from torchio import RandomBlur from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_elastic_deformation.py b/tests/transforms/augmentation/test_random_elastic_deformation.py index 6697b48fc..534cba174 100644 --- a/tests/transforms/augmentation/test_random_elastic_deformation.py +++ b/tests/transforms/augmentation/test_random_elastic_deformation.py @@ -1,4 +1,5 @@ import pytest + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_flip.py b/tests/transforms/augmentation/test_random_flip.py index 79bc52f91..1e36968d8 100644 --- a/tests/transforms/augmentation/test_random_flip.py +++ b/tests/transforms/augmentation/test_random_flip.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_gamma.py b/tests/transforms/augmentation/test_random_gamma.py index 7dba25981..b6bd3bdc3 100644 --- a/tests/transforms/augmentation/test_random_gamma.py +++ b/tests/transforms/augmentation/test_random_gamma.py @@ -1,5 +1,6 @@ import pytest import torch + from torchio import RandomGamma from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_ghosting.py b/tests/transforms/augmentation/test_random_ghosting.py index 944aafe49..563c2f502 100644 --- a/tests/transforms/augmentation/test_random_ghosting.py +++ b/tests/transforms/augmentation/test_random_ghosting.py @@ -1,4 +1,5 @@ import pytest + from torchio import RandomGhosting from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_labels_to_image.py b/tests/transforms/augmentation/test_random_labels_to_image.py index 7afd98098..4b192d9b2 100644 --- a/tests/transforms/augmentation/test_random_labels_to_image.py +++ b/tests/transforms/augmentation/test_random_labels_to_image.py @@ -1,4 +1,5 @@ import pytest + from torchio.transforms import RandomLabelsToImage from ...utils import TorchioTestCase diff 
--git a/tests/transforms/augmentation/test_random_motion.py b/tests/transforms/augmentation/test_random_motion.py index 69678434a..27bc17564 100644 --- a/tests/transforms/augmentation/test_random_motion.py +++ b/tests/transforms/augmentation/test_random_motion.py @@ -1,4 +1,5 @@ import pytest + from torchio import RandomMotion from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_noise.py b/tests/transforms/augmentation/test_random_noise.py index 8cddaad06..f18226f27 100644 --- a/tests/transforms/augmentation/test_random_noise.py +++ b/tests/transforms/augmentation/test_random_noise.py @@ -1,4 +1,5 @@ import pytest + from torchio import RandomNoise from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_spike.py b/tests/transforms/augmentation/test_random_spike.py index 101644178..27c0cbd32 100644 --- a/tests/transforms/augmentation/test_random_spike.py +++ b/tests/transforms/augmentation/test_random_spike.py @@ -1,4 +1,5 @@ import pytest + from torchio import RandomSpike from ...utils import TorchioTestCase diff --git a/tests/transforms/augmentation/test_random_swap.py b/tests/transforms/augmentation/test_random_swap.py index 0be44e15b..1aa70060f 100644 --- a/tests/transforms/augmentation/test_random_swap.py +++ b/tests/transforms/augmentation/test_random_swap.py @@ -1,4 +1,5 @@ import pytest + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/label/test_remap_labels.py b/tests/transforms/label/test_remap_labels.py index ed12a001b..458e8e2a3 100644 --- a/tests/transforms/label/test_remap_labels.py +++ b/tests/transforms/label/test_remap_labels.py @@ -1,4 +1,5 @@ import pytest + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/label/test_sequential_labels.py b/tests/transforms/label/test_sequential_labels.py index 12e2c5483..d06771e98 100644 --- a/tests/transforms/label/test_sequential_labels.py +++ b/tests/transforms/label/test_sequential_labels.py @@ -1,4 +1,5 @@ import pytest + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_clamp.py b/tests/transforms/preprocessing/test_clamp.py index f58c88f32..73225167a 100644 --- a/tests/transforms/preprocessing/test_clamp.py +++ b/tests/transforms/preprocessing/test_clamp.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_contour.py b/tests/transforms/preprocessing/test_contour.py index 342dece68..ae13459fc 100644 --- a/tests/transforms/preprocessing/test_contour.py +++ b/tests/transforms/preprocessing/test_contour.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_copy_affine.py b/tests/transforms/preprocessing/test_copy_affine.py index 611da6ed9..6c11f6235 100644 --- a/tests/transforms/preprocessing/test_copy_affine.py +++ b/tests/transforms/preprocessing/test_copy_affine.py @@ -1,6 +1,7 @@ import numpy as np import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_crop.py b/tests/transforms/preprocessing/test_crop.py index e1d5194dc..9b60cb1a2 100644 --- a/tests/transforms/preprocessing/test_crop.py +++ b/tests/transforms/preprocessing/test_crop.py @@ -1,4 +1,5 @@ import torch + import torchio as tio from ...utils import TorchioTestCase diff --git 
a/tests/transforms/preprocessing/test_crop_pad.py b/tests/transforms/preprocessing/test_crop_pad.py index 90b1f1e79..bed573fbd 100644 --- a/tests/transforms/preprocessing/test_crop_pad.py +++ b/tests/transforms/preprocessing/test_crop_pad.py @@ -1,5 +1,6 @@ import numpy as np import pytest + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_ensure_shape_multiple.py b/tests/transforms/preprocessing/test_ensure_shape_multiple.py index f835585ee..2515ec04c 100644 --- a/tests/transforms/preprocessing/test_ensure_shape_multiple.py +++ b/tests/transforms/preprocessing/test_ensure_shape_multiple.py @@ -1,4 +1,5 @@ import pytest + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_histogram_standardization.py b/tests/transforms/preprocessing/test_histogram_standardization.py index 06a9da72e..425731d38 100644 --- a/tests/transforms/preprocessing/test_histogram_standardization.py +++ b/tests/transforms/preprocessing/test_histogram_standardization.py @@ -1,6 +1,7 @@ import numpy as np import pytest import torch + from torchio import LabelMap from torchio import ScalarImage from torchio import Subject diff --git a/tests/transforms/preprocessing/test_keep_largest.py b/tests/transforms/preprocessing/test_keep_largest.py index 300977688..4a86af150 100644 --- a/tests/transforms/preprocessing/test_keep_largest.py +++ b/tests/transforms/preprocessing/test_keep_largest.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_mask.py b/tests/transforms/preprocessing/test_mask.py index 23f8004d1..d7faa22a5 100644 --- a/tests/transforms/preprocessing/test_mask.py +++ b/tests/transforms/preprocessing/test_mask.py @@ -1,7 +1,8 @@ import pytest -import torchio as tio import torch +import torchio as tio + from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_one_hot.py b/tests/transforms/preprocessing/test_one_hot.py index 01d06f596..70fa08e11 100644 --- a/tests/transforms/preprocessing/test_one_hot.py +++ b/tests/transforms/preprocessing/test_one_hot.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_pad.py b/tests/transforms/preprocessing/test_pad.py index a07773675..2991dba09 100644 --- a/tests/transforms/preprocessing/test_pad.py +++ b/tests/transforms/preprocessing/test_pad.py @@ -1,5 +1,6 @@ import SimpleITK as sitk import torch + import torchio as tio from torchio.data.io import sitk_to_nib diff --git a/tests/transforms/preprocessing/test_resample.py b/tests/transforms/preprocessing/test_resample.py index fd8eca6f4..0d076015b 100644 --- a/tests/transforms/preprocessing/test_resample.py +++ b/tests/transforms/preprocessing/test_resample.py @@ -1,6 +1,7 @@ import numpy as np import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_rescale.py b/tests/transforms/preprocessing/test_rescale.py index b9d205a81..02f2b670d 100644 --- a/tests/transforms/preprocessing/test_rescale.py +++ b/tests/transforms/preprocessing/test_rescale.py @@ -3,6 +3,7 @@ import numpy as np import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_resize.py b/tests/transforms/preprocessing/test_resize.py index 61e865ae5..3774b8ca5 100644 
--- a/tests/transforms/preprocessing/test_resize.py +++ b/tests/transforms/preprocessing/test_resize.py @@ -1,6 +1,7 @@ import numpy as np import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_to_canonical.py b/tests/transforms/preprocessing/test_to_canonical.py index 73da5649d..a32be9bb3 100644 --- a/tests/transforms/preprocessing/test_to_canonical.py +++ b/tests/transforms/preprocessing/test_to_canonical.py @@ -1,5 +1,6 @@ import numpy as np import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/preprocessing/test_z_normalization.py b/tests/transforms/preprocessing/test_z_normalization.py index 3878e5fc2..0a0bf010a 100644 --- a/tests/transforms/preprocessing/test_z_normalization.py +++ b/tests/transforms/preprocessing/test_z_normalization.py @@ -1,5 +1,6 @@ import pytest import torch + import torchio as tio from ...utils import TorchioTestCase diff --git a/tests/transforms/test_invertibility.py b/tests/transforms/test_invertibility.py index 4d962d85d..bda8b3a4c 100644 --- a/tests/transforms/test_invertibility.py +++ b/tests/transforms/test_invertibility.py @@ -2,6 +2,7 @@ import warnings import torch + import torchio as tio from ..utils import TorchioTestCase diff --git a/tests/transforms/test_lambda_transform.py b/tests/transforms/test_lambda_transform.py index f0c2797dd..2c2d401ce 100644 --- a/tests/transforms/test_lambda_transform.py +++ b/tests/transforms/test_lambda_transform.py @@ -1,5 +1,6 @@ import pytest import torch + from torchio import LABEL from torchio.transforms import Lambda diff --git a/tests/transforms/test_transforms.py b/tests/transforms/test_transforms.py index 4c424c0e1..93b5f4b05 100644 --- a/tests/transforms/test_transforms.py +++ b/tests/transforms/test_transforms.py @@ -5,6 +5,7 @@ import pytest import SimpleITK as sitk import torch + import torchio as tio from ..utils import TorchioTestCase diff --git a/tests/utils.py b/tests/utils.py index fe50dbdcc..989c8c2e6 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -12,6 +12,7 @@ import numpy as np import pytest import torch + import torchio as tio diff --git a/tox.ini b/tox.ini index 486bcb00b..d3d5ad318 100644 --- a/tox.ini +++ b/tox.ini @@ -23,6 +23,12 @@ skip_install = True deps = ruff commands = ruff check +[testenv:format] +description = Run code formatter +skip_install = True +deps = ruff +commands = ruff format --diff + [testenv:types] deps = mypy diff --git a/tutorials/example_heteromodal.py b/tutorials/example_heteromodal.py index 4a3fa06a3..f3967b504 100644 --- a/tutorials/example_heteromodal.py +++ b/tutorials/example_heteromodal.py @@ -8,6 +8,7 @@ import logging import torch.nn as nn + import torchio as tio from torchio import LabelMap from torchio import Queue
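
Note for reviewers skimming the hunks above: almost every change follows one of three patterns. Combined imports are split one per line and regrouped (standard library, third party, then torchio itself, separated by a blank line); relative imports inside the transforms package now point at the defining module (for example ...intensity_transform, ...spatial_transform, ...fourier) instead of a package __init__; and the `# noqa: B950` line-length suppressions that the previous formatting required are dropped, since the reformatted lines no longer need them. The tox.ini hunk adds a `format` environment whose command is `ruff format --diff`, alongside the existing `ruff check` lint environment, so `tox -e format` is the expected way to check formatting locally. The snippet below is a minimal, hypothetical sketch of the resulting import and usage style, not code taken from this patch; it only uses torchio names that already appear in the diff (ScalarImage, Subject, RescaleIntensity).

# Before: imports combined on one line, long lines silenced with noqa
# from torchio.data import LabelMap, ScalarImage, Subject
# import torchio as tio, torch  # noqa: B950

# After: one import per line, grouped and sorted; no line-length suppressions
import torch

import torchio as tio
from torchio.data import ScalarImage
from torchio.data import Subject

# Tiny usage example to show the snippet is self-contained
image = ScalarImage(tensor=torch.rand(1, 8, 8, 8))
subject = Subject(t1=image)
transform = tio.RescaleIntensity((0, 255))
transformed = transform(subject)
print(transformed.t1.data.max())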