From 9150afcdc9539e9e358adef02d21f56d9d9b0924 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fernando=20P=C3=A9rez-Garc=C3=ADa?= Date: Tue, 5 Jul 2022 00:02:11 +0200 Subject: [PATCH] Fix mypy errors and add mypy check to CI (#914) --- .github/workflows/lint.yml | 27 ------- .mypy.ini | 13 +++- .pre-commit-config.yaml | 2 +- src/torchio/data/dataset.py | 8 ++- src/torchio/data/image.py | 39 ++++++---- src/torchio/data/inference/aggregator.py | 14 ++-- src/torchio/data/io.py | 32 ++++++--- src/torchio/data/sampler/grid.py | 37 +++++----- src/torchio/data/sampler/label.py | 7 +- src/torchio/data/sampler/sampler.py | 24 +++---- src/torchio/data/sampler/uniform.py | 11 ++- src/torchio/data/sampler/weighted.py | 28 +++++--- src/torchio/data/subject.py | 19 ++--- src/torchio/datasets/bite.py | 6 +- src/torchio/datasets/rsna_miccai.py | 3 +- src/torchio/datasets/visible_human.py | 6 +- .../transforms/augmentation/composition.py | 8 ++- .../intensity/random_bias_field.py | 14 ++-- .../augmentation/intensity/random_blur.py | 8 ++- .../augmentation/intensity/random_gamma.py | 14 ++-- .../augmentation/intensity/random_ghosting.py | 27 ++++--- .../intensity/random_labels_to_image.py | 38 ++++++---- .../augmentation/intensity/random_motion.py | 20 ++++-- .../augmentation/intensity/random_noise.py | 18 +++-- .../augmentation/intensity/random_spike.py | 16 +++-- .../augmentation/intensity/random_swap.py | 72 +++++++++++-------- .../augmentation/random_transform.py | 12 ++-- .../augmentation/spatial/random_affine.py | 17 +++-- .../augmentation/spatial/random_anisotropy.py | 12 ++-- .../spatial/random_elastic_deformation.py | 15 ++-- .../augmentation/spatial/random_flip.py | 1 + src/torchio/transforms/data_parser.py | 4 +- src/torchio/transforms/intensity_transform.py | 2 +- src/torchio/transforms/lambda_transform.py | 2 +- .../preprocessing/intensity/clamp.py | 3 +- .../intensity/histogram_standardization.py | 39 +++++----- .../preprocessing/intensity/mask.py | 9 ++- .../preprocessing/intensity/rescale.py | 4 +- .../intensity/z_normalization.py | 8 ++- .../transforms/preprocessing/label/one_hot.py | 2 +- .../preprocessing/label/remap_labels.py | 2 +- .../preprocessing/label/remove_labels.py | 2 +- .../preprocessing/spatial/copy_affine.py | 2 +- .../transforms/preprocessing/spatial/crop.py | 3 +- .../preprocessing/spatial/crop_or_pad.py | 25 ++++--- .../spatial/ensure_shape_multiple.py | 10 ++- .../transforms/preprocessing/spatial/pad.py | 10 +-- .../preprocessing/spatial/resample.py | 14 ++-- .../preprocessing/spatial/resize.py | 8 ++- src/torchio/transforms/transform.py | 36 +++++----- src/torchio/typing.py | 7 +- src/torchio/utils.py | 24 ++++--- src/torchio/visualization.py | 5 +- tox.ini | 27 ++----- 54 files changed, 474 insertions(+), 342 deletions(-) delete mode 100644 .github/workflows/lint.yml diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml deleted file mode 100644 index b211fe961..000000000 --- a/.github/workflows/lint.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Lint - -on: [push, pull_request] - -jobs: - Lint: - runs-on: ubuntu-latest - - steps: - - name: Clone the repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set up Python 3.10 - uses: actions/setup-python@v4 - with: - python-version: "3.10" - - - name: Upgrade pip - run: python -m pip install --upgrade pip - - - name: Install dependencies - run: pip install tox pre-commit flake8 - - - name: Run lint - run: tox -e lint diff --git a/.mypy.ini b/.mypy.ini index aadb4fe8a..e32c257e1 100644 --- a/.mypy.ini 
+++ b/.mypy.ini @@ -1,13 +1,22 @@ [mypy] +[mypy-PIL.*] +ignore_missing_imports = True + [mypy-SimpleITK.*] ignore_missing_imports = True +[mypy-duecredit.*] +ignore_missing_imports = True + +[mypy-matplotlib.*] +ignore_missing_imports = True + [mypy-nibabel.*] ignore_missing_imports = True -[mypy-tqdm.*] +[mypy-scipy.*] ignore_missing_imports = True -[mypy-PIL.*] +[mypy-tqdm.*] ignore_missing_imports = True diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d04a849dc..eb2b378f3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -61,4 +61,4 @@ repos: rev: v2.34.0 hooks: - id: pyupgrade - args: ['--py37-plus'] + args: ['--py37-plus', '--keep-runtime-typing'] diff --git a/src/torchio/data/dataset.py b/src/torchio/data/dataset.py index 0bce27d9c..9ba5a7f6a 100644 --- a/src/torchio/data/dataset.py +++ b/src/torchio/data/dataset.py @@ -1,5 +1,7 @@ +from __future__ import annotations + import copy -from typing import Sequence, Optional, Callable, Iterable, Dict +from typing import Sequence, Optional, Callable, Iterable, List from torch.utils.data import Dataset @@ -94,14 +96,14 @@ def __getitem__(self, index: int) -> Subject: return subject @classmethod - def from_batch(cls: 'SubjectsDataset', batch: Dict) -> 'SubjectsDataset': + def from_batch(cls, batch: dict) -> SubjectsDataset: """Instantiate a dataset from a batch generated by a data loader. Args: batch: Dictionary generated by a data loader, containing data that can be converted to instances of :class:`~.torchio.Subject`. """ - subjects = get_subjects_from_batch(batch) + subjects: List[Subject] = get_subjects_from_batch(batch) return cls(subjects) def dry_iter(self): diff --git a/src/torchio/data/image.py b/src/torchio/data/image.py index 880c94e2e..eafa7011f 100644 --- a/src/torchio/data/image.py +++ b/src/torchio/data/image.py @@ -16,6 +16,7 @@ TypeData, TypeDataAffine, TypeTripletInt, + TypeQuartetInt, TypeTripletFloat, TypeDirection3D, ) @@ -235,6 +236,8 @@ def affine(self) -> np.ndarray: if self._loaded or self._is_dir() or self._is_multipath(): affine = self[AFFINE] else: + assert self.path is not None + assert isinstance(self.path, (str, Path)) affine = read_affine(self.path) return affine @@ -247,13 +250,18 @@ def type(self) -> str: # noqa: A003 return self[TYPE] @property - def shape(self) -> Tuple[int, int, int, int]: + def shape(self) -> TypeQuartetInt: """Tensor shape as :math:`(C, W, H, D)`.""" custom_reader = self.reader is not read_image - multipath = not isinstance(self.path, (str, Path)) - if self._loaded or custom_reader or multipath or self.path.is_dir(): - shape = tuple(self.data.shape) + multipath = self._is_multipath() + if isinstance(self.path, Path): + is_dir = self.path.is_dir() + shape: TypeQuartetInt + if self._loaded or custom_reader or multipath or is_dir: + channels, si, sj, sk = self.data.shape + shape = channels, si, sj, sk else: + assert isinstance(self.path, (str, Path)) shape = read_shape(self.path) return shape @@ -289,18 +297,20 @@ def direction(self) -> TypeDirection3D: _, _, direction = get_sitk_metadata_from_ras_affine( self.affine, lps=False, ) - return direction + return direction # type: ignore[return-value] @property def spacing(self) -> Tuple[float, float, float]: """Voxel spacing in mm.""" _, spacing = get_rotation_and_spacing_from_affine(self.affine) - return tuple(spacing) + sx, sy, sz = spacing + return sx, sy, sz @property def origin(self) -> Tuple[float, float, float]: """Center of first voxel in array, in mm.""" - return tuple(self.affine[:3, 3]) + ox, 
oy, oz = self.affine[:3, 3] + return ox, oy, oz @property def itemsize(self): @@ -421,7 +431,7 @@ def _parse_single_path( def _parse_path( self, - path: Union[TypePath, Sequence[TypePath], None], + path: Optional[Union[TypePath, Sequence[TypePath]]], ) -> Optional[Union[Path, List[Path]]]: if path is None: return None @@ -429,9 +439,9 @@ def _parse_path( # https://github.com/fepegar/torchio/pull/838 raise TypeError('The path argument cannot be a dictionary') elif self._is_paths_sequence(path): - return [self._parse_single_path(p) for p in path] + return [self._parse_single_path(p) for p in path] # type: ignore[union-attr] # noqa: E501 else: - return self._parse_single_path(path) + return self._parse_single_path(path) # type: ignore[arg-type] def _parse_tensor( self, @@ -510,7 +520,12 @@ def load(self) -> None: """ if self._loaded: return - paths = self.path if self._is_multipath() else [self.path] + + paths: List[Path] + if self._is_multipath(): + paths = self.path # type: ignore[assignment] + else: + paths = [self.path] # type: ignore[list-item] tensor, affine = self.read_and_check(paths[0]) tensors = [tensor] for path in paths[1:]: @@ -786,7 +801,7 @@ def __init__(self, *args, **kwargs): def count_nonzero(self) -> int: """Get the number of voxels that are not 0.""" - return self.data.count_nonzero().item() + return int(self.data.count_nonzero().item()) def count_labels(self) -> Dict[int, int]: """Get the number of voxels in each label.""" diff --git a/src/torchio/data/inference/aggregator.py b/src/torchio/data/inference/aggregator.py index f0fca7de0..9a8a7b48a 100644 --- a/src/torchio/data/inference/aggregator.py +++ b/src/torchio/data/inference/aggregator.py @@ -1,5 +1,5 @@ import warnings -from typing import Tuple +from typing import Optional, Tuple import torch import numpy as np @@ -32,11 +32,11 @@ def __init__(self, sampler: GridSampler, overlap_mode: str = 'crop'): subject = sampler.subject self.volume_padded = sampler.padding_mode is not None self.spatial_shape = subject.spatial_shape - self._output_tensor = None + self._output_tensor: Optional[torch.Tensor] = None self.patch_overlap = sampler.patch_overlap self._parse_overlap_mode(overlap_mode) self.overlap_mode = overlap_mode - self._avgmask_tensor = None + self._avgmask_tensor: Optional[torch.Tensor] = None @staticmethod def _parse_overlap_mode(overlap_mode): @@ -127,6 +127,7 @@ def add_batch( ) raise RuntimeError(message) self._initialize_output_tensor(batch) + assert isinstance(self._output_tensor, torch.Tensor) if self.overlap_mode == 'crop': for patch, location in zip(batch, locations): cropped_patch, new_location = self._crop_patch( @@ -143,6 +144,7 @@ def add_batch( ] = cropped_patch elif self.overlap_mode == 'average': self._initialize_avgmask_tensor(batch) + assert isinstance(self._avgmask_tensor, torch.Tensor) for patch, location in zip(batch, locations): i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location self._output_tensor[ @@ -160,6 +162,7 @@ def add_batch( def get_output_tensor(self) -> torch.Tensor: """Get the aggregated volume after dense inference.""" + assert isinstance(self._output_tensor, torch.Tensor) if self._output_tensor.dtype == torch.int64: message = ( 'Medical image frameworks such as ITK do not support int64.' 
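# The aggregator hunks above show a pattern this patch applies throughout:
# attributes that start out as None are annotated Optional[...] and then
# narrowed with an isinstance assert before use. A minimal standalone sketch
# of that pattern (the class and names are illustrative, not from the patch):
from typing import Optional

import torch


class LazyAccumulator:
    def __init__(self) -> None:
        # Allocated lazily, on the first batch, hence Optional
        self._output_tensor: Optional[torch.Tensor] = None

    def _initialize(self, batch: torch.Tensor) -> None:
        if self._output_tensor is None:
            self._output_tensor = torch.zeros_like(batch)

    def add(self, batch: torch.Tensor) -> None:
        self._initialize(batch)
        # mypy cannot infer that _initialize() always leaves a tensor behind,
        # so the Optional is narrowed explicitly before attribute access
        assert isinstance(self._output_tensor, torch.Tensor)
        self._output_tensor += batch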
@@ -168,6 +171,7 @@ def get_output_tensor(self) -> torch.Tensor: warnings.warn(message, RuntimeWarning) self._output_tensor = self._output_tensor.type(torch.int32) if self.overlap_mode == 'average': + assert isinstance(self._avgmask_tensor, torch.Tensor) # true_divide is used instead of / in case the PyTorch version is # old and one of the operands is int: # https://github.com/fepegar/torchio/issues/526 @@ -180,7 +184,7 @@ def get_output_tensor(self) -> torch.Tensor: from ...transforms import Crop border = self.patch_overlap // 2 cropping = border.repeat(2) - crop = Crop(cropping) - return crop(output) + crop = Crop(cropping) # type: ignore[arg-type] + return crop(output) # type: ignore[return-value] else: return output diff --git a/src/torchio/data/io.py b/src/torchio/data/io.py index b4b959462..1ba45d36a 100644 --- a/src/torchio/data/io.py +++ b/src/torchio/data/io.py @@ -14,6 +14,9 @@ TypeDataAffine, TypeDirection, TypeTripletFloat, + TypeDoubletInt, + TypeTripletInt, + TypeQuartetInt, ) @@ -87,18 +90,28 @@ def _read_dicom(directory: TypePath): return image -def read_shape(path: TypePath) -> Tuple[int, int, int, int]: +def read_shape(path: TypePath) -> TypeQuartetInt: reader = sitk.ImageFileReader() reader.SetFileName(str(path)) reader.ReadImageInformation() num_channels = reader.GetNumberOfComponents() - spatial_shape = reader.GetSize() num_dimensions = reader.GetDimension() + assert 2 <= num_dimensions <= 4 if num_dimensions == 2: - spatial_shape = *spatial_shape, 1 - elif num_dimensions == 4: # assume bad NIfTI - *spatial_shape, num_channels = spatial_shape - shape = (num_channels,) + tuple(spatial_shape) + spatial_shape_2d: TypeDoubletInt = reader.GetSize() + assert len(spatial_shape_2d) == 2 + si, sj = spatial_shape_2d + sk = 1 + elif num_dimensions == 4: + # We assume bad NIfTI file (channels encoded as spatial dimension) + spatial_shape_4d: TypeQuartetInt = reader.GetSize() + assert len(spatial_shape_4d) == 4 + si, sj, sk, num_channels = spatial_shape_4d + elif num_dimensions == 3: + spatial_shape_3d: TypeTripletInt = reader.GetSize() + assert len(spatial_shape_3d) == 3 + si, sj, sk = spatial_shape_3d + shape = num_channels, si, sj, sk return shape @@ -130,7 +143,7 @@ def write_image( def _write_nibabel( - tensor: TypeData, + tensor: torch.Tensor, affine: TypeData, path: TypePath, ) -> None: @@ -384,10 +397,11 @@ def get_sitk_metadata_from_ras_affine( origin_array = origin_lps if lps else origin_ras direction_array = direction_lps if lps else direction_ras direction_array = direction_array.flatten() - # The following are to comply with typing hints - # (there must be prettier ways to do this) + # The following are to comply with mypy + # (although there must be prettier ways to do this) ox, oy, oz = origin_array sx, sy, sz = spacing_array + direction: TypeDirection if is_2d: d1, d2, d3, d4 = direction_array direction = d1, d2, d3, d4 diff --git a/src/torchio/data/sampler/grid.py b/src/torchio/data/sampler/grid.py index 6c8ea71b9..b7e4146b8 100644 --- a/src/torchio/data/sampler/grid.py +++ b/src/torchio/data/sampler/grid.py @@ -73,6 +73,7 @@ def __init__( self.padding_mode = padding_mode if subject is not None and not isinstance(subject, Subject): raise ValueError('The subject argument must be None or Subject') + assert subject is not None self.subject = self._pad(subject) self.locations = self._compute_locations(self.subject) @@ -91,25 +92,25 @@ def _pad(self, subject: Subject) -> Subject: from ...transforms import Pad border = self.patch_overlap // 2 padding = border.repeat(2) -
pad = Pad(padding, padding_mode=self.padding_mode) - subject = pad(subject) + pad = Pad(padding, padding_mode=self.padding_mode) # type: ignore[arg-type] # noqa: E501 + subject = pad(subject) # type: ignore[assignment] return subject def _compute_locations(self, subject: Subject): if subject is None: return None sizes = subject.spatial_shape, self.patch_size, self.patch_overlap - self._parse_sizes(*sizes) - return self._get_patches_locations(*sizes) + self._parse_sizes(*sizes) # type: ignore[arg-type] + return self._get_patches_locations(*sizes) # type: ignore[arg-type] - def _generate_patches( + def _generate_patches( # type: ignore[override] self, subject: Subject, ) -> Generator[Subject, None, None]: subject = self._pad(subject) sizes = subject.spatial_shape, self.patch_size, self.patch_overlap - self._parse_sizes(*sizes) - locations = self._get_patches_locations(*sizes) + self._parse_sizes(*sizes) # type: ignore[arg-type] + locations = self._get_patches_locations(*sizes) # type: ignore[arg-type] # noqa: E501 for location in locations: index_ini = location[:3] yield self.extract_patch(subject, index_ini) @@ -120,25 +121,25 @@ def _parse_sizes( patch_size: TypeTripletInt, patch_overlap: TypeTripletInt, ) -> None: - image_size = np.array(image_size) - patch_size = np.array(patch_size) - patch_overlap = np.array(patch_overlap) - if np.any(patch_size > image_size): + image_size_array = np.array(image_size) + patch_size_array = np.array(patch_size) + patch_overlap_array = np.array(patch_overlap) + if np.any(patch_size_array > image_size_array): message = ( - f'Patch size {tuple(patch_size)} cannot be' - f' larger than image size {tuple(image_size)}' + f'Patch size {tuple(patch_size_array)} cannot be' + f' larger than image size {tuple(image_size_array)}' ) raise ValueError(message) - if np.any(patch_overlap >= patch_size): + if np.any(patch_overlap_array >= patch_size_array): message = ( - f'Patch overlap {tuple(patch_overlap)} must be smaller' - f' than patch size {tuple(patch_size)}' + f'Patch overlap {tuple(patch_overlap_array)} must be smaller' + f' than patch size {tuple(patch_size_array)}' ) raise ValueError(message) - if np.any(patch_overlap % 2): + if np.any(patch_overlap_array % 2): message = ( 'Patch overlap must be a tuple of even integers,' - f' not {tuple(patch_overlap)}' + f' not {tuple(patch_overlap_array)}' ) raise ValueError(message) diff --git a/src/torchio/data/sampler/label.py b/src/torchio/data/sampler/label.py index d7c5975b9..0cfdff4de 100644 --- a/src/torchio/data/sampler/label.py +++ b/src/torchio/data/sampler/label.py @@ -2,8 +2,9 @@ import torch import numpy as np +import numpy.typing as npt -from ...data.image import LabelMap +from ...data.image import Image from ...data.subject import Subject from ...typing import TypeSpatialShape from ...constants import TYPE, LABEL @@ -66,7 +67,7 @@ def __init__( super().__init__(patch_size, probability_map=label_name) self.label_probabilities_dict = label_probabilities - def get_probability_map_image(self, subject: Subject) -> LabelMap: + def get_probability_map_image(self, subject: Subject) -> Image: if self.probability_map_name is None: for image in subject.get_images(intensity_only=False): if image[TYPE] == LABEL: @@ -105,7 +106,7 @@ def get_probability_map(self, subject: Subject) -> torch.Tensor: def get_probabilities_from_label_map( label_map: torch.Tensor, label_probabilities_dict: Dict[int, float], - patch_size: np.ndarray, + patch_size: npt.NDArray[np.uint], ) -> torch.Tensor: """Create probability map according to 
label map probabilities.""" patch_size = patch_size.astype(int) diff --git a/src/torchio/data/sampler/sampler.py b/src/torchio/data/sampler/sampler.py index a090a0ea4..15af902fb 100644 --- a/src/torchio/data/sampler/sampler.py +++ b/src/torchio/data/sampler/sampler.py @@ -37,7 +37,7 @@ def extract_patch( subject: Subject, index_ini: TypeTripletInt, ) -> Subject: - cropped_subject = self.crop(subject, index_ini, self.patch_size) + cropped_subject = self.crop(subject, index_ini, self.patch_size) # type: ignore[arg-type] # noqa: E501 return cropped_subject def crop( @@ -48,10 +48,10 @@ def crop( ) -> Subject: transform = self._get_crop_transform(subject, index_ini, patch_size) cropped_subject = transform(subject) - index_ini = np.asarray(index_ini) - patch_size = np.asarray(patch_size) - index_fin = index_ini + patch_size - location = index_ini.tolist() + index_fin.tolist() + index_ini_array = np.asarray(index_ini) + patch_size_array = np.asarray(patch_size) + index_fin = index_ini_array + patch_size_array + location = index_ini_array.tolist() + index_fin.tolist() cropped_subject[LOCATION] = torch.as_tensor(location) cropped_subject.update_attributes() return cropped_subject @@ -64,16 +64,16 @@ def _get_crop_transform( ): from ...transforms.preprocessing.spatial.crop import Crop shape = np.array(subject.spatial_shape, dtype=np.uint16) - index_ini = np.array(index_ini, dtype=np.uint16) - patch_size = np.array(patch_size, dtype=np.uint16) - assert len(index_ini) == 3 - assert len(patch_size) == 3 - index_fin = index_ini + patch_size - crop_ini = index_ini.tolist() + index_ini_array = np.array(index_ini, dtype=np.uint16) + patch_size_array = np.array(patch_size, dtype=np.uint16) + assert len(index_ini_array) == 3 + assert len(patch_size_array) == 3 + index_fin = index_ini_array + patch_size_array + crop_ini = index_ini_array.tolist() crop_fin = (shape - index_fin).tolist() start = () cropping = sum(zip(crop_ini, crop_fin), start) - return Crop(cropping) + return Crop(cropping) # type: ignore[arg-type] def __call__( self, diff --git a/src/torchio/data/sampler/uniform.py b/src/torchio/data/sampler/uniform.py index 69052f9df..9f2074f97 100644 --- a/src/torchio/data/sampler/uniform.py +++ b/src/torchio/data/sampler/uniform.py @@ -2,7 +2,6 @@ from ...data.subject import Subject from .sampler import RandomSampler from typing import Generator -import numpy as np class UniformSampler(RandomSampler): @@ -23,11 +22,11 @@ def _generate_patches( valid_range = subject.spatial_shape - self.patch_size patches_left = num_patches if num_patches is not None else True while patches_left: - index_ini = [ - torch.randint(x + 1, (1,)).item() + i, j, k = tuple( + int(torch.randint(x + 1, (1,)).item()) for x in valid_range - ] - index_ini_array = np.asarray(index_ini) - yield self.extract_patch(subject, index_ini_array) + ) + index_ini = i, j, k + yield self.extract_patch(subject, index_ini) if num_patches is not None: patches_left -= 1 diff --git a/src/torchio/data/sampler/weighted.py b/src/torchio/data/sampler/weighted.py index 855af4709..649e7697c 100644 --- a/src/torchio/data/sampler/weighted.py +++ b/src/torchio/data/sampler/weighted.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple, Generator +from typing import Optional, Generator import torch import numpy as np @@ -51,7 +51,7 @@ class WeightedSampler(RandomSampler): def __init__( self, patch_size: TypeSpatialShape, - probability_map: str, + probability_map: Optional[str], ): super().__init__(patch_size) self.probability_map_name = probability_map @@ -63,18 
+63,19 @@ def _generate_patches( num_patches: Optional[int] = None, ) -> Generator[Subject, None, None]: probability_map = self.get_probability_map(subject) - probability_map = self.process_probability_map( + probability_map_array = self.process_probability_map( probability_map, subject, ) - cdf = self.get_cumulative_distribution_function(probability_map) + cdf = self.get_cumulative_distribution_function(probability_map_array) patches_left = num_patches if num_patches is not None else True while patches_left: - yield self.extract_patch(subject, probability_map, cdf) + yield self.extract_patch(subject, probability_map_array, cdf) if num_patches is not None: patches_left -= 1 def get_probability_map_image(self, subject: Subject) -> Image: + assert self.probability_map_name is not None if self.probability_map_name in subject: return subject[self.probability_map_name] else: @@ -121,7 +122,7 @@ def process_probability_map( @staticmethod def clear_probability_borders( probability_map: np.ndarray, - patch_size: TypeSpatialShape, + patch_size: np.ndarray, ) -> None: # Set probability to 0 on voxels that wouldn't possibly be sampled # given the current patch size @@ -165,21 +166,28 @@ def clear_probability_borders( @staticmethod def get_cumulative_distribution_function( probability_map: np.ndarray, - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> np.ndarray: """Return the cumulative distribution function of a probability map.""" flat_map = probability_map.flatten() flat_map_normalized = flat_map / flat_map.sum() cdf = np.cumsum(flat_map_normalized) return cdf - def extract_patch( + def extract_patch( # type: ignore[override] self, subject: Subject, probability_map: np.ndarray, cdf: np.ndarray, ) -> Subject: - index_ini = self.get_random_index_ini(probability_map, cdf) - cropped_subject = self.crop(subject, index_ini, self.patch_size) + i, j, k = self.get_random_index_ini(probability_map, cdf) + index_ini = i, j, k + si, sj, sk = self.patch_size + patch_size = si, sj, sk + cropped_subject = self.crop( + subject, + index_ini, + patch_size, + ) return cropped_subject def get_random_index_ini( diff --git a/src/torchio/data/subject.py b/src/torchio/data/subject.py index 02f429b48..c1b34203a 100644 --- a/src/torchio/data/subject.py +++ b/src/torchio/data/subject.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy import pprint from typing import ( @@ -57,7 +59,7 @@ def __init__(self, *args, **kwargs: Dict[str, Any]): super().__init__(**kwargs) self._parse_images(self.get_images(intensity_only=False)) self.update_attributes() # this allows me to do e.g. 
subject.t1 - self.applied_transforms = [] + self.applied_transforms: List[Tuple[str, dict]] = [] def __repr__(self): num_images = len(self.get_images(intensity_only=False)) @@ -74,7 +76,7 @@ def __len__(self): return len(self.get_images(intensity_only=False)) @staticmethod - def _parse_images(images: List[Tuple[str, Image]]) -> None: + def _parse_images(images: List[Image]) -> None: # Check that it's not empty if not images: raise TypeError('A subject without images cannot be created') @@ -141,7 +143,7 @@ def get_applied_transforms( self, ignore_intensity: bool = False, image_interpolation: Optional[str] = None, - ) -> List['Transform']: + ) -> List[Transform]: from ..transforms.transform import Transform from ..transforms.intensity_transform import IntensityTransform name_to_transform = { @@ -164,7 +166,7 @@ def get_composed_history( self, ignore_intensity: bool = False, image_interpolation: Optional[str] = None, - ) -> 'Compose': + ) -> Compose: from ..transforms.augmentation.composition import Compose transforms = self.get_applied_transforms( ignore_intensity=ignore_intensity, @@ -177,7 +179,7 @@ def get_inverse_transform( warn: bool = True, ignore_intensity: bool = True, image_interpolation: Optional[str] = None, - ) -> 'Compose': + ) -> Compose: """Get a reversed list of the inverses of the applied transforms. Args: @@ -195,7 +197,7 @@ def get_inverse_transform( inverse_transform = history_transform.inverse(warn=warn) return inverse_transform - def apply_inverse_transform(self, **kwargs) -> 'Subject': + def apply_inverse_transform(self, **kwargs) -> Subject: """Try to apply the inverse of all applied transforms, in reverse order. Args: @@ -203,7 +205,8 @@ def apply_inverse_transform(self, **kwargs) -> 'Subject': :meth:`~torchio.data.subject.Subject.get_inverse_transform`. """ inverse_transform = self.get_inverse_transform(**kwargs) - transformed = inverse_transform(self) + transformed: Subject + transformed = inverse_transform(self) # type: ignore[assignment] transformed.clear_history() return transformed @@ -351,7 +354,7 @@ def get_first_image(self) -> Image: def add_transform( self, - transform: 'Transform', + transform: Transform, parameters_dict: dict, ) -> None: self.applied_transforms.append((transform.name, parameters_dict)) diff --git a/src/torchio/datasets/bite.py b/src/torchio/datasets/bite.py index f7ae99b30..8c4d738d2 100644 --- a/src/torchio/datasets/bite.py +++ b/src/torchio/datasets/bite.py @@ -1,11 +1,11 @@ import abc from pathlib import Path -from typing import Optional +from typing import Optional, Dict from ..typing import TypePath from ..transforms import Transform from ..download import download_and_extract_archive -from .. import SubjectsDataset, Subject, ScalarImage, LabelMap +from .. 
import SubjectsDataset, Subject, ScalarImage, LabelMap, Image class BITE(SubjectsDataset, abc.ABC): @@ -75,7 +75,7 @@ def _get_subjects_list(self, root: Path): subject_dir = subjects_dir / subject_id preop_path = subject_dir / f'{subject_id}_preop_mri.mnc' postop_path = subject_dir / f'{subject_id}_postop_mri.mnc' - images_dict = {} + images_dict: Dict[str, Image] = {} images_dict['preop'] = ScalarImage(preop_path) images_dict['postop'] = ScalarImage(postop_path) for fp in subject_dir.glob('*tumor*'): diff --git a/src/torchio/datasets/rsna_miccai.py b/src/torchio/datasets/rsna_miccai.py index 80604451b..1e9301e7f 100644 --- a/src/torchio/datasets/rsna_miccai.py +++ b/src/torchio/datasets/rsna_miccai.py @@ -1,7 +1,7 @@ import csv import warnings from pathlib import Path -from typing import List, Sequence +from typing import List, Sequence, Dict, Union from ..typing import TypePath from .. import SubjectsDataset, Subject, ScalarImage @@ -97,6 +97,7 @@ def _get_subjects( int(subject_id) except ValueError: continue + images_dict: Dict[str, Union[str, int, ScalarImage]] images_dict = {self.id_key: subject_dir.name} if train and labels_dict: images_dict[self.label_key] = labels_dict[subject_id] diff --git a/src/torchio/datasets/visible_human.py b/src/torchio/datasets/visible_human.py index 88a2dcdb3..3a4ac46d2 100644 --- a/src/torchio/datasets/visible_human.py +++ b/src/torchio/datasets/visible_human.py @@ -1,5 +1,6 @@ import abc import tempfile +from typing import Tuple from ..data.subject import _RawSubjectCopySubject from .. import ScalarImage @@ -10,6 +11,7 @@ class VisibleHuman(abc.ABC, _RawSubjectCopySubject): URL = 'https://mri.radiology.uiowa.edu/website_documents/visible_human_tar_files/{}{}.tar.gz' # noqa: E501, FS003 + PARTS: Tuple[str, ...] def __init__(self, part: str): self.part = self._parse_part(part) @@ -37,9 +39,9 @@ def cache_part_dir(self): def url(self): return self.URL.format(self.PREFIX, self.part) - def _parse_part(self, part: str) -> None: + def _parse_part(self, part: str) -> str: part_capital = part.capitalize() - if part_capital not in self.PARTS: + if part_capital not in self.PARTS: # type: ignore[assignment] message = f'Part "{part}" not in available parts: {self.PARTS}' raise ValueError(message) return part_capital diff --git a/src/torchio/transforms/augmentation/composition.py b/src/torchio/transforms/augmentation/composition.py index b85893c04..3c8dee399 100644 --- a/src/torchio/transforms/augmentation/composition.py +++ b/src/torchio/transforms/augmentation/composition.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import warnings from typing import Union, Sequence, Dict @@ -44,13 +46,13 @@ def __repr__(self) -> str: def apply_transform(self, subject: Subject) -> Subject: for transform in self.transforms: - subject = transform(subject) + subject = transform(subject) # type: ignore[assignment] return subject def is_invertible(self) -> bool: return all(t.is_invertible() for t in self.transforms) - def inverse(self, warn: bool = True) -> Transform: + def inverse(self, warn: bool = True) -> Compose: """Return a composed transform with inverted order and transforms. 
Args: @@ -107,7 +109,7 @@ def apply_transform(self, subject: Subject) -> Subject: transforms = list(self.transforms_dict.keys()) transform = transforms[index] transformed = transform(subject) - return transformed + return transformed # type: ignore[return-value] def _get_transforms_dict( self, diff --git a/src/torchio/transforms/augmentation/intensity/random_bias_field.py b/src/torchio/transforms/augmentation/intensity/random_bias_field.py index 09488adb2..253271dee 100644 --- a/src/torchio/transforms/augmentation/intensity/random_bias_field.py +++ b/src/torchio/transforms/augmentation/intensity/random_bias_field.py @@ -46,7 +46,7 @@ def __init__( self.order = _parse_order(order) def apply_transform(self, subject: Subject) -> Subject: - arguments = defaultdict(dict) + arguments: Dict[str, dict] = defaultdict(dict) for image_name in self.get_images_dict(subject): coefficients = self.get_params( self.order, @@ -56,6 +56,7 @@ def apply_transform(self, subject: Subject) -> Subject: arguments['order'][image_name] = self.order transform = BiasField(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed def get_params( @@ -69,8 +70,8 @@ def get_params( for x_order in range(0, order + 1): for y_order in range(0, order + 1 - x_order): for _ in range(0, order + 1 - (x_order + y_order)): - number = self.sample_uniform(*coefficients_range) - random_coefficients.append(number.item()) + sample = self.sample_uniform(*coefficients_range) + random_coefficients.append(sample) return random_coefficients @@ -93,7 +94,7 @@ def __init__( self.coefficients = coefficients self.order = order self.invert_transform = False - self.args_names = 'coefficients', 'order' + self.args_names = ['coefficients', 'order'] def arguments_are_dict(self): coefficients_dict = isinstance(self.coefficients, dict) @@ -107,9 +108,12 @@ def apply_transform(self, subject: Subject) -> Subject: coefficients, order = self.coefficients, self.order for name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): + assert isinstance(self.coefficients, dict) + assert isinstance(self.order, dict) coefficients, order = self.coefficients[name], self.order[name] + assert isinstance(order, int) bias_field = self.generate_bias_field( - image.data, order, coefficients, + image.data, order, coefficients, # type: ignore[arg-type] ) if self.invert_transform: np.divide(1, bias_field, out=bias_field) diff --git a/src/torchio/transforms/augmentation/intensity/random_blur.py b/src/torchio/transforms/augmentation/intensity/random_blur.py index dad7875c4..a85d8ba57 100644 --- a/src/torchio/transforms/augmentation/intensity/random_blur.py +++ b/src/torchio/transforms/augmentation/intensity/random_blur.py @@ -38,12 +38,13 @@ def __init__( self.std_ranges = self.parse_params(std, None, 'std', min_constraint=0) def apply_transform(self, subject: Subject) -> Subject: - arguments = defaultdict(dict) + arguments: Dict[str, dict] = defaultdict(dict) for name in self.get_images_dict(subject): std = self.get_params(self.std_ranges) arguments['std'][name] = std transform = Blur(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed def get_params(self, std_ranges: TypeSextetFloat) -> TypeTripletFloat: @@ -68,14 +69,15 @@ def __init__( ): super().__init__(**kwargs) self.std = std - self.args_names = ('std',) + self.args_names = ['std'] def apply_transform(self, subject: Subject) -> Subject: stds 
= self.std for name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): + assert isinstance(self.std, dict) stds = self.std[name] - stds_channels = np.tile(stds, (image.num_channels, 1)) + stds_channels: np.ndarray = np.tile(stds, (image.num_channels, 1)) # type: ignore # noqa: E501 transformed_tensors = [] for std, channel in zip(stds_channels, image.data): transformed_tensor = blur( diff --git a/src/torchio/transforms/augmentation/intensity/random_gamma.py b/src/torchio/transforms/augmentation/intensity/random_gamma.py index 0ec1c659c..2d216544c 100644 --- a/src/torchio/transforms/augmentation/intensity/random_gamma.py +++ b/src/torchio/transforms/augmentation/intensity/random_gamma.py @@ -1,7 +1,8 @@ +from typing import Tuple, Dict from collections import defaultdict -from typing import Tuple import torch +import numpy as np from ....utils import to_tuple from ....typing import TypeRangeFloat @@ -11,8 +12,7 @@ class RandomGamma(RandomTransform, IntensityTransform): - r"""Randomly change contrast of an image by raising its values to the power - :math:`\gamma`. + r"""Randomly change contrast of an image by raising its values to the power :math:`\gamma`. Args: log_gamma: Tuple :math:`(a, b)` to compute the exponent @@ -70,7 +70,7 @@ def __init__( self.log_gamma_range = self._parse_range(log_gamma, 'log_gamma') def apply_transform(self, subject: Subject) -> Subject: - arguments = defaultdict(dict) + arguments: Dict[str, dict] = defaultdict(dict) for name, image in self.get_images_dict(subject).items(): gammas = [ self.get_params(self.log_gamma_range) @@ -79,10 +79,11 @@ def apply_transform(self, subject: Subject) -> Subject: arguments['gamma'][name] = gammas transform = Gamma(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed def get_params(self, log_gamma_range: Tuple[float, float]) -> float: - gamma = self.sample_uniform(*log_gamma_range).exp().item() + gamma = np.exp(self.sample_uniform(*log_gamma_range)) return gamma @@ -126,13 +127,14 @@ def __init__( ): super().__init__(**kwargs) self.gamma = gamma - self.args_names = ('gamma',) + self.args_names = ['gamma'] self.invert_transform = False def apply_transform(self, subject: Subject) -> Subject: gamma = self.gamma for name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): + assert isinstance(self.gamma, dict) gamma = self.gamma[name] gammas = to_tuple(gamma, length=len(image.data)) transformed_tensors = [] diff --git a/src/torchio/transforms/augmentation/intensity/random_ghosting.py b/src/torchio/transforms/augmentation/intensity/random_ghosting.py index 4ece0a4e3..d6b5c23d5 100644 --- a/src/torchio/transforms/augmentation/intensity/random_ghosting.py +++ b/src/torchio/transforms/augmentation/intensity/random_ghosting.py @@ -1,5 +1,5 @@ from collections import defaultdict -from typing import Tuple, Union, Dict +from typing import Iterable, Tuple, Union, Dict import torch import numpy as np @@ -57,9 +57,10 @@ def __init__( super().__init__(**kwargs) if not isinstance(axes, tuple): try: - axes = tuple(axes) + axes = tuple(axes) # type: ignore[arg-type] except TypeError: - axes = (axes,) + axes = (axes,) # type: ignore[assignment] + assert isinstance(axes, Iterable) for axis in axes: if not isinstance(axis, str) and axis not in (0, 1, 2): raise ValueError(f'Axes must be in (0, 1, 2), not "{axes}"') @@ -73,15 +74,16 @@ def __init__( self.restore = _parse_restore(restore) def apply_transform(self, subject: 
Subject) -> Subject: - arguments = defaultdict(dict) + arguments: Dict[str, dict] = defaultdict(dict) if any(isinstance(n, str) for n in self.axes): subject.check_consistent_orientation() for name, image in self.get_images_dict(subject).items(): is_2d = image.is_2d() axes = [a for a in self.axes if a != 2] if is_2d else self.axes + min_ghosts, max_ghosts = self.num_ghosts_range params = self.get_params( - self.num_ghosts_range, - axes, + (int(min_ghosts), int(max_ghosts)), + axes, # type: ignore[arg-type] self.intensity_range, ) num_ghosts_param, axis_param, intensity_param = params @@ -91,6 +93,7 @@ def apply_transform(self, subject: Subject) -> Subject: arguments['restore'][name] = self.restore transform = Ghosting(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed def get_params( @@ -102,7 +105,7 @@ def get_params( ng_min, ng_max = num_ghosts_range num_ghosts = torch.randint(ng_min, ng_max + 1, (1,)).item() axis = axes[torch.randint(0, len(axes), (1,))] - intensity = self.sample_uniform(*intensity_range).item() + intensity = self.sample_uniform(*intensity_range) return num_ghosts, axis, intensity @@ -146,7 +149,7 @@ def __init__( self.num_ghosts = num_ghosts self.intensity = intensity self.restore = restore - self.args_names = 'num_ghosts', 'axis', 'intensity', 'restore' + self.args_names = ['num_ghosts', 'axis', 'intensity', 'restore'] def apply_transform(self, subject: Subject) -> Subject: axis = self.axis @@ -155,12 +158,20 @@ def apply_transform(self, subject: Subject) -> Subject: restore = self.restore for name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): + assert isinstance(self.axis, dict) + assert isinstance(self.num_ghosts, dict) + assert isinstance(self.intensity, dict) + assert isinstance(self.restore, dict) axis = self.axis[name] num_ghosts = self.num_ghosts[name] intensity = self.intensity[name] restore = self.restore[name] transformed_tensors = [] for tensor in image.data: + assert isinstance(num_ghosts, int) + assert isinstance(axis, int) + assert isinstance(intensity, float) + assert isinstance(restore, float) transformed_tensor = self.add_artifact( tensor, num_ghosts, diff --git a/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py b/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py index fb88946bd..f9e43e508 100644 --- a/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py +++ b/src/torchio/transforms/augmentation/intensity/random_labels_to_image.py @@ -137,8 +137,8 @@ def __init__( ): super().__init__(**kwargs) self.label_key = _parse_label_key(label_key) - self.used_labels = _parse_used_labels(used_labels) - self.mean, self.std = self.parse_mean_and_std(mean, std) + self.used_labels = _parse_used_labels(used_labels) # type: ignore[arg-type] # noqa: E501 + self.mean, self.std = self.parse_mean_and_std(mean, std) # type: ignore[arg-type,assignment] # noqa: E501 self.default_mean = self.parse_gaussian_parameter( default_mean, 'default_mean', ) @@ -250,28 +250,34 @@ def apply_transform(self, subject: Subject) -> Subject: labels = range(label_map.shape[0]) # Raise error if mean and std are not defined for every label - _check_mean_and_std_length(labels, self.mean, self.std) + _check_mean_and_std_length(labels, self.mean, self.std) # type: ignore[arg-type] # noqa: E501 for label in labels: mean, std = self.get_params(label) - arguments['mean'].append(mean) - arguments['std'].append(std) + means = 
arguments['mean'] + stds = arguments['std'] + assert isinstance(means, list) + assert isinstance(stds, list) + means.append(mean) + stds.append(std) transform = LabelsToImage(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed def get_params(self, label: int) -> Tuple[float, float]: if self.mean is None: mean_range = self.default_mean else: + assert isinstance(self.mean, Sequence) mean_range = self.mean[label] if self.std is None: std_range = self.default_std else: std_range = self.std[label] - mean = self.sample_uniform(*mean_range).item() - std = self.sample_uniform(*std_range).item() + mean = self.sample_uniform(*mean_range) # type: ignore[misc] + std = self.sample_uniform(*std_range) # type: ignore[misc] return mean, std @@ -330,11 +336,11 @@ def __init__( super().__init__(**kwargs) self.label_key = _parse_label_key(label_key) self.used_labels = _parse_used_labels(used_labels) - self.mean, self.std = mean, std + self.mean, self.std = mean, std # type: ignore[assignment] self.image_key = image_key self.ignore_background = ignore_background self.discretize = discretize - self.args_names = ( + self.args_names = [ 'label_key', 'mean', 'std', @@ -342,7 +348,7 @@ def __init__( 'used_labels', 'ignore_background', 'discretize', - ) + ] def apply_transform(self, subject: Subject) -> Subject: original_image = subject.get(self.image_key) @@ -373,12 +379,18 @@ def apply_transform(self, subject: Subject) -> Subject: labels_in_image = range(label_map.shape[0]) # Raise error if mean and std are not defined for every label - _check_mean_and_std_length(labels_in_image, self.mean, self.std) + _check_mean_and_std_length( + labels_in_image, + self.mean, # type: ignore[arg-type] + self.std, + ) for i, label in enumerate(labels_in_image): if label == 0 and self.ignore_background: continue if self.used_labels is None or label in self.used_labels: + assert isinstance(self.mean, Sequence) + assert isinstance(self.std, Sequence) mean = self.mean[i] std = self.std[i] if is_discretized: @@ -426,7 +438,9 @@ def _parse_label_key(label_key: Optional[str]) -> Optional[str]: return label_key -def _parse_used_labels(used_labels: Sequence[int]) -> Sequence[int]: +def _parse_used_labels( + used_labels: Optional[Sequence[int]], +) -> Optional[Sequence[int]]: if used_labels is None: return None check_sequence(used_labels, 'used_labels') diff --git a/src/torchio/transforms/augmentation/intensity/random_motion.py b/src/torchio/transforms/augmentation/intensity/random_motion.py index 0e5ea3611..cbdfa00e0 100644 --- a/src/torchio/transforms/augmentation/intensity/random_motion.py +++ b/src/torchio/transforms/augmentation/intensity/random_motion.py @@ -67,7 +67,7 @@ def __init__( ) def apply_transform(self, subject: Subject) -> Subject: - arguments = defaultdict(dict) + arguments: Dict[str, dict] = defaultdict(dict) for name, image in self.get_images_dict(subject).items(): params = self.get_params( self.degrees_range, @@ -82,6 +82,7 @@ def apply_transform(self, subject: Subject) -> Subject: arguments['image_interpolation'][name] = self.image_interpolation transform = Motion(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed def get_params( @@ -145,12 +146,12 @@ def __init__( self.translation = translation self.times = times self.image_interpolation = image_interpolation - self.args_names = ( + self.args_names = [ 'degrees', 'translation', 'times', 
'image_interpolation', - ) + ] def apply_transform(self, subject: Subject) -> Subject: degrees = self.degrees @@ -159,6 +160,10 @@ def apply_transform(self, subject: Subject) -> Subject: image_interpolation = self.image_interpolation for image_name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): + assert isinstance(self.degrees, dict) + assert isinstance(self.translation, dict) + assert isinstance(self.times, dict) + assert isinstance(self.image_interpolation, dict) degrees = self.degrees[image_name] translation = self.translation[image_name] times = self.times[image_name] @@ -171,14 +176,15 @@ def apply_transform(self, subject: Subject) -> Subject: force_3d=True, ) transforms = self.get_rigid_transforms( - degrees, - translation, + np.asarray(degrees), + np.asarray(translation), sitk_image, ) + assert isinstance(image_interpolation, str) transformed_channel = self.add_artifact( sitk_image, transforms, - times, + np.asarray(times), image_interpolation, ) result_arrays.append(transformed_channel) @@ -246,7 +252,7 @@ def resample_images( return images @staticmethod - def sort_spectra(spectra: np.ndarray, times: np.ndarray): + def sort_spectra(spectra: List[torch.Tensor], times: np.ndarray): """Use original spectrum to fill the center of k-space""" num_spectra = len(spectra) if np.any(times > 0.5): diff --git a/src/torchio/transforms/augmentation/intensity/random_noise.py b/src/torchio/transforms/augmentation/intensity/random_noise.py index 9c9e1515a..bf236da5d 100644 --- a/src/torchio/transforms/augmentation/intensity/random_noise.py +++ b/src/torchio/transforms/augmentation/intensity/random_noise.py @@ -39,7 +39,7 @@ def __init__( self.std_range = self._parse_range(std, 'std', min_constraint=0) def apply_transform(self, subject: Subject) -> Subject: - arguments = defaultdict(dict) + arguments: Dict[str, dict] = defaultdict(dict) for image_name in self.get_images_dict(subject): mean, std, seed = self.get_params(self.mean_range, self.std_range) arguments['mean'][image_name] = mean @@ -47,15 +47,16 @@ def apply_transform(self, subject: Subject) -> Subject: arguments['seed'][image_name] = seed transform = Noise(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed def get_params( self, mean_range: Tuple[float, float], std_range: Tuple[float, float], - ) -> Tuple[float, float]: - mean = self.sample_uniform(*mean_range).item() - std = self.sample_uniform(*std_range).item() + ) -> Tuple[float, float, int]: + mean = self.sample_uniform(*mean_range) + std = self.sample_uniform(*std_range) seed = self._get_random_seed() return mean, std, seed @@ -82,18 +83,21 @@ def __init__( **kwargs ): super().__init__(**kwargs) - self.mean = mean + self.mean = mean # type: ignore[assignment] self.std = std self.seed = seed self.invert_transform = False - self.args_names = 'mean', 'std', 'seed' + self.args_names = ['mean', 'std', 'seed'] def apply_transform(self, subject: Subject) -> Subject: mean, std, seed = args = self.mean, self.std, self.seed for name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): - mean, std, seed = (arg[name] for arg in args) + values = (arg[name] for arg in args) # type: ignore[index,call-overload] # noqa: E501 + mean, std, seed = values # type: ignore[assignment] # noqa: E501 with self._use_seed(seed): + assert isinstance(mean, float) + assert isinstance(std, float) noise = get_noise(image.data, mean, std) if self.invert_transform: noise *= -1 diff 
--git a/src/torchio/transforms/augmentation/intensity/random_spike.py b/src/torchio/transforms/augmentation/intensity/random_spike.py index 958202bd0..51fd23721 100644 --- a/src/torchio/transforms/augmentation/intensity/random_spike.py +++ b/src/torchio/transforms/augmentation/intensity/random_spike.py @@ -1,3 +1,4 @@ +from numbers import Number from collections import defaultdict from typing import Tuple, Union, Dict @@ -47,12 +48,12 @@ def __init__( self.intensity_range = self._parse_range( intensity, 'intensity_range', ) - self.num_spikes_range = self._parse_range( + self.num_spikes_range: Tuple[int, int] = self._parse_range( # type: ignore[assignment] # noqa: E501 num_spikes, 'num_spikes', min_constraint=0, type_constraint=int, ) def apply_transform(self, subject: Subject) -> Subject: - arguments = defaultdict(dict) + arguments: Dict[str, dict] = defaultdict(dict) for image_name in self.get_images_dict(subject): spikes_positions_param, intensity_param = self.get_params( self.num_spikes_range, @@ -62,6 +63,7 @@ def apply_transform(self, subject: Subject) -> Subject: arguments['intensity'][image_name] = intensity_param transform = Spike(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed def get_params( @@ -70,10 +72,10 @@ def get_params( intensity_range: Tuple[float, float], ) -> Tuple[np.ndarray, float]: ns_min, ns_max = num_spikes_range - num_spikes_param = torch.randint(ns_min, ns_max + 1, (1,)).item() + num_spikes_param = int(torch.randint(ns_min, ns_max + 1, (1,)).item()) intensity_param = self.sample_uniform(*intensity_range) spikes_positions = torch.rand(num_spikes_param, 3).numpy() - return spikes_positions, intensity_param.item() + return spikes_positions, intensity_param class Spike(IntensityTransform, FourierTransform): @@ -103,7 +105,7 @@ def __init__( super().__init__(**kwargs) self.spikes_positions = spikes_positions self.intensity = intensity - self.args_names = 'spikes_positions', 'intensity' + self.args_names = ['spikes_positions', 'intensity'] self.invert_transform = False def apply_transform(self, subject: Subject) -> Subject: @@ -112,12 +114,14 @@ def apply_transform(self, subject: Subject) -> Subject: for image_name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): spikes_positions = self.spikes_positions[image_name] + assert isinstance(self.intensity, dict) intensity = self.intensity[image_name] transformed_tensors = [] for channel in image.data: + assert isinstance(intensity, Number) transformed_tensor = self.add_artifact( channel, - spikes_positions, + np.asarray(spikes_positions), intensity, ) transformed_tensors.append(transformed_tensor) diff --git a/src/torchio/transforms/augmentation/intensity/random_swap.py b/src/torchio/transforms/augmentation/intensity/random_swap.py index e6a173414..e1480f5bc 100644 --- a/src/torchio/transforms/augmentation/intensity/random_swap.py +++ b/src/torchio/transforms/augmentation/intensity/random_swap.py @@ -1,17 +1,20 @@ +from __future__ import annotations + from collections import defaultdict -from typing import Tuple, Union, List, Sequence, Dict +from typing import Tuple, Union, List, Sequence, Dict, TypeVar import torch import numpy as np from ....data.subject import Subject from ....utils import to_tuple -from ....typing import TypeTuple, TypeData, TypeTripletInt +from ....typing import TypeTuple, TypeTripletInt from ... import IntensityTransform from .. 
import RandomTransform TypeLocations = Sequence[Tuple[TypeTripletInt, TypeTripletInt]] +TensorArray = TypeVar('TensorArray', np.ndarray, torch.Tensor) class RandomSwap(RandomTransform, IntensityTransform): @@ -58,17 +61,18 @@ def get_params( patch_size: np.ndarray, num_iterations: int, ) -> List[Tuple[TypeTripletInt, TypeTripletInt]]: - spatial_shape = tensor.shape[-3:] + si, sj, sk = tensor.shape[-3:] + spatial_shape = si, sj, sk # for mypy locations = [] for _ in range(num_iterations): first_ini, first_fin = get_random_indices_from_shape( spatial_shape, - patch_size, + patch_size.tolist(), ) while True: second_ini, second_fin = get_random_indices_from_shape( spatial_shape, - patch_size, + patch_size.tolist(), ) larger_than_initial = np.all(second_ini >= first_ini) less_than_final = np.all(second_fin <= first_fin) @@ -78,10 +82,10 @@ break # patches don't overlap location = tuple(first_ini), tuple(second_ini) locations.append(location) - return locations + return locations # type: ignore[return-value] def apply_transform(self, subject: Subject) -> Subject: - arguments = defaultdict(dict) + arguments: Dict[str, dict] = defaultdict(dict) for name, image in self.get_images_dict(subject).items(): locations = self.get_params( image.data, @@ -92,6 +96,7 @@ arguments['patch_size'][name] = self.patch_size transform = Swap(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed @@ -118,75 +123,86 @@ def __init__( super().__init__(**kwargs) self.locations = locations self.patch_size = patch_size - self.args_names = 'locations', 'patch_size' + self.args_names = ['locations', 'patch_size'] self.invert_transform = False def apply_transform(self, subject: Subject) -> Subject: locations, patch_size = self.locations, self.patch_size for name, image in self.get_images_dict(subject).items(): if self.arguments_are_dict(): + assert isinstance(self.locations, dict) + assert isinstance(self.patch_size, dict) locations = self.locations[name] patch_size = self.patch_size[name] if self.invert_transform: + assert isinstance(locations, list) locations.reverse() - image.set_data(swap(image.data, patch_size, locations)) + swapped = _swap(image.data, patch_size, locations) # type: ignore[arg-type] # noqa: E501 + image.set_data(swapped) return subject -def swap( +def _swap( tensor: torch.Tensor, patch_size: TypeTuple, locations: List[Tuple[np.ndarray, np.ndarray]], -) -> None: +) -> torch.Tensor: + # Note this function does not modify the input in-place (it is cloned) tensor = tensor.clone() - patch_size = np.array(patch_size) + patch_size_array = np.array(patch_size) for first_ini, second_ini in locations: - first_fin = first_ini + patch_size - second_fin = second_ini + patch_size - first_patch = crop(tensor, first_ini, first_fin) - second_patch = crop(tensor, second_ini, second_fin).clone() - insert(tensor, first_patch, second_ini) - insert(tensor, second_patch, first_ini) + first_fin = first_ini + patch_size_array + second_fin = second_ini + patch_size_array + first_patch = _crop(tensor, first_ini, first_fin) + second_patch = _crop(tensor, second_ini, second_fin).clone() + _insert(tensor, first_patch, second_ini) + _insert(tensor, second_patch, first_ini) return tensor -def insert(tensor: TypeData, patch: TypeData, index_ini: np.ndarray) -> None: +def _insert( + tensor: TensorArray, + patch: TensorArray, + index_ini: np.ndarray, +) -> None: index_fin = index_ini + np.array(patch.shape[-3:])
i_ini, j_ini, k_ini = index_ini i_fin, j_fin, k_fin = index_fin tensor[:, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin] = patch -def crop( - image: Union[np.ndarray, torch.Tensor], +def _crop( + image: TensorArray, index_ini: np.ndarray, index_fin: np.ndarray, -) -> Union[np.ndarray, torch.Tensor]: +) -> TensorArray: i_ini, j_ini, k_ini = index_ini i_fin, j_fin, k_fin = index_fin return image[:, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin] def get_random_indices_from_shape( - spatial_shape: TypeTripletInt, - patch_size: TypeTripletInt, + spatial_shape: Sequence[int], + patch_size: Sequence[int], ) -> Tuple[np.ndarray, np.ndarray]: + assert len(spatial_shape) == 3 + assert len(patch_size) in (1, 3) shape_array = np.array(spatial_shape) patch_size_array = np.array(patch_size) - max_index_ini = shape_array - patch_size_array - if (max_index_ini < 0).any(): + max_index_ini_unchecked = shape_array - patch_size_array + if (max_index_ini_unchecked < 0).any(): message = ( f'Patch size {patch_size} cannot be' f' larger than image spatial shape {spatial_shape}' ) raise ValueError(message) - max_index_ini = max_index_ini.astype(np.uint16) + max_index_ini = max_index_ini_unchecked.astype(np.uint16) coordinates = [] for max_coordinate in max_index_ini.tolist(): if max_coordinate == 0: coordinate = 0 else: - coordinate = torch.randint(max_coordinate, size=(1,)).item() + coordinate = int(torch.randint(max_coordinate, size=(1,)).item()) coordinates.append(coordinate) index_ini = np.array(coordinates, np.uint16) index_fin = index_ini + patch_size_array diff --git a/src/torchio/transforms/augmentation/random_transform.py b/src/torchio/transforms/augmentation/random_transform.py index f4201ef15..2d7ee4899 100644 --- a/src/torchio/transforms/augmentation/random_transform.py +++ b/src/torchio/transforms/augmentation/random_transform.py @@ -1,6 +1,4 @@ -""" -This is the docstring of random transform module -""" +from __future__ import annotations from typing import Tuple @@ -41,17 +39,17 @@ def parse_translation( return self._parse_range(translation, 'translation') @staticmethod - def sample_uniform(a, b): - return torch.FloatTensor(1).uniform_(a, b) + def sample_uniform(a: float, b: float) -> float: + return torch.FloatTensor(1).uniform_(a, b).item() @staticmethod - def _get_random_seed(): + def _get_random_seed() -> int: """Generate a random seed. Returns: A random seed as an int. 
""" - return torch.randint(0, 2**31, (1,)).item() + return int(torch.randint(0, 2**31, (1,)).item()) def sample_uniform_sextet(self, params): results = [] diff --git a/src/torchio/transforms/augmentation/spatial/random_affine.py b/src/torchio/transforms/augmentation/spatial/random_affine.py index 1913a8837..71c04788e 100644 --- a/src/torchio/transforms/augmentation/spatial/random_affine.py +++ b/src/torchio/transforms/augmentation/spatial/random_affine.py @@ -170,6 +170,7 @@ def apply_transform(self, subject: Subject) -> Subject: } transform = Affine(**self.add_include_exclude(arguments)) transformed = transform(subject) + assert isinstance(transformed, Subject) return transformed @@ -251,7 +252,7 @@ def __init__( ) self.invert_transform = False self.check_shape = check_shape - self.args_names = ( + self.args_names = [ 'scales', 'degrees', 'translation', @@ -260,7 +261,7 @@ def __init__( 'image_interpolation', 'label_interpolation', 'check_shape', - ) + ] @staticmethod def _get_scaling_transform( @@ -269,8 +270,8 @@ def _get_scaling_transform( ) -> sitk.ScaleTransform: # 1.5 means the objects look 1.5 times larger transform = sitk.ScaleTransform(3) - scaling_params = np.array(scaling_params).astype(float) - transform.SetScale(scaling_params) + scaling_params_array = np.array(scaling_params).astype(float) + transform.SetScale(scaling_params_array) if center_lps is not None: transform.SetCenter(center_lps) return transform @@ -282,11 +283,11 @@ def _get_rotation_transform( center_lps: Optional[TypeTripletFloat] = None, ) -> sitk.Euler3DTransform: - def ras_to_lps(triplet: np.ndarray): + def ras_to_lps(triplet: Sequence[float]): return np.array((-1, -1, 1), dtype=float) * np.asarray(triplet) transform = sitk.Euler3DTransform() - radians = np.radians(degrees) + radians = np.radians(degrees).tolist() # SimpleITK uses LPS radians_lps = ras_to_lps(radians) @@ -346,6 +347,7 @@ def get_affine_transform(self, image): def apply_transform(self, subject: Subject) -> Subject: if self.check_shape: subject.check_consistent_spatial_shape() + default_value: float for image in self.get_images(subject): transform = self.get_affine_transform(image) transformed_tensors = [] @@ -371,7 +373,8 @@ def apply_transform(self, subject: Subject) -> Subject: sitk_image, filter_otsu=True, ) else: - default_value = self.default_pad_value + assert isinstance(self.default_pad_value, Number) + default_value = float(self.default_pad_value) transformed_tensor = self.apply_affine_transform( sitk_image, transform, diff --git a/src/torchio/transforms/augmentation/spatial/random_anisotropy.py b/src/torchio/transforms/augmentation/spatial/random_anisotropy.py index 0d474afeb..de394e66e 100644 --- a/src/torchio/transforms/augmentation/spatial/random_anisotropy.py +++ b/src/torchio/transforms/augmentation/spatial/random_anisotropy.py @@ -1,5 +1,5 @@ import warnings -from typing import Union, Tuple, List +from typing import Union, Tuple import torch @@ -66,9 +66,9 @@ def get_params( self, axes: Tuple[int, ...], downsampling_range: Tuple[float, float], - ) -> List[bool]: + ) -> Tuple[int, float]: axis = axes[torch.randint(0, len(axes), (1,))] - downsampling = self.sample_uniform(*downsampling_range).item() + downsampling = self.sample_uniform(*downsampling_range) return axis, downsampling @staticmethod @@ -101,17 +101,19 @@ def apply_transform(self, subject: Subject) -> Subject: 'scalars_only': self.scalars_only, } + sx, sy, sz = target_spacing # for mypy downsample = Resample( - target=tuple(target_spacing), + target=(sx, sy, sz), 
diff --git a/src/torchio/transforms/augmentation/spatial/random_anisotropy.py b/src/torchio/transforms/augmentation/spatial/random_anisotropy.py
index 0d474afeb..de394e66e 100644
--- a/src/torchio/transforms/augmentation/spatial/random_anisotropy.py
+++ b/src/torchio/transforms/augmentation/spatial/random_anisotropy.py
@@ -1,5 +1,5 @@
 import warnings
-from typing import Union, Tuple, List
+from typing import Union, Tuple
 
 import torch
 
@@ -66,9 +66,9 @@ def get_params(
             self,
             axes: Tuple[int, ...],
             downsampling_range: Tuple[float, float],
-    ) -> List[bool]:
+    ) -> Tuple[int, float]:
         axis = axes[torch.randint(0, len(axes), (1,))]
-        downsampling = self.sample_uniform(*downsampling_range).item()
+        downsampling = self.sample_uniform(*downsampling_range)
         return axis, downsampling
 
     @staticmethod
@@ -101,17 +101,19 @@ def apply_transform(self, subject: Subject) -> Subject:
             'scalars_only': self.scalars_only,
         }
 
+        sx, sy, sz = target_spacing  # for mypy
         downsample = Resample(
-            target=tuple(target_spacing),
+            target=(sx, sy, sz),
             **self.add_include_exclude(arguments)
         )
         downsampled = downsample(subject)
         image = subject.get_first_image()
         target = image.spatial_shape, image.affine
         upsample = Resample(
-            target=target,
+            target=target,  # type: ignore[arg-type]
             image_interpolation=self.image_interpolation,
             scalars_only=self.scalars_only,
         )
         upsampled = upsample(downsampled)
+        assert isinstance(upsampled, Subject)
         return upsampled
diff --git a/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py b/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
index 08968d0a5..b55eb97a7 100644
--- a/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
+++ b/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
@@ -129,9 +129,9 @@
         super().__init__(**kwargs)
         self._bspline_transformation = None
         self.num_control_points = to_tuple(num_control_points, length=3)
-        _parse_num_control_points(self.num_control_points)
+        _parse_num_control_points(self.num_control_points)  # type: ignore[arg-type]  # noqa: E501
         self.max_displacement = to_tuple(max_displacement, length=3)
-        _parse_max_displacement(self.max_displacement)
+        _parse_max_displacement(self.max_displacement)  # type: ignore[arg-type]  # noqa: E501
         self.num_locked_borders = locked_borders
         if locked_borders not in (0, 1, 2):
             raise ValueError('locked_borders must be 0, 1, or 2')
@@ -176,8 +176,8 @@ def get_params(
     def apply_transform(self, subject: Subject) -> Subject:
         subject.check_consistent_spatial_shape()
         control_points = self.get_params(
-            self.num_control_points,
-            self.max_displacement,
+            self.num_control_points,  # type: ignore[arg-type]
+            self.max_displacement,  # type: ignore[arg-type]
             self.num_locked_borders,
         )
 
@@ -190,6 +190,7 @@ def apply_transform(self, subject: Subject) -> Subject:
 
         transform = ElasticDeformation(**self.add_include_exclude(arguments))
         transformed = transform(subject)
+        assert isinstance(transformed, Subject)
         return transformed
 
 
@@ -223,12 +224,12 @@ def __init__(
             label_interpolation,
         )
         self.invert_transform = False
-        self.args_names = (
+        self.args_names = [
             'control_points',
             'image_interpolation',
             'label_interpolation',
             'max_displacement',
-        )
+        ]
 
     def get_bspline_transform(
             self,
@@ -298,7 +299,7 @@ def apply_bspline_transform(
         bspline_transform = self.get_bspline_transform(image)
         self.parse_free_form_transform(
             bspline_transform,
-            self.max_displacement,
+            self.max_displacement,  # type: ignore[arg-type]
         )
         interpolator = self.get_sitk_interpolator(interpolation)
         resampler = sitk.ResampleImageFilter()
diff --git a/src/torchio/transforms/augmentation/spatial/random_flip.py b/src/torchio/transforms/augmentation/spatial/random_flip.py
index dd413c0ec..193960b25 100644
--- a/src/torchio/transforms/augmentation/spatial/random_flip.py
+++ b/src/torchio/transforms/augmentation/spatial/random_flip.py
@@ -58,6 +58,7 @@ def apply_transform(self, subject: Subject) -> Subject:
         arguments = {'axes': axes}
         transform = Flip(**self.add_include_exclude(arguments))
         transformed = transform(subject)
+        assert isinstance(transformed, Subject)
         return transformed
 
     @staticmethod
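The sx, sy, sz = target_spacing line above is a small narrowing trick: a value typed Tuple[float, ...] does not satisfy a parameter typed Tuple[float, float, float], but unpacking into three names and rebuilding the tuple gives mypy the fixed length it needs. A sketch with a hypothetical resample function:

    from typing import Tuple


    def resample(target: Tuple[float, float, float]) -> None:
        print('resampling to spacing', target)


    spacing: Tuple[float, ...] = (1.0, 1.0, 2.5)
    sx, sy, sz = spacing  # raises ValueError at runtime if the length is not 3
    resample((sx, sy, sz))  # (sx, sy, sz) has type Tuple[float, float, float]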
diff --git a/src/torchio/transforms/data_parser.py b/src/torchio/transforms/data_parser.py
index 68d96e8e6..98f896bcd 100644
--- a/src/torchio/transforms/data_parser.py
+++ b/src/torchio/transforms/data_parser.py
@@ -116,7 +116,7 @@ def _parse_tensor(self, data: TypeData) -> Subject:
             raise ValueError(message)
         return self._get_subject_from_tensor(data)
 
-    def _get_subject_from_tensor(self, tensor: torch.Tensor) -> Subject:
+    def _get_subject_from_tensor(self, tensor: TypeData) -> Subject:
         image = ScalarImage(tensor=tensor)
         return self._get_subject_from_image(image)
 
@@ -131,7 +131,7 @@ def _get_subject_from_dict(
             label_keys: Optional[Sequence[str]] = None,
     ) -> Subject:
         subject_dict = {}
-        label_keys = {} if label_keys is None else label_keys
+        label_keys = [] if label_keys is None else label_keys
         for key, value in data.items():
             if key in image_keys:
                 class_ = LabelMap if key in label_keys else ScalarImage
diff --git a/src/torchio/transforms/intensity_transform.py b/src/torchio/transforms/intensity_transform.py
index 8070c09a0..f64402d95 100644
--- a/src/torchio/transforms/intensity_transform.py
+++ b/src/torchio/transforms/intensity_transform.py
@@ -26,7 +26,7 @@ def get_images(self, subject: Subject) -> List[Image]:
     def arguments_are_dict(self) -> bool:
         """Check if main arguments are dict.
 
-        Return True if the type of all attributes specified in the
+        Return ``True`` if the type of all attributes specified in the
         :attr:`args_names` have ``dict`` type.
         """
         args = [getattr(self, name) for name in self.args_names]
diff --git a/src/torchio/transforms/lambda_transform.py b/src/torchio/transforms/lambda_transform.py
index 3e1f60092..a2b0bc678 100644
--- a/src/torchio/transforms/lambda_transform.py
+++ b/src/torchio/transforms/lambda_transform.py
@@ -36,7 +36,7 @@ def __init__(
         super().__init__(**kwargs)
         self.function = function
         self.types_to_apply = types_to_apply
-        self.args_names = 'function', 'types_to_apply'
+        self.args_names = ['function', 'types_to_apply']
 
     def apply_transform(self, subject: Subject) -> Subject:
         images = subject.get_images(
diff --git a/src/torchio/transforms/preprocessing/intensity/clamp.py b/src/torchio/transforms/preprocessing/intensity/clamp.py
index a5caf16d7..63cdecce5 100644
--- a/src/torchio/transforms/preprocessing/intensity/clamp.py
+++ b/src/torchio/transforms/preprocessing/intensity/clamp.py
@@ -43,10 +43,11 @@ def __init__(
     ):
         super().__init__(**kwargs)
         self.out_min, self.out_max = out_min, out_max
-        self.args_names = 'out_min', 'out_max'
+        self.args_names = ['out_min', 'out_max']
 
     def apply_transform(self, subject: Subject) -> Subject:
         for image in self.get_images(subject):
+            assert isinstance(image, ScalarImage)
            self.apply_clamp(image)
         return subject
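The recurring args_names change (tuples becoming lists) follows from the base class: the transform.py hunk later in this patch declares the attribute once as List[str], and mypy rejects re-assigning a tuple to it in subclasses. A condensed sketch, with Clamp standing in for any transform:

    from typing import List


    class Transform:
        def __init__(self) -> None:
            self.args_names: List[str] = []  # one declared type for all subclasses


    class Clamp(Transform):
        def __init__(self) -> None:
            super().__init__()
            self.args_names = ['out_min', 'out_max']
            # self.args_names = ('out_min', 'out_max')  # mypy: tuple is not List[str]


    print(Clamp().args_names)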
diff --git a/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py b/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py
index 69cca5d8b..2948abe1d 100644
--- a/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py
+++ b/src/torchio/transforms/preprocessing/intensity/histogram_standardization.py
@@ -54,7 +54,7 @@ def __init__(
         super().__init__(masking_method=masking_method, **kwargs)
         self.landmarks = landmarks
         self.landmarks_dict = self._parse_landmarks(landmarks)
-        self.args_names = 'landmarks', 'masking_method'
+        self.args_names = ['landmarks', 'masking_method']
 
     @staticmethod
     def _parse_landmarks(landmarks: TypeLandmarks) -> Dict[str, np.ndarray]:
@@ -89,7 +89,7 @@ def apply_normalization(
             raise KeyError(message)
         image = subject[image_name]
         landmarks = self.landmarks_dict[image_name]
-        normalized = normalize(image.data, landmarks, mask=mask)
+        normalized = _normalize(image.data, landmarks, mask=mask.numpy())
         image.set_data(normalized)
 
     @classmethod
@@ -153,16 +153,17 @@ def train(
             >>> transform = HistogramStandardization(landmarks_dict)
         """  # noqa: E501
         is_masks_list = isinstance(mask_path, Sequence)
-        if is_masks_list and len(mask_path) != len(images_paths):
+        if is_masks_list and len(mask_path) != len(images_paths):  # type: ignore[arg-type]  # noqa: E501
             message = (
-                f'Different number of images ({len(images_paths)})'
-                f' and mask ({len(mask_path)}) paths found'
+                f'Different number of images ({len(images_paths)})'  # type: ignore[arg-type]  # noqa: E501
+                f' and mask ({len(mask_path)}) paths found'  # type: ignore[arg-type]  # noqa: E501
             )
             raise ValueError(message)
         quantiles_cutoff = DEFAULT_CUTOFF if cutoff is None else cutoff
         percentiles_cutoff = 100 * np.array(quantiles_cutoff)
         percentiles_database = []
-        percentiles = _get_percentiles(percentiles_cutoff)
+        a, b = percentiles_cutoff  # for mypy
+        percentiles = _get_percentiles((a, b))
         for i, image_file_path in enumerate(tqdm(images_paths)):
             tensor, _ = read_image(image_file_path)
             if masking_function is not None:
@@ -172,16 +173,17 @@ def train(
                     mask = np.ones_like(tensor, dtype=bool)
                 else:
                     if is_masks_list:
+                        assert isinstance(mask_path, Sequence)
                         path = mask_path[i]
                     else:
-                        path = mask_path
+                        path = mask_path  # type: ignore[assignment]
                     mask, _ = read_image(path)
                     mask = mask.numpy() > 0
             array = tensor.numpy()
             percentile_values = np.percentile(array[mask], percentiles)
             percentiles_database.append(percentile_values)
-        percentiles_database = np.vstack(percentiles_database)
-        mapping = _get_average_mapping(percentiles_database)
+        percentiles_database_array = np.vstack(percentiles_database)
+        mapping = _get_average_mapping(percentiles_database_array)
 
         if output_path is not None:
             output_path = Path(output_path).expanduser()
@@ -195,18 +197,18 @@ def train(
         return mapping
 
 
-def _standardize_cutoff(cutoff: np.ndarray) -> np.ndarray:
+def _standardize_cutoff(cutoff: Sequence[float]) -> np.ndarray:
     """Standardize the cutoff values given in the configuration.
 
     Computes percentile landmark normalization by default.
     """
-    cutoff = np.asarray(cutoff)
-    cutoff[0] = max(0, cutoff[0])
-    cutoff[1] = min(1, cutoff[1])
-    cutoff[0] = np.min([cutoff[0], 0.09])
-    cutoff[1] = np.max([cutoff[1], 0.91])
-    return cutoff
+    cutoff_array = np.asarray(cutoff)
+    cutoff_array[0] = max(0, cutoff_array[0])
+    cutoff_array[1] = min(1, cutoff_array[1])
+    cutoff_array[0] = np.min([cutoff_array[0], 0.09])
+    cutoff_array[1] = np.max([cutoff_array[1], 0.91])
+    return cutoff_array
 
 
 def _get_average_mapping(percentiles_database: np.ndarray) -> np.ndarray:
@@ -236,7 +238,7 @@ def _get_percentiles(percentiles_cutoff: Tuple[float, float]) -> np.ndarray:
     return np.array(percentiles)
 
 
-def normalize(
+def _normalize(
         tensor: torch.Tensor,
         landmarks: np.ndarray,
         mask: Optional[np.ndarray],
@@ -259,7 +261,8 @@
     quantiles_cutoff = _standardize_cutoff(cutoff_)
     percentiles_cutoff = 100 * np.array(quantiles_cutoff)
-    percentiles = _get_percentiles(percentiles_cutoff)
+    a, b = percentiles_cutoff  # for mypy
+    percentiles = _get_percentiles((a, b))
     percentile_values = np.percentile(data[mask], percentiles)
 
     # Apply linear histogram standardization
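The cutoff to cutoff_array rename above (and percentiles_database to percentiles_database_array in train) avoids re-binding one name to two different types, which mypy flags. A minimal sketch of the pattern; the clamping to 0.09/0.91 is omitted here since only the rename is being illustrated:

    from typing import Sequence

    import numpy as np


    def standardize_cutoff(cutoff: Sequence[float]) -> np.ndarray:
        # cutoff = np.asarray(cutoff) would re-bind a Sequence[float] name to an
        # ndarray, which mypy rejects; a fresh name keeps the two types apart.
        cutoff_array = np.asarray(cutoff, dtype=float)
        cutoff_array[0] = max(0.0, cutoff_array[0])
        cutoff_array[1] = min(1.0, cutoff_array[1])
        return cutoff_array


    print(standardize_cutoff([-0.2, 1.3]))  # [0. 1.]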
diff --git a/src/torchio/transforms/preprocessing/intensity/mask.py b/src/torchio/transforms/preprocessing/intensity/mask.py
index 759d19c0f..8e2656cfc 100644
--- a/src/torchio/transforms/preprocessing/intensity/mask.py
+++ b/src/torchio/transforms/preprocessing/intensity/mask.py
@@ -3,7 +3,7 @@
 
 import torch
 
-from ....data.image import LabelMap
+from ....data.image import ScalarImage
 from ....data.subject import Subject
 from ....transforms.transform import TypeMaskingMethod
 from ... import IntensityTransform
@@ -66,10 +66,15 @@ def apply_transform(self, subject: Subject) -> Subject:
                 image.data,
                 self.masking_labels,
             )
+            assert isinstance(image, ScalarImage)
             self.apply_masking(image, mask_data)
         return subject
 
-    def apply_masking(self, image: LabelMap, mask_data: torch.Tensor) -> None:
+    def apply_masking(
+            self,
+            image: ScalarImage,
+            mask_data: torch.Tensor,
+    ) -> None:
         masked = mask(image.data, mask_data, self.outside_value)
         image.set_data(masked)
 
diff --git a/src/torchio/transforms/preprocessing/intensity/rescale.py b/src/torchio/transforms/preprocessing/intensity/rescale.py
index 81ecd2ad7..4753b11fb 100644
--- a/src/torchio/transforms/preprocessing/intensity/rescale.py
+++ b/src/torchio/transforms/preprocessing/intensity/rescale.py
@@ -58,7 +58,7 @@ def __init__(
         self.percentiles = self._parse_range(
             percentiles, 'percentiles', min_constraint=0, max_constraint=100,
         )
-        self.args_names = 'out_min_max', 'percentiles', 'masking_method'
+        self.args_names = ['out_min_max', 'percentiles', 'masking_method']
 
     def apply_normalization(
             self,
@@ -87,7 +87,7 @@ def rescale(
             return tensor
         values = array[mask]
         cutoff = np.percentile(values, self.percentiles)
-        np.clip(array, *cutoff, out=array)
+        np.clip(array, *cutoff, out=array)  # type: ignore[call-overload]
         if self.in_min_max is None:
             in_min, in_max = array.min(), array.max()
         else:
diff --git a/src/torchio/transforms/preprocessing/intensity/z_normalization.py b/src/torchio/transforms/preprocessing/intensity/z_normalization.py
index ab9b0cb3a..1e9c93e16 100644
--- a/src/torchio/transforms/preprocessing/intensity/z_normalization.py
+++ b/src/torchio/transforms/preprocessing/intensity/z_normalization.py
@@ -1,3 +1,4 @@
+from typing import Optional
 import torch
 from ....data.subject import Subject
 from .normalization_transform import NormalizationTransform, TypeMaskingMethod
@@ -18,7 +19,7 @@ def __init__(
             **kwargs
     ):
         super().__init__(masking_method=masking_method, **kwargs)
-        self.args_names = ('masking_method',)
+        self.args_names = ['masking_method']
 
     def apply_normalization(
             self,
@@ -40,7 +41,10 @@ def apply_normalization(
         image.set_data(standardized)
 
     @staticmethod
-    def znorm(tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
+    def znorm(
+            tensor: torch.Tensor,
+            mask: torch.Tensor,
+    ) -> Optional[torch.Tensor]:
         tensor = tensor.clone().float()
         values = tensor.masked_select(mask)
         mean, std = values.mean(), values.std()
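znorm's new Optional return spells out an existing behavior: the function presumably returns None when the masked values have zero standard deviation, and annotating it forces callers to handle that case. A self-contained sketch of the shape of the contract:

    from typing import Optional

    import torch


    def znorm(tensor: torch.Tensor, mask: torch.Tensor) -> Optional[torch.Tensor]:
        values = tensor.masked_select(mask)
        mean, std = values.mean(), values.std()
        if std == 0:
            return None  # constant region: nothing sensible to normalize
        return (tensor - mean) / std


    tensor = torch.arange(4.0).reshape(2, 2)
    mask = torch.tensor([[True, False], [True, True]])
    result = znorm(tensor, mask)
    if result is not None:  # mypy now requires the None check before use
        print(result.mean())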
diff --git a/src/torchio/transforms/preprocessing/label/one_hot.py b/src/torchio/transforms/preprocessing/label/one_hot.py
index 529e0e64d..1cff7eaa3 100644
--- a/src/torchio/transforms/preprocessing/label/one_hot.py
+++ b/src/torchio/transforms/preprocessing/label/one_hot.py
@@ -15,7 +15,7 @@ class OneHot(LabelTransform):
     def __init__(self, num_classes: int = -1, **kwargs):
         super().__init__(**kwargs)
         self.num_classes = num_classes
-        self.args_names = ('num_classes',)
+        self.args_names = ['num_classes']
         self.invert_transform = False
 
     def apply_transform(self, subject):
diff --git a/src/torchio/transforms/preprocessing/label/remap_labels.py b/src/torchio/transforms/preprocessing/label/remap_labels.py
index 0c9e2a295..3f95d3a0a 100644
--- a/src/torchio/transforms/preprocessing/label/remap_labels.py
+++ b/src/torchio/transforms/preprocessing/label/remap_labels.py
@@ -143,7 +143,7 @@ def __init__(
         self.kwargs = kwargs
         self.remapping = remapping
         self.masking_method = masking_method
-        self.args_names = 'remapping', 'masking_method'
+        self.args_names = ['remapping', 'masking_method']
 
     def apply_transform(self, subject):
         for image in self.get_images(subject):
diff --git a/src/torchio/transforms/preprocessing/label/remove_labels.py b/src/torchio/transforms/preprocessing/label/remove_labels.py
index 8b1bb57a2..627a70837 100644
--- a/src/torchio/transforms/preprocessing/label/remove_labels.py
+++ b/src/torchio/transforms/preprocessing/label/remove_labels.py
@@ -65,7 +65,7 @@ def __init__(
         self.labels = labels
         self.background_label = background_label
         self.masking_method = masking_method
-        self.args_names = 'labels', 'background_label', 'masking_method'
+        self.args_names = ['labels', 'background_label', 'masking_method']
 
     def is_invertible(self):
         return False
diff --git a/src/torchio/transforms/preprocessing/spatial/copy_affine.py b/src/torchio/transforms/preprocessing/spatial/copy_affine.py
index 7a333bbc7..45beee2cd 100644
--- a/src/torchio/transforms/preprocessing/spatial/copy_affine.py
+++ b/src/torchio/transforms/preprocessing/spatial/copy_affine.py
@@ -68,7 +68,7 @@ def __init__(self, target: str, **kwargs):
             )
             raise ValueError(message)
         self.target = target
-        self.args_names = ('target',)
+        self.args_names = ['target']
 
     def apply_transform(self, subject: Subject) -> Subject:
         if self.target not in subject:
diff --git a/src/torchio/transforms/preprocessing/spatial/crop.py b/src/torchio/transforms/preprocessing/spatial/crop.py
index d0e0b1ade..9ad8c6f85 100644
--- a/src/torchio/transforms/preprocessing/spatial/crop.py
+++ b/src/torchio/transforms/preprocessing/spatial/crop.py
@@ -36,9 +36,10 @@ def __init__(
     ):
         super().__init__(cropping, **kwargs)
         self.cropping = cropping
-        self.args_names = ('cropping',)
+        self.args_names = ['cropping']
 
     def apply_transform(self, sample) -> Subject:
+        assert self.bounds_parameters is not None
         low = self.bounds_parameters[::2]
         high = self.bounds_parameters[1::2]
         index_ini = low
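The assert added to Crop.apply_transform is the standard way to use an Optional attribute: the assert rules out None on this code path, so the slice below it type-checks. A stripped-down sketch, with Cropper as a hypothetical stand-in for the transform:

    from typing import Optional, Tuple

    TypeSixBounds = Tuple[int, int, int, int, int, int]


    class Cropper:
        def __init__(self, bounds: Optional[TypeSixBounds]) -> None:
            self.bounds_parameters = bounds

        def low_indices(self) -> Tuple[int, ...]:
            assert self.bounds_parameters is not None  # narrows Optional for mypy
            return self.bounds_parameters[::2]


    print(Cropper((0, 1, 2, 3, 4, 5)).low_indices())  # (0, 2, 4)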
diff --git a/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py b/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py
index 28118b4fd..4aa74f188 100644
--- a/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py
+++ b/src/torchio/transforms/preprocessing/spatial/crop_or_pad.py
@@ -152,7 +152,8 @@ def _get_six_bounds_parameters(
         for number in parameters:
             ini, fin = int(np.ceil(number)), int(np.floor(number))
             result.extend([ini, fin])
-        return tuple(result)
+        i1, i2, j1, j2, k1, k2 = result
+        return i1, i2, j1, j2, k1, k2
 
     def _compute_cropping_padding_from_shapes(
             self,
@@ -257,18 +258,26 @@ def _compute_mask_center_crop_or_pad(
             padding.extend([pad_ini, pad_fin])
             cropping.extend([crop_ini, crop_fin])
         # Conversion for SimpleITK compatibility
-        padding = np.asarray(padding, dtype=int)
-        cropping = np.asarray(cropping, dtype=int)
-        padding_params = tuple(padding.tolist()) if padding.any() else None
-        cropping_params = tuple(cropping.tolist()) if cropping.any() else None
-        return padding_params, cropping_params
+        padding_array = np.asarray(padding, dtype=int)
+        cropping_array = np.asarray(cropping, dtype=int)
+        if padding_array.any():
+            padding_params = tuple(padding_array.tolist())
+        else:
+            padding_params = None
+        if cropping_array.any():
+            cropping_params = tuple(cropping_array.tolist())
+        else:
+            cropping_params = None
+        return padding_params, cropping_params  # type: ignore[return-value]
 
     def apply_transform(self, subject: Subject) -> Subject:
         subject.check_consistent_space()
         padding_params, cropping_params = self.compute_crop_or_pad(subject)
         padding_kwargs = {'padding_mode': self.padding_mode}
         if padding_params is not None:
-            subject = Pad(padding_params, **padding_kwargs)(subject)
+            pad = Pad(padding_params, **padding_kwargs)
+            subject = pad(subject)  # type: ignore[assignment]
         if cropping_params is not None:
-            subject = Crop(cropping_params)(subject)
+            crop = Crop(cropping_params)
+            subject = crop(subject)  # type: ignore[assignment]
         return subject
diff --git a/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py b/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py
index da460cbfe..dc83a3e75 100644
--- a/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py
+++ b/src/torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py
@@ -1,4 +1,6 @@
-from typing import Union
+from __future__ import annotations
+
+from typing import Callable, Union
 
 import numpy as np
 
@@ -125,8 +127,10 @@ def __init__(
 
     def apply_transform(self, subject: Subject) -> Subject:
         source_shape = np.array(subject.spatial_shape, np.uint16)
-        function = np.floor if self.method == 'crop' else np.ceil
+        function: Callable = np.floor if self.method == 'crop' else np.ceil  # type: ignore[assignment]  # noqa: E501
         integer_ratio = function(source_shape / self.target_multiple)
         target_shape = integer_ratio * self.target_multiple
         target_shape = np.maximum(target_shape, 1)
-        return CropOrPad(target_shape.astype(int))(subject)
+        transform = CropOrPad(target_shape.astype(int))
+        subject = transform(subject)  # type: ignore[assignment]
+        return subject
diff --git a/src/torchio/transforms/preprocessing/spatial/pad.py b/src/torchio/transforms/preprocessing/spatial/pad.py
index 0b541b082..b4f33e032 100644
--- a/src/torchio/transforms/preprocessing/spatial/pad.py
+++ b/src/torchio/transforms/preprocessing/spatial/pad.py
@@ -1,9 +1,9 @@
 from numbers import Number
-from typing import Union
+from typing import Union, Dict
 
+import torch
 import numpy as np
 import nibabel as nib
-import torch
 
 from ....data.subject import Subject
 from .bounds_transform import BoundsTransform, TypeBounds
@@ -62,7 +62,7 @@ def __init__(
         self.padding = padding
         self.check_padding_mode(padding_mode)
         self.padding_mode = padding_mode
-        self.args_names = 'padding', 'padding_mode'
+        self.args_names = ['padding', 'padding_mode']
 
     @classmethod
     def check_padding_mode(cls, padding_mode):
@@ -75,11 +75,13 @@ def check_padding_mode(cls, padding_mode):
             raise KeyError(message)
 
     def apply_transform(self, subject: Subject) -> Subject:
+        assert self.bounds_parameters is not None
         low = self.bounds_parameters[::2]
         for image in self.get_images(subject):
             new_origin = nib.affines.apply_affine(image.affine, -np.array(low))
             new_affine = image.affine.copy()
             new_affine[:3, 3] = new_origin
+            kwargs: Dict[str, Union[str, float]]
             if isinstance(self.padding_mode, Number):
                 kwargs = {
                     'mode': 'constant',
@@ -89,7 +91,7 @@ def apply_transform(self, subject: Subject) -> Subject:
                 kwargs = {'mode': self.padding_mode}
             pad_params = self.bounds_parameters
             paddings = (0, 0), pad_params[:2], pad_params[2:4], pad_params[4:]
-            padded = np.pad(image.data, paddings, **kwargs)
+            padded = np.pad(image.data, paddings, **kwargs)  # type: ignore[call-overload]  # noqa: E501
             image.set_data(torch.as_tensor(padded))
             image.affine = new_affine
         return subject
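Pad.apply_transform now declares kwargs before the branches; without the annotation, mypy infers the dict type from whichever assignment it sees first and rejects the other branch. A sketch of the same shape around np.pad (the call-overload ignore mirrors the one in the patch; pad_volume is a hypothetical helper):

    from typing import Dict, Union

    import numpy as np


    def pad_volume(data: np.ndarray, width: int, mode: Union[str, float]) -> np.ndarray:
        kwargs: Dict[str, Union[str, float]]  # declared once for both branches
        if isinstance(mode, str):
            kwargs = {'mode': mode}
        else:
            kwargs = {'mode': 'constant', 'constant_values': float(mode)}
        return np.pad(data, width, **kwargs)  # type: ignore[call-overload]


    print(pad_volume(np.ones((2, 2)), 1, 0.0).shape)  # (4, 4)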
diff --git a/src/torchio/transforms/preprocessing/spatial/resample.py b/src/torchio/transforms/preprocessing/spatial/resample.py
index 71e818dd3..5e5b63058 100644
--- a/src/torchio/transforms/preprocessing/spatial/resample.py
+++ b/src/torchio/transforms/preprocessing/spatial/resample.py
@@ -1,7 +1,7 @@
 from pathlib import Path
 from numbers import Number
-from typing import Union, Tuple, Optional
 from collections.abc import Iterable
+from typing import Union, Tuple, Optional, Sized
 
 import torch
 import numpy as np
@@ -91,16 +91,17 @@ def __init__(
         )
         self.pre_affine_name = pre_affine_name
         self.scalars_only = scalars_only
-        self.args_names = (
+        self.args_names = [
             'target',
             'image_interpolation',
             'label_interpolation',
             'pre_affine_name',
             'scalars_only',
-        )
+        ]
 
     @staticmethod
     def _parse_spacing(spacing: TypeSpacing) -> Tuple[float, float, float]:
+        result: Iterable
         if isinstance(spacing, Iterable) and len(spacing) == 3:
             result = spacing
         elif isinstance(spacing, Number):
@@ -153,6 +154,7 @@ def check_affine_key_presence(affine_name: str, subject: Subject):
     def apply_transform(self, subject: Subject) -> Subject:
         use_pre_affine = self.pre_affine_name is not None
         if use_pre_affine:
+            assert self.pre_affine_name is not None  # for mypy
             self.check_affine_key_presence(self.pre_affine_name, subject)
 
         for image in self.get_images(subject):
@@ -177,6 +179,7 @@ def apply_transform(self, subject: Subject) -> Subject:
 
             # Apply given affine matrix if found in image
             if use_pre_affine and self.pre_affine_name in image:
+                assert self.pre_affine_name is not None  # for mypy
                 self.check_affine(self.pre_affine_name, image)
                 matrix = image[self.pre_affine_name]
                 if isinstance(matrix, torch.Tensor):
@@ -189,7 +192,7 @@ def apply_transform(self, subject: Subject) -> Subject:
             resampler.SetInterpolator(interpolator)
             self._set_resampler_reference(
                 resampler,
-                self.target,
+                self.target,  # type: ignore[arg-type]
                 floating_sitk,
                 subject,
             )
@@ -241,8 +244,9 @@ def _set_resampler_reference(
         elif isinstance(target, Number):  # one number for target was passed
             self._set_resampler_from_spacing(resampler, target, floating_sitk)
         elif isinstance(target, Iterable) and len(target) == 2:
+            assert not isinstance(target, str)  # for mypy
             shape, affine = target
-            if not (isinstance(shape, Iterable) and len(shape) == 3):
+            if not (isinstance(shape, Sized) and len(shape) == 3):
                 message = (
                     f'Target shape must be a sequence of three integers, but'
                     f' "{shape}" was passed'
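The Iterable to Sized switch in _set_resampler_reference matters because Iterable promises only __iter__, so calling len() on it is a mypy error; Sized is the protocol that carries __len__, and it supports isinstance checks at runtime. A small sketch with a hypothetical describe_target helper:

    from typing import Sized, Union


    def describe_target(target: Union[float, Sized]) -> str:
        # len() is only legal after narrowing to Sized
        if isinstance(target, Sized) and len(target) == 3:
            return 'a full spatial shape'
        return 'a single spacing value'


    print(describe_target((128.0, 128.0, 64.0)))  # a full spatial shape
    print(describe_target(2.0))                   # a single spacing value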
diff --git a/src/torchio/transforms/preprocessing/spatial/resize.py b/src/torchio/transforms/preprocessing/spatial/resize.py
index 2eb3eb045..f678249a5 100644
--- a/src/torchio/transforms/preprocessing/spatial/resize.py
+++ b/src/torchio/transforms/preprocessing/spatial/resize.py
@@ -43,11 +43,11 @@ def __init__(
         self.label_interpolation = self.parse_interpolation(
             label_interpolation,
         )
-        self.args_names = (
+        self.args_names = [
             'target_shape',
             'image_interpolation',
             'label_interpolation',
-        )
+        ]
 
     def apply_transform(self, subject: Subject) -> Subject:
         shape_in = np.asarray(subject.spatial_shape)
@@ -62,6 +62,7 @@ def apply_transform(self, subject: Subject) -> Subject:
             label_interpolation=self.label_interpolation,
         )
         resampled = resample(subject)
+        assert isinstance(resampled, Subject)
         # Sometimes, the output shape is one voxel too large
         # Probably because Resample uses np.ceil to compute the shape
         if not resampled.spatial_shape == tuple(shape_out):
@@ -70,6 +71,7 @@ def apply_transform(self, subject: Subject) -> Subject:
                 f' != target shape {tuple(shape_out)}. Fixing with CropOrPad'
             )
             warnings.warn(message)
-            crop_pad = CropOrPad(shape_out)
+            crop_pad = CropOrPad(shape_out)  # type: ignore[arg-type]
             resampled = crop_pad(resampled)
+            assert isinstance(resampled, Subject)
         return resampled
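The ignore on CropOrPad(shape_out) exists because shape_out comes from a NumPy computation and types as an arbitrary-length sequence, while the transform expects a fixed spatial-shape argument; the error-code-specific ignore silences exactly that mismatch. A reduced sketch, with crop_or_pad as a hypothetical stand-in:

    from typing import Tuple

    TypeTripletInt = Tuple[int, int, int]


    def crop_or_pad(target_shape: TypeTripletInt) -> None:
        print('target shape:', target_shape)


    shape = tuple(2 * n for n in (64, 64, 32))  # inferred as Tuple[int, ...]
    crop_or_pad(shape)  # type: ignore[arg-type]  # length 3 known only to us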
diff --git a/src/torchio/transforms/transform.py b/src/torchio/transforms/transform.py
index 41fdac7e4..cde1106c6 100644
--- a/src/torchio/transforms/transform.py
+++ b/src/torchio/transforms/transform.py
@@ -3,7 +3,7 @@
 import warnings
 from abc import ABC, abstractmethod
 from contextlib import contextmanager
-from typing import Union, Tuple, Optional, Dict, Sequence
+from typing import Union, Tuple, Optional, Dict, Sequence, List
 
 import torch
 import numpy as np
@@ -109,7 +109,7 @@ def __init__(
         # args_names is the sequence of parameters from self that need to be
         # passed to a non-random version of a random transform. They are also
         # used to invert invertible transforms
-        self.args_names = ()
+        self.args_names: List[str] = []
 
     def __call__(
             self,
@@ -291,7 +291,7 @@ def _parse_range(
             return (min_range, nums_range)
 
         try:
-            min_value, max_value = nums_range
+            min_value, max_value = nums_range  # type: ignore[misc]
         except (TypeError, ValueError):
             raise ValueError(
                 f'If {name} is not a single number, it must be'
@@ -332,7 +332,7 @@ def _parse_range(
                     f'If "{name}" is a sequence, its values must be of'
                     f' type "{type_constraint}", not "{type(nums_range)}"',
                 )
-        return nums_range
+        return nums_range  # type: ignore[return-value]
 
     @staticmethod
     def parse_interpolation(interpolation: str) -> str:
@@ -377,7 +377,7 @@ def nib_to_sitk(data: TypeData, affine: TypeData) -> sitk.Image:
 
     @staticmethod
     def sitk_to_nib(image: sitk.Image) -> TypeDataAffine:
-        return sitk_to_nib(image)
+        return sitk_to_nib(image)  # type: ignore[return-value]
 
     def _get_reproducing_arguments(self):
         """
@@ -417,33 +417,34 @@ def get_sitk_interpolator(interpolation: str) -> int:
         return get_sitk_interpolator(interpolation)
 
     @staticmethod
-    def parse_bounds(bounds_parameters: TypeBounds) -> TypeSixBounds:
+    def parse_bounds(bounds_parameters: TypeBounds) -> Optional[TypeSixBounds]:
         if bounds_parameters is None:
             return None
         try:
-            bounds_parameters = tuple(bounds_parameters)
+            bounds_parameters = tuple(bounds_parameters)  # type: ignore[assignment,arg-type]  # noqa: E501
         except TypeError:
-            bounds_parameters = (bounds_parameters,)
+            bounds_parameters = (bounds_parameters,)  # type: ignore[assignment]  # noqa: E501
 
         # Check that numbers are integers
-        for number in bounds_parameters:
+        for number in bounds_parameters:  # type: ignore[union-attr]
             if not isinstance(number, (int, np.integer)) or number < 0:
                 message = (
                     'Bounds values must be integers greater or equal to zero,'
                     f' not "{bounds_parameters}" of type {type(number)}'
                 )
                 raise ValueError(message)
-        bounds_parameters = tuple(int(n) for n in bounds_parameters)
-        bounds_parameters_length = len(bounds_parameters)
+        bounds_parameters_tuple = tuple(int(n) for n in bounds_parameters)  # type: ignore[assignment,union-attr]  # noqa: E501
+        bounds_parameters_length = len(bounds_parameters_tuple)
         if bounds_parameters_length == 6:
-            return bounds_parameters
+            return bounds_parameters_tuple  # type: ignore[return-value]
         if bounds_parameters_length == 1:
-            return 6 * bounds_parameters
+            return 6 * bounds_parameters_tuple  # type: ignore[return-value]
         if bounds_parameters_length == 3:
-            return tuple(np.repeat(bounds_parameters, 2).tolist())
+            repeat = np.repeat(bounds_parameters_tuple, 2).tolist()
+            return tuple(repeat)  # type: ignore[return-value]
         message = (
             'Bounds parameter must be an integer or a tuple of'
-            f' 3 or 6 integers, not {bounds_parameters}'
+            f' 3 or 6 integers, not {bounds_parameters_tuple}'
         )
         raise ValueError(message)
 
@@ -461,7 +462,7 @@ def get_mask_from_masking_method(
             masking_method: TypeMaskingMethod,
             subject: Subject,
             tensor: torch.Tensor,
-            labels: list = None,
+            labels: Optional[Sequence[int]] = None,
     ) -> torch.Tensor:
         if masking_method is None:
             return self.ones(tensor)
@@ -482,7 +483,7 @@
                     possible_axis, tensor,
                 )
         elif type(masking_method) in (tuple, list, int):
-            return self.get_mask_from_bounds(masking_method, tensor)
+            return self.get_mask_from_bounds(masking_method, tensor)  # type: ignore[arg-type]  # noqa: E501
         first_anat_axes = tuple(s[0] for s in ANATOMICAL_AXES)
         message = (
             'Masking method must be one of:\n'
@@ -532,6 +533,7 @@ def get_mask_from_bounds(
             tensor: torch.Tensor,
     ) -> torch.Tensor:
         bounds_parameters = self.parse_bounds(bounds_parameters)
+        assert bounds_parameters is not None
         low = bounds_parameters[::2]
         high = bounds_parameters[1::2]
         i0, j0, k0 = low
diff --git a/src/torchio/typing.py b/src/torchio/typing.py
index 59ab5af28..d2c034ac3 100644
--- a/src/torchio/typing.py
+++ b/src/torchio/typing.py
@@ -11,12 +11,17 @@
 TypeKeys = Optional[Sequence[str]]
 TypeData = Union[torch.Tensor, np.ndarray]
 TypeDataAffine = Tuple[torch.Tensor, np.ndarray]
+
+TypeDoubletInt = Tuple[int, int]
 TypeTripletInt = Tuple[int, int, int]
+TypeQuartetInt = Tuple[int, int, int, int]
 TypeSextetInt = Tuple[int, int, int, int, int, int]
+
 TypeTripletFloat = Tuple[float, float, float]
 TypeSextetFloat = Tuple[float, float, float, float, float, float]
+
 TypeTuple = Union[int, TypeTripletInt]
-TypeRangeInt = Union[int, Tuple[int, int]]
+TypeRangeInt = Union[int, TypeDoubletInt]
 TypeSpatialShape = Union[int, TypeTripletInt]
 TypeRangeFloat = Union[float, Tuple[float, float]]
 TypeCallable = Callable[[torch.Tensor], torch.Tensor]
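The new fixed-arity aliases in typing.py make signatures self-documenting: TypeRangeInt now reads as "an int or an (int, int) pair". A sketch of how such an alias is consumed, with parse_range as a hypothetical helper:

    from typing import Tuple, Union

    TypeDoubletInt = Tuple[int, int]
    TypeRangeInt = Union[int, TypeDoubletInt]


    def parse_range(value: TypeRangeInt) -> TypeDoubletInt:
        if isinstance(value, int):
            return value, value
        return value  # already a pair after the isinstance narrowing


    print(parse_range(3))       # (3, 3)
    print(parse_range((1, 5)))  # (1, 5)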
""" if 'SITK_SHOW_COMMAND' in os.environ: - return os.environ['SITK_SHOW_COMMAND'] + return Path(os.environ['SITK_SHOW_COMMAND']) platform = sys.platform itk = 'ITK-SNAP' slicer = 'Slicer' @@ -331,12 +334,13 @@ def guess_external_viewer() -> Optional[Path]: if slicer_path.is_file(): return slicer_path elif 'linux' in platform: - itk_snap_path = shutil.which('itksnap') - if itk_snap_path is not None: - return Path(itk_snap_path) - slicer_path = shutil.which('Slicer') - if slicer_path is not None: - return Path(slicer_path) + itk_snap_which = shutil.which('itksnap') + if itk_snap_which is not None: + return Path(itk_snap_which) + slicer_which = shutil.which('Slicer') + if slicer_which is not None: + return Path(slicer_which) + return None # for mypy def parse_spatial_shape(shape): diff --git a/src/torchio/visualization.py b/src/torchio/visualization.py index 15a47a3c6..9ec7a89fc 100644 --- a/src/torchio/visualization.py +++ b/src/torchio/visualization.py @@ -48,7 +48,7 @@ def plot_volume( sag_axis, cor_axis, axi_axis = axes if reorient: - image = ToCanonical()(image) + image = ToCanonical()(image) # type: ignore[assignment] data = image.data[channel] if indices is None: indices = np.array(data.shape) // 2 @@ -220,7 +220,8 @@ def make_gif( ' pip install Pillow' ) raise RuntimeError(message) from e - tensor = RescaleIntensity((0, 255))(tensor) if rescale else tensor + transform = RescaleIntensity((0, 255)) + tensor = transform(tensor) if rescale else tensor # type: ignore[assignment] # noqa: E501 single_channel = len(tensor) == 1 # Move channels dimension to the end and bring selected axis to 0 diff --git a/tox.ini b/tox.ini index 9790f3586..0884330ba 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{37, 38, 39, 310} +envlist = py{37, 38, 39, 310}, mypy isolated_build = True [gh-actions] @@ -7,7 +7,7 @@ python = 3.7: py37 3.8: py38 3.9: py39 - 3.10: py310 + 3.10: py310, mypy [testenv] extras = dev @@ -22,26 +22,9 @@ passenv = setenv = PYTHONPATH = {toxinidir} -[testenv:format] -whitelist_externals = - pre-commit -requires = - pre-commit -skip_install = True -commands = - pre-commit run --all-files --show-diff-on-failure - -[testenv:lint] -whitelist_externals = - pre-commit -requires = - pre-commit -deps = - flake8 >= 3.8 -commands = - pre-commit run --all-files - flake8 - +[testenv:mypy] +deps = mypy +commands = mypy src [flake8] ; See link below for available options