From 9ffe0c6a160b88f40f17b8fedc99f32f4ffd6032 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 29 Apr 2024 21:54:15 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../mpl_drawers/discrete_scroll_plot.py       |  3 ++-
 .../fast_fourier_transform_spectrogram.py     |  3 ++-
 draugr/metrics/metric_collection.py           |  4 ++--
 .../pooled_queue_processor.py                 |  3 ++-
 .../drawing/masks/gauss_circles.py            |  8 ++++---
 .../transformation/cv2_transforms.py          |  6 ++---
 .../linux_utilities/user_utilities.py         | 22 ++++++++++---------
 draugr/stopping/stopping_key.py               |  1 -
 .../experimental/confusion_matrix.py          |  3 ++-
 .../experimental/layers/radial_basis.py       |  3 +--
 .../evaluation/cross_validation.py            |  3 ++-
 .../torch_transforms/batch_transforms.py      | 12 +++++-----
 .../optimisation/parameters/complexity.py     |  1 +
 draugr/writers/mixins/embed_writer_mixin.py   |  3 ++-
 .../hyperparameter_tuning_with_hparams.py     |  2 ++
 15 files changed, 45 insertions(+), 32 deletions(-)

diff --git a/draugr/drawers/mpl_drawers/discrete_scroll_plot.py b/draugr/drawers/mpl_drawers/discrete_scroll_plot.py
index 50eef904..a19b2e66 100644
--- a/draugr/drawers/mpl_drawers/discrete_scroll_plot.py
+++ b/draugr/drawers/mpl_drawers/discrete_scroll_plot.py
@@ -25,7 +25,8 @@ class DiscreteScrollPlot(MplDrawer):
     """
     Waterfall plot

-    only supports a single trajectory at a time, do not supply parallel trajectories to draw method, will get truncated to num actions, effectively dropping actions for other envs than the first."""
+    Only supports a single trajectory at a time; do not supply parallel trajectories to the draw method, as they will be truncated to num actions, effectively dropping actions for all envs other than the first.
+ """ @passes_kws_to(MplDrawer.__init__) def __init__( diff --git a/draugr/drawers/mpl_drawers/spectral/fast_fourier_transform_spectrogram.py b/draugr/drawers/mpl_drawers/spectral/fast_fourier_transform_spectrogram.py index bf5663c3..9deca9a7 100644 --- a/draugr/drawers/mpl_drawers/spectral/fast_fourier_transform_spectrogram.py +++ b/draugr/drawers/mpl_drawers/spectral/fast_fourier_transform_spectrogram.py @@ -26,7 +26,8 @@ class FastFourierTransformSpectrogramPlot(MplDrawer): """ TODO: CENTER Align fft maybe, to mimick librosa stft - Short Time Fourier Transform (STFT), with step size of 1 and window lenght of n_fft, and no window function ( TODO: Hanning Smoothing)""" + Short Time Fourier Transform (STFT), with step size of 1 and window lenght of n_fft, and no window function ( TODO: Hanning Smoothing) + """ def __init__( self, diff --git a/draugr/metrics/metric_collection.py b/draugr/metrics/metric_collection.py index 4ecb0121..5a358d9e 100644 --- a/draugr/metrics/metric_collection.py +++ b/draugr/metrics/metric_collection.py @@ -52,10 +52,10 @@ def append(self, *args: Sequence, **kwargs: MutableMapping): :type args: :param kwargs: :type kwargs:""" - for (arg, (k, v)) in zip(args, self._metrics.items()): + for arg, (k, v) in zip(args, self._metrics.items()): self._metrics[k].append(arg) - for (k, v) in kwargs: + for k, v in kwargs: self._metrics[k].append(v) def remove_metric(self, name): diff --git a/draugr/multiprocessing_utilities/pooled_queue_processor.py b/draugr/multiprocessing_utilities/pooled_queue_processor.py index 417fcb3b..0afccd72 100644 --- a/draugr/multiprocessing_utilities/pooled_queue_processor.py +++ b/draugr/multiprocessing_utilities/pooled_queue_processor.py @@ -55,7 +55,8 @@ class PooledQueueProcessor(object): This is a workaround of Pythons extremely slow interprocess communication pipes. The ideal solution would be to use a multiprocessing.queue, but it apparently communication is band limited. - This solution has processes complete tasks (batches) and a thread add the results to a queue.queue.""" + This solution has processes complete tasks (batches) and a thread add the results to a queue.queue. 
+ """ def __init__( self, diff --git a/draugr/opencv_utilities/drawing/masks/gauss_circles.py b/draugr/opencv_utilities/drawing/masks/gauss_circles.py index ad71d67d..90a5be8f 100644 --- a/draugr/opencv_utilities/drawing/masks/gauss_circles.py +++ b/draugr/opencv_utilities/drawing/masks/gauss_circles.py @@ -39,6 +39,7 @@ def ellipse_bbox(h, k, a, b, theta): # ---------------------------------------------------------------------------- + # Rotated elliptical gradient - slow, Python-only approach def make_gradient_v1(width, height, h, k, a, b, theta): """ @@ -76,6 +77,7 @@ def make_gradient_v1(width, height, h, k, a, b, theta): # ---------------------------------------------------------------------------- + # Rotated elliptical gradient - faster, vectorized numpy approach def make_gradient_v2(width, height, h, k, a, b, theta): """ @@ -156,7 +158,7 @@ def draw_image(a, b, theta, inner_scale, save_intermediate=False): if save_intermediate: show_image( transparency, - wait=True + wait=True, # save_path = basep/"eligrad-t.png" ) @@ -167,7 +169,7 @@ def draw_image(a, b, theta, inner_scale, save_intermediate=False): if save_intermediate: show_image( intensity, - wait=True + wait=True, # save_path = str(basep / "eligrad-i1.png") ) @@ -192,7 +194,7 @@ def draw_image(a, b, theta, inner_scale, save_intermediate=False): show_image( draw_image(a, b, theta, inner_scale, True), - wait=True + wait=True, # save_path = str(basep/"eligrad.png") ) diff --git a/draugr/opencv_utilities/transformation/cv2_transforms.py b/draugr/opencv_utilities/transformation/cv2_transforms.py index c7150530..17952287 100644 --- a/draugr/opencv_utilities/transformation/cv2_transforms.py +++ b/draugr/opencv_utilities/transformation/cv2_transforms.py @@ -386,9 +386,9 @@ def __call__( (int(height * ratio), int(width * ratio), depth), dtype=image.dtype ) expand_image[..., :] = self.mean - expand_image[ - int(top) : int(top + height), int(left) : int(left + width) - ] = image + expand_image[int(top) : int(top + height), int(left) : int(left + width)] = ( + image + ) image = expand_image boxes = boxes.copy() diff --git a/draugr/os_utilities/linux_utilities/user_utilities.py b/draugr/os_utilities/linux_utilities/user_utilities.py index ad19be4a..6cd3e051 100644 --- a/draugr/os_utilities/linux_utilities/user_utilities.py +++ b/draugr/os_utilities/linux_utilities/user_utilities.py @@ -49,11 +49,11 @@ def make_user( with ContextWrapper( sh.contrib.sudo, construction_kwargs=dict( - password=getpass.getpass( - prompt=f"[sudo] password for {getpass.getuser()}: " - ) - if get_sudo - else None, + password=( + getpass.getpass(prompt=f"[sudo] password for {getpass.getuser()}: ") + if get_sudo + else None + ), _with=True, ), enabled=get_sudo, @@ -85,11 +85,13 @@ def remove_user( with ContextWrapper( sh.contrib.sudo, construction_kwargs=dict( - password=getpass.getpass( - prompt=f"[sudo] password for {getpass.getuser()}: " - ) - if get_sudo - else None, + password=( + getpass.getpass( + prompt=f"[sudo] password for {getpass.getuser()}: " + ) + if get_sudo + else None + ), _with=True, ), enabled=get_sudo, diff --git a/draugr/stopping/stopping_key.py b/draugr/stopping/stopping_key.py index 69f93cf5..39ba1526 100644 --- a/draugr/stopping/stopping_key.py +++ b/draugr/stopping/stopping_key.py @@ -45,7 +45,6 @@ def add_early_stopping_key_combination( verbose: bool = False, combinations: Iterable = default_combinations, ): # -> keyboard.Listener: - """ :param combinations: diff --git a/draugr/tensorboard_utilities/experimental/confusion_matrix.py 
b/draugr/tensorboard_utilities/experimental/confusion_matrix.py index faee4446..33edf7ae 100644 --- a/draugr/tensorboard_utilities/experimental/confusion_matrix.py +++ b/draugr/tensorboard_utilities/experimental/confusion_matrix.py @@ -168,7 +168,8 @@ def asda(): def plot_to_image(figure): """Converts the matplotlib plot specified by 'figure' to a PNG image and - returns it. The supplied figure is closed and inaccessible after this call.""" + returns it. The supplied figure is closed and inaccessible after this call. + """ # Save the plot to a PNG in memory. buf = io.BytesIO() pyplot.savefig(buf, format="png") diff --git a/draugr/torch_utilities/architectures/experimental/layers/radial_basis.py b/draugr/torch_utilities/architectures/experimental/layers/radial_basis.py index 998c8df8..8c9de882 100644 --- a/draugr/torch_utilities/architectures/experimental/layers/radial_basis.py +++ b/draugr/torch_utilities/architectures/experimental/layers/radial_basis.py @@ -63,7 +63,6 @@ def gaussian(self, alpha): if __name__ == "__main__": - def _main(): - ... + def _main(): ... _main() diff --git a/draugr/torch_utilities/evaluation/cross_validation.py b/draugr/torch_utilities/evaluation/cross_validation.py index 251a4f08..c14ba4af 100644 --- a/draugr/torch_utilities/evaluation/cross_validation.py +++ b/draugr/torch_utilities/evaluation/cross_validation.py @@ -22,7 +22,8 @@ def cross_validation_generator( *datasets: Dataset, n_splits: int = 10 ) -> Tuple[Subset, Subset]: """ - Learning the parameters of a prediction function and testing it on the same data is a methodological mistake: a model that would just repeat the labels of the samples that it has just seen would have a perfect score but would fail to predict anything useful on yet-unseen data. This situation is called overfitting. To avoid it, it is common practice when performing a (supervised) machine learning experiment to hold out part of the available data as a test set""" + Learning the parameters of a prediction function and testing it on the same data is a methodological mistake: a model that would just repeat the labels of the samples that it has just seen would have a perfect score but would fail to predict anything useful on yet-unseen data. This situation is called overfitting. To avoid it, it is common practice when performing a (supervised) machine learning experiment to hold out part of the available data as a test set + """ cum = ConcatDataset(datasets) for train_index, val_index in KFold(n_splits=n_splits).split(cum): diff --git a/draugr/torch_utilities/operations/torch_transforms/batch_transforms.py b/draugr/torch_utilities/operations/torch_transforms/batch_transforms.py index abb060c0..bef73465 100644 --- a/draugr/torch_utilities/operations/torch_transforms/batch_transforms.py +++ b/draugr/torch_utilities/operations/torch_transforms/batch_transforms.py @@ -41,7 +41,8 @@ class BatchNormalize: std (sequence): Sequence of standard deviations for each channel. inplace(bool,optional): Bool to make this operation in-place. dtype (torch.dtype,optional): The data type of tensors to which the transform will be applied. - device (torch.device,optional): The device of tensors to which the transform will be applied.""" + device (torch.device,optional): The device of tensors to which the transform will be applied. 
+ """ def __init__(self, mean, std, inplace=False, dtype=torch.float, device="cpu"): self.mean = torch.as_tensor(mean, dtype=dtype, device=device)[ @@ -96,7 +97,8 @@ class BatchRandomCrop: padding (int, optional): Optional padding on each border of the image. Default is None, i.e no padding. dtype (torch.dtype,optional): The data type of tensors to which the transform will be applied. - device (torch.device,optional): The device of tensors to which the transform will be applied.""" + device (torch.device,optional): The device of tensors to which the transform will be applied. + """ def __init__(self, size, padding=None, dtype=torch.float, device="cpu"): self.size = size @@ -121,9 +123,9 @@ def __call__(self, tensor): dtype=self.dtype, device=self.device, ) - padded[ - :, :, self.padding : -self.padding, self.padding : -self.padding - ] = tensor + padded[:, :, self.padding : -self.padding, self.padding : -self.padding] = ( + tensor + ) else: padded = tensor diff --git a/draugr/torch_utilities/optimisation/parameters/complexity.py b/draugr/torch_utilities/optimisation/parameters/complexity.py index deac0f18..713f2254 100644 --- a/draugr/torch_utilities/optimisation/parameters/complexity.py +++ b/draugr/torch_utilities/optimisation/parameters/complexity.py @@ -590,6 +590,7 @@ def remove_flops_counter_hook_function(module) -> None: # --- Masked flops counting + # Also being run in the initialization def add_flops_mask_variable_or_reset(module) -> None: """ diff --git a/draugr/writers/mixins/embed_writer_mixin.py b/draugr/writers/mixins/embed_writer_mixin.py index b66b2597..e33ed2d4 100644 --- a/draugr/writers/mixins/embed_writer_mixin.py +++ b/draugr/writers/mixins/embed_writer_mixin.py @@ -14,7 +14,8 @@ class EmbedWriterMixin(ABC): """ - Writer mixin that provides an interface for 'writing' embeds/projections(2d,3d) for interactive visualisation""" + Writer mixin that provides an interface for 'writing' embeds/projections(2d,3d) for interactive visualisation + """ @abstractmethod def embed( diff --git a/samples/torch_samples/hyperparameter_tuning_with_hparams.py b/samples/torch_samples/hyperparameter_tuning_with_hparams.py index 063b0c3f..c57e1d42 100644 --- a/samples/torch_samples/hyperparameter_tuning_with_hparams.py +++ b/samples/torch_samples/hyperparameter_tuning_with_hparams.py @@ -114,6 +114,7 @@ # # The model will be quite simple: two dense layers with a dropout layer between them. The training code will look familiar, although the hyperparameters are no longer hardcoded. Instead, the hyperparameters are provided in an `hparams` dictionary and used throughout the training function: + # %% def train_test_model(hparams): """description""" @@ -141,6 +142,7 @@ def train_test_model(hparams): # %% [markdown] # For each run, log an hparams summary with the hyperparameters and final accuracy: + # %% def run(run_dir, hparams): """description"""