From 4300695a78c8b28a9cb0025f797e937deced1226 Mon Sep 17 00:00:00 2001
From: Baudouin Raoult
Date: Sun, 20 Oct 2024 19:31:46 +0000
Subject: [PATCH 1/2] Fix pre-commit regex

---
 .pre-commit-config.yaml | 3 +--
 CHANGELOG.md            | 3 +++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e01d6a37..4de59323 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -43,13 +43,12 @@ repos:
     rev: v0.6.9
     hooks:
       - id: ruff
-        # Next line if for documenation cod snippets
-        exclude: '^[^_].*_\.py$'
         args:
           - --line-length=120
           - --fix
          - --exit-non-zero-on-fix
           - --preview
+          - --exclude=docs/**/*_.py
   - repo: https://github.com/sphinx-contrib/sphinx-lint
     rev: v1.0.0
     hooks:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9b103bf6..6e90ba32 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,9 @@ Keep it human-readable, your future self will thank you!
 - Feature: New `Boolean1DMask` class. Enables rollout training for limited area models. [#79](https://github.com/ecmwf/anemoi-training/pulls/79)
 
 ### Fixed
+
+- Fix pre-commit regex
+
 ### Changed
 
 ## [0.2.0 - Feature release](https://github.com/ecmwf/anemoi-training/compare/0.1.0...0.2.0) - 2024-10-16

From ba4ef472858dd8a1b42c65f571555618d0b2d5c8 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 20 Oct 2024 19:32:11 +0000
Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/anemoi/training/diagnostics/callbacks/__init__.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/anemoi/training/diagnostics/callbacks/__init__.py b/src/anemoi/training/diagnostics/callbacks/__init__.py
index cf085eab..a671d6e2 100644
--- a/src/anemoi/training/diagnostics/callbacks/__init__.py
+++ b/src/anemoi/training/diagnostics/callbacks/__init__.py
@@ -378,7 +378,7 @@ def _plot(
 
         # prepare predicted output tensor for plotting
         output_tensor = self.post_processors(
-            y_pred[self.sample_idx : self.sample_idx + 1, ...].cpu()
+            y_pred[self.sample_idx : self.sample_idx + 1, ...].cpu(),
         ).numpy()
 
         fig = plot_predicted_multilevel_flat_sample(
@@ -457,7 +457,7 @@ def on_validation_epoch_start(self, trainer: pl.Trainer, pl_module: pl.Lightning
         epoch = trainer.current_epoch
 
         if model.trainable_data is not None:
-            data_coords = np.rad2deg(graph[(self._graph_name_data, "to", self._graph_name_data)].ecoords_rad.numpy())
+            data_coords = np.rad2deg(graph[self._graph_name_data, "to", self._graph_name_data].ecoords_rad.numpy())
 
             self.plot(
                 trainer,
@@ -470,7 +470,7 @@ def on_validation_epoch_start(self, trainer: pl.Trainer, pl_module: pl.Lightning
 
         if model.trainable_hidden is not None:
             hidden_coords = np.rad2deg(
-                graph[(self._graph_name_hidden, "to", self._graph_name_hidden)].hcoords_rad.numpy(),
+                graph[self._graph_name_hidden, "to", self._graph_name_hidden].hcoords_rad.numpy(),
             )
 
             self.plot(
@@ -609,7 +609,7 @@ def _plot(
         for rollout_step in range(pl_module.rollout):
             y_hat = outputs[1][rollout_step]
             y_true = batch[
-                :, pl_module.multi_step + rollout_step, ..., pl_module.data_indices.internal_data.output.full
+                :, pl_module.multi_step + rollout_step, ..., pl_module.data_indices.internal_data.output.full,
             ]
             loss = pl_module.loss(y_hat, y_true, squash=False).cpu().numpy()
 
@@ -971,7 +971,7 @@ def tracker_metadata(self, trainer: pl.Trainer) -> dict:
 
         return {}
 
-    def _remove_checkpoint(self, trainer: "pl.Trainer", filepath: str) -> None:
+    def _remove_checkpoint(self, trainer: pl.Trainer, filepath: str) -> None:
        """Calls the strategy to remove the checkpoint file."""
         super()._remove_checkpoint(trainer, filepath)
         trainer.strategy.remove_checkpoint(self._get_inference_checkpoint_filepath(filepath))