Skip to content

Commit

Permalink
Log cycle during both train and validation
Browse files Browse the repository at this point in the history
  • Loading branch information
golmschenk committed Nov 2, 2024
1 parent fb7cdd7 commit 6a07e88
Showing 1 changed file with 1 addition and 1 deletion.
2 changes: 1 addition & 1 deletion src/qusi/internal/module.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,6 @@ def on_train_epoch_start(self) -> None:
# Due to Lightning's inconsistent step ordering, performing this during the train epoch start gives the most
# consistent results.
self.cycle += 1
self.log(name='cycle', value=self.cycle, reduce_fx=torch.max, rank_zero_only=True, on_step=False, on_epoch=True)

def training_step(self, batch: tuple[Any, Any], batch_index: int) -> STEP_OUTPUT:
    """Run a single training step: compute the loss (and update metrics) for one batch.

    :param batch: A (input, target) pair for this step.
    :param batch_index: Index of the batch within the epoch (unused here; required by Lightning's hook signature).
    :return: The step output (loss) produced by the shared loss/metric computation.
    """
    metric_group = self.train_metric_group
    return self.compute_loss_and_metrics(batch, metric_group)
Expand Down Expand Up @@ -129,6 +128,7 @@ def log_loss_and_metrics(self, metric_group: MetricGroup, logging_name_prefix: s
self.log(name=logging_name_prefix + 'loss',
value=mean_cycle_loss, sync_dist=True, on_step=False,
on_epoch=True)
self.log(name='cycle', value=self.cycle, reduce_fx=torch.max, rank_zero_only=True, on_step=False, on_epoch=True)
for state_based_logging_metric in metric_group.state_based_logging_metrics:
state_based_logging_metric_name = get_metric_name(state_based_logging_metric)
self.log(name=logging_name_prefix + state_based_logging_metric_name,
Expand Down

0 comments on commit 6a07e88

Please sign in to comment.