Fixed flake8 and add CI step missed from porting from travis (#1531)
vfdev-5 authored Dec 25, 2020
1 parent 1c58a75 commit 915d69e
Showing 18 changed files with 64 additions and 55 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/trigger_circle_ci.py
@@ -62,7 +62,7 @@ def assert_workflows_successful(pipeline_id, headers):
 
     workflow_id = get_workflow_id(pipeline_id, headers)
 
-    base_url = f"https://app.circleci.com/pipelines/github/pytorch/ignite"
+    base_url = "https://app.circleci.com/pipelines/github/pytorch/ignite"
     url = None
 
     while True:
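
The only change in this file removes the "f" prefix from a string that contains no placeholders, the pattern recent flake8 releases (via pyflakes) report as F541. A minimal sketch of the rule, reusing the same URL:

    # F541: the "f" prefix is dead weight when there are no {placeholders}.
    url_f = f"https://app.circleci.com/pipelines/github/pytorch/ignite"  # flagged by flake8
    url = "https://app.circleci.com/pipelines/github/pytorch/ignite"  # equivalent and clean
    assert url_f == url
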
8 changes: 8 additions & 0 deletions .github/workflows/unit-tests.yml
@@ -76,6 +76,14 @@ jobs:
           pip install -r requirements-dev.txt
           python setup.py install
+      - name: Check code formatting
+        shell: bash -l {0}
+        run: |
+          pip install flake8 "black==19.10b0" "isort==4.3.21"
+          flake8 ignite/ tests/ examples/
+          black --check .
+          isort -rc -c .
       - name: Run Mypy
         shell: bash -l {0}
         if: ${{ matrix.os == 'ubuntu-latest' }}
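
The restored "Check code formatting" step pins black and isort to exact versions, so CI results stay reproducible. The sketch below is a hypothetical local helper (not part of the repository) that replays the same three checks before a push; note that -rc and -c are the isort 4.x spellings for recursive, check-only mode:

    # Hypothetical pre-push helper mirroring the CI "Check code formatting" step.
    import subprocess
    import sys

    CHECKS = [
        ["flake8", "ignite/", "tests/", "examples/"],
        ["black", "--check", "."],
        ["isort", "-rc", "-c", "."],  # isort 4.x: -rc recurses, -c checks without modifying
    ]

    for cmd in CHECKS:
        print("$", " ".join(cmd))
        if subprocess.run(cmd).returncode != 0:
            sys.exit(1)  # fail fast, matching the CI behaviour
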
4 changes: 2 additions & 2 deletions examples/contrib/mnist/mnist_with_tqdm_logger.py
@@ -77,7 +77,7 @@ def log_training_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         pbar.log_message(
-            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
 
     @trainer.on(Events.EPOCH_COMPLETED)
@@ -87,7 +87,7 @@ def log_validation_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         pbar.log_message(
-            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
 
     pbar.n = pbar.last_print_n = 0
4 changes: 2 additions & 2 deletions examples/mnist/mnist.py
@@ -80,7 +80,7 @@ def log_training_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         tqdm.write(
-            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
 
     @trainer.on(Events.EPOCH_COMPLETED)
@@ -90,7 +90,7 @@ def log_validation_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         tqdm.write(
-            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
 
     pbar.n = pbar.last_print_n = 0
4 changes: 2 additions & 2 deletions examples/mnist/mnist_save_resume_engine.py
@@ -208,7 +208,7 @@ def log_training_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         tqdm.write(
-            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
         writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
         writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@@ -221,7 +221,7 @@ def log_validation_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         tqdm.write(
-            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
         pbar.n = pbar.last_print_n = 0
         writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
8 changes: 4 additions & 4 deletions examples/mnist/mnist_with_tensorboard.py
@@ -92,8 +92,8 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, lo
     @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
     def log_training_loss(engine):
         print(
-            f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] Loss: {engine.state.output:.2f}"
-            ""
+            f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] "
+            f"Loss: {engine.state.output:.2f}"
         )
         writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)
 
@@ -104,7 +104,7 @@ def log_training_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         print(
-            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
         writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
         writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@@ -116,7 +116,7 @@ def log_validation_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         print(
-            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
         writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
         writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
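
Most hunks in this commit rely on the same device: Python concatenates adjacent string literals (including f-strings) at compile time, so a long message can be wrapped across lines with no "+" and no runtime cost. That is also why the stray "" literal removed above was harmless but useless: it concatenated to the empty string. A self-contained illustration with made-up values:

    epoch, iteration, n_batches, loss = 2, 350, 469, 0.1234  # dummy values

    # Adjacent literals are fused into a single string when the code is compiled.
    msg = (
        f"Epoch[{epoch}] Iteration[{iteration}/{n_batches}] "
        f"Loss: {loss:.2f}"
    )
    assert msg == f"Epoch[{epoch}] Iteration[{iteration}/{n_batches}] Loss: {loss:.2f}"
    print(msg)  # Epoch[2] Iteration[350/469] Loss: 0.12
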
4 changes: 2 additions & 2 deletions examples/mnist/mnist_with_tensorboard_on_tpu.py
@@ -116,7 +116,7 @@ def log_training_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         print(
-            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
         writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
         writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@@ -128,7 +128,7 @@ def log_validation_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         print(
-            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
         writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
         writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
8 changes: 4 additions & 4 deletions examples/mnist/mnist_with_visdom.py
@@ -83,8 +83,8 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
     @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
     def log_training_loss(engine):
         print(
-            f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] Loss: {engine.state.output:.2f}"
-            ""
+            f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] "
+            f"Loss: {engine.state.output:.2f}"
         )
         vis.line(
             X=np.array([engine.state.iteration]),
@@ -100,7 +100,7 @@ def log_training_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         print(
-            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
         vis.line(
             X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=train_avg_accuracy_window, update="append"
@@ -114,7 +114,7 @@ def log_validation_results(engine):
         avg_accuracy = metrics["accuracy"]
         avg_nll = metrics["nll"]
         print(
-            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
+            f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
         )
         vis.line(
             X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=val_avg_accuracy_window, update="append"
3 changes: 2 additions & 1 deletion examples/reinforcement_learning/actor_critic.py
@@ -113,7 +113,8 @@ def update_model(engine):
     def log_episode(engine):
         i_episode = engine.state.epoch
         print(
-            f"Episode {i_episode}\tLast length: {engine.state.timestep:5d}\tAverage length: {engine.state.running_reward:.2f}"
+            f"Episode {i_episode}\tLast length: {engine.state.timestep:5d}"
+            f"\tAverage length: {engine.state.running_reward:.2f}"
         )
 
     @trainer.on(EPISODE_COMPLETED)
3 changes: 2 additions & 1 deletion examples/reinforcement_learning/reinforce.py
@@ -103,7 +103,8 @@ def update_model(engine):
    def log_episode(engine):
         i_episode = engine.state.epoch
         print(
-            f"Episode {i_episode}\tLast length: {engine.state.timestep:5d}\tAverage length: {engine.state.running_reward:.2f}"
+            f"Episode {i_episode}\tLast length: {engine.state.timestep:5d}"
+            f"\tAverage length: {engine.state.running_reward:.2f}"
         )
 
     @trainer.on(EPISODE_COMPLETED)
9 changes: 6 additions & 3 deletions ignite/engine/engine.py
@@ -666,14 +666,16 @@ def switch_batch(engine):
             if max_epochs < self.state.epoch:
                 raise ValueError(
                     "Argument max_epochs should be larger than the start epoch "
-                    f"defined in the state: {max_epochs} vs {self.state.epoch}. Please, set engine.state.max_epochs = None "
+                    f"defined in the state: {max_epochs} vs {self.state.epoch}. "
+                    "Please, set engine.state.max_epochs = None "
                     "before calling engine.run() in order to restart the training from the beginning."
                 )
             self.state.max_epochs = max_epochs
         if epoch_length is not None:
             if epoch_length != self.state.epoch_length:
                 raise ValueError(
-                    f"Argument epoch_length should be same as in the state, given {epoch_length} vs {self.state.epoch_length}"
+                    "Argument epoch_length should be same as in the state, "
+                    f"but given {epoch_length} vs {self.state.epoch_length}"
                 )
 
         if self.state.max_epochs is None or self._is_done(self.state):
@@ -703,7 +705,8 @@ def switch_batch(engine):
             self.logger.info(f"Engine run starting with max_epochs={max_epochs}.")
         else:
             self.logger.info(
-                f"Engine run resuming from iteration {self.state.iteration}, epoch {self.state.epoch} until {self.state.max_epochs} epochs"
+                f"Engine run resuming from iteration {self.state.iteration}, "
+                f"epoch {self.state.epoch} until {self.state.max_epochs} epochs"
             )
 
         self.state.dataloader = data
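
The reworded max_epochs message documents real engine behaviour: a finished engine refuses a smaller max_epochs unless its stored value is cleared first. A minimal sketch of that flow, assuming an ignite of this vintage (around 0.4.x) and a no-op update function:

    from ignite.engine import Engine

    trainer = Engine(lambda engine, batch: None)  # no-op update function
    data = [0, 1, 2]

    trainer.run(data, max_epochs=2)  # finishes with state.epoch == 2
    trainer.state.max_epochs = None  # reset, as the error message advises
    trainer.run(data, max_epochs=1)  # restarts from scratch; would raise ValueError otherwise
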
7 changes: 4 additions & 3 deletions ignite/metrics/metric.py
@@ -209,8 +209,8 @@ def __init__(
         # check if reset and update methods are decorated. Compute may not be decorated
         if not (hasattr(self.reset, "_decorated") and hasattr(self.update, "_decorated")):
             warnings.warn(
-                f"{self.__class__.__name__} class does not support distributed setting. Computed result is not collected "
-                "across all computing devices",
+                f"{self.__class__.__name__} class does not support distributed setting. "
+                "Computed result is not collected across all computing devices",
                 RuntimeWarning,
             )
 
@@ -282,7 +282,8 @@ def iteration_completed(self, engine: Engine) -> None:
         if isinstance(output, Mapping):
             if self.required_output_keys is None:
                 raise TypeError(
-                    f"Transformed engine output for {self.__class__.__name__} metric should be a tuple/list, but given {type(output)}"
+                    f"Transformed engine output for {self.__class__.__name__} metric should be a tuple/list, "
+                    f"but given {type(output)}"
                 )
             if not all([k in output for k in self.required_output_keys]):
                 raise ValueError(
3 changes: 2 additions & 1 deletion tests/ignite/engine/test_deterministic.py
@@ -405,9 +405,10 @@ def _(engine):
 
     def update_fn(_, batch):
         batch_to_device = batch.to(device)
+        cfg_msg = f"{num_workers} {resume_iteration}"
         assert batch_checker.check(
             batch
-        ), f"{num_workers} {resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
+        ), f"{cfg_msg} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
 
     engine = DeterministicEngine(update_fn)
13 changes: 6 additions & 7 deletions tests/ignite/metrics/test_mean_absolute_error.py
@@ -77,17 +77,16 @@ def _test_distrib_accumulator_device(device):
         metric_devices.append(idist.device())
     for metric_device in metric_devices:
         mae = MeanAbsoluteError(device=metric_device)
-        assert mae._device == metric_device
-        assert (
-            mae._sum_of_absolute_errors.device == metric_device
-        ), f"{type(mae._sum_of_absolute_errors.device)}:{mae._sum_of_absolute_errors.device} vs {type(metric_device)}:{metric_device}"
+
+        for dev in [mae._device, mae._sum_of_absolute_errors.device]:
+            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
         y_pred = torch.tensor([[2.0], [-2.0]])
         y = torch.zeros(2)
         mae.update((y_pred, y))
-        assert (
-            mae._sum_of_absolute_errors.device == metric_device
-        ), f"{type(mae._sum_of_absolute_errors.device)}:{mae._sum_of_absolute_errors.device} vs {type(metric_device)}:{metric_device}"
+
+        for dev in [mae._device, mae._sum_of_absolute_errors.device]:
+            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
 
 def test_accumulator_detached():
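
The remaining test files get the same treatment as this one: two near-duplicate assert blocks, each dragged over the line limit by a fully spelled-out attribute name, collapse into a loop over a short local variable. A standalone sketch of the pattern, with a plain tensor standing in for a metric's internal accumulator:

    import torch

    metric_device = torch.device("cpu")
    accumulator = torch.zeros(1, device=metric_device)  # stand-in for a metric's internal state

    # One short loop replaces two copies of an over-long assert.
    for dev in [metric_device, accumulator.device]:
        assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
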
11 changes: 4 additions & 7 deletions tests/ignite/metrics/test_mean_pairwise_distance.py
@@ -91,18 +91,15 @@ def _test_distrib_accumulator_device(device):
     for metric_device in metric_devices:
 
         mpd = MeanPairwiseDistance(device=metric_device)
-        assert mpd._device == metric_device
-        assert (
-            mpd._sum_of_distances.device == metric_device
-        ), f"{type(mpd._sum_of_distances.device)}:{mpd._sum_of_distances.device} vs {type(metric_device)}:{metric_device}"
+        for dev in [mpd._device, mpd._sum_of_distances.device]:
+            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
         y_pred = torch.Tensor([[3.0, 4.0], [-3.0, -4.0]])
         y = torch.zeros(2, 2)
         mpd.update((y_pred, y))
 
-        assert (
-            mpd._sum_of_distances.device == metric_device
-        ), f"{type(mpd._sum_of_distances.device)}:{mpd._sum_of_distances.device} vs {type(metric_device)}:{metric_device}"
+        for dev in [mpd._device, mpd._sum_of_distances.device]:
+            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
 
 def test_accumulator_detached():
13 changes: 6 additions & 7 deletions tests/ignite/metrics/test_mean_squared_error.py
@@ -79,17 +79,16 @@ def _test_distrib_accumulator_device(device):
 
         device = torch.device(device)
         mse = MeanSquaredError(device=metric_device)
-        assert mse._device == metric_device
-        assert (
-            mse._sum_of_squared_errors.device == metric_device
-        ), f"{type(mse._sum_of_squared_errors.device)}:{mse._sum_of_squared_errors.device} vs {type(metric_device)}:{metric_device}"
+
+        for dev in [mse._device, mse._sum_of_squared_errors.device]:
+            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
         y_pred = torch.tensor([[2.0], [-2.0]])
         y = torch.zeros(2)
         mse.update((y_pred, y))
-        assert (
-            mse._sum_of_squared_errors.device == metric_device
-        ), f"{type(mse._sum_of_squared_errors.device)}:{mse._sum_of_squared_errors.device} vs {type(metric_device)}:{metric_device}"
+
+        for dev in [mse._device, mse._sum_of_squared_errors.device]:
+            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
 
 def test_accumulator_detached():
3 changes: 2 additions & 1 deletion tests/ignite/metrics/test_running_average.py
@@ -289,9 +289,10 @@ def running_avg_output_update(engine):
 
     @trainer.on(Events.ITERATION_COMPLETED)
     def assert_equal_running_avg_output_values(engine):
+        it = engine.state.iteration
         assert engine.state.running_avg_output == pytest.approx(
             engine.state.metrics["running_avg_output"]
-        ), f"{engine.state.iteration}: {engine.state.running_avg_output} vs {engine.state.metrics['running_avg_output']}"
+        ), f"{it}: {engine.state.running_avg_output} vs {engine.state.metrics['running_avg_output']}"
 
     trainer.run(data, max_epochs=3)
12 changes: 5 additions & 7 deletions tests/ignite/metrics/test_ssim.py
@@ -182,18 +182,16 @@ def _test_distrib_accumulator_device(device):
     for metric_device in metric_devices:
 
         ssim = SSIM(data_range=1.0, device=metric_device)
-        assert ssim._device == metric_device
-        assert (
-            ssim._kernel.device == metric_device
-        ), f"{type(ssim._kernel.device)}:{ssim._kernel.device} vs {type(metric_device)}:{metric_device}"
+
+        for dev in [ssim._device, ssim._kernel.device]:
+            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
         y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device)
         y = y_pred * 0.65
         ssim.update((y_pred, y))
 
-        assert (
-            ssim._sum_of_batchwise_ssim.device == metric_device
-        ), f"{type(ssim._sum_of_batchwise_ssim.device)}:{ssim._sum_of_batchwise_ssim.device} vs {type(metric_device)}:{metric_device}"
+        dev = ssim._sum_of_batchwise_ssim.device
+        assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
 
 @pytest.mark.distributed
