From 07369ca79b02178879ba5a9a16aadbb983901a00 Mon Sep 17 00:00:00 2001
From: chris-santiago
Date: Wed, 30 Aug 2023 23:55:56 -0400
Subject: [PATCH] fix mapping to noisy encoder

---
 autoencoders/conf/model/deep_dae.yaml         |   2 +-
 .../2023-08-30/23-53-47/.hydra/config.yaml    |  89 +++++++++
 .../2023-08-30/23-53-47/.hydra/hydra.yaml     | 175 ++++++++++++++++++
 .../2023-08-30/23-53-47/.hydra/overrides.yaml |   3 +
 .../23-53-47/checkpoints/best_k_models.yaml   |   2 +
 outputs/results.txt                           |   1 +
 6 files changed, 271 insertions(+), 1 deletion(-)
 create mode 100644 outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/config.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/hydra.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/overrides.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-30/23-53-47/checkpoints/best_k_models.yaml

diff --git a/autoencoders/conf/model/deep_dae.yaml b/autoencoders/conf/model/deep_dae.yaml
index 19eae13..8f1f753 100644
--- a/autoencoders/conf/model/deep_dae.yaml
+++ b/autoencoders/conf/model/deep_dae.yaml
@@ -16,5 +16,5 @@ nn:
   loss_func:
     _target_: torch.nn.MSELoss
   encoder:
-    _target_: autoencoders.models.deep_ae.NoisyCNNEncoder
+    _target_: autoencoders.modules.NoisyCNNEncoder
     _partial_: True
\ No newline at end of file
diff --git a/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/config.yaml b/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/config.yaml
new file mode 100644
index 0000000..5a425f8
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/config.yaml
@@ -0,0 +1,89 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.CyclicLR
+    _partial_: true
+    base_lr: 0.001
+    max_lr: 3.0
+    cycle_momentum: false
+  name: deep-DAE
+  nn:
+    _target_: autoencoders.models.deep_ae.DeepAutoEncoder
+    base_channels: 32
+    latent_dim: 128
+    input_channels: 1
+    loss_func:
+      _target_: torch.nn.MSELoss
+    encoder:
+      _target_: autoencoders.modules.NoisyCNNEncoder
+      _partial_: true
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+  log_images:
+    _target_: autoencoders.callbacks.LogReconstructedImagesCallback
+tags:
+- ${data.name}
+- ${model.name}
diff --git a/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/hydra.yaml b/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/hydra.yaml
new file mode 100644
index 0000000..7346abe
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/hydra.yaml
@@ -0,0 +1,175 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=deep_dae
+    - model.nn.base_channels=32
+    - model.nn.latent_dim=128
+  job:
+    name: train
+    chdir: null
+    override_dirname: model.nn.base_channels=32,model.nn.latent_dim=128,model=deep_dae
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-DAE/train/2023-08-30/23-53-47
+    choices:
+      experiment: null
+      callbacks: encoder
+      trainer: default
+      model: deep_dae
+      scheduler@model.scheduler: cyclic
+      optimizer@model.optimizer: adam
+      data: mnist
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/overrides.yaml b/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/overrides.yaml
new file mode 100644
index 0000000..92e49f2
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-30/23-53-47/.hydra/overrides.yaml
@@ -0,0 +1,3 @@
+- model=deep_dae
+- model.nn.base_channels=32
+- model.nn.latent_dim=128
diff --git a/outputs/deep-DAE/train/2023-08-30/23-53-47/checkpoints/best_k_models.yaml b/outputs/deep-DAE/train/2023-08-30/23-53-47/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..b7394d5
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-30/23-53-47/checkpoints/best_k_models.yaml
@@ -0,0 +1,2 @@
+? /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-DAE/train/2023-08-30/23-53-47/checkpoints/epoch=4-step=1175.ckpt
+: 0.024780165404081345
diff --git a/outputs/results.txt b/outputs/results.txt
index 86bf1c7..8fa8a9b 100644
--- a/outputs/results.txt
+++ b/outputs/results.txt
@@ -5,3 +5,4 @@
 {"deep-DAE": {"acc": 0.8581, "auc": 0.9213}}
 {"deep-autoencoder": {"acc": 0.9321, "auc": 0.9624}}
 {"VAE": {"acc": 0.9725, "auc": 0.9847}}
+{"deep-DAE": {"acc": 0.8663, "auc": 0.9259}}
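
Note on the fix (not part of the patch): the single functional change repoints
the encoder _target_ from autoencoders.models.deep_ae.NoisyCNNEncoder to
autoencoders.modules.NoisyCNNEncoder; the remaining files are Hydra run output
from retraining with the corrected mapping. Below is a minimal Python sketch of
how Hydra resolves that entry, assuming only that NoisyCNNEncoder is importable
from autoencoders.modules (the config literal mirrors the fixed deep_dae.yaml):

    # Sketch, not repository code: resolve the corrected encoder entry.
    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    cfg = OmegaConf.create(
        {
            "encoder": {
                "_target_": "autoencoders.modules.NoisyCNNEncoder",
                "_partial_": True,
            }
        }
    )

    # With _partial_: True, instantiate() returns a functools.partial that
    # wraps the class rather than constructing it, letting DeepAutoEncoder
    # supply its own constructor arguments later. With the old dotted path,
    # this import lookup is the step that would fail.
    encoder_factory = instantiate(cfg.encoder)
    # Hypothetical later call by the model (argument names are assumptions):
    # encoder = encoder_factory(base_channels=32, latent_dim=128, input_channels=1)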