From 5f46bb72f681779715078fa5a11e76f906ad21e5 Mon Sep 17 00:00:00 2001
From: chris-santiago
Date: Tue, 29 Aug 2023 21:37:20 -0400
Subject: [PATCH] basic trials

---
 autoencoders/eval.py                          |   2 +-
 autoencoders/train.py                         |   2 +-
 outputs/.gitignore                            |   2 +
 .../2023-08-29/21-19-54/.hydra/config.yaml    |  90 +++++++++
 .../2023-08-29/21-19-54/.hydra/hydra.yaml     | 173 +++++++++++++++++
 .../2023-08-29/21-19-54/.hydra/overrides.yaml |   1 +
 .../21-19-54/checkpoints/best_k_models.yaml   |   1 +
 .../2023-08-29/21-13-51/.hydra/config.yaml    |  90 +++++++++
 .../2023-08-29/21-13-51/.hydra/hydra.yaml     | 172 +++++++++++++++++
 .../2023-08-29/21-13-51/.hydra/overrides.yaml |   1 +
 .../21-13-51/checkpoints/best_k_models.yaml   |   2 +
 .../2023-08-29/21-28-11/.hydra/config.yaml    |  89 +++++++++
 .../2023-08-29/21-28-11/.hydra/hydra.yaml     | 173 +++++++++++++++++
 .../2023-08-29/21-28-11/.hydra/overrides.yaml |   1 +
 .../21-28-11/checkpoints/best_k_models.yaml   |   2 +
 .../2023-08-29/21-31-57/.hydra/config.yaml    |  89 +++++++++
 .../2023-08-29/21-31-57/.hydra/hydra.yaml     | 174 ++++++++++++++++++
 .../2023-08-29/21-31-57/.hydra/overrides.yaml |   2 +
 .../21-31-57/checkpoints/best_k_models.yaml   |   2 +
 .../2023-08-29/21-24-59/.hydra/config.yaml    |  86 +++++++++
 .../2023-08-29/21-24-59/.hydra/hydra.yaml     | 173 +++++++++++++++++
 .../2023-08-29/21-24-59/.hydra/overrides.yaml |   1 +
 .../21-24-59/checkpoints/best_k_models.yaml   |   2 +
 .../2023-08-29/21-33-38/.hydra/config.yaml    |  86 +++++++++
 .../2023-08-29/21-33-38/.hydra/hydra.yaml     | 174 ++++++++++++++++++
 .../2023-08-29/21-33-38/.hydra/overrides.yaml |   2 +
 .../21-33-38/checkpoints/best_k_models.yaml   |   2 +
 outputs/results.txt                           |   6 +
 28 files changed, 1598 insertions(+), 2 deletions(-)
 create mode 100644 outputs/.gitignore
 create mode 100644 outputs/DAE/train/2023-08-29/21-19-54/.hydra/config.yaml
 create mode 100644 outputs/DAE/train/2023-08-29/21-19-54/.hydra/hydra.yaml
 create mode 100644 outputs/DAE/train/2023-08-29/21-19-54/.hydra/overrides.yaml
 create mode 100644 outputs/DAE/train/2023-08-29/21-19-54/checkpoints/best_k_models.yaml
 create mode 100644 outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/config.yaml
 create mode 100644 outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/hydra.yaml
 create mode 100644 outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/overrides.yaml
 create mode 100644 outputs/autoencoder/train/2023-08-29/21-13-51/checkpoints/best_k_models.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/config.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/hydra.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/overrides.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-29/21-28-11/checkpoints/best_k_models.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/config.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/hydra.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/overrides.yaml
 create mode 100644 outputs/deep-DAE/train/2023-08-29/21-31-57/checkpoints/best_k_models.yaml
 create mode 100644 outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/config.yaml
 create mode 100644 outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/hydra.yaml
 create mode 100644 outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/overrides.yaml
 create mode 100644 outputs/deep-autoencoder/train/2023-08-29/21-24-59/checkpoints/best_k_models.yaml
 create mode 100644 outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/config.yaml
 create mode 100644 outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/hydra.yaml
 create mode 100644 outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/overrides.yaml
 create mode 100644 outputs/deep-autoencoder/train/2023-08-29/21-33-38/checkpoints/best_k_models.yaml
 create mode 100644 outputs/results.txt

diff --git a/autoencoders/eval.py b/autoencoders/eval.py
index 0dfe6d2..62e0895 100644
--- a/autoencoders/eval.py
+++ b/autoencoders/eval.py
@@ -23,7 +23,7 @@ def evaluate_linear(
     x_test = encoder.encode(ds.data[train_length:].unsqueeze(1) / 255).numpy()
     y_test = ds.targets[train_length:]

-    lr = LogisticRegression(max_iter=300)
+    lr = LogisticRegression(max_iter=1000)
     lr.fit(x_train, y_train)
     labels = lr.predict(x_test)
     labels_ohe = F.one_hot(torch.tensor(labels)).float()
diff --git a/autoencoders/train.py b/autoencoders/train.py
index 46141b9..9515ae5 100644
--- a/autoencoders/train.py
+++ b/autoencoders/train.py
@@ -28,7 +28,7 @@ def main(cfg):
     results = autoencoders.eval.evaluate_linear(module=model, trainer=trainer)

     autoencoders.eval.to_json(
-        results={cfg.model.name: results}, filepath=constants.OUTPUTS.joinpath("results.json")
+        results={cfg.model.name: results}, filepath=constants.OUTPUTS.joinpath("results.txt")
     )
diff --git a/outputs/.gitignore b/outputs/.gitignore
new file mode 100644
index 0000000..be8ef3d
--- /dev/null
+++ b/outputs/.gitignore
@@ -0,0 +1,2 @@
+**/*.ckpt
+**/wandb/
\ No newline at end of file
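Note on the two source edits above: evaluate_linear fits a logistic-regression probe on the
frozen encoder's embeddings, and 300 lbfgs iterations was apparently not always enough to
converge; the rename from results.json to results.txt reflects that each run appends one
JSON object per line (JSON Lines), so the aggregate file is not a single valid JSON
document. A minimal sketch of that append -- the body of autoencoders.eval.to_json is not
part of this patch, so this is an assumption about its behavior, not repo code:

    import json
    import pathlib

    def to_json(results: dict, filepath: pathlib.Path) -> None:
        # Append one JSON object per line; repeated runs accumulate, which
        # matches the duplicated model names seen in outputs/results.txt below.
        with pathlib.Path(filepath).open("a") as fh:
            fh.write(json.dumps(results) + "\n")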
diff --git a/outputs/DAE/train/2023-08-29/21-19-54/.hydra/config.yaml b/outputs/DAE/train/2023-08-29/21-19-54/.hydra/config.yaml
new file mode 100644
index 0000000..16ac679
--- /dev/null
+++ b/outputs/DAE/train/2023-08-29/21-19-54/.hydra/config.yaml
@@ -0,0 +1,90 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.CyclicLR
+    _partial_: true
+    base_lr: 0.001
+    max_lr: 3.0
+    cycle_momentum: false
+  name: DAE
+  nn:
+    _target_: autoencoders.models.base_dae.DenoisingAutoEncoder
+    layers:
+    - 128
+    - 64
+    - 64
+    input_shape:
+    - 28
+    - 28
+    loss_func:
+      _target_: torch.nn.MSELoss
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+  log_images:
+    _target_: autoencoders.callbacks.LogReconstructedImagesCallback
+tags:
+- ${data.name}
+- ${model.name}
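Every node with a `_target_` in the config above is instantiated recursively by Hydra, and
nodes marked `_partial_: true` (the optimizer and scheduler) come back as functools.partial
objects to be bound later, once the network's parameters exist. The exact wiring in
autoencoders/train.py is not part of this diff; a minimal sketch of consuming this config,
with the function body as an assumption:

    import hydra

    @hydra.main(config_path="autoencoders/conf", config_name="config", version_base="1.3")
    def main(cfg):
        net = hydra.utils.instantiate(cfg.model.nn)       # DenoisingAutoEncoder(layers=[128, 64, 64], ...)
        make_opt = hydra.utils.instantiate(cfg.model.optimizer)  # functools.partial(torch.optim.Adam, lr=0.001, ...)
        optimizer = make_opt(net.parameters())            # bound here, not at config time

    if __name__ == "__main__":
        main()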
diff --git a/outputs/DAE/train/2023-08-29/21-19-54/.hydra/hydra.yaml b/outputs/DAE/train/2023-08-29/21-19-54/.hydra/hydra.yaml
new file mode 100644
index 0000000..8d5e2e6
--- /dev/null
+++ b/outputs/DAE/train/2023-08-29/21-19-54/.hydra/hydra.yaml
@@ -0,0 +1,173 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=base_dae
+  job:
+    name: train
+    chdir: null
+    override_dirname: model=base_dae
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/DAE/train/2023-08-29/21-19-54
+    choices:
+      experiment: null
+      callbacks: encoder
+      trainer: default
+      model: base_dae
+      scheduler@model.scheduler: cyclic
+      optimizer@model.optimizer: adam
+      data: mnist
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/DAE/train/2023-08-29/21-19-54/.hydra/overrides.yaml b/outputs/DAE/train/2023-08-29/21-19-54/.hydra/overrides.yaml
new file mode 100644
index 0000000..f9ca22a
--- /dev/null
+++ b/outputs/DAE/train/2023-08-29/21-19-54/.hydra/overrides.yaml
@@ -0,0 +1 @@
+- model=base_dae
diff --git a/outputs/DAE/train/2023-08-29/21-19-54/checkpoints/best_k_models.yaml b/outputs/DAE/train/2023-08-29/21-19-54/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..733f101
--- /dev/null
+++ b/outputs/DAE/train/2023-08-29/21-19-54/checkpoints/best_k_models.yaml
@@ -0,0 +1 @@
+/Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/DAE/train/2023-08-29/21-19-54/checkpoints/epoch=10-step=2585.ckpt: 0.03252919763326645
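best_k_models.yaml maps each saved checkpoint path to the monitored train-loss at save
time; with save_top_k: 1 there is exactly one entry. (Later files in this patch use YAML's
`? key` / `: value` complex-key form, which is just how long keys get serialized.) A small
sketch for recovering the best checkpoint from one of these files:

    import yaml

    path = "outputs/DAE/train/2023-08-29/21-19-54/checkpoints/best_k_models.yaml"
    with open(path) as fh:
        best_k = yaml.safe_load(fh)          # {checkpoint_path: train-loss}
    best_ckpt = min(best_k, key=best_k.get)  # lowest monitored loss wins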
diff --git a/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/config.yaml b/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/config.yaml
new file mode 100644
index 0000000..d4d9c3d
--- /dev/null
+++ b/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/config.yaml
@@ -0,0 +1,90 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.CyclicLR
+    _partial_: true
+    base_lr: 0.001
+    max_lr: 3.0
+    cycle_momentum: false
+  name: autoencoder
+  nn:
+    _target_: autoencoders.models.base.AutoEncoder
+    layers:
+    - 128
+    - 64
+    - 16
+    input_shape:
+    - 28
+    - 28
+    loss_func:
+      _target_: torch.nn.MSELoss
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+  log_images:
+    _target_: autoencoders.callbacks.LogReconstructedImagesCallback
+tags:
+- ${data.name}
+- ${model.name}
diff --git a/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/hydra.yaml b/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/hydra.yaml
new file mode 100644
index 0000000..683ee4b
--- /dev/null
+++ b/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/hydra.yaml
@@ -0,0 +1,172 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task: []
+  job:
+    name: train
+    chdir: null
+    override_dirname: ''
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/autoencoder/train/2023-08-29/21-13-51
+    choices:
+      experiment: null
+      callbacks: encoder
+      trainer: default
+      model: base
+      scheduler@model.scheduler: cyclic
+      optimizer@model.optimizer: adam
+      data: mnist
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/overrides.yaml b/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/overrides.yaml
new file mode 100644
index 0000000..fe51488
--- /dev/null
+++ b/outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/overrides.yaml
@@ -0,0 +1 @@
+[]
diff --git a/outputs/autoencoder/train/2023-08-29/21-13-51/checkpoints/best_k_models.yaml b/outputs/autoencoder/train/2023-08-29/21-13-51/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..cd97e0e
--- /dev/null
+++ b/outputs/autoencoder/train/2023-08-29/21-13-51/checkpoints/best_k_models.yaml
@@ -0,0 +1,2 @@
+? /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/autoencoder/train/2023-08-29/21-13-51/checkpoints/epoch=11-step=2820.ckpt
+: 0.020108811557292938
diff --git a/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/config.yaml b/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/config.yaml
new file mode 100644
index 0000000..01d828a
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/config.yaml
@@ -0,0 +1,89 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.CyclicLR
+    _partial_: true
+    base_lr: 0.001
+    max_lr: 3.0
+    cycle_momentum: false
+  name: deep-DAE
+  nn:
+    _target_: autoencoders.models.deep_ae.DeepAutoEncoder
+    base_channels: 16
+    latent_dim: 256
+    input_channels: 1
+    loss_func:
+      _target_: torch.nn.MSELoss
+    encoder:
+      _target_: autoencoders.models.deep_ae.NoisyCNNEncoder
+      _partial_: true
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+  log_images:
+    _target_: autoencoders.callbacks.LogReconstructedImagesCallback
+tags:
+- ${data.name}
+- ${model.name}
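The only structural difference between this config and the plain deep-autoencoder runs
later in the patch is the swapped-in encoder partial (NoisyCNNEncoder). The corruption
itself is not shown anywhere in this diff, so the following is only a generic denoising
step; additive Gaussian noise and the noise_sd parameter are assumptions, not the repo's
actual scheme:

    import torch
    import torch.nn.functional as F

    def denoising_step(model, x, noise_sd=0.1):
        # Corrupt the input, reconstruct, and score against the *clean* image.
        # The real corruption lives in autoencoders.models.deep_ae.NoisyCNNEncoder.
        x_noisy = x + noise_sd * torch.randn_like(x)
        return F.mse_loss(model(x_noisy), x)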
diff --git a/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/hydra.yaml b/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/hydra.yaml
new file mode 100644
index 0000000..7521163
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/hydra.yaml
@@ -0,0 +1,173 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=deep_dae
+  job:
+    name: train
+    chdir: null
+    override_dirname: model=deep_dae
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-DAE/train/2023-08-29/21-28-11
+    choices:
+      experiment: null
+      callbacks: encoder
+      trainer: default
+      model: deep_dae
+      scheduler@model.scheduler: cyclic
+      optimizer@model.optimizer: adam
+      data: mnist
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/overrides.yaml b/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/overrides.yaml
new file mode 100644
index 0000000..6030196
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-29/21-28-11/.hydra/overrides.yaml
@@ -0,0 +1 @@
+- model=deep_dae
diff --git a/outputs/deep-DAE/train/2023-08-29/21-28-11/checkpoints/best_k_models.yaml b/outputs/deep-DAE/train/2023-08-29/21-28-11/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..d4b26cd
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-29/21-28-11/checkpoints/best_k_models.yaml
@@ -0,0 +1,2 @@
+? /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-DAE/train/2023-08-29/21-28-11/checkpoints/epoch=5-step=1410.ckpt
+: 0.025117745622992516
diff --git a/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/config.yaml b/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/config.yaml
new file mode 100644
index 0000000..1bde081
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/config.yaml
@@ -0,0 +1,89 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.CyclicLR
+    _partial_: true
+    base_lr: 0.001
+    max_lr: 3.0
+    cycle_momentum: false
+  name: deep-DAE
+  nn:
+    _target_: autoencoders.models.deep_ae.DeepAutoEncoder
+    base_channels: 16
+    latent_dim: 128
+    input_channels: 1
+    loss_func:
+      _target_: torch.nn.MSELoss
+    encoder:
+      _target_: autoencoders.models.deep_ae.NoisyCNNEncoder
+      _partial_: true
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+  log_images:
+    _target_: autoencoders.callbacks.LogReconstructedImagesCallback
+tags:
+- ${data.name}
+- ${model.name}
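This second deep-DAE run only shrinks latent_dim from 256 to 128 (see the override recorded
below). Since both the optimizer and the CyclicLR scheduler are `_partial_` factories, they
are presumably bound inside the LightningModule; a plausible configure_optimizers, with the
attribute names and the step interval as assumptions:

    def configure_optimizers(self):
        optimizer = self.optimizer(self.parameters())  # partial(Adam, lr=0.001, ...)
        scheduler = self.scheduler(optimizer)          # partial(CyclicLR, base_lr=0.001, max_lr=3.0, ...)
        return {
            "optimizer": optimizer,
            # CyclicLR is normally stepped per batch; "interval" here is an assumption.
            "lr_scheduler": {"scheduler": scheduler, "interval": "step"},
        }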
diff --git a/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/hydra.yaml b/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/hydra.yaml
new file mode 100644
index 0000000..e1b14d0
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/hydra.yaml
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=deep_dae
+    - model.nn.latent_dim=128
+  job:
+    name: train
+    chdir: null
+    override_dirname: model.nn.latent_dim=128,model=deep_dae
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-DAE/train/2023-08-29/21-31-57
+    choices:
+      experiment: null
+      callbacks: encoder
+      trainer: default
+      model: deep_dae
+      scheduler@model.scheduler: cyclic
+      optimizer@model.optimizer: adam
+      data: mnist
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/overrides.yaml b/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/overrides.yaml
new file mode 100644
index 0000000..9afdacc
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-29/21-31-57/.hydra/overrides.yaml
@@ -0,0 +1,2 @@
+- model=deep_dae
+- model.nn.latent_dim=128
diff --git a/outputs/deep-DAE/train/2023-08-29/21-31-57/checkpoints/best_k_models.yaml b/outputs/deep-DAE/train/2023-08-29/21-31-57/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..2e5e687
--- /dev/null
+++ b/outputs/deep-DAE/train/2023-08-29/21-31-57/checkpoints/best_k_models.yaml
@@ -0,0 +1,2 @@
+? /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-DAE/train/2023-08-29/21-31-57/checkpoints/epoch=5-step=1410.ckpt
+: 0.02478528767824173
diff --git a/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/config.yaml b/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/config.yaml
new file mode 100644
index 0000000..dd579be
--- /dev/null
+++ b/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/config.yaml
@@ -0,0 +1,86 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.CyclicLR
+    _partial_: true
+    base_lr: 0.001
+    max_lr: 3.0
+    cycle_momentum: false
+  name: deep-autoencoder
+  nn:
+    _target_: autoencoders.models.deep_ae.DeepAutoEncoder
+    base_channels: 16
+    latent_dim: 64
+    input_channels: 1
+    loss_func:
+      _target_: torch.nn.MSELoss
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+  log_images:
+    _target_: autoencoders.callbacks.LogReconstructedImagesCallback
+tags:
+- ${data.name}
+- ${model.name}
diff --git a/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/hydra.yaml b/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/hydra.yaml
new file mode 100644
index 0000000..84b6ffd
--- /dev/null
+++ b/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/hydra.yaml
@@ -0,0 +1,173 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=deep_ae
+  job:
+    name: train
+    chdir: null
+    override_dirname: model=deep_ae
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-autoencoder/train/2023-08-29/21-24-59
+    choices:
+      experiment: null
+      callbacks: encoder
+      trainer: default
+      model: deep_ae
+      scheduler@model.scheduler: cyclic
+      optimizer@model.optimizer: adam
+      data: mnist
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/overrides.yaml b/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/overrides.yaml
new file mode 100644
index 0000000..3902811
--- /dev/null
+++ b/outputs/deep-autoencoder/train/2023-08-29/21-24-59/.hydra/overrides.yaml
@@ -0,0 +1 @@
+- model=deep_ae
diff --git a/outputs/deep-autoencoder/train/2023-08-29/21-24-59/checkpoints/best_k_models.yaml b/outputs/deep-autoencoder/train/2023-08-29/21-24-59/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..ce609aa
--- /dev/null
+++ b/outputs/deep-autoencoder/train/2023-08-29/21-24-59/checkpoints/best_k_models.yaml
@@ -0,0 +1,2 @@
+? /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-autoencoder/train/2023-08-29/21-24-59/checkpoints/epoch=11-step=2820.ckpt
+: 0.0027759938966482878
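Each of these runs takes the same path through autoencoders/train.py. From the hunk at the
top of this patch, main() roughly does the following; only the last two statements appear
in the diff, so the instantiation lines (and the constants import path) are assumptions:

    import hydra
    import autoencoders.eval
    from autoencoders import constants  # assumed location of OUTPUTS

    @hydra.main(config_path="autoencoders/conf", config_name="config", version_base="1.3")
    def main(cfg):
        model = hydra.utils.instantiate(cfg.model.nn)
        trainer = hydra.utils.instantiate(cfg.trainer)
        trainer.fit(model,
                    hydra.utils.instantiate(cfg.data.train),
                    hydra.utils.instantiate(cfg.data.valid))

        results = autoencoders.eval.evaluate_linear(module=model, trainer=trainer)
        autoencoders.eval.to_json(
            results={cfg.model.name: results},
            filepath=constants.OUTPUTS.joinpath("results.txt"),
        )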
diff --git a/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/config.yaml b/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/config.yaml
new file mode 100644
index 0000000..20a3c8e
--- /dev/null
+++ b/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/config.yaml
@@ -0,0 +1,86 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.AutoEncoderDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.CyclicLR
+    _partial_: true
+    base_lr: 0.001
+    max_lr: 3.0
+    cycle_momentum: false
+  name: deep-autoencoder
+  nn:
+    _target_: autoencoders.models.deep_ae.DeepAutoEncoder
+    base_channels: 16
+    latent_dim: 32
+    input_channels: 1
+    loss_func:
+      _target_: torch.nn.MSELoss
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+  log_images:
+    _target_: autoencoders.callbacks.LogReconstructedImagesCallback
+tags:
+- ${data.name}
+- ${model.name}
diff --git a/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/hydra.yaml b/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/hydra.yaml
new file mode 100644
index 0000000..1341c6a
--- /dev/null
+++ b/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/hydra.yaml
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=deep_ae
+    - model.nn.latent_dim=32
+  job:
+    name: train
+    chdir: null
+    override_dirname: model.nn.latent_dim=32,model=deep_ae
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-autoencoder/train/2023-08-29/21-33-38
+    choices:
+      experiment: null
+      callbacks: encoder
+      trainer: default
+      model: deep_ae
+      scheduler@model.scheduler: cyclic
+      optimizer@model.optimizer: adam
+      data: mnist
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/overrides.yaml b/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/overrides.yaml
new file mode 100644
index 0000000..62cb22e
--- /dev/null
+++ b/outputs/deep-autoencoder/train/2023-08-29/21-33-38/.hydra/overrides.yaml
@@ -0,0 +1,2 @@
+- model=deep_ae
+- model.nn.latent_dim=32
diff --git a/outputs/deep-autoencoder/train/2023-08-29/21-33-38/checkpoints/best_k_models.yaml b/outputs/deep-autoencoder/train/2023-08-29/21-33-38/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..ec06f62
--- /dev/null
+++ b/outputs/deep-autoencoder/train/2023-08-29/21-33-38/checkpoints/best_k_models.yaml
@@ -0,0 +1,2 @@
+? /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/deep-autoencoder/train/2023-08-29/21-33-38/checkpoints/epoch=9-step=2350.ckpt
+: 0.0045637041330337524
diff --git a/outputs/results.txt b/outputs/results.txt
new file mode 100644
index 0000000..aafb11b
--- /dev/null
+++ b/outputs/results.txt
@@ -0,0 +1,6 @@
+{"autoencoder": {"acc": 0.8887, "auc": 0.9383}}
+{"DAE": {"acc": 0.7964, "auc": 0.8871}}
+{"deep-autoencoder": {"acc": 0.9276, "auc": 0.9599}}
+{"deep-DAE": {"acc": 0.8723, "auc": 0.9292}}
+{"deep-DAE": {"acc": 0.8581, "auc": 0.9213}}
+{"deep-autoencoder": {"acc": 0.9321, "auc": 0.9624}}
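results.txt accumulates one JSON object per run. Assuming lines were appended in the
timestamp order of the runs above, the two deep-DAE lines correspond to latent_dim 256 and
128, the two deep-autoencoder lines to latent_dim 64 and 32, and the deep autoencoder at
latent_dim=32 posts the best linear-probe numbers (acc 0.9321, auc 0.9624). A small reader
for the file:

    import json

    with open("outputs/results.txt") as fh:
        runs = [json.loads(line) for line in fh]  # one {model_name: metrics} dict per run

    for run in runs:
        for name, metrics in run.items():
            print(f"{name:18s} acc={metrics['acc']:.4f}  auc={metrics['auc']:.4f}")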