diff --git a/autoencoders/conf/callbacks/vae.yaml b/autoencoders/conf/callbacks/siam.yaml
similarity index 100%
rename from autoencoders/conf/callbacks/vae.yaml
rename to autoencoders/conf/callbacks/siam.yaml
diff --git a/autoencoders/conf/experiment/simsiam-resnet.yaml b/autoencoders/conf/experiment/simsiam-resnet.yaml
new file mode 100644
index 0000000..12386af
--- /dev/null
+++ b/autoencoders/conf/experiment/simsiam-resnet.yaml
@@ -0,0 +1,26 @@
+# @package _global_
+
+# to execute this experiment run:
+# python train.py experiment=simsiam-resnet
+
+defaults:
+  - override /data: simsiam
+  - override /model: simsiam
+  - override /trainer: default
+  - override /callbacks: siam
+
+# all parameters below will be merged with parameters from default configurations set above
+# this allows you to overwrite only specified parameters
+
+tags: ["${data.name}", "${model.name}", "cyclicLR"]
+
+model:
+  nn:
+    _target_: autoencoders.models.simsiam.SimSiam
+    encoder:
+      _target_: autoencoders.models.simsiam.CNNEncoderProjection
+      channels_in: 1
+      base_channels: 32
+      latent_dim: ${model.nn.dim}
+    dim: 512
+    pred_dim: 512
\ No newline at end of file
diff --git a/autoencoders/conf/experiment/simsiam.yaml b/autoencoders/conf/experiment/simsiam.yaml
new file mode 100644
index 0000000..cede654
--- /dev/null
+++ b/autoencoders/conf/experiment/simsiam.yaml
@@ -0,0 +1,24 @@
+# @package _global_
+
+# to execute this experiment run:
+# python train.py experiment=simsiam
+
+defaults:
+  - override /data: simsiam
+  - override /model: simsiam
+  - override /trainer: default
+  - override /callbacks: siam
+
+# all parameters below will be merged with parameters from default configurations set above
+# this allows you to overwrite only specified parameters
+
+tags: ["${data.name}", "${model.name}", "cyclicLR"]
+
+model:
+  nn:
+    _target_: autoencoders.models.simsiam.SimSiam
+    encoder:
+      _target_: autoencoders.models.simsiam.ResnetEncoder
+      latent_dim: ${model.nn.dim}
+    dim: 1024
+    pred_dim: 512
\ No newline at end of file
diff --git a/autoencoders/conf/model/simsiam.yaml b/autoencoders/conf/model/simsiam.yaml
index 294760c..992dab0 100644
--- a/autoencoders/conf/model/simsiam.yaml
+++ b/autoencoders/conf/model/simsiam.yaml
@@ -11,7 +11,9 @@ name: SimSiam
 nn:
   _target_: autoencoders.models.simsiam.SimSiam
   encoder:
-    _target_: autoencoders.models.simsiam.ResnetEncoder
+    _target_: autoencoders.models.simsiam.CNNEncoderProjection
+    channels_in: 1
+    base_channels: 32
     latent_dim: ${model.nn.dim}
-  dim: 1024
+  dim: 512
   pred_dim: 512
diff --git a/autoencoders/models/simsiam.py b/autoencoders/models/simsiam.py
index 7ad438b..99a102e 100644
--- a/autoencoders/models/simsiam.py
+++ b/autoencoders/models/simsiam.py
@@ -6,6 +6,7 @@
 from torchvision.models.resnet import BasicBlock
 
 from autoencoders.models.base import BaseModule
+from autoencoders.modules import CNNEncoder
 
 
 class ResnetEncoder(ResNet):
@@ -40,6 +41,22 @@ def forward(self, x):
         return self.model(x)
 
 
+class CNNEncoderProjection(CNNEncoder):
+    def __init__(self, channels_in: int, base_channels: int, latent_dim: int):
+        super().__init__(channels_in, base_channels, latent_dim)
+
+        self.projection = nn.Sequential(
+            ProjectionLayer(latent_dim, latent_dim),
+            ProjectionLayer(latent_dim, latent_dim),
+            nn.Linear(latent_dim, latent_dim),
+            nn.BatchNorm1d(latent_dim, affine=False),
+        )
+
+    def forward(self, x):
+        z = self.model(x)
+        return self.projection(z)
+
+
 class SimSiam(BaseModule):
     def __init__(
         self,
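The new `CNNEncoderProjection` bolts a projection head onto the existing `CNNEncoder`: two `ProjectionLayer` blocks followed by a `Linear` and a `BatchNorm1d` with `affine=False`, matching the projection-MLP design from the SimSiam paper. The `SimSiam` module's training objective is not shown in this diff; the sketch below illustrates the standard symmetric negative-cosine loss with stop-gradient that a SimSiam module optimizes (the `encoder`/`predictor` names are hypothetical stand-ins, not the repo's actual attributes):

```python
import torch.nn.functional as F

# Sketch of the standard SimSiam objective (Chen & He, 2021). The repo's
# SimSiam.training_step is not part of this diff, so `encoder` and
# `predictor` here are assumed names for its submodules.
def simsiam_loss(encoder, predictor, x1, x2):
    z1, z2 = encoder(x1), encoder(x2)      # projections of two augmented views
    p1, p2 = predictor(z1), predictor(z2)  # predictions of the opposite view
    # Stop-gradient (.detach()) on the target branch is what prevents collapse.
    return -0.5 * (
        F.cosine_similarity(p1, z2.detach(), dim=-1).mean()
        + F.cosine_similarity(p2, z1.detach(), dim=-1).mean()
    )
```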
diff --git a/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/config.yaml b/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/config.yaml
new file mode 100644
index 0000000..f12e4e3
--- /dev/null
+++ b/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/config.yaml
@@ -0,0 +1,88 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.SimSiamDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+      num_ops: 1
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.SimSiamDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+      num_ops: 1
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
+    _partial_: true
+    mode: min
+    factor: 0.1
+    patience: 10
+  name: SimSiam
+  nn:
+    _target_: autoencoders.models.simsiam.SimSiam
+    encoder:
+      _target_: autoencoders.models.simsiam.CNNEncoderProjection
+      channels_in: 1
+      base_channels: 32
+      latent_dim: ${model.nn.dim}
+    dim: 512
+    pred_dim: 512
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+tags:
+- ${data.name}
+- ${model.name}
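The `.hydra/config.yaml` captured above is the fully composed configuration for the run, and every node carrying a `_target_` can be rebuilt with Hydra's instantiate API. A minimal sketch, assuming the config root recorded under `config_sources` in the `hydra.yaml` below:

```python
from hydra import compose, initialize_config_dir
from hydra.utils import instantiate

# Recompose the run config from the same config tree and overrides, then
# recursively instantiate the _target_ nodes. Replace the config_dir path
# with the one recorded under config_sources in hydra.yaml.
with initialize_config_dir(
    config_dir="/path/to/autoencoders/autoencoders/conf", version_base="1.3"
):
    cfg = compose(
        config_name="config",
        overrides=["data=simsiam", "model=simsiam", "callbacks=siam"],
    )

model = instantiate(cfg.model.nn)        # SimSiam wrapping CNNEncoderProjection
train_loader = instantiate(cfg.data.train)
```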
diff --git a/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/hydra.yaml b/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/hydra.yaml
new file mode 100644
index 0000000..3c8d937
--- /dev/null
+++ b/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/hydra.yaml
@@ -0,0 +1,175 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - data=simsiam
+    - model=simsiam
+    - callbacks=siam
+  job:
+    name: train
+    chdir: null
+    override_dirname: callbacks=siam,data=simsiam,model=simsiam
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/SimSiam/train/2023-09-01/10-39-53
+    choices:
+      experiment: null
+      callbacks: siam
+      trainer: default
+      model: simsiam
+      scheduler@model.scheduler: plateau
+      optimizer@model.optimizer: adam
+      data: simsiam
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/overrides.yaml b/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/overrides.yaml
new file mode 100644
index 0000000..4ffb35c
--- /dev/null
+++ b/outputs/SimSiam/train/2023-09-01/10-39-53/.hydra/overrides.yaml
@@ -0,0 +1,3 @@
+- data=simsiam
+- model=simsiam
+- callbacks=siam
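Both runs recorded the same three overrides, so either can be reproduced with `python train.py data=simsiam model=simsiam callbacks=siam`. Since the joblib launcher is configured with `n_jobs: -1`, appending `--multirun` and a sweep (e.g. `model.nn.dim=256,512`, a hypothetical example) would fan the jobs out across all local cores.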
diff --git a/outputs/SimSiam/train/2023-09-01/10-39-53/checkpoints/best_k_models.yaml b/outputs/SimSiam/train/2023-09-01/10-39-53/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..5a7bcf3
--- /dev/null
+++ b/outputs/SimSiam/train/2023-09-01/10-39-53/checkpoints/best_k_models.yaml
@@ -0,0 +1,2 @@
+? /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/SimSiam/train/2023-09-01/10-39-53/checkpoints/epoch=35-step=8460.ckpt
+: -0.991788387298584
diff --git a/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/config.yaml b/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/config.yaml
new file mode 100644
index 0000000..5710d40
--- /dev/null
+++ b/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/config.yaml
@@ -0,0 +1,86 @@
+data:
+  batch_size: 256
+  n_workers: 10
+  name: mnist
+  train:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.SimSiamDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: true
+      num_ops: 1
+    batch_size: ${data.batch_size}
+    shuffle: true
+    num_workers: ${data.n_workers}
+  valid:
+    _target_: torch.utils.data.DataLoader
+    dataset:
+      _target_: autoencoders.data.SimSiamDataset
+      dataset:
+        _target_: autoencoders.data.get_mnist_dataset
+        train: false
+      num_ops: 1
+    batch_size: ${data.batch_size}
+    shuffle: false
+    num_workers: ${data.n_workers}
+model:
+  optimizer:
+    _target_: torch.optim.Adam
+    _partial_: true
+    lr: 0.001
+    betas:
+    - 0.9
+    - 0.999
+    weight_decay: 0
+  scheduler:
+    _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
+    _partial_: true
+    mode: min
+    factor: 0.1
+    patience: 10
+  name: SimSiam
+  nn:
+    _target_: autoencoders.models.simsiam.SimSiam
+    encoder:
+      _target_: autoencoders.models.simsiam.ResnetEncoder
+      latent_dim: ${model.nn.dim}
+    dim: 512
+    pred_dim: 512
+trainer:
+  _target_: pytorch_lightning.Trainer
+  max_epochs: 100
+  accelerator: mps
+  devices: 1
+  logger:
+    _target_: pytorch_lightning.loggers.WandbLogger
+    project: autoencoders
+    name: null
+    id: null
+    group: null
+    job_type: null
+    save_dir: ${hydra:runtime.output_dir}
+    log_model: true
+    tags: ${tags}
+callbacks:
+  model_summary:
+    _target_: pytorch_lightning.callbacks.RichModelSummary
+  progress_bar:
+    _target_: pytorch_lightning.callbacks.RichProgressBar
+    refresh_rate: 5
+    leave: true
+  early_stopping:
+    _target_: pytorch_lightning.callbacks.EarlyStopping
+    monitor: train-loss
+    min_delta: 0.001
+    patience: 5
+    check_on_train_epoch_end: true
+  model_checkpoint:
+    _target_: pytorch_lightning.callbacks.ModelCheckpoint
+    dirpath: ${hydra:runtime.output_dir}/checkpoints
+    monitor: train-loss
+    save_top_k: 1
+    save_on_train_epoch_end: true
+tags:
+- ${data.name}
+- ${model.name}
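This second run swaps the encoder for the torchvision-based `ResnetEncoder` while keeping `dim: 512`. Checkpoints from either run can be restored through Lightning's standard API; a minimal sketch, assuming `BaseModule` subclasses `pytorch_lightning.LightningModule` (implied by the `Trainer`/`WandbLogger` setup above) and that constructor arguments were saved via `save_hyperparameters()`:

```python
from autoencoders.models.simsiam import SimSiam

# Path copied from best_k_models.yaml for the 10-39-53 run.
ckpt = (
    "/Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/SimSiam/"
    "train/2023-09-01/10-39-53/checkpoints/epoch=35-step=8460.ckpt"
)
# load_from_checkpoint rebuilds the module; if hyperparameters were not
# saved, pass the constructor arguments (encoder, dim, pred_dim) explicitly.
model = SimSiam.load_from_checkpoint(ckpt)
model.eval()
```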
diff --git a/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/hydra.yaml b/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/hydra.yaml
new file mode 100644
index 0000000..6c6e3c5
--- /dev/null
+++ b/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/hydra.yaml
@@ -0,0 +1,175 @@
+hydra:
+  run:
+    dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: outputs/${model.name}/${hydra.job.name}/multirun
+    subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  launcher:
+    _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
+    n_jobs: -1
+    backend: null
+    prefer: processes
+    require: null
+    verbose: 0
+    timeout: null
+    pre_dispatch: 2*n_jobs
+    batch_size: auto
+    temp_folder: null
+    max_nbytes: null
+    mmap_mode: r
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - data=simsiam
+    - model=simsiam
+    - callbacks=siam
+  job:
+    name: train
+    chdir: null
+    override_dirname: callbacks=siam,data=simsiam,model=simsiam
+    id: ???
+    num: ???
+    config_name: config
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/SimSiam/train/2023-09-01/10-49-10
+    choices:
+      experiment: null
+      callbacks: siam
+      trainer: default
+      model: simsiam
+      scheduler@model.scheduler: plateau
+      optimizer@model.optimizer: adam
+      data: simsiam
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: joblib
+      hydra/output: default
+  verbose: false
diff --git a/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/overrides.yaml b/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/overrides.yaml
new file mode 100644
index 0000000..4ffb35c
--- /dev/null
+++ b/outputs/SimSiam/train/2023-09-01/10-49-10/.hydra/overrides.yaml
@@ -0,0 +1,3 @@
+- data=simsiam
+- model=simsiam
+- callbacks=siam
diff --git a/outputs/SimSiam/train/2023-09-01/10-49-10/checkpoints/best_k_models.yaml b/outputs/SimSiam/train/2023-09-01/10-49-10/checkpoints/best_k_models.yaml
new file mode 100644
index 0000000..68db913
--- /dev/null
+++ b/outputs/SimSiam/train/2023-09-01/10-49-10/checkpoints/best_k_models.yaml
@@ -0,0 +1,2 @@
+? /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/SimSiam/train/2023-09-01/10-49-10/checkpoints/epoch=16-step=3995.ckpt
+: -0.9935576319694519
diff --git a/outputs/results.json b/outputs/results.json
index 03f07d7..4a7607c 100644
--- a/outputs/results.json
+++ b/outputs/results.json
@@ -102,5 +102,21 @@
       "auc": 0.9326
     },
     "ckpt": "/Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/SimSiam/train/2023-09-01/08-59-47/checkpoints/epoch=20-step=4935.ckpt"
+  },
+  {
+    "model": "SimSiam",
+    "metrics": {
+      "acc": 0.9624,
+      "auc": 0.9791
+    },
+    "ckpt": "/Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/SimSiam/train/2023-09-01/10-39-53/checkpoints/epoch=35-step=8460.ckpt"
+  },
+  {
+    "model": "SimSiam",
+    "metrics": {
+      "acc": 0.9546,
+      "auc": 0.9748
+    },
+    "ckpt": "/Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/SimSiam/train/2023-09-01/10-49-10/checkpoints/epoch=16-step=3995.ckpt"
   }
-]
+]
\ No newline at end of file
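The scores in `best_k_models.yaml` are the monitored `train-loss`, i.e. the negative mean cosine similarity, so values near -1.0 (here -0.9918 and -0.9936) indicate the two views' projections are almost perfectly aligned. The accuracy/AUC entries appended to `results.json` come from a downstream evaluation that is not part of this diff; a plausible sketch, assuming a linear probe fit on frozen embeddings of the MNIST splits (the function and argument names below are hypothetical):

```python
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score

@torch.no_grad()
def embed(encoder, loader):
    # Frozen embeddings and labels for every batch in the loader.
    feats, labels = [], []
    for x, y in loader:
        feats.append(encoder(x).cpu())
        labels.append(y)
    return torch.cat(feats).numpy(), torch.cat(labels).numpy()

def linear_probe(encoder, train_loader, test_loader):
    X_tr, y_tr = embed(encoder, train_loader)
    X_te, y_te = embed(encoder, test_loader)
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    acc = accuracy_score(y_te, clf.predict(X_te))
    # One-vs-rest AUC over the 10 MNIST classes.
    auc = roc_auc_score(y_te, clf.predict_proba(X_te), multi_class="ovr")
    return acc, auc
```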