Skip to content

Commit

Permalink
basic trials
Browse files Browse the repository at this point in the history
  • Loading branch information
chris-santiago committed Aug 30, 2023
1 parent d140f83 commit 5f46bb7
Show file tree
Hide file tree
Showing 28 changed files with 1,598 additions and 2 deletions.
2 changes: 1 addition & 1 deletion autoencoders/eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def evaluate_linear(
x_test = encoder.encode(ds.data[train_length:].unsqueeze(1) / 255).numpy()
y_test = ds.targets[train_length:]

-    lr = LogisticRegression(max_iter=300)
+    lr = LogisticRegression(max_iter=1000)
lr.fit(x_train, y_train)
labels = lr.predict(x_test)
labels_ohe = F.one_hot(torch.tensor(labels)).float()
Expand Down
2 changes: 1 addition & 1 deletion autoencoders/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def main(cfg):

results = autoencoders.eval.evaluate_linear(module=model, trainer=trainer)
autoencoders.eval.to_json(
-        results={cfg.model.name: results}, filepath=constants.OUTPUTS.joinpath("results.json")
+        results={cfg.model.name: results}, filepath=constants.OUTPUTS.joinpath("results.txt")
)


Expand Down
2 changes: 2 additions & 0 deletions outputs/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
**/*.ckpt
**/wandb/
90 changes: 90 additions & 0 deletions outputs/DAE/train/2023-08-29/21-19-54/.hydra/config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
data:
batch_size: 256
n_workers: 10
name: mnist
train:
_target_: torch.utils.data.DataLoader
dataset:
_target_: autoencoders.data.AutoEncoderDataset
dataset:
_target_: autoencoders.data.get_mnist_dataset
train: true
batch_size: ${data.batch_size}
shuffle: true
num_workers: ${data.n_workers}
valid:
_target_: torch.utils.data.DataLoader
dataset:
_target_: autoencoders.data.AutoEncoderDataset
dataset:
_target_: autoencoders.data.get_mnist_dataset
train: false
batch_size: ${data.batch_size}
shuffle: false
num_workers: ${data.n_workers}
model:
optimizer:
_target_: torch.optim.Adam
_partial_: true
lr: 0.001
betas:
- 0.9
- 0.999
weight_decay: 0
scheduler:
_target_: torch.optim.lr_scheduler.CyclicLR
_partial_: true
base_lr: 0.001
max_lr: 3.0
cycle_momentum: false
name: DAE
nn:
_target_: autoencoders.models.base_dae.DenoisingAutoEncoder
layers:
- 128
- 64
- 64
input_shape:
- 28
- 28
loss_func:
_target_: torch.nn.MSELoss
trainer:
_target_: pytorch_lightning.Trainer
max_epochs: 100
accelerator: mps
devices: 1
logger:
_target_: pytorch_lightning.loggers.WandbLogger
project: autoencoders
name: null
id: null
group: null
job_type: null
save_dir: ${hydra:runtime.output_dir}
log_model: true
tags: ${tags}
callbacks:
model_summary:
_target_: pytorch_lightning.callbacks.RichModelSummary
progress_bar:
_target_: pytorch_lightning.callbacks.RichProgressBar
refresh_rate: 5
leave: true
early_stopping:
_target_: pytorch_lightning.callbacks.EarlyStopping
monitor: train-loss
min_delta: 0.001
patience: 5
check_on_train_epoch_end: true
model_checkpoint:
_target_: pytorch_lightning.callbacks.ModelCheckpoint
dirpath: ${hydra:runtime.output_dir}/checkpoints
monitor: train-loss
save_top_k: 1
save_on_train_epoch_end: true
log_images:
_target_: autoencoders.callbacks.LogReconstructedImagesCallback
tags:
- ${data.name}
- ${model.name}
173 changes: 173 additions & 0 deletions outputs/DAE/train/2023-08-29/21-19-54/.hydra/hydra.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,173 @@
hydra:
run:
dir: outputs/${model.name}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
sweep:
dir: outputs/${model.name}/${hydra.job.name}/multirun
subdir: ${hydra.job.override_dirname}/${now:%Y-%m-%d}/${now:%H-%M-%S}
launcher:
_target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
n_jobs: -1
backend: null
prefer: processes
require: null
verbose: 0
timeout: null
pre_dispatch: 2*n_jobs
batch_size: auto
temp_folder: null
max_nbytes: null
mmap_mode: r
sweeper:
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
max_batch_size: null
params: null
help:
app_name: ${hydra.job.name}
header: '${hydra.help.app_name} is powered by Hydra.
'
footer: 'Powered by Hydra (https://hydra.cc)
Use --hydra-help to view Hydra specific help
'
template: '${hydra.help.header}
== Configuration groups ==
Compose your configuration from those groups (group=option)
$APP_CONFIG_GROUPS
== Config ==
Override anything in the config (foo.bar=value)
$CONFIG
${hydra.help.footer}
'
hydra_help:
template: 'Hydra (${hydra.runtime.version})
See https://hydra.cc for more info.
== Flags ==
$FLAGS_HELP
== Configuration groups ==
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
to command line)
$HYDRA_CONFIG_GROUPS
Use ''--cfg hydra'' to Show the Hydra config.
'
hydra_help: ???
hydra_logging:
version: 1
formatters:
simple:
format: '[%(asctime)s][HYDRA] %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: simple
stream: ext://sys.stdout
root:
level: INFO
handlers:
- console
loggers:
logging_example:
level: DEBUG
disable_existing_loggers: false
job_logging:
version: 1
formatters:
simple:
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: simple
stream: ext://sys.stdout
file:
class: logging.FileHandler
formatter: simple
filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
root:
level: INFO
handlers:
- console
- file
disable_existing_loggers: false
env: {}
mode: RUN
searchpath: []
callbacks: {}
output_subdir: .hydra
overrides:
hydra:
- hydra.mode=RUN
task:
- model=base_dae
job:
name: train
chdir: null
override_dirname: model=base_dae
id: ???
num: ???
config_name: config
env_set: {}
env_copy: []
config:
override_dirname:
kv_sep: '='
item_sep: ','
exclude_keys: []
runtime:
version: 1.3.2
version_base: '1.3'
cwd: /Users/chrissantiago/Dropbox/GitHub/autoencoders
config_sources:
- path: hydra.conf
schema: pkg
provider: hydra
- path: /Users/chrissantiago/Dropbox/GitHub/autoencoders/autoencoders/conf
schema: file
provider: main
- path: ''
schema: structured
provider: schema
output_dir: /Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/DAE/train/2023-08-29/21-19-54
choices:
experiment: null
callbacks: encoder
trainer: default
model: base_dae
scheduler@model.scheduler: cyclic
optimizer@model.optimizer: adam
data: mnist
hydra/env: default
hydra/callbacks: null
hydra/job_logging: default
hydra/hydra_logging: default
hydra/hydra_help: default
hydra/help: default
hydra/sweeper: basic
hydra/launcher: joblib
hydra/output: default
verbose: false
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
- model=base_dae
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
/Users/chrissantiago/Dropbox/GitHub/autoencoders/outputs/DAE/train/2023-08-29/21-19-54/checkpoints/epoch=10-step=2585.ckpt: 0.03252919763326645
90 changes: 90 additions & 0 deletions outputs/autoencoder/train/2023-08-29/21-13-51/.hydra/config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
data:
batch_size: 256
n_workers: 10
name: mnist
train:
_target_: torch.utils.data.DataLoader
dataset:
_target_: autoencoders.data.AutoEncoderDataset
dataset:
_target_: autoencoders.data.get_mnist_dataset
train: true
batch_size: ${data.batch_size}
shuffle: true
num_workers: ${data.n_workers}
valid:
_target_: torch.utils.data.DataLoader
dataset:
_target_: autoencoders.data.AutoEncoderDataset
dataset:
_target_: autoencoders.data.get_mnist_dataset
train: false
batch_size: ${data.batch_size}
shuffle: false
num_workers: ${data.n_workers}
model:
optimizer:
_target_: torch.optim.Adam
_partial_: true
lr: 0.001
betas:
- 0.9
- 0.999
weight_decay: 0
scheduler:
_target_: torch.optim.lr_scheduler.CyclicLR
_partial_: true
base_lr: 0.001
max_lr: 3.0
cycle_momentum: false
name: autoencoder
nn:
_target_: autoencoders.models.base.AutoEncoder
layers:
- 128
- 64
- 16
input_shape:
- 28
- 28
loss_func:
_target_: torch.nn.MSELoss
trainer:
_target_: pytorch_lightning.Trainer
max_epochs: 100
accelerator: mps
devices: 1
logger:
_target_: pytorch_lightning.loggers.WandbLogger
project: autoencoders
name: null
id: null
group: null
job_type: null
save_dir: ${hydra:runtime.output_dir}
log_model: true
tags: ${tags}
callbacks:
model_summary:
_target_: pytorch_lightning.callbacks.RichModelSummary
progress_bar:
_target_: pytorch_lightning.callbacks.RichProgressBar
refresh_rate: 5
leave: true
early_stopping:
_target_: pytorch_lightning.callbacks.EarlyStopping
monitor: train-loss
min_delta: 0.001
patience: 5
check_on_train_epoch_end: true
model_checkpoint:
_target_: pytorch_lightning.callbacks.ModelCheckpoint
dirpath: ${hydra:runtime.output_dir}/checkpoints
monitor: train-loss
save_top_k: 1
save_on_train_epoch_end: true
log_images:
_target_: autoencoders.callbacks.LogReconstructedImagesCallback
tags:
- ${data.name}
- ${model.name}
Loading

0 comments on commit 5f46bb7

Please sign in to comment.