Commit
DeepGP, DPL: parallel runs comment
karibbov committed Feb 23, 2024
1 parent b4945af commit 3d870c2
Showing 2 changed files with 8 additions and 0 deletions.
4 changes: 4 additions & 0 deletions neps/optimizers/bayesian_optimization/models/DPL.py
@@ -224,6 +224,10 @@ def __init__(
# IMPORTANT: For parallel runs, lock the checkpoint file during the whole training
checkpointing: bool = False,
root_directory: Path | str | None = None,
# IMPORTANT: For parallel runs, use a different checkpoint_file name for each
# IMPORTANT: surrogate. This ensures that parallel runs don't overwrite each
# IMPORTANT: other's saved checkpoints, although they will still have some
# IMPORTANT: conflicts due to the global optimizer step tracking.
checkpoint_file: Path | str = "surrogate_checkpoint.pth",
refine_epochs: int = default_refine_epochs,
n_initial_full_trainings: int = default_n_initial_full_trainings,
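One way to act on the new comment is to derive each worker's checkpoint_file from its process ID. The sketch below is a minimal illustration, not part of the commit; the surrogate class name PowerLawSurrogate is a placeholder assumption, since the diff only shows the constructor parameters.

import os
from pathlib import Path

def unique_checkpoint_file(base: str = "surrogate_checkpoint") -> Path:
    # Per-process filename so parallel workers don't overwrite
    # each other's saved checkpoints.
    return Path(f"{base}_{os.getpid()}.pth")

# Hypothetical usage (the actual class name is not shown in this diff):
# surrogate = PowerLawSurrogate(
#     checkpointing=True,
#     root_directory="results/checkpoints",
#     checkpoint_file=unique_checkpoint_file(),
# )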
4 changes: 4 additions & 0 deletions neps/optimizers/bayesian_optimization/models/deepGP.py
@@ -164,6 +164,10 @@ def __init__(
# IMPORTANT: hence, it is not suitable for multiprocessing settings
checkpointing: bool = False,
root_directory: Path | str | None = None,
# IMPORTANT: For parallel runs, use a different checkpoint_file name for each
# IMPORTANT: surrogate. This ensures that parallel runs don't overwrite each
# IMPORTANT: other's saved checkpoints, although they will still have some
# IMPORTANT: conflicts due to the global optimizer step tracking.
checkpoint_file: Path | str = "surrogate_checkpoint.pth",
refine_epochs: int = 50,
n_initial_full_trainings: int = 10,
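The comment at the top of the DPL.py hunk also advises locking the checkpoint file for the whole training run. A minimal sketch of that idea, assuming the third-party filelock package is available; train_surrogate is a hypothetical placeholder, as the diff does not show the training entry point.

from pathlib import Path
from filelock import FileLock  # third-party: pip install filelock

def train_with_locked_checkpoint(checkpoint_file: Path) -> None:
    # Hold an exclusive lock for the entire training run so no other
    # parallel worker reads or writes the checkpoint mid-training.
    with FileLock(str(checkpoint_file) + ".lock"):
        ...  # train_surrogate(checkpoint_file) -- hypothetical placeholder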
