DyHPOBase draft version, with a random acq
karibbov committed Aug 23, 2023
1 parent 44aee8b commit c83ea9f
Showing 5 changed files with 821 additions and 1 deletion.
86 changes: 86 additions & 0 deletions neps_examples/efficiency/multi_fidelity_dyhpo.py
@@ -0,0 +1,86 @@
import logging

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, optim

import neps


class TheModelClass(nn.Module):
    """Taken from https://pytorch.org/tutorials/beginner/saving_loading_models.html"""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_model_and_optimizer(learning_rate):
    """Taken from https://pytorch.org/tutorials/beginner/saving_loading_models.html"""
    model = TheModelClass()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
    return model, optimizer


def run_pipeline(pipeline_directory, previous_pipeline_directory, learning_rate, epoch):
    model, optimizer = get_model_and_optimizer(learning_rate)
    checkpoint_name = "checkpoint.pth"

    if previous_pipeline_directory is not None:
        # Read in state of the model after the previous fidelity rung
        checkpoint = torch.load(previous_pipeline_directory / checkpoint_name)
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        epochs_previously_spent = checkpoint["epoch"]
    else:
        epochs_previously_spent = 0

    # Train model here ...

    # Save model to disk
    torch.save(
        {
            "epoch": epoch,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
        },
        pipeline_directory / checkpoint_name,
    )

    loss = np.log(learning_rate / epoch)  # Replace with actual error
    epochs_spent_in_this_call = epoch - epochs_previously_spent  # Optional for stopping
    return dict(loss=loss, cost=epochs_spent_in_this_call)


pipeline_space = dict(
    learning_rate=neps.FloatParameter(lower=1e-4, upper=1e0, log=True),
    epoch=neps.IntegerParameter(lower=1, upper=10, is_fidelity=True),
)

logging.basicConfig(level=logging.INFO)
neps.run(
    run_pipeline=run_pipeline,
    pipeline_space=pipeline_space,
    root_directory="results/multi_fidelity_example",
    searcher="dyhpo",
    # Optional: Do not start another evaluation after a total of 20 epochs has been
    # spent, corresponds to the cost field returned above.
    max_cost_total=20,
    surrogate_model="gp",
    hp_kernels=["m52"],
)
2 changes: 2 additions & 0 deletions src/neps/optimizers/__init__.py
@@ -8,6 +8,7 @@
from .bayesian_optimization.mf_tpe import MultiFidelityPriorWeightedTreeParzenEstimator
from .bayesian_optimization.optimizer import BayesianOptimization
from .grid_search.optimizer import GridSearch
from .multi_fidelity.dyhpo import DyHPOBase
from .multi_fidelity.hyperband import (
    MOBSTER,
    AsynchronousHyperband,
@@ -43,4 +44,5 @@
"hyperband_custom_default": HyperbandCustomDefault,
"priorband": PriorBand,
"mobster": MOBSTER,
"dyhpo": DyHPOBase,
}
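
This hunk registers the new DyHPOBase optimizer under the key "dyhpo", which is how the searcher="dyhpo" argument in the example above resolves to the new class. As a minimal sketch of that lookup (assuming the dict shown here is exposed as SearcherMapping from neps.optimizers; the name and direct import are illustrative, not a documented public API):

```python
# Illustrative only: resolve the optimizer class registered under the "dyhpo" key.
# Assumes the mapping above is importable as `SearcherMapping`.
from neps.optimizers import SearcherMapping

dyhpo_cls = SearcherMapping["dyhpo"]
print(dyhpo_cls.__name__)  # expected to print "DyHPOBase"
```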