Commit

Merge pull request #212 from EmmaRenauld/Remove_deprecated

Remove deprecated

EmmaRenauld committed Oct 30, 2023
2 parents c2a3221 + ab64af7 commit 551921e
Showing 14 changed files with 13 additions and 209 deletions.
11 changes: 1 addition & 10 deletions dwi_ml/data/dataset/multi_subject_containers.py
@@ -427,16 +427,7 @@ def load_data(self, load_training=True, load_validation=True,
# Load main attributes from hdf file, but each process calling
# the collate_fn must open its own hdf_file
step_size = hdf_handle.attrs['step_size']
- if 'compress' in hdf_handle.attrs:
-     compress = hdf_handle.attrs['compress']
- else:
-     # Fix deprecated usages
-     logger.warning(
-         "Using an old version of hdf database. Compression rate "
-         "information was not saved. This only means that if you "
-         "use --compress option anywhere, we will perform it "
-         "again.")
-     compress = None
+ compress = hdf_handle.attrs['compress']

# Can't save None in hdf5, saved a string instead. Converting.
if step_size == 'Not defined by user':
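Note: the replacement reads the 'compress' attribute unconditionally, so HDF5 databases written before that attribute existed will now fail at load time rather than fall back to None. A minimal sketch of the stricter lookup (the database file name is hypothetical; only the two attribute reads mirror the code above):

```python
import h5py

# Hypothetical database path; the attribute names follow the diff above.
with h5py.File("my_database.hdf5", "r") as hdf_handle:
    step_size = hdf_handle.attrs['step_size']
    # Before this commit a missing 'compress' attribute only triggered a
    # warning and fell back to None; now h5py raises a KeyError instead.
    compress = hdf_handle.attrs['compress']
```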
2 changes: 1 addition & 1 deletion dwi_ml/data/processing/space/neighborhood.py
@@ -80,7 +80,7 @@ def get_neighborhood_vectors_axes(radius: int, resolution: float):
neighborhood_vectors : tensor of shape (N, 3)
A list of vectors with last dimension = 3 (x,y,z coordinate for each
neighbour per respect to the origin). The current point (0,0,0) is
- included.
+ NOT included.
"""
tmp_axes = np.identity(3)
unit_axes = np.concatenate((tmp_axes, -tmp_axes))
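Note: the docstring fix matches what the shown code actually builds: six axis-aligned unit vectors (±x, ±y, ±z), without the origin. A quick check of that property (a sketch only; the radius and resolution handling beyond the shown lines is not reproduced):

```python
import numpy as np

# Mirrors the two lines shown above: six axis-aligned unit vectors.
tmp_axes = np.identity(3)
unit_axes = np.concatenate((tmp_axes, -tmp_axes))

# The current point (0, 0, 0) is NOT among them, as the corrected docstring says.
assert not np.any(np.all(unit_axes == 0, axis=1))
print(unit_axes.shape)  # (6, 3)
```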
85 changes: 0 additions & 85 deletions dwi_ml/models/main_models.py
@@ -249,47 +249,6 @@ def __init__(self, neighborhood_type: str = None,

super().__init__(**kw)

@classmethod
def _load_params(cls, model_dir):
params = super()._load_params(model_dir)

# Will eventually be deprecated:
if 'neighborhood_radius' in params and \
'neighborhood_resolution' not in params:
logging.warning(
"Model trained with a deprecated neighborhood management. "
"Fixing.")
r = params['neighborhood_radius']
if params['neighborhood_type'] == 'grid':
res = 1

if isinstance(r, list):
assert len(r) == 1
rad = r[0]
assert int(rad) == rad, \
"Failed. Cannot interpret float radius anymore."
rad = int(rad)
else:
rad = 1
else:
if isinstance(r, list):
res = r[0]
rad = len(r)
assert np.all(np.diff(r) == res), \
"Failed. Cannot use that type of neighborhood anymore. " \
"Resolution must be the same between each layer of " \
"neighborhood."
else:
res = r
rad = 1

logging.warning("Guessed values are: resolution {}, radius {}"
.format(res, rad))
params['neighborhood_resolution'] = float(res)
params['neighborhood_radius'] = rad

return params

def move_to(self, device):
super().move_to(device)
if self.neighborhood_vectors is not None:
@@ -441,20 +400,6 @@ def add_args_model_with_pd(p):
help="If true, normalize the previous directions (before the "
"embedding layer,\n if any, and before adding to the input.")

@classmethod
def _load_params(cls, model_dir):
params = super()._load_params(model_dir)

# Will eventually be deprecated:
if 'prev_dirs_embedding_size' in params:
logging.warning(
"Deprecated param prev_dirs_embedding_size. Now called "
"prev_dirs_embedded_size. Changing")
params['prev_dirs_embedded_size'] = params['prev_dirs_embedding_size']
del params['prev_dirs_embedding_size']

return params

@property
def params_for_checkpoint(self):
p = super().params_for_checkpoint
@@ -683,36 +628,6 @@ def instantiate_nn_embedding(self):
nb_features_in=input_size,
nb_features_out=self.computed_input_embedded_size)

@classmethod
def _load_params(cls, model_dir):
params = super()._load_params(model_dir)

# Will eventually be deprecated:
if 'input_embedding_size' in params:
logging.warning(
"Deprecated param input_embedding_size. Now called "
"input_embedded_size. Changing")
params['input_embedded_size'] = params['input_embedding_size']
del params['input_embedding_size']

if 'input_embedding_size_ratio' in params:
if params['input_embedding_size_ratio'] is None:
logging.warning(
"Deprecated params 'input_embedding_size_ratio', but was "
"None. Ignoring")
del params['input_embedding_size_ratio']
else:
raise ValueError("Deprecated use of "
"'input_embedding_size_ratio'. Cannot proceed.")

# These values did not exist in older models.
if 'nb_cnn_filters' not in params:
params['nb_cnn_filters'] = None
if 'kernel_size' not in params:
params['kernel_size'] = None

return params

@property
def params_for_checkpoint(self):
# Every parameter necessary to build the different layers again.
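Note: with these shims gone, parameter files saved by older experiments must already use the new key names (prev_dirs_embedded_size, input_embedded_size, and the neighborhood_radius/neighborhood_resolution pair). A hypothetical one-off migration for the two simple renames, not part of dwi_ml, could look like the sketch below (the parameters file name is an assumption; the removed neighborhood conversion is not reproduced):

```python
import json

# Hypothetical parameters file written by an older experiment.
with open("parameters.json") as f:
    params = json.load(f)

# Key renames mirroring the shims removed from main_models.py.
renames = {
    'prev_dirs_embedding_size': 'prev_dirs_embedded_size',
    'input_embedding_size': 'input_embedded_size',
}
for old, new in renames.items():
    if old in params and new not in params:
        params[new] = params.pop(old)

with open("parameters.json", "w") as f:
    json.dump(params, f, indent=4)
```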
17 changes: 0 additions & 17 deletions dwi_ml/models/projects/learn2track_model.py
@@ -219,23 +219,6 @@ def params_for_checkpoint(self):

return params

@classmethod
def _load_state(cls, model_dir):
model_state = super()._load_state(model_dir)

if 'input_embedding.linear.weight' in model_state:
logging.warning("Deprecated variable name input_embedding. Now "
"called input_embedding_layer. Fixing model "
"state at loading.")
model_state['input_embedding_layer.linear.weight'] = \
model_state['input_embedding.linear.weight']
model_state['input_embedding_layer.linear.bias'] = \
model_state['input_embedding.linear.bias']
del model_state['input_embedding.linear.weight']
del model_state['input_embedding.linear.bias']

return model_state

@property
def computed_params_for_display(self):
p = super().computed_params_for_display
23 changes: 0 additions & 23 deletions dwi_ml/models/projects/transformer_models.py
@@ -257,13 +257,6 @@ def params_for_checkpoint(self):
def _load_params(cls, model_dir):
params = super()._load_params(model_dir)

# Fix deprecated value
if 'embedding_key_x' in params:
logging.warning("Deprecated model. Variable 'embedding_key_x' "
"now called input_embedding_key. Renaming.")
params['input_embedding_key'] = params['embedding_key_x']
del params['embedding_key_x']

# d_model now a property method.
if 'd_model' in params:
if isinstance(cls, TransformerSrcOnlyModel):
@@ -273,22 +266,6 @@

return params

@classmethod
def _load_state(cls, model_dir):
model_state = super()._load_state(model_dir)

if 'embedding_layer_x.linear.weight' in model_state:
logging.warning("Deprecated variable name embedding_layer_x. Now "
"called input_embedding_layer. Fixing model "
"state at loading.")
model_state['input_embedding_layer.linear.weight'] = \
model_state['embedding_layer_x.linear.weight']
model_state['input_embedding_layer.linear.bias'] = \
model_state['embedding_layer_x.linear.bias']
del model_state['embedding_layer_x.linear.weight']
del model_state['embedding_layer_x.linear.bias']
return model_state

def set_context(self, context):
assert context in ['training', 'validation', 'tracking', 'visu']
self._context = context
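Note: the removed _load_state shims (here and in learn2track_model.py above) renamed old checkpoint keys on the fly; saved model states must now already use input_embedding_layer. A hypothetical external migration, mirroring only the key names from the removed code (the checkpoint file name and the torch.load/torch.save usage are assumptions):

```python
import torch

state = torch.load("best_model_state.pkl", map_location="cpu")  # hypothetical path

# Old prefixes handled by the removed shims, mapped to the current name.
for old_prefix in ("input_embedding", "embedding_layer_x"):
    for suffix in ("linear.weight", "linear.bias"):
        old_key = f"{old_prefix}.{suffix}"
        if old_key in state:
            state[f"input_embedding_layer.{suffix}"] = state.pop(old_key)

torch.save(state, "best_model_state.pkl")
```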
6 changes: 0 additions & 6 deletions dwi_ml/training/batch_loaders.py
@@ -161,12 +161,6 @@ def params_for_checkpoint(self):
@classmethod
def init_from_checkpoint(cls, dataset, model, checkpoint_state,
new_log_level):
# Adding noise_gaussian_size_loss for deprecated batch loaders
if 'noise_gaussian_size_loss' not in checkpoint_state:
logging.warning("Deprecated batch loader. Did not contain a "
"noise_gaussian_size_loss value. Setting to 0.0.")
checkpoint_state['noise_gaussian_size_loss'] = 0.0

batch_loader = cls(dataset=dataset, model=model,
log_level=new_log_level, **checkpoint_state)
return batch_loader
50 changes: 5 additions & 45 deletions dwi_ml/training/trainers.py
@@ -54,7 +54,6 @@ def __init__(self,
experiment_name: str, batch_sampler: DWIMLBatchIDSampler,
batch_loader: DWIMLAbstractBatchLoader,
learning_rates: Union[List, float] = None,
lr_decrease_params: Tuple[float, float] = None,
weight_decay: float = 0.01,
optimizer: str = 'Adam', max_epochs: int = 10,
max_batches_per_epoch_training: int = 1000,
@@ -87,12 +86,6 @@
torch's default, 0.001). A list [0.01, 0.01, 0.001], for instance,
would use these values for the first 3 epochs, and keep the final
value for remaining epochs.
lr_decrease_params: Tuple[float, float]
Parameters [E, L] to set the learning rate an exponential decreasing
curve. The final curve will be init_lr * exp(-x / r). The rate of
decrease, r, is defined in order to ensure that the learning rate
curve will hit value L at epoch E.
learning_rates must be a single float value.
weight_decay: float
Add a weight decay penalty on the parameters. Default: 0.01.
(torch's default).
@@ -152,22 +145,9 @@ def __init__(self,
self.comet_project = comet_project
self.space = 'vox'
self.origin = 'corner'
self.lr_decrease_params = lr_decrease_params
self.clip_grad = clip_grad

# Learning rate:
if lr_decrease_params is not None:
assert isinstance(learning_rates, float), \
"To use lr_decrease_params, the learning_rate cannot be a " \
"list of learning rates. Expecting a single float value, but " \
"got {}".format(learning_rates)
self.initial_lr = learning_rates # Initial value
x, y = lr_decrease_params
assert x.is_integer(), \
"First value of lr_decrease_params should be an epoch " \
"(integer), but got {}".format(x)
self.lr_decrease_rate = -x / np.log(y / self.initial_lr)

if learning_rates is None:
self.learning_rates = [0.001]
elif isinstance(learning_rates, float):
@@ -360,7 +340,6 @@ def params_for_checkpoint(self):
# user to increase the patience when running again.
params = {
'learning_rates': self.learning_rates,
'lr_decrease_params': self.lr_decrease_params,
'weight_decay': self.weight_decay,
'max_epochs': self.max_epochs,
'max_batches_per_epoch_training': self.max_batches_per_epochs_train,
@@ -478,16 +457,6 @@ def init_from_checkpoint(
"""
trainer_params = checkpoint_state['params_for_init']

# Will eventually be deprecated:
if 'tracking_phase_nb_steps_init' in trainer_params:
logging.warning(
"Model trained with an older version of dwi_ml. Param "
"tracking_phase_nb_steps_init will soon be deprecated. Now "
"called tracking_phase_nb_segments_init, with value one less.")
val = trainer_params['tracking_phase_nb_steps_init']
del trainer_params['tracking_phase_nb_steps_init']
trainer_params['tracking_phase_nb_segments_init'] = val - 1

trainer = cls(model=model, experiments_path=experiments_path,
experiment_name=experiment_name,
batch_sampler=batch_sampler,
@@ -559,12 +528,7 @@ def _update_states_from_checkpoint(self, current_states):

# F. Monitors
for monitor in self.monitors:
- if (monitor.name == 'unclipped_grad_norm_monitor' and
-         'unclipped_grad_norm_monitor_state' not in current_states):
-     logging.warning("Deprecated trainer. Did not contain an "
-                     "unclipped grad monitor. Starting as new.")
- else:
-     monitor.set_state(current_states[monitor.name + '_state'])
+ monitor.set_state(current_states[monitor.name + '_state'])

def _init_comet(self):
"""
@@ -689,16 +653,12 @@ def train_and_validate(self):
.format(epoch, epoch + 1))

# Computing learning rate
- if self.lr_decrease_params is not None:
-     # Exponential decrease
-     current_lr = self.initial_lr * np.exp(-epoch/self.lr_decrease_rate)
- else:
-     # User-given values
-     current_lr = self.learning_rates[
-         min(self.current_epoch, len(self.learning_rates) - 1)]
+ current_lr = self.learning_rates[
+     min(self.current_epoch, len(self.learning_rates) - 1)]
logger.info("Learning rate = {}".format(current_lr))
if self.comet_exp:
- self.comet_exp.log_metric("learning_rate", current_lr, step=epoch)
+ self.comet_exp.log_metric("learning_rate", current_lr,
+                           step=epoch)

for g in self.optimizer.param_groups:
g['lr'] = current_lr
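Note: after this change the trainer only indexes the user-given learning_rates list, keeping the last value for any remaining epochs. The removed --lr_decrease_params behaviour, init_lr * exp(-epoch / r) with r chosen so the curve hits value L at epoch E, can still be reproduced by precomputing such a list. A sketch using the formula from the removed docstring (the example numbers are arbitrary):

```python
import numpy as np

# Arbitrary example values: start at 0.001 and reach 0.0001 at epoch 50.
init_lr, E, L, max_epochs = 0.001, 50, 0.0001, 100

# Same rate as the removed code: r = -E / ln(L / init_lr).
r = -E / np.log(L / init_lr)
learning_rates = [float(init_lr * np.exp(-epoch / r)) for epoch in range(max_epochs)]

# Per-epoch selection, as the trainer now does:
for epoch in (0, 50, 150):
    current_lr = learning_rates[min(epoch, len(learning_rates) - 1)]
    print(epoch, round(current_lr, 6))  # epoch 50 prints 0.0001
```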
9 changes: 2 additions & 7 deletions dwi_ml/training/utils/monitoring.py
@@ -130,13 +130,8 @@ def get_state(self):
def set_state(self, state):
self.average_per_epoch = state['average_per_epoch']
self.current_epoch = state['current_epoch']

- if 'ever_max' in state:
-     self.ever_max = state['ever_max']
-     self.ever_min = state['ever_min']
- else:
-     logging.info("Deprecated model. Ever_max and ever_min values not "
-                  "set in {}. Ignoring".format(self.name))
+ self.ever_max = state['ever_max']
+ self.ever_min = state['ever_min']


class BestEpochMonitor(object):
8 changes: 0 additions & 8 deletions dwi_ml/training/utils/trainer.py
@@ -18,14 +18,6 @@ def add_training_args(p: argparse.ArgumentParser,
"Ex: '--learning_rate 0.001*3 0.0001' would set the lr to 0.001 "
"for the first \n3 epochs, and 0.0001 for the remaining epochs.\n"
"(torch's default = 0.001)")
training_group.add_argument(
'--lr_decrease_params', metavar='E L', nargs=2, type=float,
help="Parameters [E, L] to set the learning rate an exponential "
"decreasing curve. \nThe final curve will be "
"init_lr * exp(-x / r). The rate of \ndecrease, r, is defined in "
"order to ensure that the learning rate curve will hit \nvalue L "
"at epoch E.\n"
"learning_rate must be a single float value.")
training_group.add_argument(
'--weight_decay', type=float, default=0.01, metavar='v',
help="Add a weight decay penalty on the parameters (regularization "
2 changes: 1 addition & 1 deletion dwi_ml/unit_tests/utils/data_and_models_for_tests.py
@@ -32,7 +32,7 @@ def fetch_testing_data():
name_as_dict = {
'data_for_tests_dwi_ml.zip':
['1beRWAorhaINCncttgwqVAP2rNOfx842Q',
- '8bdbf051877ec5c70aace21c9dab9bb7']}
+ 'da6c94fbef7ac13029acdb8b94325096']}
fetch_data(name_as_dict)

return testing_data_dir
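Note: only the md5 checksum of the test data archive changes here, pointing the tests at a regenerated dataset. A sketch of the kind of md5 verification this hash is presumably used for (the actual download and check live in fetch_data, which is not shown here):

```python
import hashlib

def md5_of(path, chunk_size=8192):
    """Compute the md5 hex digest of a file, reading it in chunks."""
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# The expected value below is the new checksum from the diff above.
assert md5_of("data_for_tests_dwi_ml.zip") == 'da6c94fbef7ac13029acdb8b94325096'
```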
2 changes: 1 addition & 1 deletion dwi_ml/unit_tests/utils/expected_values.py
@@ -21,7 +21,7 @@
└── hdf5_file.hdf5
"""
- # Values corresponding to the testing data, as of Nov 22, 22.
+ # Values corresponding to the testing data, as of Oct 30, 23.
# Created from Emmanuelle's preprocessed WithReversed data.

TEST_EXPECTED_VOLUME_GROUPS = ['input', 'wm_mask']
3 changes: 1 addition & 2 deletions scripts_python/l2t_train_model.py
@@ -119,8 +119,7 @@ def init_from_args(args, sub_loggers_level):
comet_project=args.comet_project,
comet_workspace=args.comet_workspace,
# TRAINING
- learning_rates=lr, lr_decrease_params=args.lr_decrease_params,
- weight_decay=args.weight_decay,
+ learning_rates=lr, weight_decay=args.weight_decay,
optimizer=args.optimizer, max_epochs=args.max_epochs,
max_batches_per_epoch_training=args.max_batches_per_epoch_training,
max_batches_per_epoch_validation=args.max_batches_per_epoch_validation,
1 change: 0 additions & 1 deletion scripts_python/tests/test_all_steps_learn2track.py
@@ -63,7 +63,6 @@ def test_training(script_runner, experiments_path):
input_group_name, streamline_group_name,
'--max_epochs', '1', '--step_size', '0.5',
'--learning_rate', '0.002',
'--lr_decrease_params', '50', '0.001',
'--batch_size_training', '5',
'--batch_size_validation', '5',
'--batch_size_units', 'nb_streamlines',
3 changes: 1 addition & 2 deletions scripts_python/tt_train_model.py
@@ -137,8 +137,7 @@ def init_from_args(args, sub_loggers_level):
comet_project=args.comet_project,
comet_workspace=args.comet_workspace,
# TRAINING
- learning_rates=lr, lr_decrease_params=args.lr_decrease_params,
- weight_decay=args.weight_decay,
+ learning_rates=lr, weight_decay=args.weight_decay,
optimizer=args.optimizer, max_epochs=args.max_epochs,
max_batches_per_epoch_training=args.max_batches_per_epoch_training,
max_batches_per_epoch_validation=args.max_batches_per_epoch_validation,
