-
Notifications
You must be signed in to change notification settings - Fork 2
/
train.py
executable file
·245 lines (211 loc) · 15.3 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
import sys
import os
import argparse
import logging
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.callbacks import StochasticWeightAveraging
from pytorch_lightning.loggers import CSVLogger
from pytorch_lightning.strategies.ddp import DDPStrategy
from torchmdnet.models.model import load_model
from torchmdnet.module import LNNP
from torchmdnet import datasets, priors, models
from torchmdnet.data import DataModule
from torchmdnet.models import output_modules
from torchmdnet.models.utils import rbf_class_mapping, act_class_mapping
from torchmdnet.utils import LoadFromFile, LoadFromCheckpoint, save_argparse, number
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.loggers import TensorBoardLogger
import math
def _str2bool(value):
    """Parse a CLI string into a bool.

    argparse's ``type=bool`` is broken for flags: ``bool("False")`` is True,
    so any non-empty string enables the flag. This parser accepts the usual
    spellings and raises for anything else. Real bools (e.g. values injected
    from the yaml config by LoadFromFile) pass through unchanged.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if value.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(f"Boolean value expected, got {value!r}")
def get_args():
    """Build the CLI parser, parse arguments, and persist them.

    Side effects:
      - optionally redirects stdout/stderr to ``<log_dir>/log`` (``--redirect``)
      - writes the resolved arguments to ``<log_dir>/input.yaml``

    Returns:
        argparse.Namespace with all training/model/dataset options.
    """
    # fmt: off
    parser = argparse.ArgumentParser(description='Training')
    parser.add_argument('--load-model', action=LoadFromCheckpoint, help='Restart training using a model checkpoint')  # keep first
    parser.add_argument('--conf', '-c', type=open, action=LoadFromFile, help='Configuration yaml file')  # keep second
    parser.add_argument('--num-epochs', default=300, type=int, help='number of epochs')
    parser.add_argument('--batch-size', default=32, type=int, help='batch size')
    parser.add_argument('--inference-batch-size', default=None, type=int, help='Batchsize for validation and tests.')
    parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')
    parser.add_argument('--lr-patience', type=int, default=10, help='Patience for lr-schedule. Patience per eval-interval of validation')
    parser.add_argument('--lr-metric', type=str, default='val_loss', choices=['train_loss', 'val_loss'], help='Metric to monitor when deciding whether to reduce learning rate')
    parser.add_argument('--lr-min', type=float, default=1e-6, help='Minimum learning rate before early stop')
    parser.add_argument('--lr-factor', type=float, default=0.8, help='Factor by which to multiply the learning rate when the metric stops improving')
    parser.add_argument('--lr-warmup-steps', type=int, default=0, help='How many steps to warm-up over. Defaults to 0 for no warm-up')
    parser.add_argument('--lr-cosine-length', type=int, default=0, help='Cosine length if lr_schedule is cosine.')
    parser.add_argument('--lr-schedule', type=str, default='reduce_on_plateau', help='Which scheduler to use. Either "cosine" for or "reduce_on_plateau"!')
    parser.add_argument('--early-stopping-patience', type=int, default=30, help='Stop training after this many epochs without improvement')
    parser.add_argument('--reset-trainer', type=_str2bool, default=False, help='Reset training metrics (e.g. early stopping, lr) when loading a model checkpoint')
    parser.add_argument('--weight-decay', type=float, default=0.0, help='Weight decay strength')
    parser.add_argument('--ema-alpha-y', type=float, default=1.0, help='The amount of influence of new losses on the exponential moving average of y')
    parser.add_argument('--ema-alpha-dy', type=float, default=1.0, help='The amount of influence of new losses on the exponential moving average of dy')
    parser.add_argument('--ngpus', type=int, default=-1, help='Number of GPUs, -1 use all available. Use CUDA_VISIBLE_DEVICES=1, to decide gpus')
    parser.add_argument('--num-nodes', type=int, default=1, help='Number of nodes')
    parser.add_argument('--precision', type=int, default=32, choices=[16, 32], help='Floating point precision')
    parser.add_argument('--log-dir', '-l', default='/tmp/logs', help='log file')
    parser.add_argument('--splits', default=None, help='Npz with splits idx_train, idx_val, idx_test')
    parser.add_argument('--dataset-split', default=None, help='Random, scaffold or stratify')
    parser.add_argument('--train-size', type=number, default=None, help='Percentage/number of samples in training set (None to use all remaining samples)')
    parser.add_argument('--val-size', type=number, default=0.05, help='Percentage/number of samples in validation set (None to use all remaining samples)')
    parser.add_argument('--test-size', type=number, default=0.1, help='Percentage/number of samples in test set (None to use all remaining samples)')
    parser.add_argument('--test-interval', type=int, default=10, help='Test interval, one test per n epochs (default: 10)')
    parser.add_argument('--save-interval', type=int, default=10, help='Save interval, one save per n epochs (default: 10)')
    parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
    parser.add_argument('--num-workers', type=int, default=4, help='Number of workers for data prefetch')
    parser.add_argument('--redirect', type=_str2bool, default=False, help='Redirect stdout and stderr to log_dir/log')
    # dataset specific
    parser.add_argument('--dataset', default=None, type=str, choices=datasets.__all__, help='Name of the torch_geometric dataset')
    parser.add_argument('--dataset-root', default='~/data', type=str, help='Data storage directory (not used if dataset is "CG")')
    parser.add_argument('--dataset-arg', default=None, type=str, help='Additional dataset arguments, e.g. target property for QM9 or molecule for MD17. Need to be specified in JSON format i.e. \'{"molecules": "aspirin,benzene"}\'')
    parser.add_argument('--coord-files', default=None, type=str, help='Custom coordinate files glob')
    parser.add_argument('--embed-files', default=None, type=str, help='Custom embedding files glob')
    parser.add_argument('--energy-files', default=None, type=str, help='Custom energy files glob')
    parser.add_argument('--force-files', default=None, type=str, help='Custom force files glob')
    parser.add_argument('--energy-weight', default=1.0, type=float, help='Weighting factor for energies in the loss function')
    parser.add_argument('--force-weight', default=1.0, type=float, help='Weighting factor for forces in the loss function')
    # model architecture
    parser.add_argument('--model', type=str, default='graph-network', choices=models.__all__, help='Which model to train')
    parser.add_argument('--output-model', type=str, default='Scalar', choices=output_modules.__all__, help='The type of output model')
    parser.add_argument('--prior-model', type=str, default=None, choices=priors.__all__, help='Which prior model to use')
    # architectural args
    parser.add_argument('--charge', type=_str2bool, default=False, help='Model needs a total charge')
    parser.add_argument('--spin', type=_str2bool, default=False, help='Model needs a spin state')
    parser.add_argument('--embedding-dimension', type=int, default=256, help='Embedding dimension')
    parser.add_argument('--num-layers', type=int, default=6, help='Number of interaction layers in the model')
    parser.add_argument('--num-rbf', type=int, default=64, help='Number of radial basis functions in model')
    parser.add_argument('--activation', type=str, default='silu', choices=list(act_class_mapping.keys()), help='Activation function')
    parser.add_argument('--rbf-type', type=str, default='expnorm', choices=list(rbf_class_mapping.keys()), help='Type of distance expansion')
    parser.add_argument('--trainable-rbf', type=_str2bool, default=False, help='If distance expansion functions should be trainable')
    parser.add_argument('--neighbor-embedding', type=_str2bool, default=False, help='If a neighbor embedding should be applied before interactions')
    parser.add_argument('--aggr', type=str, default='add', help='Aggregation operation for CFConv filter output. Must be one of \'add\', \'mean\', or \'max\'')
    # Transformer specific
    parser.add_argument('--distance-influence', type=str, default='both', choices=['keys', 'values', 'both', 'none'], help='Where distance information is included inside the attention')
    parser.add_argument('--attn-activation', default='silu', choices=list(act_class_mapping.keys()), help='Attention activation function')
    parser.add_argument('--num-heads', type=int, default=8, help='Number of attention heads')
    # other args
    parser.add_argument('--derivative', default=False, type=_str2bool, help='If true, take the derivative of the prediction w.r.t coordinates')
    parser.add_argument('--cutoff-lower', type=float, default=0.0, help='Lower cutoff in model')
    parser.add_argument('--cutoff-upper', type=float, default=5.0, help='Upper cutoff in model')
    parser.add_argument('--max-z', type=int, default=100, help='Maximum atomic number that fits in the embedding matrix')
    parser.add_argument('--max-num-neighbors', type=int, default=32, help='Maximum number of neighbors to consider in the network')
    parser.add_argument('--standardize', type=_str2bool, default=False, help='If true, multiply prediction by dataset std and add mean')
    parser.add_argument('--reduce-op', type=str, default='add', choices=['add', 'mean'], help='Reduce operation to apply to atomic predictions')
    parser.add_argument('--num-atom-types', type=int, default=5, help='Number of different atom types in contrast to max_z, which gives the highest nuclear charge in the data')
    #ENERGY CORRECTIONS
    parser.add_argument('--use-zbl-repulsion', type=_str2bool, default=False, help='Use ZBL repulsion term')
    parser.add_argument('--use-electrostatics', type=_str2bool, default=False, help='Use electrostatics energy.')
    parser.add_argument('--use-d4-dispersion', type=_str2bool, default=False, help='Use Grimmes D4 dispersion.')
    parser.add_argument('--compute-d4-atomic', type=_str2bool, default=False, help='Use Grimmes D4 dispersion.')
    parser.add_argument('--long-range-cutoff', type=float, default=None, help='Use long-range cutoff for energy correction terms.')
    parser.add_argument('--output-model-charges', type=str, default=None, help='Which output model for charge prediction for energy corrections')
    parser.add_argument('--layernorm-on-vec', type=str, default=None, choices=['whitened'], help='Whether to apply an equivariant layer norm to vec features. Off by default.')
    #Toxicity
    parser.add_argument('--energy-tox-multi-task', type=_str2bool, default=False, help='If true, predict both energy and toxicity.')
    parser.add_argument('--use-total-charge', type=_str2bool, default=False, help='If true, use total charge as input.')
    parser.add_argument('--use-energy-feature', type=_str2bool, default=False, help='If true, use energy as input.')
    parser.add_argument('--toxicity-weight', default=0., type=float, help='Weighting factor for denoising in the loss function.')
    parser.add_argument('--output-channels-toxicity', type=int, default=12, help='Number of toxicity labels')
    parser.add_argument('--linear-probing', type=_str2bool, default=False, help='If true, use linear probing only.')
    parser.add_argument('--use-smiles', type=_str2bool, default=False, help='Use SMILES strings encodings.')
    parser.add_argument('--use-smiles-only', type=_str2bool, default=False, help='Use SMILES strings encodings only.')
    parser.add_argument('--max-len-smiles', type=int, default=174, help='Max. length of SMILES strings.')
    # NOTE(review): type=list splits a CLI string into single characters (e.g. "CHO" -> ['C','H','O']);
    # presumably this option is only ever set from the yaml config — confirm before relying on CLI use.
    parser.add_argument('--atom-types', type=list, default=[], help='All atom types in the data.')
    parser.add_argument('--use-atom-props', type=_str2bool, default=False, help='Use local atom properties.')
    parser.add_argument('--single-task-id', type=int, default=None, help='Which task to train on for single-task training.')
    parser.add_argument('--save-top-k', type=int, default=1, help='How many checkpoints to save. "1" saves the best model only')
    parser.add_argument('--train-type', type=str, default="classification", help='Whether regression or classification.')
    #wandb
    parser.add_argument('--wandb', type=_str2bool, default=True, help='If true, use wandb logging!')
    #Test run
    parser.add_argument('--test-run', type=_str2bool, default=False, help='If true, only run test set inference!')
    parser.add_argument('--test-checkpoint', type=str, default=None, help='Model checkpoint for test run!')
    # fmt: on
    args = parser.parse_args()
    if args.redirect:
        # Mirror all output (including lightning's logger) into log_dir/log.
        sys.stdout = open(os.path.join(args.log_dir, "log"), "w")
        sys.stderr = sys.stdout
        logging.getLogger("pytorch_lightning").addHandler(
            logging.StreamHandler(sys.stdout)
        )
    if args.inference_batch_size is None:
        # Fall back to the training batch size for val/test.
        args.inference_batch_size = args.batch_size
    save_argparse(args, os.path.join(args.log_dir, "input.yaml"), exclude=["conf"])
    return args
def main():
    """Entry point: parse args, build data/model, then train or test.

    Two modes:
      - default: fit with DDP, then evaluate the best checkpoint on the test set
      - ``--test-run``: skip fitting and only evaluate ``--test-checkpoint``
    """
    args = get_args()
    pl.seed_everything(args.seed, workers=True)
    test_run = args.test_run
    # initialize data module
    # NOTE(review): args.dataset_arg is declared as a string option but indexed
    # like a dict here — presumably the yaml config supplies a mapping; confirm.
    # Guarded with .get so the None/keyless default doesn't raise TypeError/KeyError.
    multi_conformer_training = (
        bool(args.dataset_arg) and args.dataset_arg.get("num_conformers", 1) > 1
    )
    if multi_conformer_training:
        print("\nMulti-conformer training...\n")
        args.test_run = False  # ugly, but otherwise pytorch-lightning complaints about hyperparameter mismatch
    data = DataModule(args, multi_conformer_training=multi_conformer_training)
    data.prepare_data()
    data.setup("fit")
    # initialize lightning module
    model = LNNP(args)
    # Classification runs monitor AUC (maximize); regression monitors loss (minimize).
    monitor = "val_loss" if args.train_type == "regression" else "val_tox_auc"
    mode = "min" if args.train_type == "regression" else "max"
    # Checkpoint filename: dataset name when only the best model is kept,
    # otherwise a per-epoch template including the monitored metric.
    # (Bug fix: the val_loss template was accidentally a one-element tuple,
    # which ModelCheckpoint's string `filename` parameter cannot accept.)
    if args.save_top_k == 1:
        filename = f"{args.dataset_arg['dataset']}"
    elif monitor == "val_tox_auc":
        filename = "{epoch}-{val_tox_auc:.5f}"
    else:
        filename = "{epoch}-{val_loss:.5f}-{test_loss:.5f}"
    print(f"\nCheckpointing monitors {monitor} with mode {mode}.\n")
    checkpoint_callback = ModelCheckpoint(
        dirpath=args.log_dir,
        monitor=monitor,
        mode=mode,
        save_top_k=args.save_top_k,  # -1 to save all
        every_n_epochs=args.save_interval,
        filename=filename,
    )
    early_stopping = EarlyStopping(
        monitor, mode=mode, patience=args.early_stopping_patience
    )
    # NOTE(review): "tensorbord" is a typo but also the on-disk directory name;
    # kept as-is so existing logs/tooling keep working — rename deliberately if desired.
    tb_logger = pl.loggers.TensorBoardLogger(
        args.log_dir, name="tensorbord", version="", default_hp_metric=False
    )
    csv_logger = CSVLogger(args.log_dir, name="", version="")
    if args.wandb:
        wandb_logger = WandbLogger()
        wandb_logger.watch(model, log="all")
        logger = [tb_logger, csv_logger, wandb_logger]
    else:
        logger = [tb_logger, csv_logger]
    if not test_run:
        trainer = pl.Trainer(
            strategy=DDPStrategy(find_unused_parameters=False),
            max_epochs=args.num_epochs,
            gpus=args.ngpus,
            num_nodes=args.num_nodes,
            default_root_dir=args.log_dir,
            auto_lr_find=False,
            resume_from_checkpoint=None if args.reset_trainer else args.load_model,
            callbacks=[early_stopping, checkpoint_callback],
            logger=logger,
            gradient_clip_val=2.0,
            gradient_clip_algorithm="value",
            precision=args.precision,
        )
        trainer.fit(model, data)
        # run test set after completing the fit, using the best checkpoint
        # on a fresh single-device trainer (DDP would test on every rank).
        model = LNNP.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
        trainer = pl.Trainer(
            logger=[tb_logger, csv_logger], devices=1, accelerator="gpu"
        )
        trainer.test(model, data)
    else:
        # Inference-only: evaluate the explicitly supplied checkpoint.
        model = LNNP.load_from_checkpoint(args.test_checkpoint)
        trainer = pl.Trainer(
            logger=[tb_logger, csv_logger], devices=1, accelerator="gpu"
        )
        trainer.test(model, data)
if __name__ == "__main__":
    main()