-
Notifications
You must be signed in to change notification settings - Fork 3
/
run_vasp_online.py
108 lines (101 loc) · 3.13 KB
/
run_vasp_online.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
from cluster_mlp.clus_ga_deap import cluster_GA
from ase.data import atomic_numbers, covalent_radii
from ase.calculators.vasp import Vasp
from dask_kubernetes import KubeCluster
from dask.distributed import Client
import torch
from ase.optimize import BFGS
from ase.calculators.emt import EMT
from ase.calculators.vasp import Vasp
if __name__ == "__main__":
    # --- GA / run configuration -------------------------------------------
    use_dask = True          # launch a Dask KubeCluster to parallelize evaluations
    eleNames = ["Pd"]        # element symbols in the cluster
    eleNums = [16]           # composition: count of each element (parallel to eleNames)
    nPool = 10               # clusters in the initial pool (GA population size)
    generations = 3          # number of GA generations
    CXPB = 0.5               # cross-over probability; 1-CXPB is the mutation probability
    use_vasp = True          # use VASP for DFT calculations
    use_vasp_inter = False   # VASP interactive mode; not recommended
    al_method = "Online"     # active learning (AL-GA); use None for plain DFT-GA
    optimizer = BFGS         # ASE local optimizer used during relaxations
    restart = False          # True to restart from a saved generation
    gen_num = 16             # generation number to restart from (only if restart=True)

    # --- DFT calculator ----------------------------------------------------
    # ASE calculator; EMT() and Vasp have been tested with this workflow.
    # Single-point settings (ibrion=-1, nsw=0): ionic relaxation is driven by
    # the ASE optimizer above, not by VASP itself.
    calc = Vasp(
        kpar=1,
        ncore=8,
        encut=400,
        xc="PBE",
        kpts=(1, 1, 1),
        gamma=True,  # Gamma-centered
        ismear=1,
        sigma=0.2,
        ibrion=-1,
        nsw=0,
        # potim=0.2,
        isif=0,
        # ediffg=-0.02,
        # ediff=1e-6,
        lcharg=False,
        lwave=False,
        lreal=False,
        ispin=2,
        isym=0,
    )

    # --- Derived quantities -------------------------------------------------
    # Covalent radii for each species, used when generating cluster geometries.
    eleRadii = [covalent_radii[atomic_numbers[ele]] for ele in eleNames]
    # e.g. ["Pd16"] -> filename "clus_Pd16"; best cluster of each generation
    # is saved under this name.
    comp_list = [eleNames[i] + str(eleNums[i]) for i in range(len(eleNames))]
    filename = 'clus_' + ''.join(comp_list)
    log_file = filename + ".log"

    # Single-element clusters allow simplified mutation/crossover moves.
    singleTypeCluster = len(eleNames) == 1

    if use_dask:
        # Spin up Dask workers on the Kubernetes cluster; one worker per
        # member of the GA pool so evaluations run concurrently.
        cluster = KubeCluster.from_yaml("worker-cpu-spec.yml")
        client = Client(cluster)
        # cluster.adapt(minimum=0, maximum=10)
        cluster.scale(nPool)

    # --- Online active-learning (FLARE/OAL) settings ------------------------
    learner_params = {
        "filename": "relax_example",
        "file_dir": "./",
        "stat_uncertain_tol": 0.08,
        "dyn_uncertain_tol": 0.1,
        "fmax_verify_threshold": 0.05,  # eV/AA
        "reverify_with_parent": False,
        "suppress_warnings": True,
    }
    train_config = {
        "sigma": 4.5,
        "power": 2,
        "cutoff_function": "quadratic",
        "cutoff": 5.0,
        "radial_basis": "chebyshev",
        "cutoff_hyps": [],
        "sigma_e": 0.009,
        "sigma_f": 0.005,
        "sigma_s": 0.0006,
        "hpo_max_iterations": 50,
        "freeze_hyps": 0,
    }

    # Run the genetic algorithm; returns the best individual and final cluster.
    bi, final_cluster = cluster_GA(
        nPool,
        eleNames,
        eleNums,
        eleRadii,
        generations,
        calc,
        filename,
        log_file,
        CXPB,
        singleTypeCluster,
        use_dask,
        use_vasp,
        al_method,
        learner_params,
        train_config,
        optimizer,
        use_vasp_inter,
        restart,
        gen_num,
    )