sscha_cluster_pod.py
import cellconstructor as CC, cellconstructor.Phonons
import sscha
import sscha.Cluster
import sys, os
def configure_cluster(dirname='mater_work'):
    ## Connection to the cluster over ssh
    ## (setting use_active_shell=True below makes ssh commands run in a login shell)
    cluster = sscha.Cluster.Cluster(
        hostname="yubi@pod-login1.cnsi.ucsb.edu",
        mpi_cmd='mpirun',
        binary="pw.x -npool NPOOL -i PREFIX.pwi > PREFIX.pwo",
        #qos_name='Test',
    )
    ## Do not specify the memory requirement
    cluster.use_memory = False
    #cluster.ram = 180000
    ## Select the partition
    cluster.use_partition = True
    #cluster.partition_name = "batch"
    cluster.partition_name = "short"
    ## No account name is needed on POD (unlike Expanse)
    cluster.use_account = False
    #cluster.account_name = "my_allocation_resources"
    ## Do not use QOS
    cluster.use_qos = False
    ## Do not add 'set -x' (it would echo every executed command to stdout)
    cluster.add_set_minus_x = False
    cluster.n_nodes = 1
    cluster.n_cpu = 4
    #cluster.use_cpu = False
    #cluster.custom_params["get-user-env"] = None
    #cluster.custom_params["cpus-per-task"] = 1
    cluster.custom_params["ntasks-per-node"] = 4
    cluster.time = "2:00:00"
    cluster.n_pool = 2
    cluster.job_number = 3
    cluster.batch_size = 50

    home_workdir = os.path.join("$HOME/work", dirname)
    ## The calculations run in scratch_workdir; input files are copied there
    ## from cluster.workdir and the outputs are moved back (see cp_files below)
    scratch_workdir = os.path.join("/home/yubi/work/Au_gold/02_sscha/b1_tut2_manual", dirname)
    cluster.workdir = home_workdir
    cluster.load_modules = f"""
module load intel
module load mpi
module load mkl
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
mkdir -p {scratch_workdir}
cp $HOME/espresso/pseudo/* {scratch_workdir}/
"""

    def cp_files(lbls):
        # Build the extra script lines that copy the .pwi inputs to the
        # scratch directory and move the .pwo outputs back home.
        extrain = f"cd {scratch_workdir}\n"
        extraout = "sleep 1\n"
        for lbl in lbls:
            extrain += f"cp {home_workdir}/{lbl}.pwi {scratch_workdir}/\n"
            extraout += f"mv {scratch_workdir}/{lbl}.pwo {home_workdir}/\n"
        return extrain, extraout

    # Add the possibility to copy the input files
    cluster.additional_script_parameters = cp_files

    # Force to open a shell when executing ssh commands
    # (otherwise the cluster will not load the module environment)
    #cluster.use_active_shell = True

    cluster.setup_workdir()

    # Check the communication
    if not cluster.CheckCommunication():
        raise ValueError("Impossible to connect to the cluster.")

    return cluster
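

# ----------------------------------------------------------------------
# Usage sketch (not part of the original script): build the cluster
# configuration and inspect it. Only configure_cluster() above is defined
# here; the relaxation setup mentioned in the comments is assumed from the
# standard python-sscha workflow and is indicative only.
if __name__ == "__main__":
    cluster = configure_cluster(dirname="mater_work")
    print("Remote working directory:", cluster.workdir)
    # The returned object is typically handed to an SSCHA relaxation, e.g.
    #   relax = sscha.Relax.SSCHA(minim, ase_calculator=espresso_calc,
    #                             N_configs=50, max_pop=5, cluster=cluster)
    # where 'minim' and 'espresso_calc' must be prepared separately.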