From 1b4368bb61d0a0b3ecafcbc653593910233af141 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Mon, 19 Jan 2026 16:39:56 +0000 Subject: [PATCH 01/14] add shellout.py skeleton --- Utilities/lib/shellout.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 Utilities/lib/shellout.py diff --git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py new file mode 100644 index 0000000..e1a21ce --- /dev/null +++ b/Utilities/lib/shellout.py @@ -0,0 +1,12 @@ +import timer +import subprocess + + +def exec_subprocess(cmd, verbose=True): + """ + Execute a given shell command + + :param cmd: Description + :param verbose: Description + """ + pass From de73ecd073fb4472fd44209e7987533de6979785 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 10:12:38 +0000 Subject: [PATCH 02/14] Execute a subprocess --- Utilities/lib/shellout.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py index e1a21ce..f27fd11 100644 --- a/Utilities/lib/shellout.py +++ b/Utilities/lib/shellout.py @@ -1,12 +1,22 @@ import timer import subprocess +import shlex +import os -def exec_subprocess(cmd, verbose=True): +def _exec_subprocess(cmd, verbose=True,current_working_directory=os.getcwd()): """ Execute a given shell command - :param cmd: Description - :param verbose: Description + :param cmd: The command to be executed given as a string + :param verbose: A boolean value to determine if the stout + stream is displayed during the runtime. + :param current_working_directory: The directory in which the + command should be executed. 
""" - pass + process = subprocess.run(cmd, + stdin= subprocess.PIPE, + capture_output=True, + cwd=current_working_directory, + timeout=10 + ) From f8b0a3a62515fae26fadf639ce19455810307fd9 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 11:38:14 +0000 Subject: [PATCH 03/14] Implement basic logging --- Utilities/lib/shellout.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py index f27fd11..d22859b 100644 --- a/Utilities/lib/shellout.py +++ b/Utilities/lib/shellout.py @@ -2,6 +2,7 @@ import subprocess import shlex import os +import sys def _exec_subprocess(cmd, verbose=True,current_working_directory=os.getcwd()): @@ -9,14 +10,22 @@ def _exec_subprocess(cmd, verbose=True,current_working_directory=os.getcwd()): Execute a given shell command :param cmd: The command to be executed given as a string - :param verbose: A boolean value to determine if the stout + :param verbose: A boolean value to determine if the stdout stream is displayed during the runtime. :param current_working_directory: The directory in which the command should be executed. 
""" - process = subprocess.run(cmd, + output = subprocess.run(cmd, stdin= subprocess.PIPE, capture_output=True, cwd=current_working_directory, timeout=10 ) + if verbose and output: + sys.stdout.write(f"[DEBUG]{output.stdout}\n") + if output.stderr and output.returncode!= 0: + sys.stderr.write(f"[ERROR] {output.stderr}\n") + if sys.version_info[0] >= 3: + output.stdout = output.stdout.decode() + + return output From 786900f0d414aca9601196d2a06a2f3125daf983 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 11:39:20 +0000 Subject: [PATCH 04/14] Apply black --- Utilities/lib/shellout.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py index d22859b..8ba05f6 100644 --- a/Utilities/lib/shellout.py +++ b/Utilities/lib/shellout.py @@ -5,7 +5,7 @@ import sys -def _exec_subprocess(cmd, verbose=True,current_working_directory=os.getcwd()): +def _exec_subprocess(cmd, verbose=True, current_working_directory=os.getcwd()): """ Execute a given shell command @@ -15,15 +15,16 @@ def _exec_subprocess(cmd, verbose=True,current_working_directory=os.getcwd()): :param current_working_directory: The directory in which the command should be executed. 
""" - output = subprocess.run(cmd, - stdin= subprocess.PIPE, - capture_output=True, - cwd=current_working_directory, - timeout=10 - ) + output = subprocess.run( + cmd, + stdin=subprocess.PIPE, + capture_output=True, + cwd=current_working_directory, + timeout=10, + ) if verbose and output: sys.stdout.write(f"[DEBUG]{output.stdout}\n") - if output.stderr and output.returncode!= 0: + if output.stderr and output.returncode != 0: sys.stderr.write(f"[ERROR] {output.stderr}\n") if sys.version_info[0] >= 3: output.stdout = output.stdout.decode() From 7da2a02f1523c9a00bb5c1a8a3575a1858e0f178 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 11:49:28 +0000 Subject: [PATCH 05/14] Add exception handling --- Utilities/lib/shellout.py | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py index 8ba05f6..fb6d587 100644 --- a/Utilities/lib/shellout.py +++ b/Utilities/lib/shellout.py @@ -15,18 +15,29 @@ def _exec_subprocess(cmd, verbose=True, current_working_directory=os.getcwd()): :param current_working_directory: The directory in which the command should be executed. 
""" - output = subprocess.run( - cmd, - stdin=subprocess.PIPE, - capture_output=True, - cwd=current_working_directory, - timeout=10, - ) - if verbose and output: - sys.stdout.write(f"[DEBUG]{output.stdout}\n") - if output.stderr and output.returncode != 0: - sys.stderr.write(f"[ERROR] {output.stderr}\n") - if sys.version_info[0] >= 3: - output.stdout = output.stdout.decode() + try: - return output + output = subprocess.run( + cmd, + stdin=subprocess.PIPE, + capture_output=True, + cwd=current_working_directory, + timeout=10, + ) + rcode = output.returncode + + if verbose and output: + sys.stdout.write(f"[DEBUG]{output.stdout}\n") + if output.stderr and output.returncode != 0: + sys.stderr.write(f"[ERROR] {output.stderr}\n") + if sys.version_info[0] >= 3: + output.stdout = output.stdout.decode() + + except subprocess.CalledProcessError as exc: + output = exc.output + rcode = exc.returncode + except OSError as exc: + output = exc.strerror + rcode = exc.errno + + return output, rcode From da9af7d01c494291e7518972b117d22f0126295c Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 12:00:17 +0000 Subject: [PATCH 06/14] Add timer decorator --- Utilities/lib/shellout.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py index fb6d587..a60a3a3 100644 --- a/Utilities/lib/shellout.py +++ b/Utilities/lib/shellout.py @@ -1,10 +1,9 @@ import timer import subprocess -import shlex import os import sys - +@timer.run_timer def _exec_subprocess(cmd, verbose=True, current_working_directory=os.getcwd()): """ Execute a given shell command From bb4667a4987733197aba72d3d603b68c3bdbe0c9 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 13:22:50 +0000 Subject: [PATCH 07/14] Seperate command string using shlex --- Utilities/lib/shellout.py | 4 ++++ 1 file changed, 4 insertions(+) diff 
--git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py index a60a3a3..c12d75f 100644 --- a/Utilities/lib/shellout.py +++ b/Utilities/lib/shellout.py @@ -2,6 +2,7 @@ import subprocess import os import sys +import shlex @timer.run_timer def _exec_subprocess(cmd, verbose=True, current_working_directory=os.getcwd()): @@ -14,6 +15,9 @@ def _exec_subprocess(cmd, verbose=True, current_working_directory=os.getcwd()): :param current_working_directory: The directory in which the command should be executed. """ + + cmd = shlex.split(cmd) + try: output = subprocess.run( From 9ca97229b30e7e63e99189ccc9d92dcf8ea16c20 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 13:27:23 +0000 Subject: [PATCH 08/14] Apply black again --- Utilities/lib/shellout.py | 1 + 1 file changed, 1 insertion(+) diff --git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py index c12d75f..c376d74 100644 --- a/Utilities/lib/shellout.py +++ b/Utilities/lib/shellout.py @@ -4,6 +4,7 @@ import sys import shlex + @timer.run_timer def _exec_subprocess(cmd, verbose=True, current_working_directory=os.getcwd()): """ From baac45da32a6c9403290f58cc1fa61d9e1ab0626 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 13:51:54 +0000 Subject: [PATCH 09/14] Replace instances of common.exec_subproc --- Coupled_Drivers/common.py | 67 ---------------------------------- Postprocessing/common/utils.py | 54 ++------------------------- Utilities/lib/shellout.py | 4 +- 3 files changed, 5 insertions(+), 120 deletions(-) diff --git a/Coupled_Drivers/common.py b/Coupled_Drivers/common.py index d3470cf..fdcb0be 100644 --- a/Coupled_Drivers/common.py +++ b/Coupled_Drivers/common.py @@ -228,73 +228,6 @@ def setup_runtime(common_env): return runlen_sec -def exec_subproc_timeout(cmd, timeout_sec=10): - ''' - Execute a given shell command with a timeout. 
Takes a list containing - the commands to be run, and an integer timeout_sec for how long to - wait for the command to run. Returns the return code from the process - and the standard out from the command or 'None' if the command times out. - ''' - process = subprocess.Popen(cmd, shell=False, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - timer = threading.Timer(timeout_sec, process.kill) - try: - timer.start() - stdout, err = process.communicate() - if err: - sys.stderr.write('[SUBPROCESS ERROR] %s\n' % error) - rcode = process.returncode - finally: - timer.cancel() - if sys.version_info[0] >= 3: - output = stdout.decode() - else: - output = stdout - return rcode, output - - -def exec_subproc(cmd, verbose=True): - ''' - Execute given shell command. Takes a list containing the commands to be - run, and a logical verbose which if set to true will write the output of - the command to stdout. - ''' - process = subprocess.Popen(cmd, shell=False, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - output, err = process.communicate() - if verbose and output: - sys.stdout.write('[SUBPROCESS OUTPUT] %s\n' % output) - if err: - sys.stderr.write('[SUBPROCESS ERROR] %s\n' % error) - if sys.version_info[0] >= 3: - output = output.decode() - return process.returncode, output - - -def __exec_subproc_true_shell(cmd, verbose=True): - ''' - Execute given shell command, with shell=True. Only use this function if - exec_subproc does not work correctly. Takes a list containing the commands - to be run, and a logical verbose which if set to true will write the - output of the command to stdout. 
- ''' - process = subprocess.Popen(cmd, shell=True, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - output, err = process.communicate() - if verbose and output: - sys.stdout.write('[SUBPROCESS OUTPUT] %s\n' % output) - if err: - sys.stderr.write('[SUBPROCESS ERROR] %s\n' % error) - if sys.version_info[0] >= 3: - output = output.decode() - return process.returncode, output - def _calculate_ppn_values(nproc, nodes): ''' diff --git a/Postprocessing/common/utils.py b/Postprocessing/common/utils.py index 9a79af4..159d664 100644 --- a/Postprocessing/common/utils.py +++ b/Postprocessing/common/utils.py @@ -23,7 +23,7 @@ import os import errno import shutil -import subprocess +import shellout import timer @@ -153,61 +153,13 @@ def finalcycle(): return fcycle -@timer.run_timer -def exec_subproc(cmd, verbose=True, cwd=os.getcwd()): - ''' - Execute given shell command. - 'cmd' input should be in the form of either a: - string - "cd DIR; command arg1 arg2" - list of words - ["command", "arg1", "arg2"] - Optional arguments: - verbose = False: only reproduce the command std.out upon - failure of the command - True: reproduce std.out regardless of outcome - cwd = Directory in which to execute the command - ''' - import shlex - - cmd_array = [cmd] - if not isinstance(cmd, list): - cmd_array = cmd.split(';') - for i, cmd in enumerate(cmd_array): - # Use shlex.split to cope with arguments that contain whitespace - cmd_array[i] = shlex.split(cmd) - - # Initialise rcode, in the event there is no command - rcode = 99 - output = 'No command provided' - - for cmd in cmd_array: - try: - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT, - universal_newlines=True, cwd=cwd) - rcode = 0 - if verbose: - log_msg('[SUBPROCESS]: ' + str(output)) - except subprocess.CalledProcessError as exc: - output = exc.output - rcode = exc.returncode - except OSError as exc: - output = exc.strerror - rcode = exc.errno - if rcode != 0: - msg = '[SUBPROCESS]: 
Command: {}\n[SUBPROCESS]: Error = {}:\n\t{}' - log_msg(msg.format(' '.join(cmd), rcode, output), level='WARN') - break - - return rcode, output - - def get_utility_avail(utility): '''Return True/False if shell command is available''' try: status = shutil.which(utility) except AttributeError: # subprocess.getstatusoutput does not exist at Python2.7 - status, _ = utils.exec_subproc(utility + ' --help', verbose=False) + status, _ = shellout.exec_subprocess(utility + ' --help') return bool(status) @@ -496,7 +448,7 @@ def _mod_all_calendars_date(indate, delta, cal): cmd = '{} {} --calendar {} --offset {} --print-format ' \ '%Y,%m,%d,%H,%M'.format(datecmd, dateinput, cal, offset) - rcode, output = exec_subproc(cmd, verbose=False) + rcode, output = shellout._exec_subprocess(cmd) else: log_msg('add_period_to_date: Invalid date for conversion to ' 'ISO 8601 date representation: ' + str(outdate), diff --git a/Utilities/lib/shellout.py b/Utilities/lib/shellout.py index c376d74..c0effa8 100644 --- a/Utilities/lib/shellout.py +++ b/Utilities/lib/shellout.py @@ -6,7 +6,7 @@ @timer.run_timer -def _exec_subprocess(cmd, verbose=True, current_working_directory=os.getcwd()): +def _exec_subprocess(cmd, verbose=False, current_working_directory=os.getcwd()): """ Execute a given shell command @@ -44,4 +44,4 @@ def _exec_subprocess(cmd, verbose=True, current_working_directory=os.getcwd()): output = exc.strerror rcode = exc.errno - return output, rcode + return rcode, output From bf3f823aa25eb4a8b613fe855602fea7087a4f11 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 14:42:40 +0000 Subject: [PATCH 10/14] Recplace all shellout instances (AI assisted) --- Coupled_Drivers/cice_driver.py | 37 ++++++++--------- Coupled_Drivers/cpmip_utils.py | 7 ++-- Coupled_Drivers/cpmip_xios.py | 5 ++- Coupled_Drivers/mct_driver.py | 3 +- Coupled_Drivers/nemo_driver.py | 40 +++++++++---------- Coupled_Drivers/rivers_driver.py | 13 +++--- 
Coupled_Drivers/si3_controller.py | 5 ++- Coupled_Drivers/top_controller.py | 5 ++- Coupled_Drivers/unittests/test_cpmip_utils.py | 10 ++--- Coupled_Drivers/unittests/test_cpmip_xios.py | 4 +- .../unittests/test_rivers_driver.py | 20 +++++----- Coupled_Drivers/write_namcouple.py | 3 +- Coupled_Drivers/xios_driver.py | 3 +- 13 files changed, 78 insertions(+), 77 deletions(-) diff --git a/Coupled_Drivers/cice_driver.py b/Coupled_Drivers/cice_driver.py index bc0f1a1..176b8a4 100644 --- a/Coupled_Drivers/cice_driver.py +++ b/Coupled_Drivers/cice_driver.py @@ -30,6 +30,7 @@ import time2days import inc_days import common +import shellout import error import dr_env_lib.cice_def import dr_env_lib.env_lib @@ -184,24 +185,21 @@ def _setup_executable(common_env): #any variables containing things that can be globbed will start with gl_ gl_step_int_match = '^dt=' - _, step_int_val = common.exec_subproc(['grep', gl_step_int_match, - cice_nl]) + _, step_int_val = shellout._exec_subprocess('grep %s %s' % (gl_step_int_match, cice_nl)) cice_step_int = int(re.findall(r'^dt=(\d*)\.?', step_int_val)[0]) cice_steps = (tot_runlen_sec - last_dump_seconds) // cice_step_int - _, cice_histfreq_val = common.exec_subproc(['grep', 'histfreq', cice_nl]) + _, cice_histfreq_val = shellout._exec_subprocess('grep histfreq %s' % cice_nl) cice_histfreq_val = re.findall(r'histfreq\s*=\s*(.*)', cice_histfreq_val)[0] cice_histfreq = __expand_array(cice_histfreq_val)[1] - _, cice_histfreq_n_val = common.exec_subproc([ \ - 'grep', 'histfreq_n', cice_nl]) + _, cice_histfreq_n_val = shellout._exec_subprocess('grep histfreq_n %s' % cice_nl) cice_histfreq_n_val = re.findall(r'histfreq_n\s*=\s*(.*)', cice_histfreq_n_val)[0] cice_histfreq_n = __expand_array(cice_histfreq_n_val) cice_histfreq_n = int(cice_histfreq_n.split(',')[0]) - _, cice_age_rest_val = common.exec_subproc([ \ - 'grep', '^restart_age', cice_nl]) + _, cice_age_rest_val = shellout._exec_subprocess('grep ^restart_age %s' % cice_nl) cice_age_rest = 
re.findall(r'restart_age\s*=\s*(.*)', cice_age_rest_val)[0] @@ -216,7 +214,7 @@ def _setup_executable(common_env): cice_envar['SHARED_FNAME']) sys.exit(error.MISSING_DRIVER_FILE_ERROR) if not common_env['MODELBASIS']: - _, modelbasis_val = common.exec_subproc('grep', 'model_basis_time', + _, modelbasis_val = shellout._exec_subprocess('grep model_basis_time %s' % cice_envar['SHARED_FNAME']) modelbasis_val = re.findall(r'model_basis_time\s*=\s*(.*)', modelbasis_val) @@ -225,7 +223,7 @@ def _setup_executable(common_env): if not common_env['TASKSTART']: common_env.add('TASKSTART', common_env['MODELBASIS']) if not common_env['TASKLENGTH']: - _, tasklength_val = common.exec_subproc('grep', 'run_target_end', + _, tasklength_val = shellout._exec_subprocess('grep run_target_end %s' % cice_envar['SHARED_FNAME']) tasklength_val = re.findall(r'run_target_end\s*=\s*(.*)', tasklength_val) @@ -241,20 +239,18 @@ def _setup_executable(common_env): // cice_step_int else: # This is probably a coupled NWP suite - cmd = ['rose', 'date', str(run_start[0])+'0101T0000Z', - cice_envar['TASK_START_TIME']] - _, time_since_year_start = common.exec_subproc(cmd) + cmd = 'rose date %s0101T0000Z %s' % (str(run_start[0]), cice_envar['TASK_START_TIME']) + _, time_since_year_start = shellout._exec_subprocess(cmd) #The next command works because rose date assumes # 19700101T0000Z is second 0 - cmd = ['rose', 'date', '--print-format=%s', '19700101T00Z', - '--offset='+time_since_year_start] + cmd = 'rose date --print-format=%%s 19700101T00Z --offset=%s' % time_since_year_start # Account for restarting from a failure in next line - # common.exec_subproc returns a tuple containing (return_code, output) - seconds_since_year_start = int(common.exec_subproc(cmd)[1]) \ + # shellout._exec_subprocess returns a tuple containing (return_code, output) + seconds_since_year_start = int(shellout._exec_subprocess(cmd)[1]) \ + last_dump_seconds cice_istep0 = seconds_since_year_start/cice_step_int - _, cice_rst_val = 
common.exec_subproc(['grep', 'restart_dir', cice_nl]) + _, cice_rst_val = shellout._exec_subprocess('grep restart_dir %s' % cice_nl) cice_rst = re.findall(r'restart_dir\s*=\s*\'(.*)\',', cice_rst_val)[0] if cice_rst[-1] == '/': cice_rst = cice_rst[:-1] @@ -266,9 +262,9 @@ def _setup_executable(common_env): cice_restart = os.path.join(cice_rst, cice_envar['CICE_RESTART']) - _, cice_hist_val = common.exec_subproc(['grep', 'history_dir', cice_nl]) + _, cice_hist_val = shellout._exec_subprocess('grep history_dir %s' % cice_nl) cice_hist = re.findall(r'history_dir\s*=\s*\'(.*)\',', cice_hist_val)[0] - _, cice_incond_val = common.exec_subproc(['grep', 'incond_dir', cice_nl]) + _, cice_incond_val = shellout._exec_subprocess('grep incond_dir %s' % cice_nl) cice_incond = re.findall(r'incond_dir\s*=\s*\'(.*)\',', cice_incond_val)[0] for direc in (cice_rst, cice_hist, cice_incond): @@ -312,8 +308,7 @@ def _setup_executable(common_env): if cice_age_rest == 'true': cice_runtype = 'continue' ice_ic = 'set in pointer file' - _, _ = common.exec_subproc([cice_envar['CICE_START'], - '>', cice_restart]) + _, _ = shellout._exec_subprocess('%s > %s' % (cice_envar['CICE_START'], cice_restart)) sys.stdout.write('[INFO] %s > %s' % (cice_envar['CICE_START'], cice_restart)) diff --git a/Coupled_Drivers/cpmip_utils.py b/Coupled_Drivers/cpmip_utils.py index f6bfcf2..946be5c 100644 --- a/Coupled_Drivers/cpmip_utils.py +++ b/Coupled_Drivers/cpmip_utils.py @@ -23,6 +23,7 @@ import sys import error import common +import shellout def get_component_resolution(nlist_file, resolution_variables): ''' @@ -32,7 +33,7 @@ def get_component_resolution(nlist_file, resolution_variables): ''' resolution = 1 for res_var in resolution_variables: - _, out = common.exec_subproc(['grep', res_var, nlist_file], + _, out = shellout._exec_subprocess('grep %s %s' % (res_var, nlist_file), verbose=True) try: i_res = int(re.search(r'(\d+)', out).group(0)) @@ -56,7 +57,7 @@ def get_glob_usage(glob_path, timeout=60): 
filelist = glob.glob(glob_path) if filelist: du_command = ['du', '-c'] + filelist - rcode, output = common.exec_subproc_timeout(du_command, timeout) + rcode, output = shellout._exec_subprocess(du_command, timeout) if rcode == 0: size_k = float(output.split()[-2]) else: @@ -131,7 +132,7 @@ def get_workdir_netcdf_output(timeout=60): i_f.split('.')[-1] == 'nc' and not os.path.islink(i_f)] size_k = -1.0 du_command = ['du', '-c'] + output_files - rcode, output = common.exec_subproc_timeout(du_command, timeout) + rcode, output = shellout._exec_subprocess(du_command, timeout) if rcode == 0: size_k = float(output.split()[-2]) return size_k diff --git a/Coupled_Drivers/cpmip_xios.py b/Coupled_Drivers/cpmip_xios.py index d9dc65a..848ff2a 100644 --- a/Coupled_Drivers/cpmip_xios.py +++ b/Coupled_Drivers/cpmip_xios.py @@ -21,6 +21,7 @@ import shutil import sys import common +import shellout def data_metrics_setup_nemo(): ''' @@ -58,8 +59,8 @@ def measure_xios_client_times(timeout=120): 'xios_client' in i_f and 'out' in i_f] total_files = len(files) for i_f in files: - rcode, out = common.exec_subproc_timeout( - ['grep', 'total time', i_f], timeout) + rcode, out = shellout._exec_subprocess( + 'grep "total time" %s' % i_f, timeout) if rcode == 0: meas_time = float(out.split()[-2]) total_measured += 1 diff --git a/Coupled_Drivers/mct_driver.py b/Coupled_Drivers/mct_driver.py index 0b02996..b25dc66 100644 --- a/Coupled_Drivers/mct_driver.py +++ b/Coupled_Drivers/mct_driver.py @@ -23,6 +23,7 @@ import sys import glob import common +import shellout import error import update_namcouple import dr_env_lib.mct_def @@ -238,7 +239,7 @@ def _setup_executable(common_env, envarinsts, run_info): # Create transient field namelist (note if we're creating a # namcouple on the fly, this will have to wait until after # the namcouple have been created). 
- _, _ = common.exec_subproc('./OASIS_fields') + _, _ = shellout._exec_subprocess('./OASIS_fields') for component in mct_envar['COUPLING_COMPONENTS'].split(): if not component in common_env['models']: diff --git a/Coupled_Drivers/nemo_driver.py b/Coupled_Drivers/nemo_driver.py index e816772..0a6cf37 100644 --- a/Coupled_Drivers/nemo_driver.py +++ b/Coupled_Drivers/nemo_driver.py @@ -27,6 +27,7 @@ import shutil import inc_days import common +import shellout import error try: @@ -78,8 +79,8 @@ def _get_nemorst(nemo_nl_file): ''' Retrieve the nemo restart directory from the nemo namelist file ''' - ocerst_rcode, ocerst_val = common.exec_subproc([ \ - 'grep', 'cn_ocerst_outdir', nemo_nl_file]) + ocerst_rcode, ocerst_val = shellout._exec_subprocess( + 'grep cn_ocerst_outdir %s' % nemo_nl_file) if ocerst_rcode == 0: nemo_rst = re.findall(r'[\"\'](.*?)[\"\']', ocerst_val)[0] if nemo_rst[-1] == '/': @@ -92,8 +93,8 @@ def _get_ln_icebergs(nemo_nl_file): Interrogate the nemo namelist to see if we are running with icebergs, Returns boolean, True if icebergs are used, False if not ''' - icb_rcode, icb_val = common.exec_subproc([ \ - 'grep', 'ln_icebergs', nemo_nl_file]) + icb_rcode, icb_val = shellout._exec_subprocess( + 'grep ln_icebergs %s' % nemo_nl_file) if icb_rcode != 0: sys.stderr.write('Unable to read ln_icebergs in &namberg namelist' ' in the NEMO namelist file %s\n' @@ -303,8 +304,8 @@ def _setup_executable(common_env): nemo_rst = _get_nemorst(nemo_envar['NEMO_NL']) if nemo_rst: restart_direcs.append(nemo_rst) - icerst_rcode, icerst_val = common.exec_subproc([ \ - 'grep', 'cn_icerst_dir', nemo_envar['NEMO_NL']]) + icerst_rcode, icerst_val = shellout._exec_subprocess( + 'grep cn_icerst_dir %s' % nemo_envar['NEMO_NL']) if icerst_rcode == 0: ice_rst = re.findall(r'[\"\'](.*?)[\"\']', icerst_val)[0] if ice_rst[-1] == '/': @@ -440,14 +441,14 @@ def _setup_executable(common_env): sys.exit(error.MISSING_MODEL_FILE_ERROR) # First timestep of the previous cycle - _, 
first_step_val = common.exec_subproc(['grep', gl_first_step_match, - history_nemo_nl]) + _, first_step_val = shellout._exec_subprocess('grep %s %s' % (gl_first_step_match, + history_nemo_nl)) nemo_first_step = int(re.findall(r'.+=(.+),', first_step_val)[0]) # Last timestep of the previous cycle - _, last_step_val = common.exec_subproc(['grep', gl_last_step_match, - history_nemo_nl]) + _, last_step_val = shellout._exec_subprocess('grep %s %s' % (gl_last_step_match, + history_nemo_nl)) nemo_last_step = re.findall(r'.+=(.+),', last_step_val)[0] # The string in the nemo time step field might have any one of @@ -460,15 +461,15 @@ def _setup_executable(common_env): nemo_last_step = 0 # Determine (as an integer) the number of seconds per model timestep - _, nemo_step_int_val = common.exec_subproc(['grep', gl_step_int_match, - nemo_envar['NEMO_NL']]) + _, nemo_step_int_val = shellout._exec_subprocess('grep %s %s' % (gl_step_int_match, + nemo_envar['NEMO_NL'])) nemo_step_int = int(re.findall(r'.+=(\d*)', nemo_step_int_val)[0]) # If the value for nemo_rst_date_value is true then the model uses # absolute date convention, otherwise the dump times are relative to the # start of the model run and have an integer representation - _, nemo_rst_date_value = common.exec_subproc([ \ - 'grep', gl_nemo_restart_date_match, history_nemo_nl]) + _, nemo_rst_date_value = shellout._exec_subprocess( + 'grep %s %s' % (gl_nemo_restart_date_match, history_nemo_nl)) if 'true' in nemo_rst_date_value: nemo_rst_date_bool = True else: @@ -480,8 +481,8 @@ def _setup_executable(common_env): nemo_dump_time = "00000000" # Get the model basis time for this run (YYYYMMDD) - _, model_basis_val = common.exec_subproc( - ['grep', gl_model_basis_time, history_nemo_nl]) + _, model_basis_val = shellout._exec_subprocess( + 'grep %s %s' % (gl_model_basis_time, history_nemo_nl)) nemo_model_basis = re.findall(r'.+=(.+),', model_basis_val)[0] if os.path.isfile(latest_nemo_dump): @@ -768,8 +769,7 @@ def 
_setup_executable(common_env): update_nl_cmd = './update_nemo_nl %s' % update_nl_cmd # REFACTOR TO USE THE SAFE EXEC SUBPROC - update_nl_rcode, _ = common.__exec_subproc_true_shell([ \ - update_nl_cmd]) + update_nl_rcode, _ = shellout._exec_subprocess(update_nl_cmd) if update_nl_rcode != 0: sys.stderr.write('[FAIL] Error updating nemo namelist\n') sys.exit(error.SUBPROC_ERROR) @@ -989,8 +989,8 @@ def _finalize_executable(common_env): write_ocean_out_to_stdout() - _, error_count = common.__exec_subproc_true_shell([ \ - 'grep "E R R O R" ocean.output | wc -l']) + _, error_count = shellout._exec_subprocess( + 'grep "E R R O R" ocean.output | wc -l') if int(error_count) >= 1: sys.stderr.write('[FAIL] An error has been found with the NEMO run.' ' Please investigate the ocean.output file for more' diff --git a/Coupled_Drivers/rivers_driver.py b/Coupled_Drivers/rivers_driver.py index 8dfab29..42ed0f7 100644 --- a/Coupled_Drivers/rivers_driver.py +++ b/Coupled_Drivers/rivers_driver.py @@ -23,6 +23,7 @@ import re import pathlib import common +import shellout import error import dr_env_lib.rivers_def import dr_env_lib.env_lib @@ -58,12 +59,12 @@ def _setup_dates(common_envar): task_length[2], task_length[3], task_length[4]) - start_cmd = ['isodatetime', '%s' % start_date, '-f', '%s' % format_date] - end_cmd = ['isodatetime', '%s' % start_date, '-f', '%s' % format_date, - '-s', '%s' % length_date, '--calendar', '%s' % calendar] + start_cmd = 'isodatetime %s -f "%s"' % (start_date, format_date) + end_cmd = 'isodatetime %s -f "%s" -s %s --calendar %s' % (start_date, format_date, + length_date, calendar) - _, run_start = common.exec_subproc(start_cmd) - _, run_end = common.exec_subproc(end_cmd) + _, run_start = shellout._exec_subprocess(start_cmd) + _, run_end = shellout._exec_subprocess(end_cmd) return run_start.strip(), run_end.strip() @@ -97,7 +98,7 @@ def _update_river_nl(river_envar, run_start, run_end): mod_timenl.replace() # Create the output directory, do not rely on 
f90nml - rcode, val = common.exec_subproc(['grep', 'output_dir', output_nl]) + rcode, val = shellout._exec_subprocess('grep output_dir %s' % output_nl) if rcode == 0: try: output_dir = re.findall(r'[\"\'](.*?)[\"\']', val)[0].rstrip('/') diff --git a/Coupled_Drivers/si3_controller.py b/Coupled_Drivers/si3_controller.py index b0a93e2..48c6ce8 100644 --- a/Coupled_Drivers/si3_controller.py +++ b/Coupled_Drivers/si3_controller.py @@ -22,6 +22,7 @@ import sys import glob import common +import shellout import error import dr_env_lib.ocn_cont_def import dr_env_lib.env_lib @@ -44,8 +45,8 @@ def _get_si3rst(si3_nl_file): ''' Retrieve the SI3 restart directory from the nemo namelist file ''' - si3rst_rcode, si3rst_val = common.exec_subproc([ \ - 'grep', 'cn_icerst_outdir', si3_nl_file]) + si3rst_rcode, si3rst_val = shellout._exec_subprocess( + 'grep cn_icerst_outdir %s' % si3_nl_file) if si3rst_rcode == 0: si3_rst = re.findall('[\"\'](.*?)[\"\']', si3rst_val)[0] if si3_rst[-1] == '/': diff --git a/Coupled_Drivers/top_controller.py b/Coupled_Drivers/top_controller.py index f1d2478..833834a 100644 --- a/Coupled_Drivers/top_controller.py +++ b/Coupled_Drivers/top_controller.py @@ -69,6 +69,7 @@ import glob import shutil import common +import shellout import error import dr_env_lib.ocn_cont_def import dr_env_lib.env_lib @@ -100,8 +101,8 @@ def _get_toprst_dir(top_nl_file): something different. 
''' - toprst_rcode, toprst_val = common.exec_subproc([ \ - 'grep', 'cn_trcrst_outdir', top_nl_file]) + toprst_rcode, toprst_val = shellout._exec_subprocess( + 'grep cn_trcrst_outdir %s' % top_nl_file) if toprst_rcode == 0: top_rst_dir = re.findall('[\"\'](.*?)[\"\']', toprst_val)[0] diff --git a/Coupled_Drivers/unittests/test_cpmip_utils.py b/Coupled_Drivers/unittests/test_cpmip_utils.py index 964b97c..c1a35a1 100644 --- a/Coupled_Drivers/unittests/test_cpmip_utils.py +++ b/Coupled_Drivers/unittests/test_cpmip_utils.py @@ -29,7 +29,7 @@ class TestGetComponentResolution(unittest.TestCase): ''' Test the construction of component resolution from namelist ''' - @mock.patch('cpmip_utils.common.exec_subproc') + @mock.patch('cpmip_utils.shellout._exec_subprocess') def test_get_component_resolution(self, mock_subproc): ''' Test construction of total resolution @@ -44,7 +44,7 @@ def test_get_component_resolution(self, mock_subproc): 6000) subproc_calls = [] for res_var in res_vars: - subproc_calls.append(mock.call(['grep', res_var, 'NEMO_NL'], + subproc_calls.append(mock.call('grep %s NEMO_NL' % res_var, verbose=True)) mock_subproc.assert_has_calls(subproc_calls) @@ -67,7 +67,7 @@ def test_get_glob_usage_nofile(self, mock_glob): self.assertEqual(patch_output.getvalue(), expected_output) @mock.patch('cpmip_utils.glob.glob', return_value=['file1', 'file2']) - @mock.patch('cpmip_utils.common.exec_subproc_timeout', + @mock.patch('cpmip_utils.shellout._exec_subprocess', return_value=(0, '\n128 file1\n128 file2\n256 total\n')) def test_get_glob_usage(self, mock_subproc, mock_glob): ''' @@ -81,7 +81,7 @@ class TestNCDFOutput(unittest.TestCase): Test measurment of NCDF file sizes ''' @mock.patch('cpmip_utils.os.listdir', return_value=[]) - @mock.patch('cpmip_utils.common.exec_subproc_timeout', + @mock.patch('cpmip_utils.shellout._exec_subprocess', return_value=(1, None)) def test_no_files_output(self, mock_subproc, mock_ncdffiles): ''' @@ -91,7 +91,7 @@ def 
test_no_files_output(self, mock_subproc, mock_ncdffiles): @mock.patch('cpmip_utils.os.listdir', return_value=['file1.nc', 'file2.nc']) - @mock.patch('cpmip_utils.common.exec_subproc_timeout', + @mock.patch('cpmip_utils.shellout._exec_subprocess', return_value=(0, '\n128 file1.nc\n128 file2.nc\n256 total\n')) def test_files_output(self, mock_subproc, mock_ncdffiles): ''' diff --git a/Coupled_Drivers/unittests/test_cpmip_xios.py b/Coupled_Drivers/unittests/test_cpmip_xios.py index 2d1c087..58df7c7 100644 --- a/Coupled_Drivers/unittests/test_cpmip_xios.py +++ b/Coupled_Drivers/unittests/test_cpmip_xios.py @@ -93,7 +93,7 @@ def test_no_files(self, mock_listdir): ['xios_client0.out', 'xios_client1.out', 'xios_client2.out']) - @mock.patch('cpmip_xios.common.exec_subproc_timeout') + @mock.patch('cpmip_xios.shellout._exec_subprocess') def test_three_files(self, mock_exec_subproc, mock_listdir): ''' Test that three files with no timeout give mean and max @@ -116,7 +116,7 @@ def test_three_files(self, mock_exec_subproc, mock_listdir): ['xios_client0.out', 'xios_client1.out', 'xios_client2.out']) - @mock.patch('cpmip_xios.common.exec_subproc_timeout') + @mock.patch('cpmip_xios.shellout._exec_subprocess') def test_one_timeout(self, mock_exec_subproc, mock_listdir): ''' Test what happens if there is a timeout diff --git a/Coupled_Drivers/unittests/test_rivers_driver.py b/Coupled_Drivers/unittests/test_rivers_driver.py index 5d9a206..2f25233 100644 --- a/Coupled_Drivers/unittests/test_rivers_driver.py +++ b/Coupled_Drivers/unittests/test_rivers_driver.py @@ -30,26 +30,24 @@ class TestPrivateMethods(unittest.TestCase): Test the private methods of the JULES river standalone driver ''' - @mock.patch('rivers_driver.common.exec_subproc', return_value=[0, 'output']) + @mock.patch('rivers_driver.shellout._exec_subprocess', return_value=[0, 'output']) def test_setup_dates(self, mock_exec): ''' Test the _setup_dates method ''' start, end = rivers_driver._setup_dates(COMMON_ENV) - 
self.assertIn(mock.call(['isodatetime', '19790901T0000Z', - '-f', '%Y-%m-%d %H:%M:%S']), + self.assertIn(mock.call('isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S"'), mock_exec.mock_calls) - self.assertIn(mock.call(['isodatetime', '19790901T0000Z', '-f', - '%Y-%m-%d %H:%M:%S', '-s', 'P1Y4M10DT0H0M', - '--calendar', 'gregorian']), + self.assertIn(mock.call('isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S" -s P1Y4M10DT0H0M --calendar gregorian'), mock_exec.mock_calls) self.assertEqual(len(mock_exec.mock_calls), 2) @mock.patch('rivers_driver.common') + @mock.patch('rivers_driver.shellout') @mock.patch('rivers_driver.os.path.isfile') @mock.patch('rivers_driver.pathlib') - def test_update_river_nl(self, mock_lib, mock_path, mock_common): + def test_update_river_nl(self, mock_lib, mock_path, mock_shellout, mock_common): ''' Test the _update_river_nl method ''' - mock_common.exec_subproc.return_value = (0, 'dir="this/path/"') + mock_shellout._exec_subprocess.returnvalue = (0, 'dir="this/path/"') rivers_driver._update_river_nl(RIVER_ENV, '19790901T0000Z', '19810121T0000Z') @@ -64,14 +62,14 @@ def test_update_river_nl(self, mock_lib, mock_path, mock_common): self.assertIn(mock.call().var_val('output_start', '19790901T0000Z'), nml_calls) self.assertIn(mock.call().replace(), nml_calls) - + self.assertIn(mock.call().var_val('main_run_start', '19790901T0000Z'), nml_calls) self.assertIn(mock.call().var_val('main_run_end', '19810121T0000Z'), nml_calls) - mock_common.exec_subproc.assert_called_once_with( - ['grep', 'output_dir', 'output.nml'] + mock_shellout._exec_subprocess.assert_called_once_with( + 'grep output_dir output.nml' ) mock_lib.Path.assert_called_once_with('this/path') mock_lib.Path().mkdir.assert_called_once_with(parents=True, diff --git a/Coupled_Drivers/write_namcouple.py b/Coupled_Drivers/write_namcouple.py index 5d1fb85..4328497 100644 --- a/Coupled_Drivers/write_namcouple.py +++ b/Coupled_Drivers/write_namcouple.py @@ -19,6 +19,7 @@ import sys import 
itertools import common +import shellout import default_couplings import error import write_cf_name_table @@ -332,4 +333,4 @@ def write_namcouple(common_env, run_info, coupling_list): # Now that namcouple has been created, we can create the transient # field namelist - _, _ = common.exec_subproc('./OASIS_fields') + _, _ = shellout._exec_subprocess('./OASIS_fields') diff --git a/Coupled_Drivers/xios_driver.py b/Coupled_Drivers/xios_driver.py index 3834673..b08cd4d 100644 --- a/Coupled_Drivers/xios_driver.py +++ b/Coupled_Drivers/xios_driver.py @@ -24,6 +24,7 @@ import os import shutil import common +import shellout import dr_env_lib.xios_def import dr_env_lib.env_lib @@ -43,7 +44,7 @@ def _update_iodef( ''' # Work-around in lieu of viable multi component iodef.xml handling - _, _ = common.exec_subproc(['cp', 'mydef.xml', iodef_fname]) + _, _ = shellout._exec_subprocess('cp mydef.xml %s' % iodef_fname) # Note we do not use python's xml module for this job, as the comment # line prevalent in the first line of the GO5 iodef.xml files renders From 787b5b50158cd53823580d55eb436e8503c91e19 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 14:53:39 +0000 Subject: [PATCH 11/14] Apply black to all modified files --- Coupled_Drivers/cice_driver.py | 494 ++++---- Coupled_Drivers/common.py | 221 ++-- Coupled_Drivers/cpmip_utils.py | 158 +-- Coupled_Drivers/cpmip_xios.py | 43 +- Coupled_Drivers/mct_driver.py | 303 ++--- Coupled_Drivers/nemo_driver.py | 1089 +++++++++-------- Coupled_Drivers/rivers_driver.py | 214 ++-- Coupled_Drivers/si3_controller.py | 255 ++-- Coupled_Drivers/top_controller.py | 290 ++--- Coupled_Drivers/unittests/test_cpmip_utils.py | 238 ++-- Coupled_Drivers/unittests/test_cpmip_xios.py | 166 +-- .../unittests/test_rivers_driver.py | 239 ++-- Coupled_Drivers/write_namcouple.py | 423 ++++--- Coupled_Drivers/xios_driver.py | 163 +-- Postprocessing/common/utils.py | 336 ++--- 15 files 
changed, 2529 insertions(+), 2103 deletions(-) diff --git a/Coupled_Drivers/cice_driver.py b/Coupled_Drivers/cice_driver.py index 176b8a4..a9da184 100644 --- a/Coupled_Drivers/cice_driver.py +++ b/Coupled_Drivers/cice_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -18,7 +18,7 @@ Driver for the CICE model, called from link_drivers. Currently this does not cater for stand alone CICE and therefore must be run in conjuction with the NEMO driver -''' +""" import os @@ -37,235 +37,263 @@ def __expand_array(short_array): - ''' + """ Expand a shortened array containing n*m entries into a full list - ''' - long_array = '' - for group in short_array.split(','): - if '*' not in group: - long_array += '%s,' % group + """ + long_array = "" + for group in short_array.split(","): + if "*" not in group: + long_array += "%s," % group else: - multiplier = int(group.split('*')[0]) - value = group.split('*')[1] - long_array += ('%s,' % value) * multiplier - if long_array[-1] == ',': + multiplier = int(group.split("*")[0]) + value = group.split("*")[1] + long_array += ("%s," % value) * multiplier + if long_array[-1] == ",": long_array = long_array[:-1] return long_array + def _verify_fix_rst(pointerfile, task_start): - ''' + """ Verify the restart file for cice is at the time associated with the TASKSTART variable. The pointerfile contains a string of the path to the restart file. If the dates dont match, fix the date in the pointerfile. - ''' + """ # Convert the format of the task start time. Seasonal forecasting # uses a date format that includes seconds, so account for this in # the choice of date formatting. 
try: task_start_datetime = datetime.datetime.strptime( - task_start, "%Y,%m,%d,%H,%M,%S") + task_start, "%Y,%m,%d,%H,%M,%S" + ) except ValueError: - task_start_datetime = datetime.datetime.strptime( - task_start, "%Y,%m,%d,%H,%M") - task_start = task_start_datetime.strftime('%Y%m%d') + task_start_datetime = datetime.datetime.strptime(task_start, "%Y,%m,%d,%H,%M") + task_start = task_start_datetime.strftime("%Y%m%d") # deal with the pointer file - with common.open_text_file(pointerfile, 'r') as pointer_handle: + with common.open_text_file(pointerfile, "r") as pointer_handle: restart_path = pointer_handle.readlines()[0].strip() if not os.path.isfile(restart_path): - sys.stderr.write('[INFO] The CICE restart file %s can not be found\n' % - restart_path) + sys.stderr.write( + "[INFO] The CICE restart file %s can not be found\n" % restart_path + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) - #grab the date from the restart file name. It has form yyyy-mm-dd, to - #match cyclepoint strip the -'s. - restartmatch = re.search(r'\d{4}-\d{2}-\d{2}', - os.path.basename(restart_path)) - restartdate = restartmatch.group(0).replace('-', '') + # grab the date from the restart file name. It has form yyyy-mm-dd, to + # match cyclepoint strip the -'s. + restartmatch = re.search(r"\d{4}-\d{2}-\d{2}", os.path.basename(restart_path)) + restartdate = restartmatch.group(0).replace("-", "") if restartdate != task_start: # write the message to both standard out and standard error - msg = '[WARN ]The CICE restart data does not match the ' \ - ' current task start time\n.' \ - ' Task start time is %s\n' \ - ' CICE restart time is %s\n' % (task_start, restartdate) + msg = ( + "[WARN ]The CICE restart data does not match the " + " current task start time\n." 
+ " Task start time is %s\n" + " CICE restart time is %s\n" % (task_start, restartdate) + ) sys.stdout.write(msg) sys.stderr.write(msg) - #Turn the task_start variable into form yyyy-mm-dd - fixed_restart_date = '%s-%s-%s' % (task_start[:4], - task_start[4:6], - task_start[6:8]) - #Swap the date in the restart path - restart_path_fixed = restart_path.replace(restartmatch.group(0), - fixed_restart_date) - new_pointerfile = '%s.tmp' % (pointerfile) - with common.open_text_file(new_pointerfile, 'w') as new_pointer_handle: - #The restart path line should be padded to 256 characters + # Turn the task_start variable into form yyyy-mm-dd + fixed_restart_date = "%s-%s-%s" % ( + task_start[:4], + task_start[4:6], + task_start[6:8], + ) + # Swap the date in the restart path + restart_path_fixed = restart_path.replace( + restartmatch.group(0), fixed_restart_date + ) + new_pointerfile = "%s.tmp" % (pointerfile) + with common.open_text_file(new_pointerfile, "w") as new_pointer_handle: + # The restart path line should be padded to 256 characters new_pointer_handle.write("{:<256}".format(restart_path_fixed)) os.rename(new_pointerfile, pointerfile) - sys.stdout.write('%s\n' % ('*'*42,)) - sys.stdout.write('[WARN] Automatically fixing CICE restart\n') - sys.stdout.write('[WARN] Update pointer file %s to replace \n' - '[WARN] restart file %s\n' - '[WARN] with\n' - '[WARN] restart file %s\n' % - (pointerfile, restart_path, restart_path_fixed)) - sys.stdout.write('%s\n' % ('*'*42,)) + sys.stdout.write("%s\n" % ("*" * 42,)) + sys.stdout.write("[WARN] Automatically fixing CICE restart\n") + sys.stdout.write( + "[WARN] Update pointer file %s to replace \n" + "[WARN] restart file %s\n" + "[WARN] with\n" + "[WARN] restart file %s\n" % (pointerfile, restart_path, restart_path_fixed) + ) + sys.stdout.write("%s\n" % ("*" * 42,)) else: - sys.stdout.write('[INFO] Validated CICE restart date\n') + sys.stdout.write("[INFO] Validated CICE restart date\n") def 
_load_environment_variables(cice_envar): - ''' + """ Load the CICE environment variables required for the model run into the cice_envar container - ''' + """ cice_envar = dr_env_lib.env_lib.load_envar_from_definition( - cice_envar, dr_env_lib.cice_def.CICE_ENVIRONMENT_VARS_INITIAL) - - cice_envar['ATM_DATA_DIR'] = '%s:%s' % \ - (cice_envar['ATM_DATA_DIR'], cice_envar['CICE_ATMOS_DATA']) - cice_envar['OCN_DATA_DIR'] = '%s:%s' % \ - (cice_envar['OCN_DATA_DIR'], cice_envar['CICE_OCEAN_DATA']) + cice_envar, dr_env_lib.cice_def.CICE_ENVIRONMENT_VARS_INITIAL + ) + + cice_envar["ATM_DATA_DIR"] = "%s:%s" % ( + cice_envar["ATM_DATA_DIR"], + cice_envar["CICE_ATMOS_DATA"], + ) + cice_envar["OCN_DATA_DIR"] = "%s:%s" % ( + cice_envar["OCN_DATA_DIR"], + cice_envar["CICE_OCEAN_DATA"], + ) return cice_envar def _setup_executable(common_env): - ''' + """ Setup the environment and any files required by the executable - ''' + """ # Create the environment variable container cice_envar = dr_env_lib.env_lib.LoadEnvar() # Load the ice namelist path. Information will be retrieved from this file # druing the running of the driver, so check if it exists. - _ = cice_envar.load_envar('CICE_IN', 'ice_in') - cice_nl = cice_envar['CICE_IN'] + _ = cice_envar.load_envar("CICE_IN", "ice_in") + cice_nl = cice_envar["CICE_IN"] if not os.path.isfile(cice_nl): - sys.stderr.write('[FAIL] Can not find the cice namelist file %s\n' % - cice_nl) + sys.stderr.write("[FAIL] Can not find the cice namelist file %s\n" % cice_nl) sys.exit(error.MISSING_DRIVER_FILE_ERROR) # load the remaining environment variables cice_envar = _load_environment_variables(cice_envar) - calendar = common_env['CALENDAR'] - if calendar == '360day': - calendar = '360' + calendar = common_env["CALENDAR"] + if calendar == "360day": + calendar = "360" caldays = 360 cice_leap_years = ".false." - elif calendar == '365day': - calendar = '365' + elif calendar == "365day": + calendar = "365" caldays = 365 cice_leap_years = ".false." 
else: caldays = 365 cice_leap_years = ".true." - #turn our times into lists of integers - model_basis = [int(i) for i in common_env['MODELBASIS'].split(',')] - run_start = [int(i) for i in common_env['TASKSTART'].split(',')] - run_length = [int(i) for i in common_env['TASKLENGTH'].split(',')] - - run_days = inc_days.inc_days(run_start[0], run_start[1], run_start[2], - run_length[0], run_length[1], run_length[2], - calendar) - days_to_start = time2days.time2days(run_start[0], run_start[1], - run_start[2], calendar) - - tot_runlen_sec = run_days * 86400 + run_length[3]*3600 + run_length[4]*60 \ - + run_length[5] + # turn our times into lists of integers + model_basis = [int(i) for i in common_env["MODELBASIS"].split(",")] + run_start = [int(i) for i in common_env["TASKSTART"].split(",")] + run_length = [int(i) for i in common_env["TASKLENGTH"].split(",")] + + run_days = inc_days.inc_days( + run_start[0], + run_start[1], + run_start[2], + run_length[0], + run_length[1], + run_length[2], + calendar, + ) + days_to_start = time2days.time2days( + run_start[0], run_start[1], run_start[2], calendar + ) + + tot_runlen_sec = ( + run_days * 86400 + run_length[3] * 3600 + run_length[4] * 60 + run_length[5] + ) # These variables default to zero except in operational NWP suite where # a run can be restarted part way through after a failure. 
# In this case CONTINUE_FROM_FAIL should also be true - last_dump_hours = int(common_env['LAST_DUMP_HOURS']) - last_dump_seconds = last_dump_hours*3600 - - #any variables containing things that can be globbed will start with gl_ - gl_step_int_match = '^dt=' - _, step_int_val = shellout._exec_subprocess('grep %s %s' % (gl_step_int_match, cice_nl)) - cice_step_int = int(re.findall(r'^dt=(\d*)\.?', step_int_val)[0]) + last_dump_hours = int(common_env["LAST_DUMP_HOURS"]) + last_dump_seconds = last_dump_hours * 3600 + + # any variables containing things that can be globbed will start with gl_ + gl_step_int_match = "^dt=" + _, step_int_val = shellout._exec_subprocess( + "grep %s %s" % (gl_step_int_match, cice_nl) + ) + cice_step_int = int(re.findall(r"^dt=(\d*)\.?", step_int_val)[0]) cice_steps = (tot_runlen_sec - last_dump_seconds) // cice_step_int - _, cice_histfreq_val = shellout._exec_subprocess('grep histfreq %s' % cice_nl) - cice_histfreq_val = re.findall(r'histfreq\s*=\s*(.*)', cice_histfreq_val)[0] + _, cice_histfreq_val = shellout._exec_subprocess("grep histfreq %s" % cice_nl) + cice_histfreq_val = re.findall(r"histfreq\s*=\s*(.*)", cice_histfreq_val)[0] cice_histfreq = __expand_array(cice_histfreq_val)[1] - _, cice_histfreq_n_val = shellout._exec_subprocess('grep histfreq_n %s' % cice_nl) - cice_histfreq_n_val = re.findall(r'histfreq_n\s*=\s*(.*)', - cice_histfreq_n_val)[0] + _, cice_histfreq_n_val = shellout._exec_subprocess("grep histfreq_n %s" % cice_nl) + cice_histfreq_n_val = re.findall(r"histfreq_n\s*=\s*(.*)", cice_histfreq_n_val)[0] cice_histfreq_n = __expand_array(cice_histfreq_n_val) - cice_histfreq_n = int(cice_histfreq_n.split(',')[0]) + cice_histfreq_n = int(cice_histfreq_n.split(",")[0]) - _, cice_age_rest_val = shellout._exec_subprocess('grep ^restart_age %s' % cice_nl) - cice_age_rest = re.findall(r'restart_age\s*=\s*(.*)', - cice_age_rest_val)[0] + _, cice_age_rest_val = shellout._exec_subprocess("grep ^restart_age %s" % cice_nl) + 
cice_age_rest = re.findall(r"restart_age\s*=\s*(.*)", cice_age_rest_val)[0] # If the variables MODELBASIS, TASKSTART, TASKLENGTH are unset from the # environment then read from the shared namelist file - if False in (common_env['MODELBASIS'], - common_env['TASKSTART'], - common_env['TASKLENGTH']): + if False in ( + common_env["MODELBASIS"], + common_env["TASKSTART"], + common_env["TASKLENGTH"], + ): # at least one variable has to be read from the shared namelist file - if not os.path.ispath(cice_envar['SHARED_FNAME']): - sys.stderr.write('[FAIL] Can not find shared namelist file %s\n' % - cice_envar['SHARED_FNAME']) + if not os.path.ispath(cice_envar["SHARED_FNAME"]): + sys.stderr.write( + "[FAIL] Can not find shared namelist file %s\n" + % cice_envar["SHARED_FNAME"] + ) sys.exit(error.MISSING_DRIVER_FILE_ERROR) - if not common_env['MODELBASIS']: - _, modelbasis_val = shellout._exec_subprocess('grep model_basis_time %s' % - cice_envar['SHARED_FNAME']) - modelbasis_val = re.findall(r'model_basis_time\s*=\s*(.*)', - modelbasis_val) + if not common_env["MODELBASIS"]: + _, modelbasis_val = shellout._exec_subprocess( + "grep model_basis_time %s" % cice_envar["SHARED_FNAME"] + ) + modelbasis_val = re.findall(r"model_basis_time\s*=\s*(.*)", modelbasis_val) modelbasis = [int(i) for i in __expand_array(modelbasis_val)] - common_env.add('MODELBASIS', modelbasis) - if not common_env['TASKSTART']: - common_env.add('TASKSTART', common_env['MODELBASIS']) - if not common_env['TASKLENGTH']: - _, tasklength_val = shellout._exec_subprocess('grep run_target_end %s' % - cice_envar['SHARED_FNAME']) - tasklength_val = re.findall(r'run_target_end\s*=\s*(.*)', - tasklength_val) + common_env.add("MODELBASIS", modelbasis) + if not common_env["TASKSTART"]: + common_env.add("TASKSTART", common_env["MODELBASIS"]) + if not common_env["TASKLENGTH"]: + _, tasklength_val = shellout._exec_subprocess( + "grep run_target_end %s" % cice_envar["SHARED_FNAME"] + ) + tasklength_val = 
re.findall(r"run_target_end\s*=\s*(.*)", tasklength_val) tasklength = [int(i) for i in __expand_array(tasklength_val)] - common_env.add('TASKLENGTH', tasklength) + common_env.add("TASKLENGTH", tasklength) - if cice_envar['TASK_START_TIME'] == 'unavaliable': + if cice_envar["TASK_START_TIME"] == "unavaliable": # This is probably a climate suite days_to_year_init = time2days.time2days(model_basis[0], 1, 1, calendar) - days_to_start = time2days.time2days(run_start[0], run_start[1], - run_start[2], calendar) - cice_istep0 = (days_to_start - days_to_year_init) * 86400 \ - // cice_step_int + days_to_start = time2days.time2days( + run_start[0], run_start[1], run_start[2], calendar + ) + cice_istep0 = (days_to_start - days_to_year_init) * 86400 // cice_step_int else: # This is probably a coupled NWP suite - cmd = 'rose date %s0101T0000Z %s' % (str(run_start[0]), cice_envar['TASK_START_TIME']) + cmd = "rose date %s0101T0000Z %s" % ( + str(run_start[0]), + cice_envar["TASK_START_TIME"], + ) _, time_since_year_start = shellout._exec_subprocess(cmd) - #The next command works because rose date assumes + # The next command works because rose date assumes # 19700101T0000Z is second 0 - cmd = 'rose date --print-format=%%s 19700101T00Z --offset=%s' % time_since_year_start + cmd = ( + "rose date --print-format=%%s 19700101T00Z --offset=%s" + % time_since_year_start + ) # Account for restarting from a failure in next line # shellout._exec_subprocess returns a tuple containing (return_code, output) - seconds_since_year_start = int(shellout._exec_subprocess(cmd)[1]) \ - + last_dump_seconds - cice_istep0 = seconds_since_year_start/cice_step_int - - _, cice_rst_val = shellout._exec_subprocess('grep restart_dir %s' % cice_nl) - cice_rst = re.findall(r'restart_dir\s*=\s*\'(.*)\',', cice_rst_val)[0] - if cice_rst[-1] == '/': + seconds_since_year_start = ( + int(shellout._exec_subprocess(cmd)[1]) + last_dump_seconds + ) + cice_istep0 = seconds_since_year_start / cice_step_int + + _, 
cice_rst_val = shellout._exec_subprocess("grep restart_dir %s" % cice_nl) + cice_rst = re.findall(r"restart_dir\s*=\s*\'(.*)\',", cice_rst_val)[0] + if cice_rst[-1] == "/": cice_rst = cice_rst[:-1] - if cice_rst in (os.getcwd(), '.'): - cice_restart = os.path.join(common_env['DATAM'], - cice_envar['CICE_RESTART']) + if cice_rst in (os.getcwd(), "."): + cice_restart = os.path.join(common_env["DATAM"], cice_envar["CICE_RESTART"]) else: - cice_restart = os.path.join(cice_rst, - cice_envar['CICE_RESTART']) + cice_restart = os.path.join(cice_rst, cice_envar["CICE_RESTART"]) - _, cice_hist_val = shellout._exec_subprocess('grep history_dir %s' % cice_nl) - cice_hist = re.findall(r'history_dir\s*=\s*\'(.*)\',', cice_hist_val)[0] - _, cice_incond_val = shellout._exec_subprocess('grep incond_dir %s' % cice_nl) - cice_incond = re.findall(r'incond_dir\s*=\s*\'(.*)\',', cice_incond_val)[0] + _, cice_hist_val = shellout._exec_subprocess("grep history_dir %s" % cice_nl) + cice_hist = re.findall(r"history_dir\s*=\s*\'(.*)\',", cice_hist_val)[0] + _, cice_incond_val = shellout._exec_subprocess("grep incond_dir %s" % cice_nl) + cice_incond = re.findall(r"incond_dir\s*=\s*\'(.*)\',", cice_incond_val)[0] for direc in (cice_rst, cice_hist, cice_incond): # Strip white space @@ -273,148 +301,158 @@ def _setup_executable(common_env): # Check for trailing slashes in directory names and strip them # out if they're present. - if direc.endswith('/'): - direc = direc.rstrip('/') - - if os.path.isdir(direc) and (direc not in ('./', '.')) and \ - common_env['CONTINUE'] == 'false': - sys.stdout.write('[INFO] directory is %s\n' % direc) - sys.stdout.write('[INFO] This is a New Run. Renaming old CICE' - ' history directory\n') + if direc.endswith("/"): + direc = direc.rstrip("/") + + if ( + os.path.isdir(direc) + and (direc not in ("./", ".")) + and common_env["CONTINUE"] == "false" + ): + sys.stdout.write("[INFO] directory is %s\n" % direc) + sys.stdout.write( + "[INFO] This is a New Run. 
Renaming old CICE" " history directory\n" + ) # In seasonal forecasting, we automatically apply # short-stepping to re-try the model. Before re-attempting # it, remove the associated CICE history directory. old_hist_dir = "%s.%s" % (direc, time.strftime("%Y%m%d%H%M")) - if (common_env['SEASONAL'] == 'True' and - int(common_env['CYLC_TASK_TRY_NUMBER']) > 1): + if ( + common_env["SEASONAL"] == "True" + and int(common_env["CYLC_TASK_TRY_NUMBER"]) > 1 + ): common.remove_latest_hist_dir(old_hist_dir) os.rename(direc, old_hist_dir) os.makedirs(direc) elif not os.path.isdir(direc): - sys.stdout.write('[INFO] Creating CICE output directory %s\n' % - direc) + sys.stdout.write("[INFO] Creating CICE output directory %s\n" % direc) os.makedirs(direc) - cice_restart_files = [f for f in os.listdir(cice_rst) if - re.findall(r'.*i\.restart\..*', f)] + cice_restart_files = [ + f for f in os.listdir(cice_rst) if re.findall(r".*i\.restart\..*", f) + ] if not cice_restart_files: - cice_restart_files = ['nofile'] + cice_restart_files = ["nofile"] if not os.path.isfile(os.path.join(cice_rst, cice_restart_files[-1])): - if cice_envar['CICE_START']: - if cice_age_rest == 'true': - cice_runtype = 'continue' - ice_ic = 'set in pointer file' - _, _ = shellout._exec_subprocess('%s > %s' % (cice_envar['CICE_START'], cice_restart)) - sys.stdout.write('[INFO] %s > %s' % - (cice_envar['CICE_START'], - cice_restart)) + if cice_envar["CICE_START"]: + if cice_age_rest == "true": + cice_runtype = "continue" + ice_ic = "set in pointer file" + _, _ = shellout._exec_subprocess( + "%s > %s" % (cice_envar["CICE_START"], cice_restart) + ) + sys.stdout.write( + "[INFO] %s > %s" % (cice_envar["CICE_START"], cice_restart) + ) else: - cice_runtype = 'initial' - ice_ic = cice_envar['CICE_START'] - restart = '.true.' + cice_runtype = "initial" + ice_ic = cice_envar["CICE_START"] + restart = ".true." else: - ice_ic = 'default' - cice_runtype = 'initial' - restart = '.false.' 
+ ice_ic = "default" + cice_runtype = "initial" + restart = ".false." else: - cice_runtype = 'continue' - restart = '.true.' - if cice_envar['CICE_START']: - ice_ic = 'set_in_pointer_file' + cice_runtype = "continue" + restart = ".true." + if cice_envar["CICE_START"]: + ice_ic = "set_in_pointer_file" else: - ice_ic = 'default' + ice_ic = "default" # if this is a continuation verify the restart file date - if cice_runtype == 'continue' and \ - common_env['DRIVERS_VERIFY_RST'] == 'True': - _verify_fix_rst(cice_restart, common_env['TASKSTART']) + if cice_runtype == "continue" and common_env["DRIVERS_VERIFY_RST"] == "True": + _verify_fix_rst(cice_restart, common_env["TASKSTART"]) # if this is a continuation from a failed NWP job we check that the last # CICE dump matches the time of LAST_DUMP_HOURS - if common_env['CONTINUE_FROM_FAIL'] == 'true': - #Read the filename from pointer file + if common_env["CONTINUE_FROM_FAIL"] == "true": + # Read the filename from pointer file with open(cice_restart) as fid: rst_file = fid.readline() - rst_file = rst_file.rstrip('\n').strip() + rst_file = rst_file.rstrip("\n").strip() rst_file = os.path.basename(rst_file) - ymds = [int(f) for f in rst_file[-19:-3].split('-')] - since_start = datetime.datetime(ymds[0], ymds[1], ymds[2], \ - ymds[3]//3600, (ymds[3]%3600)//60, \ - (ymds[3]%3600)%60) \ - - datetime.datetime(run_start[0], run_start[1], run_start[2], - run_start[3], run_start[4]) + ymds = [int(f) for f in rst_file[-19:-3].split("-")] + since_start = datetime.datetime( + ymds[0], + ymds[1], + ymds[2], + ymds[3] // 3600, + (ymds[3] % 3600) // 60, + (ymds[3] % 3600) % 60, + ) - datetime.datetime( + run_start[0], run_start[1], run_start[2], run_start[3], run_start[4] + ) if int(since_start.total_seconds()) != last_dump_seconds: - sys.stderr.write('[FAIL] Last CICE restart not at correct time') - sys.stderr.write('since_start='+since_start.total_seconds()) - sys.stderr.write('last_dump_seconds='+last_dump_seconds) + 
sys.stderr.write("[FAIL] Last CICE restart not at correct time") + sys.stderr.write("since_start=" + since_start.total_seconds()) + sys.stderr.write("last_dump_seconds=" + last_dump_seconds) sys.exit(error.RESTART_FILE_ERROR) - #block of code to modify the main CICE namelist + # block of code to modify the main CICE namelist mod_cicenl = common.ModNamelist(cice_nl) - mod_cicenl.var_val('days_per_year', caldays) - mod_cicenl.var_val('history_file', '%si.%i%s' % - (common_env['RUNID'], - cice_histfreq_n, - cice_histfreq)) - mod_cicenl.var_val('ice_ic', ice_ic) - mod_cicenl.var_val('incond_file', '%si_ic' % common_env['RUNID']) - mod_cicenl.var_val('istep0', int(cice_istep0)) - mod_cicenl.var_val('npt', int(cice_steps)) - mod_cicenl.var_val('pointer_file', cice_restart) - mod_cicenl.var_val('restart', restart) - mod_cicenl.var_val('restart_file', '%si.restart' % - common_env['RUNID']) - mod_cicenl.var_val('runtype', cice_runtype) - mod_cicenl.var_val('use_leap_years', cice_leap_years) - mod_cicenl.var_val('year_init', int(model_basis[0])) - mod_cicenl.var_val('grid_file', cice_envar['CICE_GRID']) - mod_cicenl.var_val('kmt_file', cice_envar['CICE_KMT']) - mod_cicenl.var_val('nprocs', int(cice_envar['CICE_NPROC'])) - mod_cicenl.var_val('atm_data_dir', cice_envar['ATM_DATA_DIR']) - mod_cicenl.var_val('ocn_data_dir', cice_envar['OCN_DATA_DIR']) + mod_cicenl.var_val("days_per_year", caldays) + mod_cicenl.var_val( + "history_file", + "%si.%i%s" % (common_env["RUNID"], cice_histfreq_n, cice_histfreq), + ) + mod_cicenl.var_val("ice_ic", ice_ic) + mod_cicenl.var_val("incond_file", "%si_ic" % common_env["RUNID"]) + mod_cicenl.var_val("istep0", int(cice_istep0)) + mod_cicenl.var_val("npt", int(cice_steps)) + mod_cicenl.var_val("pointer_file", cice_restart) + mod_cicenl.var_val("restart", restart) + mod_cicenl.var_val("restart_file", "%si.restart" % common_env["RUNID"]) + mod_cicenl.var_val("runtype", cice_runtype) + mod_cicenl.var_val("use_leap_years", cice_leap_years) + 
mod_cicenl.var_val("year_init", int(model_basis[0])) + mod_cicenl.var_val("grid_file", cice_envar["CICE_GRID"]) + mod_cicenl.var_val("kmt_file", cice_envar["CICE_KMT"]) + mod_cicenl.var_val("nprocs", int(cice_envar["CICE_NPROC"])) + mod_cicenl.var_val("atm_data_dir", cice_envar["ATM_DATA_DIR"]) + mod_cicenl.var_val("ocn_data_dir", cice_envar["OCN_DATA_DIR"]) mod_cicenl.replace() - return cice_envar def _set_launcher_command(_): - ''' + """ Setup the launcher command for the executable - ''' - sys.stdout.write('[INFO] CICE uses the same launch command as NEMO\n') - launch_cmd = '' + """ + sys.stdout.write("[INFO] CICE uses the same launch command as NEMO\n") + launch_cmd = "" return launch_cmd + def _finalize_executable(_): - ''' + """ Write the Ice output to stdout - ''' - ice_out_file = 'ice_diag.d' + """ + ice_out_file = "ice_diag.d" if os.path.isfile(ice_out_file): - sys.stdout.write('[INFO] CICE output from file %s\n' % ice_out_file) - with open(ice_out_file, 'r') as i_out: + sys.stdout.write("[INFO] CICE output from file %s\n" % ice_out_file) + with open(ice_out_file, "r") as i_out: for line in i_out: sys.stdout.write(line) else: - sys.stdout.write('[INFO] CICE output file %s not avaliable\n' - % ice_out_file) + sys.stdout.write("[INFO] CICE output file %s not avaliable\n" % ice_out_file) def run_driver(common_env, mode, run_info): - ''' + """ Run the driver, and return an instance of dr_env_lib.env_lib.LoadEnvar and as string containing the launcher command for the CICE model - ''' - if mode == 'run_driver': + """ + if mode == "run_driver": exe_envar = _setup_executable(common_env) launch_cmd = _set_launcher_command(exe_envar) model_snd_list = None - elif mode == 'finalize' or 'failure': + elif mode == "finalize" or "failure": _finalize_executable(common_env) exe_envar = None launch_cmd = None diff --git a/Coupled_Drivers/common.py b/Coupled_Drivers/common.py index fdcb0be..2c4266f 100644 --- a/Coupled_Drivers/common.py +++ b/Coupled_Drivers/common.py @@ -1,5 
+1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2022-2025 Met Office. All rights reserved. @@ -16,8 +16,7 @@ DESCRIPTION Common functions and classes required by multiple model drivers -''' - +""" import datetime @@ -34,75 +33,78 @@ class ModNamelist(object): - ''' + """ Modify a fortran namelist. This will not add any new variables, only modify existing ones - ''' + """ def __init__(self, filename): - ''' + """ Initialise the container, with the name of file to be updated - ''' + """ self.filename = filename self.replace_vars = {} def var_val(self, variable, value): - ''' + """ Create a container of variable name, value pairs to be updated. Note that if a variable doesn't exist in the namelist file, then it will be ignored - ''' + """ if isinstance(value, str): - if value.lower() not in ('.true.', '.false.'): - value = '\'%s\'' % value + if value.lower() not in (".true.", ".false."): + value = "'%s'" % value self.replace_vars[variable] = value def replace(self): - ''' + """ Do the update - ''' - output_file = open_text_file(self.filename+'out', 'w') - input_file = open_text_file(self.filename, 'r') + """ + output_file = open_text_file(self.filename + "out", "w") + input_file = open_text_file(self.filename, "r") for line in input_file.readlines(): - variable_name = re.findall(r'\s*(\S*)\s*=\s*', line) + variable_name = re.findall(r"\s*(\S*)\s*=\s*", line) if variable_name: variable_name = variable_name[0] if variable_name in list(self.replace_vars.keys()): - output_file.write('%s=%s,\n' % - (variable_name, - self.replace_vars[variable_name])) + output_file.write( + "%s=%s,\n" % (variable_name, self.replace_vars[variable_name]) + ) else: output_file.write(line) input_file.close() output_file.close() os.remove(self.filename) - os.rename(self.filename+'out', self.filename) + os.rename(self.filename + "out", self.filename) + def find_previous_workdir(cyclepoint, workdir, taskname, 
task_param_run=None): - ''' + """ Find the work directory for the previous cycle. Takes as argument the current cyclepoint, the path to the current work directory, and the current taskname, a value specifying multiple tasks within same cycle (e.g. coupled_run1, coupled_run2) as used in coupled NWP and returns an absolute path. - ''' + """ if task_param_run: stem = workdir.rstrip(task_param_run) nchars = len(task_param_run) - prev_param_run = '{:0{}d}'.format(int(task_param_run) - 1, nchars) + prev_param_run = "{:0{}d}".format(int(task_param_run) - 1, nchars) previous_workdir = stem + prev_param_run if not os.path.isdir(previous_workdir): - sys.stderr.write('[FAIL] Can not find previous work directory for' - ' task %s\n' % taskname) + sys.stderr.write( + "[FAIL] Can not find previous work directory for" + " task %s\n" % taskname + ) sys.exit(error.MISSING_DRIVER_FILE_ERROR) return previous_workdir else: cyclesdir = os.sep.join(workdir.split(os.sep)[:-2]) - #find the work directory for the previous cycle + # find the work directory for the previous cycle work_cycles = os.listdir(cyclesdir) work_cycles.sort() try: @@ -118,19 +120,21 @@ def find_previous_workdir(cyclepoint, workdir, taskname, task_param_run=None): break if not previous_task_cycle: - sys.stderr.write('[FAIL] Can not find previous work directory for' - ' task %s\n' % taskname) + sys.stderr.write( + "[FAIL] Can not find previous work directory for" + " task %s\n" % taskname + ) sys.exit(error.MISSING_DRIVER_FILE_ERROR) return os.path.join(cyclesdir, previous_task_cycle, taskname) def get_filepaths(directory): - ''' + """ Equivilant to ls -d Provides an absolute path to every file in directory including subdirectorys - ''' + """ file_paths = [] for root, _, files in os.walk(directory): for filename in files: @@ -140,118 +144,138 @@ def get_filepaths(directory): def open_text_file(name, mode): - ''' + """ Provide a common function to open a file and provide a suitiable error should this not be possible - 
''' - modes = {'r':'reading', - 'w':'writing', - 'a':'appending', - 'r+':'updating (reading)', - 'w+':'updating (writing)', - 'a+':'updating (appending)'} + """ + modes = { + "r": "reading", + "w": "writing", + "a": "appending", + "r+": "updating (reading)", + "w+": "updating (writing)", + "a+": "updating (appending)", + } if mode not in list(modes.keys()): - options = '' + options = "" for k in modes: - options += ' %s: %s\n' % (k, modes[k]) - sys.stderr.write('[FAIL] Attempting to open file %s, do not recognise' - ' mode %s. Please use one of the following modes:\n%s' - % (name, mode, options)) + options += " %s: %s\n" % (k, modes[k]) + sys.stderr.write( + "[FAIL] Attempting to open file %s, do not recognise" + " mode %s. Please use one of the following modes:\n%s" + % (name, mode, options) + ) sys.exit(error.IOERROR) try: handle = open(name, mode) except IOError: - sys.stderr.write('[FAIL] Unable to open file %s using mode %s (%s)\n' - % (name, mode, modes[mode])) + sys.stderr.write( + "[FAIL] Unable to open file %s using mode %s (%s)\n" + % (name, mode, modes[mode]) + ) sys.exit(error.IOERROR) return handle + def is_non_zero_file(path): - ''' + """ Check to see if a file 'path' exists and has non zero length. Returns True if that is the case. If the file a) doesn't exist, or b) has zero length, returns False - ''' + """ if os.path.isfile(path) and os.path.getsize(path) > 0: return True else: return False + def remove_file(filename): - ''' + """ Check to see if a file or a link exists and if it does, remove it. Return True if a file/link was removed, False otherwise. - ''' + """ if os.path.isfile(filename) or os.path.islink(filename): os.remove(filename) return True else: return False + def setup_runtime(common_env): - ''' + """ Set up model run length in seconds based on the model suite env vars (rather than in the manner of the old UM control scripts by interrogating NEMO namelists!) 
- ''' - if not common_env['CALENDAR']: - sys.stderr.write('[WARN] setup_runtime: Environment variable' \ - ' CALENDAR not set. Assuming 360 day calendar.\n') - calendar = '360' + """ + if not common_env["CALENDAR"]: + sys.stderr.write( + "[WARN] setup_runtime: Environment variable" + " CALENDAR not set. Assuming 360 day calendar.\n" + ) + calendar = "360" else: - calendar = common_env['CALENDAR'] - if calendar == '360day': - calendar = '360' - elif calendar == '365day': - calendar = '365' - elif calendar == 'gregorian': + calendar = common_env["CALENDAR"] + if calendar == "360day": + calendar = "360" + elif calendar == "365day": + calendar = "365" + elif calendar == "gregorian": pass else: - sys.stderr.write('[FAIL] setup_runtime: Calendar type %s not' \ - ' recognised\n' % calendar) + sys.stderr.write( + "[FAIL] setup_runtime: Calendar type %s not" " recognised\n" % calendar + ) sys.exit(error.INVALID_EVAR_ERROR) - # Turn our times into lists of integers - run_start = [int(i) for i in common_env['TASKSTART'].split(',')] - run_length = [int(i) for i in common_env['TASKLENGTH'].split(',')] - - run_days = inc_days.inc_days(run_start[0], run_start[1], run_start[2], - run_length[0], run_length[1], run_length[2], - calendar) + run_start = [int(i) for i in common_env["TASKSTART"].split(",")] + run_length = [int(i) for i in common_env["TASKLENGTH"].split(",")] + + run_days = inc_days.inc_days( + run_start[0], + run_start[1], + run_start[2], + run_length[0], + run_length[1], + run_length[2], + calendar, + ) # Work out the total run length in seconds - runlen_sec = (run_days * 86400) \ - + (run_length[3]*3600) \ - + (run_length[4]*60) \ - + run_length[5] + runlen_sec = ( + (run_days * 86400) + + (run_length[3] * 3600) + + (run_length[4] * 60) + + run_length[5] + ) return runlen_sec def _calculate_ppn_values(nproc, nodes): - ''' + """ Calculates number of processes per node and numa node for launch command options - ''' + """ nproc = int(nproc) nodes = float(nodes) 
numa_nodes = 2 - ppnu = int(math.ceil(nproc/nodes/numa_nodes)) + ppnu = int(math.ceil(nproc / nodes / numa_nodes)) ppn = (ppnu * numa_nodes) if nproc > 1 else nproc return ppnu, ppn def set_aprun_options(nproc, nodes, ompthr, hyperthreads, ss): - ''' + """ Setup the aprun options for the launcher command - ''' + """ ppnu, ppn = _calculate_ppn_values(nproc, nodes) - rose_launcher_preopts = \ - '-n %s -N %s -S %s -d %s -j %s env OMP_NUM_THREADS=%s env HYPERTHREADS=%s' \ - % (nproc, ppn, ppnu, ompthr, hyperthreads, ompthr, hyperthreads) + rose_launcher_preopts = ( + "-n %s -N %s -S %s -d %s -j %s env OMP_NUM_THREADS=%s env HYPERTHREADS=%s" + % (nproc, ppn, ppnu, ompthr, hyperthreads, ompthr, hyperthreads) + ) if ss: rose_launcher_preopts = "-ss " + rose_launcher_preopts @@ -260,18 +284,21 @@ def set_aprun_options(nproc, nodes, ompthr, hyperthreads, ss): def _sort_hist_dirs_by_date(dir_list): - ''' + """ Sort a list of history directories by date - ''' + """ # Pattern that defines the name of the history directories, # which contain a date of the form YYYYmmddHHMM. - pattern = r'\.(\d{12})' + pattern = r"\.(\d{12})" try: - dir_list.sort(key=lambda dname: datetime.datetime.strptime( - re.search(pattern, dname).group(1), '%Y%m%d%H%M')) + dir_list.sort( + key=lambda dname: datetime.datetime.strptime( + re.search(pattern, dname).group(1), "%Y%m%d%H%M" + ) + ) except AttributeError: - msg = '[FAIL] Cannot order directories: %s' % " ".join(dir_list) + msg = "[FAIL] Cannot order directories: %s" % " ".join(dir_list) sys.stderr.write(msg) sys.exit(error.IOERROR) @@ -279,16 +306,15 @@ def _sort_hist_dirs_by_date(dir_list): def remove_latest_hist_dir(old_hist_dir): - ''' + """ If a model task has failed, then removed the last created history directory, before a new one is created, associated with the re-attempt. 
- ''' + """ # Replace the regex pattern that defines the history directory # name (that contains a date of the format YYYYmmddHHMM) with a # generic pattern so that we can perform the directory glob. - history_pattern = re.sub( - r'\.\d{12}', '.????????????', old_hist_dir) + history_pattern = re.sub(r"\.\d{12}", ".????????????", old_hist_dir) # Find and sort the history directories, and delete # the latest one, corresponding to the last entry in @@ -296,14 +322,15 @@ def remove_latest_hist_dir(old_hist_dir): history_dirs = glob.glob(history_pattern) history_dirs = _sort_hist_dirs_by_date(history_dirs) - msg = '[INFO] Found history directories: %s \n' % ' '.join( - history_dirs) + msg = "[INFO] Found history directories: %s \n" % " ".join(history_dirs) sys.stdout.write(msg) latest_hist_dir = history_dirs[-1] - msg = ("[WARN] Re-attempting failed model step. \n" - "[WARN] Clearing out latest history \n" - "[WARN] directory %s. \n" % latest_hist_dir) + msg = ( + "[WARN] Re-attempting failed model step. \n" + "[WARN] Clearing out latest history \n" + "[WARN] directory %s. \n" % latest_hist_dir + ) sys.stdout.write(msg) shutil.rmtree(latest_hist_dir) diff --git a/Coupled_Drivers/cpmip_utils.py b/Coupled_Drivers/cpmip_utils.py index 946be5c..5ca2779 100644 --- a/Coupled_Drivers/cpmip_utils.py +++ b/Coupled_Drivers/cpmip_utils.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -16,7 +16,7 @@ DESCRIPTION Utility functions for the CPMIP controller -''' +""" import glob import os import re @@ -25,200 +25,220 @@ import common import shellout + def get_component_resolution(nlist_file, resolution_variables): - ''' + """ Get the total componenet resolution nx x ny x nz from a given namelist file. The arguments are a namelist file, and a list of the resolution variables within that namelist. 
Returns a single value - ''' + """ resolution = 1 for res_var in resolution_variables: - _, out = shellout._exec_subprocess('grep %s %s' % (res_var, nlist_file), - verbose=True) + _, out = shellout._exec_subprocess( + "grep %s %s" % (res_var, nlist_file), verbose=True + ) try: - i_res = int(re.search(r'(\d+)', out).group(0)) + i_res = int(re.search(r"(\d+)", out).group(0)) resolution *= i_res except AttributeError: - msg = '[WARN] Failed to find resolution %s in file %s.\n' % \ - (res_var, nlist_file) + msg = "[WARN] Failed to find resolution %s in file %s.\n" % ( + res_var, + nlist_file, + ) sys.stdout.write(msg) return resolution + def get_glob_usage(glob_path, timeout=60): - ''' + """ Get the total data from a list of files produced by a glob expression, using the du -c command. This command takes two arguments, a glob expression, and a timeout in seconds. This timeout is required as some filesystems (notably Lustre) can take a long time to respond to metadata queries - ''' + """ size_k = -1.0 filelist = glob.glob(glob_path) if filelist: - du_command = ['du', '-c'] + filelist + du_command = ["du", "-c"] + filelist rcode, output = shellout._exec_subprocess(du_command, timeout) if rcode == 0: size_k = float(output.split()[-2]) else: - sys.stderr.write('[WARN] Attepting to find the size of files described' - ' by glob expression %s. There are no files found' - % glob_path) + sys.stderr.write( + "[WARN] Attepting to find the size of files described" + " by glob expression %s. There are no files found" % glob_path + ) size_k = 0.0 return size_k + def get_datam_output_runonly(common_envar, cpmip_envar, timeout): - ''' + """ Grab the data of interest within the datam directory. We only want the contents of NEMOhist and CICEhist, and the output files labelled with the runid. We must avoid any directories containing lrun/nrun/crun restart tests etc. 
- ''' + """ total_usage = 0.0 failed_components = [] # um files - if 'um' in common_envar['models']: - um_path_gl = os.path.join(common_envar['DATAM'], - '%s*' % common_envar['RUNID']) + if "um" in common_envar["models"]: + um_path_gl = os.path.join(common_envar["DATAM"], "%s*" % common_envar["RUNID"]) um_usage = get_glob_usage(um_path_gl, timeout) if um_usage >= 0.0: total_usage += um_usage else: - failed_components.append('UM') + failed_components.append("UM") # Jnr UM files - if 'jnr' in common_envar['models']: - jnr_path_gl = os.path.join(common_envar['DATAM'], - '%s*' % cpmip_envar['RUNID_JNR']) + if "jnr" in common_envar["models"]: + jnr_path_gl = os.path.join( + common_envar["DATAM"], "%s*" % cpmip_envar["RUNID_JNR"] + ) jnr_usage = get_glob_usage(jnr_path_gl, timeout) if jnr_usage >= 0.0: total_usage += jnr_usage else: - failed_components.append('Jnr') + failed_components.append("Jnr") # nemo files - if 'nemo' in common_envar['models']: - nemo_path_gl = os.path.join(common_envar['DATAM'], 'NEMOhist', '*') + if "nemo" in common_envar["models"]: + nemo_path_gl = os.path.join(common_envar["DATAM"], "NEMOhist", "*") nemo_usage = get_glob_usage(nemo_path_gl, timeout) if nemo_usage >= 0.0: total_usage += nemo_usage else: - failed_components.append('NEMO') + failed_components.append("NEMO") # cice file - if 'cice' in common_envar['models']: - cice_path_gl = os.path.join(common_envar['DATAM'], 'CICEhist', '*') + if "cice" in common_envar["models"]: + cice_path_gl = os.path.join(common_envar["DATAM"], "CICEhist", "*") cice_usage = get_glob_usage(cice_path_gl, timeout) if cice_usage >= 0.0: total_usage += cice_usage else: - failed_components.append('CICE') + failed_components.append("CICE") if failed_components: for failed_component in failed_components: - sys.stderr.write('[FAIL] Unable to determine the usage in DATAM' - ' for the %s component\n' % - failed_component) + sys.stderr.write( + "[FAIL] Unable to determine the usage in DATAM" + " for the %s component\n" % 
failed_component + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) else: return total_usage def get_workdir_netcdf_output(timeout=60): - ''' + """ Gather any netcdf output files written to the work directory - ''' - output_files = [i_f for i_f in os.listdir('.') if \ - i_f.split('.')[-1] == 'nc' and not os.path.islink(i_f)] + """ + output_files = [ + i_f + for i_f in os.listdir(".") + if i_f.split(".")[-1] == "nc" and not os.path.islink(i_f) + ] size_k = -1.0 - du_command = ['du', '-c'] + output_files + du_command = ["du", "-c"] + output_files rcode, output = shellout._exec_subprocess(du_command, timeout) if rcode == 0: size_k = float(output.split()[-2]) return size_k + def tasklength_to_years(tasklength): - ''' + """ Takes in a tasklength variable (string of form Y,M,D,h,m,s) and returns an integer value of the equivalent number of years for a 360 day calendar. - ''' - length = [int(i) for i in tasklength.split(',')] - to_years = (1, 1./30., 1./360., 1./(360.*24.), - 1./(360.*24.*60.), 1./(360.*24.*3600.)) - years = sum([x*y for x, y in zip(to_years, length)]) + """ + length = [int(i) for i in tasklength.split(",")] + to_years = ( + 1, + 1.0 / 30.0, + 1.0 / 360.0, + 1.0 / (360.0 * 24.0), + 1.0 / (360.0 * 24.0 * 60.0), + 1.0 / (360.0 * 24.0 * 3600.0), + ) + years = sum([x * y for x, y in zip(to_years, length)]) return years + def seconds_to_days(time_secs): - ''' + """ Takes in an integer value in units of seconds, and returns a floating point value of that time in days - ''' - time_days = time_secs / (24.*3600.) + """ + time_days = time_secs / (24.0 * 3600.0) return time_days + def get_jobfile_info(jobfile): - ''' + """ Takes in a path to the jobfile and returns a dictionary containing all the directives set by PBS -l. 
This code is specific to the PBS load scheduler present on the Cray systems - ''' - job_f = common.open_text_file(jobfile, 'r') + """ + job_f = common.open_text_file(jobfile, "r") pbs_l_dict = {} for line in job_f.readlines(): # Grab key value pairs of the PBS variables. The pairs are delimited # by colons in the PBS directive. Times are also however defined using # colons (for example on hour is 01:00:00). - if line.strip().startswith('#PBS -l'): - for item in re.findall(r'(\w+)=(\w+(:\d+)*)', line): + if line.strip().startswith("#PBS -l"): + for item in re.findall(r"(\w+)=(\w+(:\d+)*)", line): pbs_l_dict[item[0]] = item[1] job_f.close() return pbs_l_dict def get_select_nodes(jobfile): - ''' + """ Takes in a path to the jobfile and returns a dictionary containing the selected nodes for each component MPMD model - ''' - pbs_line = '' + """ + pbs_line = "" model_nodes = [] - with common.open_text_file(jobfile, 'r') as job_handle: + with common.open_text_file(jobfile, "r") as job_handle: for line in job_handle.readlines(): # Grab the line containing the -l select command - if line[:14] == '#PBS -l select': + if line[:14] == "#PBS -l select": pbs_line = line break - #break up the line - #First model - first_model_nodes = re.match(r'#PBS -l select=(\d+)', pbs_line).group(1) + # break up the line + # First model + first_model_nodes = re.match(r"#PBS -l select=(\d+)", pbs_line).group(1) model_nodes.append(int(first_model_nodes)) # Any additional models - split_pbs_line = pbs_line.split('+')[1:] + split_pbs_line = pbs_line.split("+")[1:] for i_model in split_pbs_line: - i_model_node = re.match(r'(\d+):', i_model).group(1) + i_model_node = re.match(r"(\d+):", i_model).group(1) model_nodes.append(int(i_model_node)) # Check for a coretype try: - coretype = re.match(r'.+coretype=([a-z]+)', line).group(1) + coretype = re.match(r".+coretype=([a-z]+)", line).group(1) except AttributeError: # As chip not specified assume milan chip - coretype = 'milan' + coretype = "milan" return 
model_nodes, coretype def increment_dump(datestr, resub, resub_units): - ''' + """ Increment the dump date to end of cycle, so it can be found to calculate complexity - ''' + """ year = int(datestr[:4]) month = int(datestr[4:6]) day = int(datestr[6:8]) resub = int(resub) - if 'm' in resub_units.lower(): + if "m" in resub_units.lower(): resub *= 30 if resub >= 360: i_years = resub // 360 @@ -241,4 +261,4 @@ def increment_dump(datestr, resub, resub_units): output_month -= 12 i_years += 12 output_year = year + i_years - return '%04d%02d%02d' % (output_year, output_month, output_day) + return "%04d%02d%02d" % (output_year, output_month, output_day) diff --git a/Coupled_Drivers/cpmip_xios.py b/Coupled_Drivers/cpmip_xios.py index 848ff2a..171a45c 100644 --- a/Coupled_Drivers/cpmip_xios.py +++ b/Coupled_Drivers/cpmip_xios.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. @@ -16,26 +16,28 @@ DESCRIPTION CPMIP functions for XIOS -''' +""" import os import shutil import sys import common import shellout + def data_metrics_setup_nemo(): - ''' + """ Set up IODEF file to produce XIOS timing files - ''' - with open('iodef.xml', 'r') as f_in, \ - open('iodef_out.xml', 'w') as f_out: + """ + with open("iodef.xml", "r") as f_in, open("iodef_out.xml", "w") as f_out: update = False for line in f_in.readlines(): if 'variable id="print_file"' in line: continue if update: - updated_line = '\t true\n' + updated_line = ( + '\t true\n' + ) f_out.write(updated_line) f_out.write(line) update = False @@ -43,34 +45,35 @@ def data_metrics_setup_nemo(): f_out.write(line) if 'variable id="using_server"' in line: update = True - shutil.move('iodef_out.xml', 'iodef.xml') + shutil.move("iodef_out.xml", "iodef.xml") + def measure_xios_client_times(timeout=120): - ''' + """ Gather the output from XIOS client files. 
Takes in an optional value of timeout in seconds, as there may be a lot of files and we don't want to hang around forever if there is a problem opening them all. Returns the mean time and high watermark time - ''' + """ total_measured = 0 - total_time = 0. - max_time = 0. - files = [i_f for i_f in os.listdir('.') if \ - 'xios_client' in i_f and 'out' in i_f] + total_time = 0.0 + max_time = 0.0 + files = [i_f for i_f in os.listdir(".") if "xios_client" in i_f and "out" in i_f] total_files = len(files) for i_f in files: - rcode, out = shellout._exec_subprocess( - 'grep "total time" %s' % i_f, timeout) + rcode, out = shellout._exec_subprocess('grep "total time" %s' % i_f, timeout) if rcode == 0: meas_time = float(out.split()[-2]) total_measured += 1 total_time += meas_time if meas_time > max_time: max_time = meas_time - sys.stdout.write('[INFO] Measured timings for (%s/%s) XIOS clients\n' % - (total_measured, total_files)) + sys.stdout.write( + "[INFO] Measured timings for (%s/%s) XIOS clients\n" + % (total_measured, total_files) + ) if total_measured == 0: - sys.stderr.write('[WARN] Unable to find any XIOS client output files\n') + sys.stderr.write("[WARN] Unable to find any XIOS client output files\n") mean_time = 0.0 max_time = 0.0 else: diff --git a/Coupled_Drivers/mct_driver.py b/Coupled_Drivers/mct_driver.py index b25dc66..f80b809 100644 --- a/Coupled_Drivers/mct_driver.py +++ b/Coupled_Drivers/mct_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. 
Use, duplication or disclosure of this code is subject to the restrictions @@ -15,8 +15,7 @@ DESCRIPTION Driver for OASIS3-MCT -''' - +""" import os @@ -29,153 +28,160 @@ import dr_env_lib.mct_def import dr_env_lib.env_lib import cpmip_controller + try: import f90nml except ImportError: pass + def _multiglob(*args): - ''' + """ Takes in a list of globbable strings, and returns a single list of filenames matching those strings - ''' + """ filenames = [] for arg in args: filenames += glob.glob(arg) return filenames + def _setup_river_cpld(common_envar, mct_envar, river_envar): - ''' + """ Setup JULES rivers for coupled configurations - ''' - river_debug_files = glob.glob('*%s*.nc' % river_envar['RIVER_LINK']) + """ + river_debug_files = glob.glob("*%s*.nc" % river_envar["RIVER_LINK"]) for river_debug_file in river_debug_files: common.remove_file(river_debug_file) + def _setup_nemo_cpld(common_envar, mct_envar, nemo_envar): - ''' + """ Setup NEMO for coupled configurations - ''' - nemo_debug_files = glob.glob('*%s*.nc' % nemo_envar['OCEAN_LINK']) + """ + nemo_debug_files = glob.glob("*%s*.nc" % nemo_envar["OCEAN_LINK"]) for nemo_debug_file in nemo_debug_files: common.remove_file(nemo_debug_file) def _setup_lfric_cpld(common_envar, mct_envar, lfric_envar): - ''' + """ Setup LFRIC for coupled configurations - ''' + """ # Remove potential LFRIC debug netcdf files. If this isn't done MCT will # just append details to existing files - lfric_debug_files = glob.glob('*%s*.nc' % lfric_envar['LFRIC_LINK']) + lfric_debug_files = glob.glob("*%s*.nc" % lfric_envar["LFRIC_LINK"]) for lfric_debug_file in lfric_debug_files: common.remove_file(lfric_debug_file) def _setup_um_cpld(common_env, mct_envar, um_envar): - ''' + """ Setup UM for coupled configurations - ''' + """ # Remove potential UM debug netcdf files. 
If this isn't done MCT will # just append details to existing files - um_debug_files = glob.glob('*%s*.nc' % um_envar['ATMOS_LINK']) + um_debug_files = glob.glob("*%s*.nc" % um_envar["ATMOS_LINK"]) for um_debug_file in um_debug_files: common.remove_file(um_debug_file) def _setup_jnr_cpld(common_env, mct_envar, jnr_envar): - ''' + """ Setup Jnr UM for coupled configurations. This function is only used when creating the namcouple at run time. - ''' + """ # Remove potential UM debug netcdf files. If this isn't done MCT will # just append details to existing files - um_debug_files = glob.glob('*%s*.nc' % jnr_envar['ATMOS_LINK_JNR']) + um_debug_files = glob.glob("*%s*.nc" % jnr_envar["ATMOS_LINK_JNR"]) for um_debug_file in um_debug_files: common.remove_file(um_debug_file) def _generate_ngms_namcouple(): - ''' + """ Generate the namcouple files for ngms coupled models. This function should only be called if we request it - ''' + """ # This requires access to the MOCI namcouple generation library, test to # see if we can access this try: import generate_nam except ModuleNotFoundError: - sys.stderr.write('This run requires access to the MOCI namcouple' - ' generation library\n. Please ensure this is' - ' available\n') + sys.stderr.write( + "This run requires access to the MOCI namcouple" + " generation library\n. Please ensure this is" + " available\n" + ) sys.exit(error.IMPORT_ERROR) # First remove any existing namcouple files. - files_to_tidy = _multiglob('namcouple*') + files_to_tidy = _multiglob("namcouple*") for f_to_tidy in files_to_tidy: # some driver files may have namcouple in the filename - if f_to_tidy.split('.')[-1] != 'py': + if f_to_tidy.split(".")[-1] != "py": common.remove_file(f_to_tidy) # Set up input and output file names for namcouple # creation and select namelist reading mode for input file. 
- file_in = 'cpl_configuration.nml' - file_out = 'namcouple' - file_mode = 'namelist' + file_in = "cpl_configuration.nml" + file_out = "namcouple" + file_mode = "namelist" generate_nam.generate_nam(file_in, file_out, file_mode) def _setup_rmp_dir(mct_envar, run_info): - ''' + """ Set up link to the remapping weights files. This function is only used when creating the namcouple at run time. - ''' + """ # It's only when the namcouple file doesn't exist that we're # anticipating needing more than one remapping directory. - if run_info['l_namcouple']: + if run_info["l_namcouple"]: # Organise the remapping files - remap_files = glob.glob('%s/rmp_*' % mct_envar['RMP_DIR']) + remap_files = glob.glob("%s/rmp_*" % mct_envar["RMP_DIR"]) for remap_file in remap_files: linkname = os.path.split(remap_file)[-1] os.symlink(remap_file, linkname) else: # Need to be precise about order of components - comp_names = {'um':'ATM', 'jnr':'JNR', 'nemo':'OCN'} - comp_order = ['um', 'jnr', 'nemo'] + comp_names = {"um": "ATM", "jnr": "JNR", "nemo": "OCN"} + comp_order = ["um", "jnr", "nemo"] comp_list = [] for component in comp_order: - if component in mct_envar['COUPLING_COMPONENTS'].split(): + if component in mct_envar["COUPLING_COMPONENTS"].split(): comp_list.append(comp_names[component]) # Links to areas.nc, grids.nc and masks.nc core_dir_str = None for comp in comp_order: - grid = comp_names[comp] + '_grid' + grid = comp_names[comp] + "_grid" if grid in run_info: grid_name = run_info[grid] else: grid_name = "*" if core_dir_str: - core_dir_str = ('%s_%s' % (core_dir_str, grid_name)) + core_dir_str = "%s_%s" % (core_dir_str, grid_name) else: core_dir_str = grid_name - core_dir_str = mct_envar['RMP_DIR'] + '/' + core_dir_str + core_dir_str = mct_envar["RMP_DIR"] + "/" + core_dir_str # Find the core remapping directory and link the core # remapping files core_dirs = glob.glob(core_dir_str) if len(core_dirs) < 1: - sys.stderr.write('[FAIL] failed to find core remapping ' - 'directory %s\n' % 
core_dir_str) + sys.stderr.write( + "[FAIL] failed to find core remapping " "directory %s\n" % core_dir_str + ) sys.exit(error.MISSING_CORE_RMP_DIR) - for core_file in ['areas.nc', 'grids.nc', 'masks.nc']: - core_file2 = core_dirs[0] + '/' + core_file + for core_file in ["areas.nc", "grids.nc", "masks.nc"]: + core_file2 = core_dirs[0] + "/" + core_file if os.path.isfile(core_file2): # Remove link if it already exists common.remove_file(core_file) # Create symbolic link os.symlink(core_file2, core_file) else: - sys.stderr.write('[FAIL] failed to find %s' % core_file2) + sys.stderr.write("[FAIL] failed to find %s" % core_file2) sys.exit(error.MISSING_CORE_RMP_FILE) # Links to the remapping weight files @@ -185,38 +191,44 @@ def _setup_rmp_dir(mct_envar, run_info): break # Create the links for remapping file between these # components - grid1 = comp1 + '_grid' - grid2 = comp2 + '_grid' + grid1 = comp1 + "_grid" + grid2 = comp2 + "_grid" if not grid1 in run_info or not grid2 in run_info: - sys.stderr.write('[FAIL] either %s or %s is missing ' - 'from run_info.\n' % (grid1, grid2)) + sys.stderr.write( + "[FAIL] either %s or %s is missing " + "from run_info.\n" % (grid1, grid2) + ) sys.exit(error.MISSING_GRID_IN_RUN_INFO) - rmp_dir = mct_envar['RMP_DIR'] + '/' + run_info[grid2] +\ - '_' + run_info[grid1] + rmp_dir = ( + mct_envar["RMP_DIR"] + "/" + run_info[grid2] + "_" + run_info[grid1] + ) # Check that directory exists if not os.path.isdir(rmp_dir): - sys.stderr.write('[FAIL] failed to find remapping ' - 'directory %s\n.' % rmp_dir) + sys.stderr.write( + "[FAIL] failed to find remapping " "directory %s\n." 
% rmp_dir + ) sys.exit(error.MISSING_RMP_DIR) # Create the links - remap_files = glob.glob('%s/rmp_*' % rmp_dir) + remap_files = glob.glob("%s/rmp_*" % rmp_dir) for remap_file in remap_files: linkname = os.path.split(remap_file)[-1] os.symlink(remap_file, linkname) def _setup_executable(common_env, envarinsts, run_info): - ''' + """ Setup the environment and any files required by the executable - ''' + """ # Load the environment variables required mct_envar = dr_env_lib.env_lib.LoadEnvar() mct_envar = dr_env_lib.env_lib.load_envar_from_definition( - mct_envar, dr_env_lib.mct_def.MCT_ENVIRONMENT_VARS_INITIAL) + mct_envar, dr_env_lib.mct_def.MCT_ENVIRONMENT_VARS_INITIAL + ) # Tidyup our OASIS files before the setup process is started - files_to_tidy = _multiglob('nout.*', 'debug.*root.*', 'debug.??.*', - 'debug.???.*', '*fort*', 'rmp_*') + files_to_tidy = _multiglob( + "nout.*", "debug.*root.*", "debug.??.*", "debug.???.*", "*fort*", "rmp_*" + ) for f_to_tidy in files_to_tidy: common.remove_file(f_to_tidy) @@ -224,177 +236,194 @@ def _setup_executable(common_env, envarinsts, run_info): _setup_rmp_dir(mct_envar, run_info) # Are we using automatic namcouple generation for NG-Coupling? - if mct_envar['NAMCOUPLE_STATIC'] == '.false.': + if mct_envar["NAMCOUPLE_STATIC"] == ".false.": _generate_ngms_namcouple() # Are we expecting a namcouple file - if run_info['l_namcouple']: + if run_info["l_namcouple"]: # Does the namcouple file exist - if not os.path.exists('namcouple'): - sys.stderr.write('[FAIL] Could not find a namcouple file in the' - ' working directory. This file should originate' - ' in the Rose app\'s file directory\n') + if not os.path.exists("namcouple"): + sys.stderr.write( + "[FAIL] Could not find a namcouple file in the" + " working directory. 
This file should originate" + " in the Rose app's file directory\n" + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) # Create transient field namelist (note if we're creating a # namcouple on the fly, this will have to wait until after # the namcouple have been created). - _, _ = shellout._exec_subprocess('./OASIS_fields') - - for component in mct_envar['COUPLING_COMPONENTS'].split(): - if not component in common_env['models']: - sys.stderr.write('[FAIL] Attempting to couple component %s,' - ' however this component is not being run in' - ' this configuration\n' % component) + _, _ = shellout._exec_subprocess("./OASIS_fields") + + for component in mct_envar["COUPLING_COMPONENTS"].split(): + if not component in common_env["models"]: + sys.stderr.write( + "[FAIL] Attempting to couple component %s," + " however this component is not being run in" + " this configuration\n" % component + ) sys.exit(999) if not component in list(SUPPORTED_MODELS.keys()): - sys.stderr.write('[FAIL] The component %s is not supported by the' - ' mct driver\n' % component) + sys.stderr.write( + "[FAIL] The component %s is not supported by the" + " mct driver\n" % component + ) sys.exit(999) # Setup coupling for individual component - sys.stdout.write('[INFO] MCT driver setting up %s component\n' % - component) - SUPPORTED_MODELS[component](common_env, mct_envar, - envarinsts[component]) + sys.stdout.write("[INFO] MCT driver setting up %s component\n" % component) + SUPPORTED_MODELS[component](common_env, mct_envar, envarinsts[component]) # Update the general, non-component specific namcouple details - if run_info['l_namcouple']: - update_namcouple.update('mct', common_env) + if run_info["l_namcouple"]: + update_namcouple.update("mct", common_env) # Run the CPMIP controller if appropriate # Check for the presence of t (as in TRUE, True, or true) in the # CPMIP_ANALYSIS value - if mct_envar['CPMIP_ANALYSIS'].lower().startswith('t'): + if mct_envar["CPMIP_ANALYSIS"].lower().startswith("t"): 
controller_mode = "run_controller" - sys.stdout.write('[INFO] mct_driver: CPMIP analyis will be performed\n') + sys.stdout.write("[INFO] mct_driver: CPMIP analyis will be performed\n") cpmip_controller.run_controller(controller_mode, common_env) return mct_envar def _set_launcher_command(_): - ''' + """ Setup the launcher command for the executable. MCT does not require a call to the launcher as it runs as a library - ''' - launch_cmd = '' + """ + launch_cmd = "" return launch_cmd def _sent_coupling_fields(mct_envar, run_info): - ''' + """ Read the SHARED file to get the coupling frequencies. This function is only used when creating the namcouple at run time. - ''' + """ # Dictionary for the component names - component_names = {'um':'ATM', 'nemo':'OCN', 'jnr':'JNR'} + component_names = {"um": "ATM", "nemo": "OCN", "jnr": "JNR"} # Dictionary for the coupling frequencies # (Note that for now, we're assuming that coupling frequencies # for JNR<->OCN are the same as ATM<->OCN) - couple_freqs = {'ATM2OCN_freq': ['oasis_couple_freq_ao'], - 'OCN2ATM_freq': ['oasis_couple_freq_oa'], - 'ATM2JNR_freq': ['oasis_couple_freq_aj', - 'oasis_couple_freq_aj_stats'], - 'JNR2ATM_freq': ['oasis_couple_freq_ja', - 'oasis_couple_freq_ja_stats'], - 'JNR2OCN_freq': ['oasis_couple_freq_ao'], - 'OCN2JNR_freq': ['oasis_couple_freq_oa']} + couple_freqs = { + "ATM2OCN_freq": ["oasis_couple_freq_ao"], + "OCN2ATM_freq": ["oasis_couple_freq_oa"], + "ATM2JNR_freq": ["oasis_couple_freq_aj", "oasis_couple_freq_aj_stats"], + "JNR2ATM_freq": ["oasis_couple_freq_ja", "oasis_couple_freq_ja_stats"], + "JNR2OCN_freq": ["oasis_couple_freq_ao"], + "OCN2JNR_freq": ["oasis_couple_freq_oa"], + } # Check that SHARED exists - if not os.path.isfile(run_info['SHARED_FILE']): - sys.stderr.write('[FAIL] not found SHARED file.\n') + if not os.path.isfile(run_info["SHARED_FILE"]): + sys.stderr.write("[FAIL] not found SHARED file.\n") sys.exit(error.NOT_FOUND_SHARED) # Read the namelist file SHARED - shared_nml = 
f90nml.read(run_info['SHARED_FILE']) - for component1 in mct_envar['COUPLING_COMPONENTS'].split(): - for component2 in mct_envar['COUPLING_COMPONENTS'].split(): + shared_nml = f90nml.read(run_info["SHARED_FILE"]) + for component1 in mct_envar["COUPLING_COMPONENTS"].split(): + for component2 in mct_envar["COUPLING_COMPONENTS"].split(): if component2 != component1: # Check component names exist - if not component1 in component_names or \ - not component2 in component_names: - sys.stderr.write('[FAIL] %s or %s is unrecognised as ' - 'a component name\n' % (component1, - component2)) + if ( + not component1 in component_names + or not component2 in component_names + ): + sys.stderr.write( + "[FAIL] %s or %s is unrecognised as " + "a component name\n" % (component1, component2) + ) sys.exit(error.UNRECOGNISED_COMP) # Determine the variable which stores the coupling frequency - cpl_var = component_names[component1] + '2' + \ - component_names[component2] + '_freq' + cpl_var = ( + component_names[component1] + + "2" + + component_names[component2] + + "_freq" + ) # Check the coupling frequency/ies exist if not cpl_var in couple_freqs: - sys.stderr.write('[FAIL] %s is not recognised\n' % - cpl_var) + sys.stderr.write("[FAIL] %s is not recognised\n" % cpl_var) sys.exit(error.UNRECOGNISED_CPL_VAR) nml_cpl_vars = couple_freqs[cpl_var] - if 'coupling_control' not in shared_nml: - sys.stderr.write('[FAIL] failed to find coupling_control ' - 'in SHARED namelist.\n') + if "coupling_control" not in shared_nml: + sys.stderr.write( + "[FAIL] failed to find coupling_control " + "in SHARED namelist.\n" + ) sys.exit(error.MISSING_CPL_CONTROL) # Loop across the coupling variables for nml_cpl_entry in nml_cpl_vars: - if not nml_cpl_entry in shared_nml['coupling_control']: - sys.stderr.write('[FAIL] failed to find %s in ' - 'namelist coupling_control\n' % - nml_cpl_entry) + if not nml_cpl_entry in shared_nml["coupling_control"]: + sys.stderr.write( + "[FAIL] failed to find %s in " + 
"namelist coupling_control\n" % nml_cpl_entry + ) sys.exit(error.MISSING_CPL_FREQ) # Store coupling frequency if not cpl_var in run_info: run_info[cpl_var] = [] - cpl_freq = 3600 * \ - shared_nml['coupling_control'][nml_cpl_entry][0] + \ - 60 * shared_nml['coupling_control'][nml_cpl_entry][1] + cpl_freq = ( + 3600 * shared_nml["coupling_control"][nml_cpl_entry][0] + + 60 * shared_nml["coupling_control"][nml_cpl_entry][1] + ) run_info[cpl_var].append(cpl_freq) return run_info def _finalize_executable(common_env): - ''' + """ Perform any tasks required after completion of model run - ''' + """ # Load the environment variables required mct_envar = dr_env_lib.env_lib.LoadEnvar() mct_envar = dr_env_lib.env_lib.load_envar_from_definition( - mct_envar, dr_env_lib.mct_def.MCT_ENVIRONMENT_VARS_FINAL) + mct_envar, dr_env_lib.mct_def.MCT_ENVIRONMENT_VARS_FINAL + ) # run the cpmip controller if appropriate # check for the presence of t (as in TRUE, True, or true) in the # CPMIP_ANALYSIS value - if mct_envar['CPMIP_ANALYSIS'].lower().startswith('t'): + if mct_envar["CPMIP_ANALYSIS"].lower().startswith("t"): controller_mode = "finalize" - sys.stdout.write( - '[INFO] mct_driver: CPMIP analyis is being performed\n') + sys.stdout.write("[INFO] mct_driver: CPMIP analyis is being performed\n") cpmip_controller.run_controller(controller_mode, common_env) def run_driver(envar_insts, mode, run_info): - ''' + """ Run the driver, and return an instance of LoadEnvar and as string containing the launcher command for the MCT component - ''' - common_env = envar_insts['common'] - if mode == 'run_driver': + """ + common_env = envar_insts["common"] + if mode == "run_driver": exe_envar = _setup_executable(common_env, envar_insts, run_info) launch_cmd = _set_launcher_command(exe_envar) model_snd_list = None - if not run_info['l_namcouple']: + if not run_info["l_namcouple"]: run_info = _sent_coupling_fields(exe_envar, run_info) - elif mode == 'finalize': + elif mode == "finalize": 
_finalize_executable(common_env) exe_envar = None launch_cmd = None model_snd_list = None return exe_envar, launch_cmd, run_info, model_snd_list + # Dictionary containing the supported models and their assosicated setup # function within the driver -SUPPORTED_MODELS = {'rivers': _setup_river_cpld, - 'nemo': _setup_nemo_cpld, - 'um': _setup_um_cpld, - 'jnr': _setup_jnr_cpld, - 'lfric': _setup_lfric_cpld} +SUPPORTED_MODELS = { + "rivers": _setup_river_cpld, + "nemo": _setup_nemo_cpld, + "um": _setup_um_cpld, + "jnr": _setup_jnr_cpld, + "lfric": _setup_lfric_cpld, +} diff --git a/Coupled_Drivers/nemo_driver.py b/Coupled_Drivers/nemo_driver.py index 0a6cf37..c880cbf 100644 --- a/Coupled_Drivers/nemo_driver.py +++ b/Coupled_Drivers/nemo_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -17,7 +17,7 @@ DESCRIPTION Driver for the NEMO 3.6 model, called from link_drivers. Note that this does not cater for any earlier versions of NEMO -''' +""" import re import os import time @@ -33,12 +33,14 @@ try: import cf_units except ImportError: - IMPORT_ERROR_MSG = ('Unable to import cf_units. Ensure the scitools module' - 'has been loaded first.') + IMPORT_ERROR_MSG = ( + "Unable to import cf_units. Ensure the scitools module" "has been loaded first." 
+ ) sys.exit(IMPORT_ERROR_MSG) import dr_env_lib.nemo_def import dr_env_lib.env_lib + try: import f90nml except ImportError: @@ -55,95 +57,101 @@ SERIAL_MODE_ERROR = 99 # Ocean resolutions -OCEAN_RESOLS = {'orca2': [182, 149], - 'orca1': [362, 332], - 'orca025': [1442, 1021], - 'orca12': [4322, 3059], - 'orca36': [12960, 10850]} +OCEAN_RESOLS = { + "orca2": [182, 149], + "orca1": [362, 332], + "orca025": [1442, 1021], + "orca12": [4322, 3059], + "orca36": [12960, 10850], +} + def _check_nemonl(envar_container): - ''' + """ As the environment variable NEMO_NL is required by both the setup and finalise functions, this will be encapsulated here - ''' + """ # Information will be retrieved from this file during the running of the # driver, so check it exists - if not os.path.isfile(envar_container['NEMO_NL']): - sys.stderr.write('[FAIL] Can not find the nemo namelist file %s\n' % - envar_container['NEMO_NL']) + if not os.path.isfile(envar_container["NEMO_NL"]): + sys.stderr.write( + "[FAIL] Can not find the nemo namelist file %s\n" + % envar_container["NEMO_NL"] + ) sys.exit(error.MISSING_DRIVER_FILE_ERROR) else: return 0 + def _get_nemorst(nemo_nl_file): - ''' + """ Retrieve the nemo restart directory from the nemo namelist file - ''' + """ ocerst_rcode, ocerst_val = shellout._exec_subprocess( - 'grep cn_ocerst_outdir %s' % nemo_nl_file) + "grep cn_ocerst_outdir %s" % nemo_nl_file + ) if ocerst_rcode == 0: - nemo_rst = re.findall(r'[\"\'](.*?)[\"\']', ocerst_val)[0] - if nemo_rst[-1] == '/': + nemo_rst = re.findall(r"[\"\'](.*?)[\"\']", ocerst_val)[0] + if nemo_rst[-1] == "/": nemo_rst = nemo_rst[:-1] return nemo_rst return None + def _get_ln_icebergs(nemo_nl_file): - ''' + """ Interrogate the nemo namelist to see if we are running with icebergs, Returns boolean, True if icebergs are used, False if not - ''' - icb_rcode, icb_val = shellout._exec_subprocess( - 'grep ln_icebergs %s' % nemo_nl_file) + """ + icb_rcode, icb_val = shellout._exec_subprocess("grep ln_icebergs 
%s" % nemo_nl_file) if icb_rcode != 0: - sys.stderr.write('Unable to read ln_icebergs in &namberg namelist' - ' in the NEMO namelist file %s\n' - % nemo_nl_file) + sys.stderr.write( + "Unable to read ln_icebergs in &namberg namelist" + " in the NEMO namelist file %s\n" % nemo_nl_file + ) sys.exit(error.SUBPROC_ERROR) else: - if 'true' in icb_val.lower(): + if "true" in icb_val.lower(): return True return False def _verify_nemo_rst(cyclepointstr, nemo_rst, nemo_nl, nemo_nproc, nemo_version): - ''' + """ Verify that the full set of nemo restart files match. Currently this is limited to the icebergs restart file. We require either a single restart file, or a number of restart files equal to the number of nemo processors. - ''' - restart_files = [f for f in os.listdir(nemo_rst) if - 'restart' in f] - + """ + restart_files = [f for f in os.listdir(nemo_rst) if "restart" in f] if _get_ln_icebergs(nemo_nl): if nemo_version < 402: - # Pre nemo 4.2 compatibility - nemo_icb_regex = r'_icebergs_%s_restart(_\d+)?\.nc' % cyclepointstr + # Pre nemo 4.2 compatibility + nemo_icb_regex = r"_icebergs_%s_restart(_\d+)?\.nc" % cyclepointstr else: - # Post nemo 4.2 compatibility - nemo_icb_regex = r'_%s_restart_icb(_\d+)?\.nc' % cyclepointstr + # Post nemo 4.2 compatibility + nemo_icb_regex = r"_%s_restart_icb(_\d+)?\.nc" % cyclepointstr - icb_restart_files = [f for f in restart_files if - re.findall(nemo_icb_regex, f)] + icb_restart_files = [f for f in restart_files if re.findall(nemo_icb_regex, f)] # we can have a single rebuilt file, number of files equal to # number of nemo processors, or rebuilt file and processor files. - if len(icb_restart_files) not in (1, nemo_nproc, nemo_nproc+1): - sys.stderr.write('[FAIL] Unable to find iceberg restart files for' - ' this cycle. Must either have one rebuilt file,' - ' as many as there are nemo processors (%i) or' - ' both rebuilt and processor files.' 
- '[FAIL] Found %i iceberg restart files\n' - % (nemo_nproc, len(icb_restart_files))) + if len(icb_restart_files) not in (1, nemo_nproc, nemo_nproc + 1): + sys.stderr.write( + "[FAIL] Unable to find iceberg restart files for" + " this cycle. Must either have one rebuilt file," + " as many as there are nemo processors (%i) or" + " both rebuilt and processor files." + "[FAIL] Found %i iceberg restart files\n" + % (nemo_nproc, len(icb_restart_files)) + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) -def _calc_current_model_date(model_basis_time, time_step, num_steps, - calendar): - ''' +def _calc_current_model_date(model_basis_time, time_step, num_steps, calendar): + """ Calculate the current model date using the basis time, and the number of time-steps covered in a given model run. @@ -153,8 +161,8 @@ def _calc_current_model_date(model_basis_time, time_step, num_steps, model run :arg: string calendar : Calendar used in the model run - ''' - ref_date_format = 'seconds since %Y-%m-%d %H:%M:%S' + """ + ref_date_format = "seconds since %Y-%m-%d %H:%M:%S" # modify the calendar names for compatability with cf_units module if calendar == "360day": @@ -166,17 +174,20 @@ def _calc_current_model_date(model_basis_time, time_step, num_steps, ref_time = model_basis_time.strftime(ref_date_format) model_progress_secs = cf_units.date2num( - model_basis_time, ref_time, calendar=calendar) + (time_step * num_steps) + model_basis_time, ref_time, calendar=calendar + ) + (time_step * num_steps) current_model_date = cf_units.num2date( - model_progress_secs, ref_time, calendar=calendar) + model_progress_secs, ref_time, calendar=calendar + ) return current_model_date -def _verify_fix_rst(restartdate, nemo_rst, model_basis_time, time_step, - num_steps, calendar): - ''' +def _verify_fix_rst( + restartdate, nemo_rst, model_basis_time, time_step, num_steps, calendar +): + """ Verify that the restart file for nemo corresponds to the model time reached within a given model run. 
If they don't match, then make sure that nemo restarts from the correct restart date @@ -188,41 +199,44 @@ def _verify_fix_rst(restartdate, nemo_rst, model_basis_time, time_step, :arg: int num_steps : Num. of time-steps covered :arg: string calendar : Calendar used in model - ''' + """ # Calculate the model restart time based on the start date of the # last calculated model step, the time-step and the number of # steps. Then convert the date format. - model_basis_datetime = datetime.datetime.strptime( - model_basis_time, "%Y%m%d") + model_basis_datetime = datetime.datetime.strptime(model_basis_time, "%Y%m%d") model_restart_date = _calc_current_model_date( - model_basis_datetime, time_step, num_steps, calendar) + model_basis_datetime, time_step, num_steps, calendar + ) - model_restart_date = model_restart_date.strftime('%Y%m%d') + model_restart_date = model_restart_date.strftime("%Y%m%d") if restartdate == model_restart_date: - sys.stdout.write('[INFO] Validated NEMO restart date\n') + sys.stdout.write("[INFO] Validated NEMO restart date\n") else: # Write the message to both standard out and standard error - msg = '[WARN] The NEMO restart data does not match the ' \ - ' current model time\n.' \ - ' Current model date is %s\n' \ - ' NEMO restart time is %s\n' \ - '[WARN] Automatically removing NEMO dumps ahead of ' \ - 'the current model date, and pick up the dump at ' \ - 'this time\n' % (model_restart_date, restartdate) + msg = ( + "[WARN] The NEMO restart data does not match the " + " current model time\n." 
+ " Current model date is %s\n" + " NEMO restart time is %s\n" + "[WARN] Automatically removing NEMO dumps ahead of " + "the current model date, and pick up the dump at " + "this time\n" % (model_restart_date, restartdate) + ) sys.stdout.write(msg) sys.stderr.write(msg) - #Remove all nemo restart files that are later than the correct - #cycle times - #Make our generic restart regular expression, to cover normal NEMO - #restart, and potential iceberg, SI3 or passive tracer restart files, - #for both the rebuilt and non rebuilt cases - generic_rst_regex = r'(icebergs)?.*restart(_trc)?(_ice)?(_icb)?(_\d+)?\.nc' - all_restart_files = [f for f in os.listdir(nemo_rst) if - re.findall(generic_rst_regex, f)] + # Remove all nemo restart files that are later than the correct + # cycle times + # Make our generic restart regular expression, to cover normal NEMO + # restart, and potential iceberg, SI3 or passive tracer restart files, + # for both the rebuilt and non rebuilt cases + generic_rst_regex = r"(icebergs)?.*restart(_trc)?(_ice)?(_icb)?(_\d+)?\.nc" + all_restart_files = [ + f for f in os.listdir(nemo_rst) if re.findall(generic_rst_regex, f) + ] for restart_file in all_restart_files: - fname_date = re.findall(r'\d{8}', restart_file)[0] + fname_date = re.findall(r"\d{8}", restart_file)[0] if fname_date > model_restart_date: common.remove_file(os.path.join(nemo_rst, restart_file)) restartdate = model_restart_date @@ -230,85 +244,90 @@ def _verify_fix_rst(restartdate, nemo_rst, model_basis_time, time_step, def _load_environment_variables(nemo_envar): - ''' + """ Load the NEMO environment variables required for the model run into the nemo_envar container - ''' + """ # Load the nemo namelist environment variable nemo_envar = dr_env_lib.env_lib.load_envar_from_definition( - nemo_envar, dr_env_lib.nemo_def.NEMO_ENVIRONMENT_VARS_INITIAL) + nemo_envar, dr_env_lib.nemo_def.NEMO_ENVIRONMENT_VARS_INITIAL + ) _ = _check_nemonl(nemo_envar) return nemo_envar def 
_setup_dates(common_env): - ''' + """ Setup the dates for the NEMO model run - ''' - calendar = common_env['CALENDAR'] + """ + calendar = common_env["CALENDAR"] - sys.stdout.write('[INFO] NEMO calendar= %s ' % calendar) - if calendar == '360day': - calendar = '360' + sys.stdout.write("[INFO] NEMO calendar= %s " % calendar) + if calendar == "360day": + calendar = "360" nleapy = 30 - elif calendar == '365day': - calendar = '365' + elif calendar == "365day": + calendar = "365" nleapy = 0 - elif calendar == 'gregorian': + elif calendar == "gregorian": nleapy = 1 else: - sys.stderr.write('[FAIL] Calendar type %s not recognised\n' % - calendar) + sys.stderr.write("[FAIL] Calendar type %s not recognised\n" % calendar) sys.exit(error.INVALID_EVAR_ERROR) - #turn our times into lists of integers - model_basis = [int(i) for i in common_env['MODELBASIS'].split(',')] - run_start = [int(i) for i in common_env['TASKSTART'].split(',')] - run_length = [int(i) for i in common_env['TASKLENGTH'].split(',')] - - run_days = inc_days.inc_days(run_start[0], run_start[1], run_start[2], - run_length[0], run_length[1], run_length[2], - calendar) + # turn our times into lists of integers + model_basis = [int(i) for i in common_env["MODELBASIS"].split(",")] + run_start = [int(i) for i in common_env["TASKSTART"].split(",")] + run_length = [int(i) for i in common_env["TASKLENGTH"].split(",")] + + run_days = inc_days.inc_days( + run_start[0], + run_start[1], + run_start[2], + run_length[0], + run_length[1], + run_length[2], + calendar, + ) return nleapy, model_basis, run_start, run_length, run_days - def _setup_executable(common_env): - ''' + """ Setup the environment and any files required by the executable - ''' + """ # Create the environment variable container nemo_envar = dr_env_lib.env_lib.LoadEnvar() # Load the environment variables required nemo_envar = _load_environment_variables(nemo_envar) - - #Link the ocean executable - common.remove_file(nemo_envar['OCEAN_LINK']) - 
os.symlink(nemo_envar['OCEAN_EXEC'], - nemo_envar['OCEAN_LINK']) + # Link the ocean executable + common.remove_file(nemo_envar["OCEAN_LINK"]) + os.symlink(nemo_envar["OCEAN_EXEC"], nemo_envar["OCEAN_LINK"]) # Setup date variables - nleapy, model_basis, run_start, \ - run_length, run_days = _setup_dates(common_env) + nleapy, model_basis, run_start, run_length, run_days = _setup_dates(common_env) # NEMO model setup - if int(nemo_envar['NEMO_VERSION']) < 306: - sys.stderr.write('[FAIL] The python drivers are only valid for nemo' - ' versions greater than 3.6') + if int(nemo_envar["NEMO_VERSION"]) < 306: + sys.stderr.write( + "[FAIL] The python drivers are only valid for nemo" + " versions greater than 3.6" + ) sys.exit(error.INVALID_COMPONENT_VER_ERROR) # Read restart from nemo namelist restart_direcs = [] - nemo_rst = _get_nemorst(nemo_envar['NEMO_NL']) + nemo_rst = _get_nemorst(nemo_envar["NEMO_NL"]) if nemo_rst: restart_direcs.append(nemo_rst) icerst_rcode, icerst_val = shellout._exec_subprocess( - 'grep cn_icerst_dir %s' % nemo_envar['NEMO_NL']) + "grep cn_icerst_dir %s" % nemo_envar["NEMO_NL"] + ) if icerst_rcode == 0: - ice_rst = re.findall(r'[\"\'](.*?)[\"\']', icerst_val)[0] - if ice_rst[-1] == '/': + ice_rst = re.findall(r"[\"\'](.*?)[\"\']", icerst_val)[0] + if ice_rst[-1] == "/": ice_rst = ice_rst[:-1] restart_direcs.append(ice_rst) @@ -318,29 +337,34 @@ def _setup_executable(common_env): # Check for trailing slashes in directory names and strip them # out if they're present. - if direc.endswith('/'): - direc = direc.rstrip('/') - - if os.path.isdir(direc) and (direc not in ('./', '.')) and \ - common_env['CONTINUE'] == 'false': - sys.stdout.write('[INFO] directory is %s\n' % direc) - sys.stdout.write('[INFO] This is a New Run. 
Renaming old NEMO' - ' history directory\n') + if direc.endswith("/"): + direc = direc.rstrip("/") + + if ( + os.path.isdir(direc) + and (direc not in ("./", ".")) + and common_env["CONTINUE"] == "false" + ): + sys.stdout.write("[INFO] directory is %s\n" % direc) + sys.stdout.write( + "[INFO] This is a New Run. Renaming old NEMO" " history directory\n" + ) # In seasonal forecasting, we automatically apply # short-stepping to re-try the model. Before re-attempting # it, remove the associated NEMO history directory. old_hist_dir = "%s.%s" % (direc, time.strftime("%Y%m%d%H%M")) - if (common_env['SEASONAL'] == 'True' and - int(common_env['CYLC_TASK_TRY_NUMBER']) > 1): + if ( + common_env["SEASONAL"] == "True" + and int(common_env["CYLC_TASK_TRY_NUMBER"]) > 1 + ): common.remove_latest_hist_dir(old_hist_dir) os.rename(direc, old_hist_dir) os.makedirs(direc) elif not os.path.isdir(direc): - sys.stdout.write('[INFO] Creating NEMO restart directory:\n %s' % - direc) + sys.stdout.write("[INFO] Creating NEMO restart directory:\n %s" % direc) os.makedirs(direc) # Compile a list of NEMO, seaice and iceberg restart files, if any exist. @@ -349,85 +373,95 @@ def _setup_executable(common_env): # _yyyymmdd_restart_icb_.nc where # may itself contain underscores, hence we # do not parse details based on counting the number of underscores. - nemo_restart_files = [f for f in os.listdir(nemo_rst) if - re.findall(r'.+_\d{8}_restart(_\d+)?\.nc', f) or - re.findall(r'.+_\d{8}_restart_icb(_\d+)?\.nc', f)] + nemo_restart_files = [ + f + for f in os.listdir(nemo_rst) + if re.findall(r".+_\d{8}_restart(_\d+)?\.nc", f) + or re.findall(r".+_\d{8}_restart_icb(_\d+)?\.nc", f) + ] nemo_restart_files.sort() if nemo_restart_files: - latest_nemo_dump = nemo_rst + '/' + nemo_restart_files[-1] + latest_nemo_dump = nemo_rst + "/" + nemo_restart_files[-1] else: - latest_nemo_dump = 'unset' + latest_nemo_dump = "unset" - nemo_init_dir = '.' + nemo_init_dir = "." 
# We need to ensure any lingering NEMO or iceberg retarts from # previous runs are removed to ensure they're not accidentally # picked up if we're starting from climatology on this occasion. - common.remove_file('restart.nc') - common.remove_file('restart_icebergs.nc') - common.remove_file('restart_icb.nc') + common.remove_file("restart.nc") + common.remove_file("restart_icebergs.nc") + common.remove_file("restart_icb.nc") - if common_env['CONTINUE'] == 'false': + if common_env["CONTINUE"] == "false": # This is a new run - sys.stdout.write('[INFO] New nemo run\n') + sys.stdout.write("[INFO] New nemo run\n") if os.path.isfile(latest_nemo_dump): - #os.path.isfile will return true for symbolic links aswell - sys.stdout.write('[INFO] Removing old NEMO restart data\n') - for file_path in glob.glob(nemo_rst+'/*restart*'): + # os.path.isfile will return true for symbolic links aswell + sys.stdout.write("[INFO] Removing old NEMO restart data\n") + for file_path in glob.glob(nemo_rst + "/*restart*"): common.remove_file(file_path) - for file_path in glob.glob(ice_rst+'/*restart*'): + for file_path in glob.glob(ice_rst + "/*restart*"): common.remove_file(file_path) - for file_path in glob.glob(nemo_rst+'/*trajectory*'): + for file_path in glob.glob(nemo_rst + "/*trajectory*"): common.remove_file(file_path) # source our history namelist file from the current directory in case # of first cycle - history_nemo_nl = os.path.join(nemo_init_dir, nemo_envar['NEMO_NL']) + history_nemo_nl = os.path.join(nemo_init_dir, nemo_envar["NEMO_NL"]) elif os.path.isfile(latest_nemo_dump): - sys.stdout.write('[INFO] Restart data available in NEMO restart ' - 'directory %s. Restarting from previous task output\n' - % nemo_rst) - sys.stdout.write('[INFO] Sourcing namelist file from the work ' - 'directory of the previous cycle\n') + sys.stdout.write( + "[INFO] Restart data available in NEMO restart " + "directory %s. 
Restarting from previous task output\n" % nemo_rst + ) + sys.stdout.write( + "[INFO] Sourcing namelist file from the work " + "directory of the previous cycle\n" + ) # find the previous work directory if there is one - if common_env['CONTINUE_FROM_FAIL'] == 'false': - if common_env['CNWP_SUB_CYCLING'] == 'True': - prev_workdir = common.find_previous_workdir( \ - common_env['CYLC_TASK_CYCLE_POINT'], - common_env['CYLC_TASK_WORK_DIR'], - common_env['CYLC_TASK_NAME'], - common_env['CYLC_TASK_PARAM_run']) + if common_env["CONTINUE_FROM_FAIL"] == "false": + if common_env["CNWP_SUB_CYCLING"] == "True": + prev_workdir = common.find_previous_workdir( + common_env["CYLC_TASK_CYCLE_POINT"], + common_env["CYLC_TASK_WORK_DIR"], + common_env["CYLC_TASK_NAME"], + common_env["CYLC_TASK_PARAM_run"], + ) else: - prev_workdir = common.find_previous_workdir( \ - common_env['CYLC_TASK_CYCLE_POINT'], - common_env['CYLC_TASK_WORK_DIR'], - common_env['CYLC_TASK_NAME']) - history_nemo_nl = os.path.join(prev_workdir, nemo_envar['NEMO_NL']) + prev_workdir = common.find_previous_workdir( + common_env["CYLC_TASK_CYCLE_POINT"], + common_env["CYLC_TASK_WORK_DIR"], + common_env["CYLC_TASK_NAME"], + ) + history_nemo_nl = os.path.join(prev_workdir, nemo_envar["NEMO_NL"]) else: - history_nemo_nl = nemo_envar['NEMO_NL'] + history_nemo_nl = nemo_envar["NEMO_NL"] nemo_init_dir = nemo_rst else: - sys.stderr.write('[FAIL] No restart data available in NEMO restart ' - 'directory:\n %s\n' % nemo_rst) + sys.stderr.write( + "[FAIL] No restart data available in NEMO restart " + "directory:\n %s\n" % nemo_rst + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) # Strings which are different pre-NEMO4.2 and at NEMO4.2 - if int(nemo_envar['NEMO_VERSION']) < 402: - gl_step_int_match = 'rn_rdt=' - iceberg_rst_part1 = '_icebergs_' - iceberg_rst_part2 = '_restart' - iceberg_link_name = 'restart_icebergs' + if int(nemo_envar["NEMO_VERSION"]) < 402: + gl_step_int_match = "rn_rdt=" + iceberg_rst_part1 = "_icebergs_" + 
iceberg_rst_part2 = "_restart" + iceberg_link_name = "restart_icebergs" else: - gl_step_int_match = 'rn_dt=' - iceberg_rst_part1 = '_' - iceberg_rst_part2 = '_restart_icb' - iceberg_link_name = 'restart_icb' + gl_step_int_match = "rn_dt=" + iceberg_rst_part1 = "_" + iceberg_rst_part2 = "_restart_icb" + iceberg_link_name = "restart_icb" # Any variables containing things that can be globbed will start with gl_ - gl_first_step_match = 'nn_it000=' - gl_last_step_match = 'nn_itend=' + gl_first_step_match = "nn_it000=" + gl_last_step_match = "nn_itend=" - gl_nemo_restart_date_match = 'ln_rstdate' - gl_model_basis_time = 'nn_date0=' + gl_nemo_restart_date_match = "ln_rstdate" + gl_model_basis_time = "nn_date0=" # Read values from the nemo namelist file used by the previous cycle # (if appropriate), or the configuration namelist if this is the initial @@ -436,20 +470,24 @@ def _setup_executable(common_env): # Make sure this file exists before trying to read it since restarted models # may have had old work directories removed for numerous reasons. 
if not os.path.isfile(history_nemo_nl): - sys.stderr.write('[FAIL] Cannot find namelist file %s to extract ' - 'timestep data.\n' % history_nemo_nl) + sys.stderr.write( + "[FAIL] Cannot find namelist file %s to extract " + "timestep data.\n" % history_nemo_nl + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) # First timestep of the previous cycle - _, first_step_val = shellout._exec_subprocess('grep %s %s' % (gl_first_step_match, - history_nemo_nl)) + _, first_step_val = shellout._exec_subprocess( + "grep %s %s" % (gl_first_step_match, history_nemo_nl) + ) - nemo_first_step = int(re.findall(r'.+=(.+),', first_step_val)[0]) + nemo_first_step = int(re.findall(r".+=(.+),", first_step_val)[0]) # Last timestep of the previous cycle - _, last_step_val = shellout._exec_subprocess('grep %s %s' % (gl_last_step_match, - history_nemo_nl)) - nemo_last_step = re.findall(r'.+=(.+),', last_step_val)[0] + _, last_step_val = shellout._exec_subprocess( + "grep %s %s" % (gl_last_step_match, history_nemo_nl) + ) + nemo_last_step = re.findall(r".+=(.+),", last_step_val)[0] # The string in the nemo time step field might have any one of # a number of variants. e.g. 
"set_by_rose", "set_by_system", @@ -461,53 +499,65 @@ def _setup_executable(common_env): nemo_last_step = 0 # Determine (as an integer) the number of seconds per model timestep - _, nemo_step_int_val = shellout._exec_subprocess('grep %s %s' % (gl_step_int_match, - nemo_envar['NEMO_NL'])) - nemo_step_int = int(re.findall(r'.+=(\d*)', nemo_step_int_val)[0]) + _, nemo_step_int_val = shellout._exec_subprocess( + "grep %s %s" % (gl_step_int_match, nemo_envar["NEMO_NL"]) + ) + nemo_step_int = int(re.findall(r".+=(\d*)", nemo_step_int_val)[0]) # If the value for nemo_rst_date_value is true then the model uses # absolute date convention, otherwise the dump times are relative to the # start of the model run and have an integer representation _, nemo_rst_date_value = shellout._exec_subprocess( - 'grep %s %s' % (gl_nemo_restart_date_match, history_nemo_nl)) - if 'true' in nemo_rst_date_value: + "grep %s %s" % (gl_nemo_restart_date_match, history_nemo_nl) + ) + if "true" in nemo_rst_date_value: nemo_rst_date_bool = True else: nemo_rst_date_bool = False # The initial date of the model run (YYYYMMDD) - nemo_ndate0 = '%04d%02d%02d' % tuple(model_basis[:3]) + nemo_ndate0 = "%04d%02d%02d" % tuple(model_basis[:3]) nemo_dump_time = "00000000" # Get the model basis time for this run (YYYYMMDD) _, model_basis_val = shellout._exec_subprocess( - 'grep %s %s' % (gl_model_basis_time, history_nemo_nl)) - nemo_model_basis = re.findall(r'.+=(.+),', model_basis_val)[0] + "grep %s %s" % (gl_model_basis_time, history_nemo_nl) + ) + nemo_model_basis = re.findall(r".+=(.+),", model_basis_val)[0] if os.path.isfile(latest_nemo_dump): - nemo_dump_time = re.findall(r'_(\d*)_restart', latest_nemo_dump)[0] + nemo_dump_time = re.findall(r"_(\d*)_restart", latest_nemo_dump)[0] # Verify the dump time against cycle time if appropriate, do the # automatic fix, and check all other restart files match - if common_env['DRIVERS_VERIFY_RST'] == 'True': + if common_env["DRIVERS_VERIFY_RST"] == "True": 
nemo_dump_time = _verify_fix_rst( nemo_dump_time, nemo_rst, - nemo_model_basis, nemo_step_int, nemo_last_step, - common_env['CALENDAR']) + nemo_model_basis, + nemo_step_int, + nemo_last_step, + common_env["CALENDAR"], + ) - _verify_nemo_rst(nemo_dump_time, nemo_rst, nemo_envar['NEMO_NL'], - int(nemo_envar['NEMO_NPROC']), - int(nemo_envar['NEMO_VERSION'])) + _verify_nemo_rst( + nemo_dump_time, + nemo_rst, + nemo_envar["NEMO_NL"], + int(nemo_envar["NEMO_NPROC"]), + int(nemo_envar["NEMO_VERSION"]), + ) # link restart files no that the last output one becomes next input one - common.remove_file('restart.nc') + common.remove_file("restart.nc") - common.remove_file('restart_ice_in.nc') + common.remove_file("restart_ice_in.nc") # Sort out the processor restart files - if int(nemo_envar['NEMO_NPROC']) == 1: - sys.stderr.write('[FAIL] NEMO driver does not support the running' - ' of NEMO in serial mode\n') + if int(nemo_envar["NEMO_NPROC"]) == 1: + sys.stderr.write( + "[FAIL] NEMO driver does not support the running" + " of NEMO in serial mode\n" + ) sys.exit(SERIAL_MODE_ERROR) else: @@ -516,535 +566,598 @@ def _setup_executable(common_env): iceberg_restart_count = 0 # Nemo has multiple processors - for i_proc in range(int(nemo_envar['NEMO_NPROC'])): + for i_proc in range(int(nemo_envar["NEMO_NPROC"])): tag = str(i_proc).zfill(4) - nemo_rst_source = '%s/%so_%s_restart_%s.nc' % \ - (nemo_init_dir, common_env['RUNID'], \ - nemo_dump_time, tag) - nemo_rst_link = 'restart_%s.nc' % tag + nemo_rst_source = "%s/%so_%s_restart_%s.nc" % ( + nemo_init_dir, + common_env["RUNID"], + nemo_dump_time, + tag, + ) + nemo_rst_link = "restart_%s.nc" % tag common.remove_file(nemo_rst_link) if os.path.isfile(nemo_rst_source): os.symlink(nemo_rst_source, nemo_rst_link) nemo_restart_count += 1 - ice_rst_source = '%s/%so_%s_restart_ice_%s.nc' % \ - (nemo_init_dir, common_env['RUNID'], \ - nemo_dump_time, tag) + ice_rst_source = "%s/%so_%s_restart_ice_%s.nc" % ( + nemo_init_dir, + 
common_env["RUNID"], + nemo_dump_time, + tag, + ) if os.path.isfile(ice_rst_source): - ice_rst_link = 'restart_ice_in_%s.nc' % tag + ice_rst_link = "restart_ice_in_%s.nc" % tag common.remove_file(ice_rst_link) os.symlink(ice_rst_source, ice_rst_link) ice_restart_count += 1 - iceberg_rst_source = '%s/%so%s%s%s_%s.nc' % \ - (nemo_init_dir, common_env['RUNID'], iceberg_rst_part1, - nemo_dump_time, iceberg_rst_part2, tag) + iceberg_rst_source = "%s/%so%s%s%s_%s.nc" % ( + nemo_init_dir, + common_env["RUNID"], + iceberg_rst_part1, + nemo_dump_time, + iceberg_rst_part2, + tag, + ) if os.path.isfile(iceberg_rst_source): - iceberg_rst_link = '%s_%s.nc' % (iceberg_link_name, tag) + iceberg_rst_link = "%s_%s.nc" % (iceberg_link_name, tag) common.remove_file(iceberg_rst_link) os.symlink(iceberg_rst_source, iceberg_rst_link) iceberg_restart_count += 1 - #endfor + # endfor if nemo_restart_count < 1: - sys.stdout.write('[INFO] No NEMO sub-PE restarts found\n') + sys.stdout.write("[INFO] No NEMO sub-PE restarts found\n") # We found no nemo restart sub-domain files let's # look for a global file. - nemo_rst_source = '%s/%so_%s_restart.nc' % \ - (nemo_init_dir, common_env['RUNID'], \ - nemo_dump_time) + nemo_rst_source = "%s/%so_%s_restart.nc" % ( + nemo_init_dir, + common_env["RUNID"], + nemo_dump_time, + ) if os.path.isfile(nemo_rst_source): - sys.stdout.write('[INFO] Using rebuilt NEMO restart '\ - 'file: %s\n' % nemo_rst_source) - nemo_rst_link = 'restart.nc' + sys.stdout.write( + "[INFO] Using rebuilt NEMO restart " + "file: %s\n" % nemo_rst_source + ) + nemo_rst_link = "restart.nc" common.remove_file(nemo_rst_link) os.symlink(nemo_rst_source, nemo_rst_link) if ice_restart_count < 1: - sys.stdout.write('[INFO] No ice sub-PE restarts found\n') + sys.stdout.write("[INFO] No ice sub-PE restarts found\n") # We found no ice restart sub-domain files let's # look for a global file. 
- ice_rst_source = '%s/%so_%s_restart_ice.nc' % \ - (nemo_init_dir, common_env['RUNID'], \ - nemo_dump_time) + ice_rst_source = "%s/%so_%s_restart_ice.nc" % ( + nemo_init_dir, + common_env["RUNID"], + nemo_dump_time, + ) if os.path.isfile(ice_rst_source): - sys.stdout.write('[INFO] Using rebuilt ice restart '\ - 'file: %s\n' % ice_rst_source) - ice_rst_link = 'restart_ice_in.nc' + sys.stdout.write( + "[INFO] Using rebuilt ice restart " + "file: %s\n" % ice_rst_source + ) + ice_rst_link = "restart_ice_in.nc" common.remove_file(ice_rst_link) os.symlink(ice_rst_source, ice_rst_link) if iceberg_restart_count < 1: - sys.stdout.write('[INFO] No iceberg sub-PE restarts found\n') + sys.stdout.write("[INFO] No iceberg sub-PE restarts found\n") # We found no iceberg restart sub-domain files let's # look for a global file. - iceberg_rst_source = '%s/%so%s%s%s.nc' % \ - (nemo_init_dir, common_env['RUNID'], iceberg_rst_part1, - nemo_dump_time, iceberg_rst_part2) + iceberg_rst_source = "%s/%so%s%s%s.nc" % ( + nemo_init_dir, + common_env["RUNID"], + iceberg_rst_part1, + nemo_dump_time, + iceberg_rst_part2, + ) if os.path.isfile(iceberg_rst_source): - sys.stdout.write('[INFO] Using rebuilt iceberg restart'\ - 'file: %s\n' % iceberg_rst_source) - iceberg_rst_nc = iceberg_link_name + '.nc' + sys.stdout.write( + "[INFO] Using rebuilt iceberg restart" + "file: %s\n" % iceberg_rst_source + ) + iceberg_rst_nc = iceberg_link_name + ".nc" common.remove_file(iceberg_rst_nc) os.symlink(iceberg_rst_source, iceberg_rst_link) - #endif (nemo_envar(NEMO_NPROC) == 1) + # endif (nemo_envar(NEMO_NPROC) == 1) if nemo_rst_date_bool: - #Then nemo_dump_time has the form YYYYMMDD + # Then nemo_dump_time has the form YYYYMMDD pass else: - #nemo_dump_time is relative to start of model run and is an - #integer + # nemo_dump_time is relative to start of model run and is an + # integer nemo_dump_time = int(nemo_dump_time) completed_days = nemo_dump_time * (nemo_step_int // 86400) - sys.stdout.write('[INFO] 
Nemo has previously completed %i days\n' % - completed_days) + sys.stdout.write( + "[INFO] Nemo has previously completed %i days\n" % completed_days + ) ln_restart = ".true." restart_ctl = 2 - if common_env['CONTINUE_FROM_FAIL'] == 'true': + if common_env["CONTINUE_FROM_FAIL"] == "true": # This is only used for coupled NWP where we don't have dates in # NEMO restart file names - nemo_next_step = int(nemo_dump_time)+1 + nemo_next_step = int(nemo_dump_time) + 1 else: nemo_next_step = nemo_last_step + 1 else: # This is an NRUN - if nemo_envar['NEMO_START'] != '': - if os.path.isfile(nemo_envar['NEMO_START']): + if nemo_envar["NEMO_START"] != "": + if os.path.isfile(nemo_envar["NEMO_START"]): - os.symlink(nemo_envar['NEMO_START'], 'restart.nc') + os.symlink(nemo_envar["NEMO_START"], "restart.nc") ln_restart = ".true." - elif os.path.isfile('%s_0000.nc' % - nemo_envar['NEMO_START']): - for fname in glob.glob('%s_????.nc' % - nemo_envar['NEMO_START']): - proc_number = fname.split('.')[-2][-4:] + elif os.path.isfile("%s_0000.nc" % nemo_envar["NEMO_START"]): + for fname in glob.glob("%s_????.nc" % nemo_envar["NEMO_START"]): + proc_number = fname.split(".")[-2][-4:] # We need to make sure there isn't already # a restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! - common.remove_file('restart_%s.nc' % proc_number) + common.remove_file("restart_%s.nc" % proc_number) - os.symlink(fname, 'restart_%s.nc' % proc_number) + os.symlink(fname, "restart_%s.nc" % proc_number) ln_restart = ".true." 
- elif os.path.isfile('%s_0000.nc' % - nemo_envar['NEMO_START'][:-3]): - for fname in glob.glob('%s_????.nc' % - nemo_envar['NEMO_START'][:-3]): - proc_number = fname.split('.')[-2][-4:] + elif os.path.isfile("%s_0000.nc" % nemo_envar["NEMO_START"][:-3]): + for fname in glob.glob("%s_????.nc" % nemo_envar["NEMO_START"][:-3]): + proc_number = fname.split(".")[-2][-4:] # We need to make sure there isn't already # a restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! - common.remove_file('restart_%s.nc' % proc_number) + common.remove_file("restart_%s.nc" % proc_number) - os.symlink(fname, 'restart_%s.nc' % proc_number) + os.symlink(fname, "restart_%s.nc" % proc_number) ln_restart = ".true." else: - sys.stderr.write('[FAIL] file %s not found\n' % - nemo_envar['NEMO_START']) + sys.stderr.write( + "[FAIL] file %s not found\n" % nemo_envar["NEMO_START"] + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) else: - #NEMO_START is unset - sys.stdout.write('[WARN] NEMO_START not set\n' - 'NEMO will use climatology\n') + # NEMO_START is unset + sys.stdout.write( + "[WARN] NEMO_START not set\n" "NEMO will use climatology\n" + ) ln_restart = ".false." - if nemo_envar['NEMO_ICEBERGS_START'] != '': + if nemo_envar["NEMO_ICEBERGS_START"] != "": - if os.path.isfile(nemo_envar['NEMO_ICEBERGS_START']): + if os.path.isfile(nemo_envar["NEMO_ICEBERGS_START"]): # We need to make sure there isn't already # an iceberg restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! 
- iceberg_rst_file = iceberg_link_name + '.nc' + iceberg_rst_file = iceberg_link_name + ".nc" common.remove_file(iceberg_rst_file) - os.symlink(nemo_envar['NEMO_ICEBERGS_START'], - iceberg_rst_file) - elif os.path.isfile('%s_0000.nc' % - nemo_envar['NEMO_ICEBERGS_START']): - for fname in glob.glob('%s_????.nc' % - nemo_envar['NEMO_ICEBERGS_START']): - proc_number = fname.split('.')[-2][-4:] + os.symlink(nemo_envar["NEMO_ICEBERGS_START"], iceberg_rst_file) + elif os.path.isfile("%s_0000.nc" % nemo_envar["NEMO_ICEBERGS_START"]): + for fname in glob.glob( + "%s_????.nc" % nemo_envar["NEMO_ICEBERGS_START"] + ): + proc_number = fname.split(".")[-2][-4:] # We need to make sure there isn't already # an iceberg restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! - common.remove_file('%s_%s.nc' % - (iceberg_rst_link, proc_number)) + common.remove_file("%s_%s.nc" % (iceberg_rst_link, proc_number)) - os.symlink(fname, '%s_%s.nc' % - (iceberg_rst_file, proc_number)) - elif os.path.isfile('%s_0000.nc' % - nemo_envar['NEMO_ICEBERGS_START'][:-3]): - for fname in glob.glob('%s_????.nc' % - nemo_envar['NEMO_ICEBERGS_START'][:-3]): - proc_number = fname.split('.')[-2][-4:] + os.symlink(fname, "%s_%s.nc" % (iceberg_rst_file, proc_number)) + elif os.path.isfile("%s_0000.nc" % nemo_envar["NEMO_ICEBERGS_START"][:-3]): + for fname in glob.glob( + "%s_????.nc" % nemo_envar["NEMO_ICEBERGS_START"][:-3] + ): + proc_number = fname.split(".")[-2][-4:] # We need to make sure there isn't already # an iceberg restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! 
- common.remove_file('%s_%s.nc' % - (iceberg_rst_link, proc_number)) + common.remove_file("%s_%s.nc" % (iceberg_rst_link, proc_number)) - os.symlink(fname, '%s_%s.nc' % - (iceberg_rst_link, proc_number)) + os.symlink(fname, "%s_%s.nc" % (iceberg_rst_link, proc_number)) else: - sys.stderr.write('[FAIL] file %s not found\n' % - nemo_envar['NEMO_ICEBERGS_START']) + sys.stderr.write( + "[FAIL] file %s not found\n" % nemo_envar["NEMO_ICEBERGS_START"] + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) else: - #NEMO_ICEBERGS_START unset - sys.stdout.write('[WARN] NEMO_ICEBERGS_START not set or file(s)' - ' not found. Icebergs (if switched on) will start' - ' from a state of zero icebergs\n') + # NEMO_ICEBERGS_START unset + sys.stdout.write( + "[WARN] NEMO_ICEBERGS_START not set or file(s)" + " not found. Icebergs (if switched on) will start" + " from a state of zero icebergs\n" + ) restart_ctl = 0 nemo_next_step = nemo_first_step nemo_last_step = nemo_first_step - 1 - if common_env['CONTINUE_FROM_FAIL'] == 'true': - #Check that the length of run is correct - #(it won't be if this is the wrong restart file) - run_start_dt = datetime.datetime(run_start[0], run_start[1], - run_start[2], run_start[3]) - model_basis_dt = datetime.datetime(model_basis[0], model_basis[1], - model_basis[2], model_basis[3]) - nemo_init_step = (run_start_dt-model_basis_dt).total_seconds() \ - /nemo_step_int - tot_runlen_sec = run_days * 86400 + run_length[3]*3600 \ - + run_length[4]*60 + run_length[5] - nemo_final_step = int((tot_runlen_sec//nemo_step_int) + nemo_init_step) + if common_env["CONTINUE_FROM_FAIL"] == "true": + # Check that the length of run is correct + # (it won't be if this is the wrong restart file) + run_start_dt = datetime.datetime( + run_start[0], run_start[1], run_start[2], run_start[3] + ) + model_basis_dt = datetime.datetime( + model_basis[0], model_basis[1], model_basis[2], model_basis[3] + ) + nemo_init_step = (run_start_dt - model_basis_dt).total_seconds() / nemo_step_int + 
tot_runlen_sec = ( + run_days * 86400 + run_length[3] * 3600 + run_length[4] * 60 + run_length[5] + ) + nemo_final_step = int((tot_runlen_sec // nemo_step_int) + nemo_init_step) # Check that nemo_next_step is the correct number of hours to # match LAST_DUMP_HOURS variable - steps_per_hour = 3600./nemo_step_int - last_dump_hrs = int(common_env['LAST_DUMP_HOURS']) - last_dump_step = int(nemo_init_step + last_dump_hrs*steps_per_hour) - if nemo_next_step-1 != last_dump_step: - sys.stderr.write('[FAIL] Last NEMO restarts not at correct time') + steps_per_hour = 3600.0 / nemo_step_int + last_dump_hrs = int(common_env["LAST_DUMP_HOURS"]) + last_dump_step = int(nemo_init_step + last_dump_hrs * steps_per_hour) + if nemo_next_step - 1 != last_dump_step: + sys.stderr.write("[FAIL] Last NEMO restarts not at correct time") sys.exit(error.RESTART_FILE_ERROR) else: - tot_runlen_sec = run_days * 86400 + run_length[3]*3600 \ - + run_length[4]*60 + run_length[5] + tot_runlen_sec = ( + run_days * 86400 + run_length[3] * 3600 + run_length[4] * 60 + run_length[5] + ) nemo_final_step = (tot_runlen_sec // nemo_step_int) + nemo_last_step - - #Make our call to update the nemo namelist. First generate the list - #of commands - if int(nemo_envar['NEMO_VERSION']) >= 400: + # Make our call to update the nemo namelist. 
First generate the list + # of commands + if int(nemo_envar["NEMO_VERSION"]) >= 400: # from NEMO 4.0 onwards we don't have jpnij in the namelist - update_nl_cmd = '--file %s --runid %so --restart %s --restart_ctl %s' \ - ' --next_step %i --final_step %s --start_date %s --leapyear %i' \ - ' --iproc %s --jproc %s --cpl_river_count %s --verbose' % \ - (nemo_envar['NEMO_NL'], \ - common_env['RUNID'], \ - ln_restart, \ - restart_ctl, \ - nemo_next_step, \ - nemo_final_step, \ - nemo_ndate0, \ - nleapy, \ - nemo_envar['NEMO_IPROC'], \ - nemo_envar['NEMO_JPROC'], \ - common_env['CPL_RIVER_COUNT']) + update_nl_cmd = ( + "--file %s --runid %so --restart %s --restart_ctl %s" + " --next_step %i --final_step %s --start_date %s --leapyear %i" + " --iproc %s --jproc %s --cpl_river_count %s --verbose" + % ( + nemo_envar["NEMO_NL"], + common_env["RUNID"], + ln_restart, + restart_ctl, + nemo_next_step, + nemo_final_step, + nemo_ndate0, + nleapy, + nemo_envar["NEMO_IPROC"], + nemo_envar["NEMO_JPROC"], + common_env["CPL_RIVER_COUNT"], + ) + ) else: - update_nl_cmd = '--file %s --runid %so --restart %s --restart_ctl %s' \ - ' --next_step %i --final_step %s --start_date %s' \ - ' --leapyear %i --iproc %s --jproc %s --ijproc %s' \ - ' --cpl_river_count %s --verbose' % \ - (nemo_envar['NEMO_NL'], \ - common_env['RUNID'], \ - ln_restart, \ - restart_ctl, \ - nemo_next_step, \ - nemo_final_step, \ - nemo_ndate0, \ - nleapy, \ - nemo_envar['NEMO_IPROC'], \ - nemo_envar['NEMO_JPROC'], \ - nemo_envar['NEMO_NPROC'], \ - common_env['CPL_RIVER_COUNT']) - - update_nl_cmd = './update_nemo_nl %s' % update_nl_cmd + update_nl_cmd = ( + "--file %s --runid %so --restart %s --restart_ctl %s" + " --next_step %i --final_step %s --start_date %s" + " --leapyear %i --iproc %s --jproc %s --ijproc %s" + " --cpl_river_count %s --verbose" + % ( + nemo_envar["NEMO_NL"], + common_env["RUNID"], + ln_restart, + restart_ctl, + nemo_next_step, + nemo_final_step, + nemo_ndate0, + nleapy, + nemo_envar["NEMO_IPROC"], + 
nemo_envar["NEMO_JPROC"], + nemo_envar["NEMO_NPROC"], + common_env["CPL_RIVER_COUNT"], + ) + ) + + update_nl_cmd = "./update_nemo_nl %s" % update_nl_cmd # REFACTOR TO USE THE SAFE EXEC SUBPROC update_nl_rcode, _ = shellout._exec_subprocess(update_nl_cmd) if update_nl_rcode != 0: - sys.stderr.write('[FAIL] Error updating nemo namelist\n') + sys.stderr.write("[FAIL] Error updating nemo namelist\n") sys.exit(error.SUBPROC_ERROR) # We just check for the presence of T or t (as in TRUE, True or true) # in the L_OCN_PASS_TRC value. - if ('T' in nemo_envar['L_OCN_PASS_TRC']) or \ - ('t' in nemo_envar['L_OCN_PASS_TRC']): + if ("T" in nemo_envar["L_OCN_PASS_TRC"]) or ("t" in nemo_envar["L_OCN_PASS_TRC"]): - sys.stdout.write('[INFO] nemo_driver: Passive tracer code is ' - 'active.\n') + sys.stdout.write("[INFO] nemo_driver: Passive tracer code is " "active.\n") controller_mode = "run_controller" - top_controller.run_controller(common_env, - restart_ctl, - int(nemo_envar['NEMO_NPROC']), - common_env['RUNID'], - common_env['DRIVERS_VERIFY_RST'], - nemo_dump_time, - controller_mode) + top_controller.run_controller( + common_env, + restart_ctl, + int(nemo_envar["NEMO_NPROC"]), + common_env["RUNID"], + common_env["DRIVERS_VERIFY_RST"], + nemo_dump_time, + controller_mode, + ) else: - sys.stdout.write('[INFO] nemo_driver: ' - 'Passive tracer code not active\n.') + sys.stdout.write("[INFO] nemo_driver: " "Passive tracer code not active\n.") - use_si3 = 'si3' in common_env['models'] + use_si3 = "si3" in common_env["models"] if use_si3: - sys.stdout.write('[INFO] nemo_driver: SI3 code is active.\n') + sys.stdout.write("[INFO] nemo_driver: SI3 code is active.\n") controller_mode = "run_controller" - si3_controller.run_controller(common_env, - restart_ctl, - int(nemo_envar['NEMO_NPROC']), - common_env['RUNID'], - common_env['DRIVERS_VERIFY_RST'], - nemo_dump_time, - controller_mode) + si3_controller.run_controller( + common_env, + restart_ctl, + int(nemo_envar["NEMO_NPROC"]), + 
common_env["RUNID"], + common_env["DRIVERS_VERIFY_RST"], + nemo_dump_time, + controller_mode, + ) return nemo_envar def _set_launcher_command(launcher, nemo_envar): - ''' + """ Setup the launcher command for the executable - ''' - if nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'] == 'unset': + """ + if nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"] == "unset": ss = False - nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'] = \ - common.set_aprun_options(nemo_envar['NEMO_NPROC'], \ - nemo_envar['OCEAN_NODES'], nemo_envar['OMPTHR_OCN'], \ - nemo_envar['OHYPERTHREADS'], ss) \ - if launcher == 'aprun' else '' - - launch_cmd = '%s ./%s' % \ - (nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'], \ - nemo_envar['OCEAN_LINK']) + nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"] = ( + common.set_aprun_options( + nemo_envar["NEMO_NPROC"], + nemo_envar["OCEAN_NODES"], + nemo_envar["OMPTHR_OCN"], + nemo_envar["OHYPERTHREADS"], + ss, + ) + if launcher == "aprun" + else "" + ) + + launch_cmd = "%s ./%s" % ( + nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"], + nemo_envar["OCEAN_LINK"], + ) # Put in quotes to allow this environment variable to be exported as it # contains (or can contain) spaces - nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'] = "'%s'" % \ - nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'] + nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"] = ( + "'%s'" % nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"] + ) return launch_cmd + def get_ocean_resol(nemo_nl_file, run_info): - ''' + """ Determine the ocean resolution. This function is only used when creating the namcouple at run time. 
- ''' + """ # See if resolution is contained within namelists (existent of # namelist_cfg has already been checked) ocean_nml = f90nml.read(nemo_nl_file) # Check the required entries exist - if 'namcfg' not in ocean_nml: - sys.stderr.write('[FAIL] namcfg not found in namelist_cfg\n') + if "namcfg" not in ocean_nml: + sys.stderr.write("[FAIL] namcfg not found in namelist_cfg\n") sys.exit(error.MISSING_OCN_RESOL_NML) - if 'jpiglo' in ocean_nml['namcfg']: + if "jpiglo" in ocean_nml["namcfg"]: # Resolution is contained within namelists - if 'jpiglo' not in ocean_nml['namcfg'] or \ - 'jpjglo' not in ocean_nml['namcfg'] or \ - 'cp_cfg' not in ocean_nml['namcfg'] or \ - 'jp_cfg' not in ocean_nml['namcfg']: - sys.stderr.write('[FAIL] cp_cfg, jp_cfg, jpiglo or jpjglo are ' - 'missing from namelist namcf in namelist_cfg\n') + if ( + "jpiglo" not in ocean_nml["namcfg"] + or "jpjglo" not in ocean_nml["namcfg"] + or "cp_cfg" not in ocean_nml["namcfg"] + or "jp_cfg" not in ocean_nml["namcfg"] + ): + sys.stderr.write( + "[FAIL] cp_cfg, jp_cfg, jpiglo or jpjglo are " + "missing from namelist namcf in namelist_cfg\n" + ) sys.exit(error.MISSING_OCN_RESOL) # Check it is on orca grid - if ocean_nml['namcfg']['cp_cfg'] != 'orca': - sys.stderr.write('[FAIL] we can currently only handle the ' - 'ORCA grid\n') + if ocean_nml["namcfg"]["cp_cfg"] != "orca": + sys.stderr.write("[FAIL] we can currently only handle the " "ORCA grid\n") sys.exit(error.NOT_ORCA_GRID) # Check this is a grid we recognise - if ocean_nml['namcfg']['jp_cfg'] == 25: - run_info['OCN_grid'] = 'orca025' + if ocean_nml["namcfg"]["jp_cfg"] == 25: + run_info["OCN_grid"] = "orca025" else: - run_info['OCN_grid'] = 'orca' + str(ocean_nml['namcfg']['jp_cfg']) + run_info["OCN_grid"] = "orca" + str(ocean_nml["namcfg"]["jp_cfg"]) # Store the ocean resolution - run_info['OCN_resol'] = [ocean_nml['namcfg']['jpiglo'], - ocean_nml['namcfg']['jpjglo']] + run_info["OCN_resol"] = [ + ocean_nml["namcfg"]["jpiglo"], + 
ocean_nml["namcfg"]["jpjglo"], + ] else: # Resolution should be contained within a domain_cfg netCDF file. # Rather than read this file, assume resolution is declared. - if 'OCN_grid' not in run_info: - sys.stderr.write('[FAIL] it is necessary to declare the ocean ' - 'resolution by setting the OCN_RES environment ' - 'variable.') + if "OCN_grid" not in run_info: + sys.stderr.write( + "[FAIL] it is necessary to declare the ocean " + "resolution by setting the OCN_RES environment " + "variable." + ) sys.exit(error.NOT_DECLARE_OCN_RES) else: # Determine the ocean resolution - if run_info['OCN_grid'] not in OCEAN_RESOLS: - sys.stderr.write('[FAIL] the ocean resolution for %s is ' - 'unknown' % run_info['OCN_grid']) + if run_info["OCN_grid"] not in OCEAN_RESOLS: + sys.stderr.write( + "[FAIL] the ocean resolution for %s is " + "unknown" % run_info["OCN_grid"] + ) sys.exit(error.UNKNOWN_OCN_RESOL) else: - run_info['OCN_resol'] = [OCEAN_RESOLS[run_info['OCN_grid']][0], - OCEAN_RESOLS[run_info['OCN_grid']][1]] + run_info["OCN_resol"] = [ + OCEAN_RESOLS[run_info["OCN_grid"]][0], + OCEAN_RESOLS[run_info["OCN_grid"]][1], + ] return run_info + def _sent_coupling_fields(nemo_envar, run_info): - ''' + """ Write the coupling fields sent from NEMO into model_snd_list. This function is only used when creating the namcouple at run time. 
- ''' + """ # Check that file specifying the coupling fields sent from # NEMO is present - if not os.path.exists('OASIS_OCN_SEND'): - sys.stderr.write('[FAIL] OASIS_OCN_SEND is missing.\n') + if not os.path.exists("OASIS_OCN_SEND"): + sys.stderr.write("[FAIL] OASIS_OCN_SEND is missing.\n") sys.exit(error.MISSING_OASIS_OCN_SEND) # Add toyatm to our list of executables - if not 'exec_list' in run_info: - run_info['exec_list'] = [] - run_info['exec_list'].append('toyoce') + if not "exec_list" in run_info: + run_info["exec_list"] = [] + run_info["exec_list"].append("toyoce") # Store ocean resolution if it is provided - if nemo_envar['OCN_RES']: - run_info['OCN_grid'] = nemo_envar['OCN_RES'] + if nemo_envar["OCN_RES"]: + run_info["OCN_grid"] = nemo_envar["OCN_RES"] # Store the nemo version - if nemo_envar['NEMO_VERSION']: - run_info['NEMO_VERSION'] = nemo_envar['NEMO_VERSION'] + if nemo_envar["NEMO_VERSION"]: + run_info["NEMO_VERSION"] = nemo_envar["NEMO_VERSION"] # Determine the ocean resolution - run_info = get_ocean_resol(nemo_envar['NEMO_NL'], run_info) + run_info = get_ocean_resol(nemo_envar["NEMO_NL"], run_info) # If using the default coupling option, we'll need to read the # NEMO namelist later - run_info['nemo_nl'] = nemo_envar['NEMO_NL'] + run_info["nemo_nl"] = nemo_envar["NEMO_NL"] # Read the namelist - oasis_nml = f90nml.read('OASIS_OCN_SEND') + oasis_nml = f90nml.read("OASIS_OCN_SEND") # Check we have the expected information - if 'oasis_ocn_send_nml' not in oasis_nml: - sys.stderr.write('[FAIL] namelist oasis_ocn_send_nml is ' - 'missing from OASIS_OCN_SEND.\n') + if "oasis_ocn_send_nml" not in oasis_nml: + sys.stderr.write( + "[FAIL] namelist oasis_ocn_send_nml is " "missing from OASIS_OCN_SEND.\n" + ) sys.exit(error.MISSING_OASIS_OCN_SEND_NML) - if 'oasis_ocn_send' not in oasis_nml['oasis_ocn_send_nml']: - sys.stderr.write('[FAIL] entry oasis_ocn_send is missing ' - 'from namelist oasis_ocn_send_nml in ' - 'OASIS_OCN_SEND.\n') + if "oasis_ocn_send" not in 
oasis_nml["oasis_ocn_send_nml"]: + sys.stderr.write( + "[FAIL] entry oasis_ocn_send is missing " + "from namelist oasis_ocn_send_nml in " + "OASIS_OCN_SEND.\n" + ) sys.exit(error.MISSING_OASIS_OCN_SEND) # Create a list of fields sent from OCN import write_namcouple - model_snd_list = \ - write_namcouple.add_to_cpl_list( \ - 'OCN', False, 0, - oasis_nml['oasis_ocn_send_nml']['oasis_ocn_send']) + + model_snd_list = write_namcouple.add_to_cpl_list( + "OCN", False, 0, oasis_nml["oasis_ocn_send_nml"]["oasis_ocn_send"] + ) return run_info, model_snd_list + def write_ocean_out_to_stdout(): - ''' + """ Write the contents of ocean.output to stnadard out - ''' + """ # append the ocean output and solver stat file to standard out. Use an # iterator to read the files, incase they are too large to fit into # memory. Try to find both the NEMO 3.6 and NEMO 4.0 solver files for # compatiblilty reasons - nemo_stdout_file = 'ocean.output' - nemo36_solver_file = 'solver.stat' - nemo40_solver_file = 'run.stat' - icebergs_stat_file = 'icebergs.stat' - for nemo_output_file in (nemo_stdout_file, - nemo36_solver_file, nemo40_solver_file, - icebergs_stat_file): + nemo_stdout_file = "ocean.output" + nemo36_solver_file = "solver.stat" + nemo40_solver_file = "run.stat" + icebergs_stat_file = "icebergs.stat" + for nemo_output_file in ( + nemo_stdout_file, + nemo36_solver_file, + nemo40_solver_file, + icebergs_stat_file, + ): # The output file from NEMO4.0 has some suspect utf8 encoding, # this try/except will handle it if os.path.isfile(nemo_output_file): - sys.stdout.write('[INFO] Ocean output from file %s\n' % - nemo_output_file) - with open(nemo_output_file, 'r', encoding='utf-8') as n_out: + sys.stdout.write("[INFO] Ocean output from file %s\n" % nemo_output_file) + with open(nemo_output_file, "r", encoding="utf-8") as n_out: for line in n_out: try: sys.stdout.write(line) except UnicodeEncodeError: pass else: - sys.stdout.write('[INFO] Nemo output file %s not avaliable\n' - % 
nemo_output_file) + sys.stdout.write( + "[INFO] Nemo output file %s not avaliable\n" % nemo_output_file + ) + def _finalize_executable(common_env): - ''' + """ Finalize the NEMO run, copy the nemo namelist to the restart directory for the next cycle, update standard out, and ensure that no errors have been found in the NEMO execution. - ''' - sys.stdout.write('[INFO] finalizing NEMO\n') - sys.stdout.write('[INFO] running finalize in %s\n' % os.getcwd()) + """ + sys.stdout.write("[INFO] finalizing NEMO\n") + sys.stdout.write("[INFO] running finalize in %s\n" % os.getcwd()) write_ocean_out_to_stdout() - _, error_count = shellout._exec_subprocess( - 'grep "E R R O R" ocean.output | wc -l') + _, error_count = shellout._exec_subprocess('grep "E R R O R" ocean.output | wc -l') if int(error_count) >= 1: - sys.stderr.write('[FAIL] An error has been found with the NEMO run.' - ' Please investigate the ocean.output file for more' - ' details\n') + sys.stderr.write( + "[FAIL] An error has been found with the NEMO run." + " Please investigate the ocean.output file for more" + " details\n" + ) sys.exit(error.COMPONENT_MODEL_ERROR) # move the nemo namelist to the restart directory to allow the next cycle # to pick it up nemo_envar_fin = dr_env_lib.env_lib.LoadEnvar() nemo_envar_fin = dr_env_lib.env_lib.load_envar_from_definition( - nemo_envar_fin, dr_env_lib.nemo_def.NEMO_ENVIRONMENT_VARS_FINAL) - nemo_rst = _get_nemorst(nemo_envar_fin['NEMO_NL']) - if os.path.isdir(nemo_rst) and \ - os.path.isfile(nemo_envar_fin['NEMO_NL']): - shutil.copy(nemo_envar_fin['NEMO_NL'], nemo_rst) + nemo_envar_fin, dr_env_lib.nemo_def.NEMO_ENVIRONMENT_VARS_FINAL + ) + nemo_rst = _get_nemorst(nemo_envar_fin["NEMO_NL"]) + if os.path.isdir(nemo_rst) and os.path.isfile(nemo_envar_fin["NEMO_NL"]): + shutil.copy(nemo_envar_fin["NEMO_NL"], nemo_rst) # The only way to check if TOP is active is by checking the # passive tracer env var. 
# Check whether we need to finalize the TOP controller - if ('T' in nemo_envar_fin['L_OCN_PASS_TRC']) or \ - ('t' in nemo_envar_fin['L_OCN_PASS_TRC']): + if ("T" in nemo_envar_fin["L_OCN_PASS_TRC"]) or ( + "t" in nemo_envar_fin["L_OCN_PASS_TRC"] + ): - sys.stdout.write('[INFO] nemo_driver: Finalize TOP controller.') + sys.stdout.write("[INFO] nemo_driver: Finalize TOP controller.") controller_mode = "finalize" top_controller.run_controller([], [], [], [], [], [], controller_mode) - use_si3 = 'si3' in common_env['models'] + use_si3 = "si3" in common_env["models"] if use_si3: - sys.stdout.write('[INFO] nemo_driver: Finalise SI3 controller\n') + sys.stdout.write("[INFO] nemo_driver: Finalise SI3 controller\n") controller_mode = "finalize" si3_controller.run_controller([], [], [], [], [], [], controller_mode) + def run_driver(common_env, mode, run_info): - ''' + """ Run the driver, and return an instance of LoadEnvar and as string containing the launcher command for the NEMO model - ''' - if mode == 'run_driver': + """ + if mode == "run_driver": exe_envar = _setup_executable(common_env) - launch_cmd = _set_launcher_command(common_env['ROSE_LAUNCHER'], - exe_envar) - if run_info['l_namcouple']: + launch_cmd = _set_launcher_command(common_env["ROSE_LAUNCHER"], exe_envar) + if run_info["l_namcouple"]: model_snd_list = None else: - run_info, model_snd_list = \ - _sent_coupling_fields(exe_envar, run_info) - elif mode == 'finalize': + run_info, model_snd_list = _sent_coupling_fields(exe_envar, run_info) + elif mode == "finalize": _finalize_executable(common_env) exe_envar = None launch_cmd = None model_snd_list = None - elif mode == 'failure': + elif mode == "failure": # subset of operations of the model fails write_ocean_out_to_stdout() exe_envar = None diff --git a/Coupled_Drivers/rivers_driver.py b/Coupled_Drivers/rivers_driver.py index 42ed0f7..f959bd9 100644 --- a/Coupled_Drivers/rivers_driver.py +++ b/Coupled_Drivers/rivers_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env 
python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2025 Met Office. All rights reserved. @@ -16,7 +16,7 @@ DESCRIPTION Driver for the JULES river standalone model, called from link_drivers. -''' +""" import os import sys @@ -27,6 +27,7 @@ import error import dr_env_lib.rivers_def import dr_env_lib.env_lib + try: import f90nml except ImportError: @@ -34,74 +35,82 @@ def _setup_dates(common_envar): - ''' + """ Setup the dates for the JULES river model run - ''' - calendar = common_envar['CALENDAR'] + """ + calendar = common_envar["CALENDAR"] - sys.stdout.write('[INFO] River calendar= %s ' % calendar) - if calendar not in ('360day', '365day', 'gregorian'): - sys.stderr.write('[FAIL] Calendar type %s not recognised\n' % - calendar) + sys.stdout.write("[INFO] River calendar= %s " % calendar) + if calendar not in ("360day", "365day", "gregorian"): + sys.stderr.write("[FAIL] Calendar type %s not recognised\n" % calendar) sys.exit(error.INVALID_EVAR_ERROR) # Find the start and end times in the right format - task_start = common_envar['TASKSTART'].split(',') - task_length = common_envar['TASKLENGTH'].split(',') - - start_date = '%s%s%sT%s%sZ' % (task_start[0].zfill(4), - task_start[1].zfill(2), - task_start[2].zfill(2), - task_start[3].zfill(2), - task_start[4].zfill(2)) - format_date = '%Y-%m-%d %H:%M:%S' - length_date = 'P%sY%sM%sDT%sH%sM' % (task_length[0], task_length[1], - task_length[2], task_length[3], - task_length[4]) + task_start = common_envar["TASKSTART"].split(",") + task_length = common_envar["TASKLENGTH"].split(",") + + start_date = "%s%s%sT%s%sZ" % ( + task_start[0].zfill(4), + task_start[1].zfill(2), + task_start[2].zfill(2), + task_start[3].zfill(2), + task_start[4].zfill(2), + ) + format_date = "%Y-%m-%d %H:%M:%S" + length_date = "P%sY%sM%sDT%sH%sM" % ( + task_length[0], + task_length[1], + task_length[2], + task_length[3], + task_length[4], + ) start_cmd = 'isodatetime %s -f "%s"' % (start_date, 
format_date) - end_cmd = 'isodatetime %s -f "%s" -s %s --calendar %s' % (start_date, format_date, - length_date, calendar) + end_cmd = 'isodatetime %s -f "%s" -s %s --calendar %s' % ( + start_date, + format_date, + length_date, + calendar, + ) _, run_start = shellout._exec_subprocess(start_cmd) _, run_end = shellout._exec_subprocess(end_cmd) return run_start.strip(), run_end.strip() + def _update_river_nl(river_envar, run_start, run_end): - ''' + """ Check that the JULES river namelist files exist, update the start and end dates, and create the output directory - ''' + """ # Check that the namelist files exist - output_nl = river_envar['OUTPUT_NLIST'] - time_nl = river_envar['TIME_NLIST'] + output_nl = river_envar["OUTPUT_NLIST"] + time_nl = river_envar["TIME_NLIST"] if not os.path.isfile(output_nl): - sys.stderr.write('[FAIL] Can not find the river namelist file %s\n' % - output_nl) + sys.stderr.write("[FAIL] Can not find the river namelist file %s\n" % output_nl) sys.exit(error.MISSING_DRIVER_FILE_ERROR) if not os.path.isfile(time_nl): - sys.stderr.write('[FAIL] Can not find the river namelist file %s\n' % - time_nl) + sys.stderr.write("[FAIL] Can not find the river namelist file %s\n" % time_nl) sys.exit(error.MISSING_DRIVER_FILE_ERROR) # Update the start and end dates mod_outputnl = common.ModNamelist(output_nl) - mod_outputnl.var_val('output_start', run_start) - mod_outputnl.var_val('output_end', run_end) + mod_outputnl.var_val("output_start", run_start) + mod_outputnl.var_val("output_end", run_end) mod_outputnl.replace() mod_timenl = common.ModNamelist(time_nl) - mod_timenl.var_val('main_run_start', run_start) - mod_timenl.var_val('main_run_end', run_end) + mod_timenl.var_val("main_run_start", run_start) + mod_timenl.var_val("main_run_end", run_end) mod_timenl.replace() # Create the output directory, do not rely on f90nml - rcode, val = shellout._exec_subprocess('grep output_dir %s' % output_nl) + rcode, val = shellout._exec_subprocess("grep output_dir %s" % 
output_nl) if rcode == 0: try: - output_dir = re.findall(r'[\"\'](.*?)[\"\']', val)[0].rstrip('/') + output_dir = re.findall(r"[\"\'](.*?)[\"\']", val)[0].rstrip("/") pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True) except IndexError: # No path found @@ -109,19 +118,19 @@ def _update_river_nl(river_envar, run_start, run_end): def _setup_executable(common_envar): - ''' + """ Setup the environment and any files required by the executable - ''' + """ # Create the environment variable container river_envar = dr_env_lib.env_lib.LoadEnvar() # Load the environment variables required river_envar = dr_env_lib.env_lib.load_envar_from_definition( - river_envar, dr_env_lib.rivers_def.RIVERS_ENVIRONMENT_VARS_INITIAL) + river_envar, dr_env_lib.rivers_def.RIVERS_ENVIRONMENT_VARS_INITIAL + ) - #Link the ocean executable - common.remove_file(river_envar['RIVER_LINK']) - os.symlink(river_envar['RIVER_EXEC'], - river_envar['RIVER_LINK']) + # Link the ocean executable + common.remove_file(river_envar["RIVER_LINK"]) + os.symlink(river_envar["RIVER_EXEC"], river_envar["RIVER_LINK"]) # Setup date variables run_start, run_end = _setup_dates(common_envar) @@ -133,133 +142,144 @@ def _setup_executable(common_envar): def _set_launcher_command(river_envar): - ''' + """ Setup the launcher command for the executable - ''' - launch_cmd = river_envar['ROSE_LAUNCHER_PREOPTS_RIVER'] + """ + launch_cmd = river_envar["ROSE_LAUNCHER_PREOPTS_RIVER"] - launch_cmd = '%s ./%s' % \ - (river_envar['ROSE_LAUNCHER_PREOPTS_RIVER'], - river_envar['RIVER_LINK']) + launch_cmd = "%s ./%s" % ( + river_envar["ROSE_LAUNCHER_PREOPTS_RIVER"], + river_envar["RIVER_LINK"], + ) # Put in quotes to allow this environment variable to be exported as it # contains (or can contain) spaces - river_envar['ROSE_LAUNCHER_PREOPTS_RIVER'] = "'%s'" % \ - river_envar['ROSE_LAUNCHER_PREOPTS_RIVER'] + river_envar["ROSE_LAUNCHER_PREOPTS_RIVER"] = ( + "'%s'" % river_envar["ROSE_LAUNCHER_PREOPTS_RIVER"] + ) return launch_cmd def 
_get_river_resol(river_nl_file, run_info): - ''' + """ Determine the JULES river resolution. This function is only used when creating the namcouple at run time. - ''' + """ # Check if the namelist file exists if not os.path.isfile(river_nl_file): - sys.stderr.write('[FAIL] Can not find the river namelist file %s\n' % - river_nl_file) + sys.stderr.write( + "[FAIL] Can not find the river namelist file %s\n" % river_nl_file + ) sys.exit(error.MISSING_DRIVER_FILE_ERROR) # Read in the resolution of JULES river river_nml = f90nml.read(river_nl_file) # Check the required entries exist - if 'jules_input_grid' not in river_nml: - sys.stderr.write('[FAIL] jules_input_grid not found in %s\n' % - river_nl_file) + if "jules_input_grid" not in river_nml: + sys.stderr.write("[FAIL] jules_input_grid not found in %s\n" % river_nl_file) sys.exit(error.MISSING_RIVER_RESOL_NML) - if 'nx' not in river_nml['jules_input_grid'] or \ - 'ny' not in river_nml['jules_input_grid']: - sys.stderr.write('[FAIL] nx or ny are missing from namelist' - 'jules_input_grid in %s\n' % river_nl_file) + if ( + "nx" not in river_nml["jules_input_grid"] + or "ny" not in river_nml["jules_input_grid"] + ): + sys.stderr.write( + "[FAIL] nx or ny are missing from namelist" + "jules_input_grid in %s\n" % river_nl_file + ) sys.exit(error.MISSING_RIVER_RESOL) # Store the ocean resolution - run_info['RIV_resol'] = [river_nml['jules_input_grid']['nx'], - river_nml['jules_input_grid']['ny']] + run_info["RIV_resol"] = [ + river_nml["jules_input_grid"]["nx"], + river_nml["jules_input_grid"]["ny"], + ] return run_info def _sent_coupling_fields(river_envar, run_info): - ''' + """ Write the coupling fields sent from JULES river into model_snd_list. This function is only used when creating the namcouple at run time. 
- ''' + """ from write_namcouple import add_to_cpl_list # Check that file specifying the coupling fields sent from # JULES river is present - if not os.path.exists('OASIS_RIV_SEND'): - sys.stderr.write('[FAIL] OASIS_RIV_SEND is missing.\n') + if not os.path.exists("OASIS_RIV_SEND"): + sys.stderr.write("[FAIL] OASIS_RIV_SEND is missing.\n") sys.exit(error.MISSING_OASIS_RIV_SEND) # Add toyatm to our list of executables - if not 'exec_list' in run_info: - run_info['exec_list'] = [] - run_info['exec_list'].append('toyriv') + if not "exec_list" in run_info: + run_info["exec_list"] = [] + run_info["exec_list"].append("toyriv") # Determine the ocean resolution - run_info = _get_river_resol(river_envar['MODEL_NLIST'], run_info) + run_info = _get_river_resol(river_envar["MODEL_NLIST"], run_info) # If using the default coupling option, we'll need to read the # JULES river namelist later - river_nl = river_envar['COUPLE_NLIST'] + river_nl = river_envar["COUPLE_NLIST"] if not os.path.isfile(river_nl): - sys.stderr.write('[FAIL] Can not find the river namelist file %s\n' % - river_nl) + sys.stderr.write("[FAIL] Can not find the river namelist file %s\n" % river_nl) sys.exit(error.MISSING_DRIVER_FILE_ERROR) - run_info['river_nl'] = river_nl + run_info["river_nl"] = river_nl # Read the namelist - oasis_nml = f90nml.read('OASIS_RIV_SEND') + oasis_nml = f90nml.read("OASIS_RIV_SEND") # Check we have the expected information - if 'oasis_riv_send_nml' not in oasis_nml: - sys.stderr.write('[FAIL] namelist oasis_riv_send_nml is ' - 'missing from OASIS_RIV_SEND.\n') + if "oasis_riv_send_nml" not in oasis_nml: + sys.stderr.write( + "[FAIL] namelist oasis_riv_send_nml is " "missing from OASIS_RIV_SEND.\n" + ) sys.exit(error.MISSING_OASIS_RIV_SEND_NML) - if 'oasis_riv_send' not in oasis_nml['oasis_riv_send_nml']: - sys.stderr.write('[FAIL] entry oasis_riv_send is missing ' - 'from namelist oasis_riv_send_nml in ' - 'OASIS_RIV_SEND.\n') + if "oasis_riv_send" not in 
oasis_nml["oasis_riv_send_nml"]: + sys.stderr.write( + "[FAIL] entry oasis_riv_send is missing " + "from namelist oasis_riv_send_nml in " + "OASIS_RIV_SEND.\n" + ) sys.exit(error.MISSING_OASIS_RIV_SEND) # Create a list of fields sent from RIV model_snd_list = add_to_cpl_list( - 'RIV', False, 0, oasis_nml['oasis_riv_send_nml']['oasis_riv_send'] + "RIV", False, 0, oasis_nml["oasis_riv_send_nml"]["oasis_riv_send"] ) return run_info, model_snd_list + def _finalize_executable(): - ''' + """ Finalize the JULES river run, copy the nemo namelist to the restart directory for the next cycle, update standard out, and ensure that no errors have been found in the NEMO execution. - ''' - sys.stdout.write('[INFO] finalizing JULES river') - sys.stdout.write('[INFO] running finalize in %s' % os.getcwd()) + """ + sys.stdout.write("[INFO] finalizing JULES river") + sys.stdout.write("[INFO] running finalize in %s" % os.getcwd()) # The JULES river output is written by default to the standard output # JULES river does not produce a restart file yet + def run_driver(common_env, mode, run_info): - ''' + """ Run the driver, and return an instance of common.LoadEnvar and as string containing the launcher command for the JULES river model - ''' - if mode == 'run_driver': + """ + if mode == "run_driver": exe_envar = _setup_executable(common_env) launch_cmd = _set_launcher_command(exe_envar) - if run_info['l_namcouple']: + if run_info["l_namcouple"]: model_snd_list = None else: - run_info, model_snd_list = \ - _sent_coupling_fields(exe_envar, run_info) - elif mode == 'finalize': + run_info, model_snd_list = _sent_coupling_fields(exe_envar, run_info) + elif mode == "finalize": _finalize_executable() exe_envar = None launch_cmd = None diff --git a/Coupled_Drivers/si3_controller.py b/Coupled_Drivers/si3_controller.py index 48c6ce8..740ca6c 100644 --- a/Coupled_Drivers/si3_controller.py +++ b/Coupled_Drivers/si3_controller.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" 
*****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions @@ -14,7 +14,7 @@ si3_controller.py DESCRIPTION -''' +""" import re @@ -27,73 +27,77 @@ import dr_env_lib.ocn_cont_def import dr_env_lib.env_lib + def _check_si3nl_envar(envar_container): - ''' + """ Get the si3 namelist file exists - ''' + """ - #Information will be retrieved from this file during the running of the - #controller, so check it exists. + # Information will be retrieved from this file during the running of the + # controller, so check it exists. - if not os.path.isfile(envar_container['SI3_NL']): - sys.stderr.write('[FAIL] si3_controller: Can not find the SI3 namelist ' - 'file %s\n' % envar_container['SI3_NL']) + if not os.path.isfile(envar_container["SI3_NL"]): + sys.stderr.write( + "[FAIL] si3_controller: Can not find the SI3 namelist " + "file %s\n" % envar_container["SI3_NL"] + ) sys.exit(error.MISSING_CONTROLLER_FILE_ERROR) return 0 + def _get_si3rst(si3_nl_file): - ''' + """ Retrieve the SI3 restart directory from the nemo namelist file - ''' + """ si3rst_rcode, si3rst_val = shellout._exec_subprocess( - 'grep cn_icerst_outdir %s' % si3_nl_file) + "grep cn_icerst_outdir %s" % si3_nl_file + ) if si3rst_rcode == 0: - si3_rst = re.findall('[\"\'](.*?)[\"\']', si3rst_val)[0] - if si3_rst[-1] == '/': + si3_rst = re.findall("[\"'](.*?)[\"']", si3rst_val)[0] + if si3_rst[-1] == "/": si3_rst = si3_rst[:-1] return si3_rst return None def _verify_si3_rst(cyclepointstr, nemo_nproc, si3_restart_files): - ''' + """ Verify that the SI3 restart files match what we expect from the number of NEMO processors. 
- ''' - si3_rst_regex = r'%s_restart_ice(_\d+)?\.nc' % cyclepointstr - current_rst_files = [f for f in si3_restart_files if - re.findall(si3_rst_regex, f)] - - if len(current_rst_files) not in (1, nemo_nproc, nemo_nproc+1): - sys.stderr.write('[FAIL] Unable to find SI3 restart files for' - ' this cycle. Must either have one rebuilt file,' - ' as many as there are nemo processors (%i) or' - ' both rebuilt and processor files.' - '[FAIL] Found %i SI3 restart files\n' - % (nemo_nproc, len(current_rst_files))) + """ + si3_rst_regex = r"%s_restart_ice(_\d+)?\.nc" % cyclepointstr + current_rst_files = [f for f in si3_restart_files if re.findall(si3_rst_regex, f)] + + if len(current_rst_files) not in (1, nemo_nproc, nemo_nproc + 1): + sys.stderr.write( + "[FAIL] Unable to find SI3 restart files for" + " this cycle. Must either have one rebuilt file," + " as many as there are nemo processors (%i) or" + " both rebuilt and processor files." + "[FAIL] Found %i SI3 restart files\n" % (nemo_nproc, len(current_rst_files)) + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) def _load_environment_variables(si3_envar): - ''' + """ Load the SI3 environment variables required for the model run into the si3_envar container - ''' + """ si3_envar = dr_env_lib.env_lib.load_envar_from_definition( - si3_envar, dr_env_lib.ocn_cont_def.SI3_ENVIRONMENT_VARS_INITIAL) + si3_envar, dr_env_lib.ocn_cont_def.SI3_ENVIRONMENT_VARS_INITIAL + ) return si3_envar -def _setup_si3_controller(common_env, - restart_ctl, - nemo_nproc, - runid, - verify_restart, - nemo_dump_time): - ''' + +def _setup_si3_controller( + common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time +): + """ Setup the environment and any files required by the executable - ''' + """ # Create the environment variable container si3_envar = dr_env_lib.env_lib.LoadEnvar() @@ -103,13 +107,14 @@ def _setup_si3_controller(common_env, # SI3 hasn't been set up to use CONTINUE_FROM_FAIL yet # Raise an error if it's set to prevent 
unexpected behaviour in future - if common_env['CONTINUE_FROM_FAIL'] == 'true': - sys.stderr.write('[FAIL] si3_controller is not coded to work with' - 'CONTINUE_FROM_FAIL=true') + if common_env["CONTINUE_FROM_FAIL"] == "true": + sys.stderr.write( + "[FAIL] si3_controller is not coded to work with" "CONTINUE_FROM_FAIL=true" + ) sys.exit(error.INVALID_EVAR_ERROR) restart_direcs = [] - si3_rst = _get_si3rst(si3_envar['SI3_NL']) + si3_rst = _get_si3rst(si3_envar["SI3_NL"]) if si3_rst: restart_direcs.append(si3_rst) @@ -123,8 +128,9 @@ def _setup_si3_controller(common_env, # _yyyymmdd_restart_ice.nc" or # _yyyymmdd_restart_ice.nc" in the case # of the restart file having been rebuilt. - si3_restart_files = [f for f in os.listdir(si3_rst) if - re.findall(r'.+_\d{8}_restart_ice', f)] + si3_restart_files = [ + f for f in os.listdir(si3_rst) if re.findall(r".+_\d{8}_restart_ice", f) + ] si3_restart_files.sort() # Default position is that we're starting from a restart file and @@ -138,86 +144,84 @@ def _setup_si3_controller(common_env, else: # If we didn't find any restart files in the suite data directory, # check the SI3_START env var. - if common_env['CONTINUE'] == 'false': - latest_si3_dump = si3_envar['SI3_START'] + if common_env["CONTINUE"] == "false": + latest_si3_dump = si3_envar["SI3_START"] else: # We don't have a restart file, which implies we must be # starting from climatology. - latest_si3_dump = 'unset' + latest_si3_dump = "unset" # If we have a link to restart_ice.nc left over from a previous run, # remove it for both NRUNs and CRUNs - common.remove_file('restart_ice.nc') + common.remove_file("restart_ice.nc") # Is this a CRUN or an NRUN? 
- if common_env['CONTINUE'] == 'false': + if common_env["CONTINUE"] == "false": # This is definitely a new run - sys.stdout.write('[INFO] si3_controller: New SI3 run\n\n') + sys.stdout.write("[INFO] si3_controller: New SI3 run\n\n") if os.path.isfile(latest_si3_dump): - sys.stdout.write('[INFO] si3_controller: Removing old SI3 ' - 'restart data\n\n') + sys.stdout.write( + "[INFO] si3_controller: Removing old SI3 " "restart data\n\n" + ) # For NRUNS, get rid of any existing restart files from # previous runs. - for file_path in glob.glob(si3_rst+'/*restart_ice*'): + for file_path in glob.glob(si3_rst + "/*restart_ice*"): # os.path.isfile will return true for symbolic links as well # as physical files. common.remove_file(file_path) # If we do have a SI3 start dump. - if si3_envar['SI3_START'] != '': - if os.path.isfile(si3_envar['SI3_START']): - os.symlink(si3_envar['SI3_START'], 'restart_ice.nc') - elif os.path.isfile('%s_0000.nc' % - si3_envar['SI3_START']): - for fname in glob.glob('%s_????.nc' % - si3_envar['SI3_START']): - proc_number = fname.split('.')[-2][-4:] - common.remove_file('restart_ice_%s.nc' % proc_number) - os.symlink(fname, 'restart_ice_%s.nc' % proc_number) - elif os.path.isfile('%s_0000.nc' % - si3_envar['SI3_START'][:-3]): - for fname in glob.glob('%s_????.nc' % - si3_envar['SI3_START'][:-3]): - proc_number = fname.split('.')[-2][-4:] + if si3_envar["SI3_START"] != "": + if os.path.isfile(si3_envar["SI3_START"]): + os.symlink(si3_envar["SI3_START"], "restart_ice.nc") + elif os.path.isfile("%s_0000.nc" % si3_envar["SI3_START"]): + for fname in glob.glob("%s_????.nc" % si3_envar["SI3_START"]): + proc_number = fname.split(".")[-2][-4:] + common.remove_file("restart_ice_%s.nc" % proc_number) + os.symlink(fname, "restart_ice_%s.nc" % proc_number) + elif os.path.isfile("%s_0000.nc" % si3_envar["SI3_START"][:-3]): + for fname in glob.glob("%s_????.nc" % si3_envar["SI3_START"][:-3]): + proc_number = fname.split(".")[-2][-4:] # We need to make sure there 
isn't already # a restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! - common.remove_file('restart_ice_%s.nc' % proc_number) + common.remove_file("restart_ice_%s.nc" % proc_number) - os.symlink(fname, 'restart_ice_%s.nc' % proc_number) + os.symlink(fname, "restart_ice_%s.nc" % proc_number) else: - sys.stderr.write('[FAIL] file %s not found\n' % - si3_envar['SI3_START']) + sys.stderr.write("[FAIL] file %s not found\n" % si3_envar["SI3_START"]) sys.exit(error.MISSING_MODEL_FILE_ERROR) else: # If there's no SI3 restart we must be starting from climatology. - sys.stdout.write('[INFO] si3_controller: SI3 is starting from' - ' climatology.\n\n') - + sys.stdout.write( + "[INFO] si3_controller: SI3 is starting from" " climatology.\n\n" + ) elif os.path.isfile(latest_si3_dump): # We have a valid restart file so we're not starting from climatology # This could be a new run or a continutaion run. - si3_dump_time = re.findall(r'_(\d*)_restart_ice', latest_si3_dump)[0] + si3_dump_time = re.findall(r"_(\d*)_restart_ice", latest_si3_dump)[0] - if verify_restart == 'True': + if verify_restart == "True": _verify_si3_rst(nemo_dump_time, nemo_nproc, si3_restart_files) if si3_dump_time != nemo_dump_time: - sys.stderr.write('[FAIL] si3_controller: Mismatch in SI3 restart ' - 'file date %s and NEMO restart file date %s\n' - % (si3_dump_time, nemo_dump_time)) + sys.stderr.write( + "[FAIL] si3_controller: Mismatch in SI3 restart " + "file date %s and NEMO restart file date %s\n" + % (si3_dump_time, nemo_dump_time) + ) sys.exit(error.MISMATCH_RESTART_DATE_ERROR) - # This could be a new run (the first NRUN of a cycle) or # a CRUN. - sys.stdout.write('[INFO] si3_controller: Restart data avaliable in ' - 'SI3 restart directory %s. Restarting from previous ' - 'task output\n\n' - % si3_rst) + sys.stdout.write( + "[INFO] si3_controller: Restart data avaliable in " + "SI3 restart directory %s. 
Restarting from previous " + "task output\n\n" % si3_rst + ) # For each PE, set up a link to the appropriate sub-domain # restart file. @@ -225,25 +229,33 @@ def _setup_si3_controller(common_env, for i_proc in range(nemo_nproc): tag = str(i_proc).zfill(4) - si3_rst_source = '%s/%so_%s_restart_ice_%s.nc' % \ - (si3_rst, runid, si3_dump_time, tag) - si3_rst_link = 'restart_ice_%s.nc' % tag + si3_rst_source = "%s/%so_%s_restart_ice_%s.nc" % ( + si3_rst, + runid, + si3_dump_time, + tag, + ) + si3_rst_link = "restart_ice_%s.nc" % tag common.remove_file(si3_rst_link) if os.path.isfile(si3_rst_source): os.symlink(si3_rst_source, si3_rst_link) si3_restart_count += 1 if si3_restart_count < 1: - sys.stdout.write('[INFO] No SI3 sub-PE restarts found\n') + sys.stdout.write("[INFO] No SI3 sub-PE restarts found\n") # We found no passive tracer restart sub-domain files let's # look for a full domain file. - si3_rst_source = '%s/%so_%s_restart_ice.nc' % \ - (si3_rst, runid, si3_dump_time) + si3_rst_source = "%s/%so_%s_restart_ice.nc" % ( + si3_rst, + runid, + si3_dump_time, + ) if os.path.isfile(si3_rst_source): - sys.stdout.write('[INFO] Using rebuilt SI3 restart '\ - 'file: %s\n' % si3_rst_source) - si3_rst_link = 'restart_ice.nc' + sys.stdout.write( + "[INFO] Using rebuilt SI3 restart " "file: %s\n" % si3_rst_source + ) + si3_rst_link = "restart_ice.nc" common.remove_file(si3_rst_link) os.symlink(si3_rst_source, si3_rst_link) @@ -253,48 +265,45 @@ def _setup_si3_controller(common_env, # during a CRUN seems pretty slim. 
else: - sys.stderr.write('[FAIL] si3_controller: No restart data avaliable in ' - 'SI3 restart directory:\n %s\n' % si3_rst) + sys.stderr.write( + "[FAIL] si3_controller: No restart data avaliable in " + "SI3 restart directory:\n %s\n" % si3_rst + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) - - return si3_envar + def _set_launcher_command(_): - ''' + """ Setup the launcher command for the executable - ''' - sys.stdout.write('[INFO] si3_controller: SI3 uses the same launch ' - 'command as NEMO\n\n') - launch_cmd = '' + """ + sys.stdout.write( + "[INFO] si3_controller: SI3 uses the same launch " "command as NEMO\n\n" + ) + launch_cmd = "" return launch_cmd + def _finalize_si3_controller(): - ''' + """ Finalize the passive SI3 setup - ''' - -def run_controller(common_env, - restart_ctl, - nemo_nproc, - runid, - verify_restart, - nemo_dump_time, - mode): - ''' + """ + + +def run_controller( + common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time, mode +): + """ Run the passive tracer controller. - ''' - if mode == 'run_controller': - exe_envar = _setup_si3_controller(common_env, - restart_ctl, - nemo_nproc, - runid, - verify_restart, - nemo_dump_time) + """ + if mode == "run_controller": + exe_envar = _setup_si3_controller( + common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time + ) launch_cmd = _set_launcher_command(exe_envar) - elif mode == 'finalize': + elif mode == "finalize": _finalize_si3_controller() exe_envar = None launch_cmd = None diff --git a/Coupled_Drivers/top_controller.py b/Coupled_Drivers/top_controller.py index 833834a..518299d 100644 --- a/Coupled_Drivers/top_controller.py +++ b/Coupled_Drivers/top_controller.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. @@ -59,8 +59,7 @@ all pre- and post-proccessing code. 
Version compatibility: NEMO vn3.6 -''' - +""" import re @@ -77,73 +76,82 @@ # Define errors for the TOP controller only SERIAL_MODE_ERROR = 99 + def _check_topnl_envar(envar_container): - ''' + """ As the environment variable TOP_NL is required by both the setup and finalize functions, this will be encapsulated here. - ''' - #Information will be retrieved from this file during the running of the - #controller, so check it exists. - - if not os.path.isfile(envar_container['TOP_NL']): - sys.stderr.write('[FAIL] top_controller: Can not find the TOP namelist ' - 'file %s\n' % envar_container['TOP_NL']) + """ + # Information will be retrieved from this file during the running of the + # controller, so check it exists. + + if not os.path.isfile(envar_container["TOP_NL"]): + sys.stderr.write( + "[FAIL] top_controller: Can not find the TOP namelist " + "file %s\n" % envar_container["TOP_NL"] + ) sys.exit(error.MISSING_CONTROLLER_FILE_ERROR) return 0 + def _get_toprst_dir(top_nl_file): - ''' + """ Retrieve the restart directory from the TOP namelist file. Currently TOP/MEDUSA uses the same restart directory as the main NEMO component so we could in principle get this from the NEMO namelist. However, for complete flexibility we interrogate the TOP namelist in case this is ever defined as something different. - ''' + """ toprst_rcode, toprst_val = shellout._exec_subprocess( - 'grep cn_trcrst_outdir %s' % top_nl_file) + "grep cn_trcrst_outdir %s" % top_nl_file + ) if toprst_rcode == 0: - top_rst_dir = re.findall('[\"\'](.*?)[\"\']', toprst_val)[0] - if top_rst_dir[-1] == '/': + top_rst_dir = re.findall("[\"'](.*?)[\"']", toprst_val)[0] + if top_rst_dir[-1] == "/": top_rst_dir = top_rst_dir[:-1] return top_rst_dir + def _verify_top_rst(cyclepointstr, nemo_nproc, top_restart_files): - ''' + """ Verify that the top restart files match what we expect from the number of NEMO processors. 
- ''' - top_rst_regex = r'%s_restart_trc(_\d+)?\.nc' % cyclepointstr - current_rst_files = [f for f in top_restart_files if - re.findall(top_rst_regex, f)] - if len(current_rst_files) not in (1, nemo_nproc, nemo_nproc+1): - sys.stderr.write('[FAIL] Unable to find top restart files for' - ' this cycle. Must either have one rebuilt file,' - ' as many as there are nemo processors (%i) or' - ' both rebuilt and processor files.' - '[FAIL] Found %i top restart files\n' - % (nemo_nproc, len(current_rst_files))) + """ + top_rst_regex = r"%s_restart_trc(_\d+)?\.nc" % cyclepointstr + current_rst_files = [f for f in top_restart_files if re.findall(top_rst_regex, f)] + if len(current_rst_files) not in (1, nemo_nproc, nemo_nproc + 1): + sys.stderr.write( + "[FAIL] Unable to find top restart files for" + " this cycle. Must either have one rebuilt file," + " as many as there are nemo processors (%i) or" + " both rebuilt and processor files." + "[FAIL] Found %i top restart files\n" % (nemo_nproc, len(current_rst_files)) + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) def _load_environment_variables(top_envar): - ''' + """ Load the TOP environment variables required for the model run into the top_envar container - ''' + """ top_envar = dr_env_lib.env_lib.load_envar_from_definition( - top_envar, dr_env_lib.ocn_cont_def.TOP_ENVIRONMENT_VARS_INITIAL) + top_envar, dr_env_lib.ocn_cont_def.TOP_ENVIRONMENT_VARS_INITIAL + ) _ = _check_topnl_envar(top_envar) return top_envar -def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid, - verify_restart, nemo_dump_time): - ''' + +def _setup_top_controller( + common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time +): + """ Setup the environment and any files required by the executable - ''' + """ # Create the environment variable container top_envar = dr_env_lib.env_lib.LoadEnvar() @@ -152,17 +160,17 @@ def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid, # TOP controller hasn't been set up to use 
CONTINUE_FROM_FAIL yet # Raise an error if it's set to prevent unexpected behaviour in future - if common_env['CONTINUE_FROM_FAIL'] == 'true': - sys.stderr.write('[FAIL] top_controller is not coded to work with' - 'CONTINUE_FROM_FAIL=true') + if common_env["CONTINUE_FROM_FAIL"] == "true": + sys.stderr.write( + "[FAIL] top_controller is not coded to work with" "CONTINUE_FROM_FAIL=true" + ) sys.exit(error.INVALID_EVAR_ERROR) - # Read restart from TOP namelist restart_direcs = [] # Find the TOP restart location - top_rst = _get_toprst_dir(top_envar['TOP_NL']) + top_rst = _get_toprst_dir(top_envar["TOP_NL"]) if top_rst: restart_direcs.append(top_rst) @@ -177,8 +185,9 @@ def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid, # _yyyymmdd_restart_trc_.nc" or # _yyyymmdd_restart_trc.nc" in the case # of the restart file having been rebuilt. - top_restart_files = [f for f in os.listdir(top_rst) if - re.findall(r'.+_\d{8}_restart_trc', f)] + top_restart_files = [ + f for f in os.listdir(top_rst) if re.findall(r".+_\d{8}_restart_trc", f) + ] top_restart_files.sort() # Default position is that we're starting from a restart file and @@ -192,57 +201,55 @@ def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid, else: # If we didn't find any restart files in the suite data directory, # check the TOP_START env var. - if common_env['CONTINUE'] == 'false': - latest_top_dump = top_envar['TOP_START'] + if common_env["CONTINUE"] == "false": + latest_top_dump = top_envar["TOP_START"] else: # We don't have a restart file, which implies we must be # starting from climatology. - latest_top_dump = 'unset' + latest_top_dump = "unset" - top_init_dir = '.' + top_init_dir = "." # If we have a link to restart_trc.nc left over from a previous run, # remove it for both NRUNs and CRUNs - common.remove_file('restart_trc.nc') + common.remove_file("restart_trc.nc") # Is this a CRUN or an NRUN? 
- if common_env['CONTINUE'] == 'false': + if common_env["CONTINUE"] == "false": # This is definitely a new run - sys.stdout.write('[INFO] top_controller: New TOP/MEDUSA run\n\n') + sys.stdout.write("[INFO] top_controller: New TOP/MEDUSA run\n\n") if os.path.isfile(latest_top_dump): - sys.stdout.write('[INFO] top_controller: Removing old TOP ' - 'restart data\n\n') + sys.stdout.write( + "[INFO] top_controller: Removing old TOP " "restart data\n\n" + ) # For NRUNS, get rid of any existing restart files from # previous runs. - for file_path in glob.glob(top_rst+'/*restart_trc*'): + for file_path in glob.glob(top_rst + "/*restart_trc*"): # os.path.isfile will return true for symbolic links as well # as physical files. common.remove_file(file_path) # If we do have a passive tracer start dump. - if top_envar['TOP_START'] != '': - if os.path.isfile(top_envar['TOP_START']): - os.symlink(top_envar['TOP_START'], 'restart_trc.nc') - elif os.path.isfile('%s_0000.nc' % - top_envar['TOP_START']): - for fname in glob.glob('%s_????.nc' % - top_envar['TOP_START']): - proc_number = fname.split('.')[-2][-4:] - common.remove_file('restart_trc_%s.nc' % proc_number) - os.symlink(fname, 'restart_trc_%s.nc' % proc_number) - elif os.path.isfile('%s_0000.nc' % - top_envar['TOP_START'][:-3]): - for fname in glob.glob('%s_????.nc' % - top_envar['TOP_START'][-3:]): - proc_number = fname.split('.')[-2][-4:] - common.remove_file('restart_trc_%s.nc' % proc_number) - os.symlink(fname, 'restart_trc_%s.nc' % proc_number) + if top_envar["TOP_START"] != "": + if os.path.isfile(top_envar["TOP_START"]): + os.symlink(top_envar["TOP_START"], "restart_trc.nc") + elif os.path.isfile("%s_0000.nc" % top_envar["TOP_START"]): + for fname in glob.glob("%s_????.nc" % top_envar["TOP_START"]): + proc_number = fname.split(".")[-2][-4:] + common.remove_file("restart_trc_%s.nc" % proc_number) + os.symlink(fname, "restart_trc_%s.nc" % proc_number) + elif os.path.isfile("%s_0000.nc" % top_envar["TOP_START"][:-3]): + for 
fname in glob.glob("%s_????.nc" % top_envar["TOP_START"][:-3]):
+                    proc_number = fname.split(".")[-2][-4:]
+                    common.remove_file("restart_trc_%s.nc" % proc_number)
+                    os.symlink(fname, "restart_trc_%s.nc" % proc_number)
         else:
             # If there's no TOP restart we must be starting from climatology.
-            sys.stdout.write('[INFO] top_controller: TOP is starting from'
-                             ' climatology.\n\n')
+            sys.stdout.write(
+                "[INFO] top_controller: TOP is starting from" " climatology.\n\n"
+            )
 
         # Set the restart flag accordingly
         ln_restart = ".false."
@@ -252,27 +259,28 @@ def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid,
         # in all components.
         restart_ctl = 0
 
-
     elif os.path.isfile(latest_top_dump):
         # We have a valid restart file so we're not starting from climatology
         # This could be a new run or a continutaion run.
-        top_dump_time = re.findall(r'_(\d*)_restart_trc', latest_top_dump)[0]
+        top_dump_time = re.findall(r"_(\d*)_restart_trc", latest_top_dump)[0]
 
-        if verify_restart == 'True':
+        if verify_restart == "True":
             _verify_top_rst(nemo_dump_time, nemo_nproc, top_restart_files)
 
         if top_dump_time != nemo_dump_time:
-            sys.stderr.write('[FAIL] top_controller: Mismatch in TOP restart '
-                             'file date %s and NEMO restart file date %s\n'
-                             % (top_dump_time, nemo_dump_time))
+            sys.stderr.write(
+                "[FAIL] top_controller: Mismatch in TOP restart "
+                "file date %s and NEMO restart file date %s\n"
+                % (top_dump_time, nemo_dump_time)
+            )
             sys.exit(error.MISMATCH_RESTART_DATE_ERROR)
-
         # This could be a new run (the first NRUN of a cycle) or
         # a CRUN.
-        sys.stdout.write('[INFO] top_controller: Restart data avaliable in '
-                         'TOP restart directory %s. Restarting from previous '
-                         'task output\n\n'
-                         % top_rst)
+        sys.stdout.write(
+            "[INFO] top_controller: Restart data avaliable in "
+            "TOP restart directory %s. 
Restarting from previous " + "task output\n\n" % top_rst + ) top_init_dir = top_rst # For each PE, set up a link to the appropriate sub-domain @@ -281,25 +289,33 @@ def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid, for i_proc in range(nemo_nproc): tag = str(i_proc).zfill(4) - top_rst_source = '%s/%so_%s_restart_trc_%s.nc' % \ - (top_init_dir, runid, top_dump_time, tag) - top_rst_link = 'restart_trc_%s.nc' % tag + top_rst_source = "%s/%so_%s_restart_trc_%s.nc" % ( + top_init_dir, + runid, + top_dump_time, + tag, + ) + top_rst_link = "restart_trc_%s.nc" % tag common.remove_file(top_rst_link) if os.path.isfile(top_rst_source): os.symlink(top_rst_source, top_rst_link) top_restart_count += 1 if top_restart_count < 1: - sys.stdout.write('[INFO] No TOP sub-PE restarts found\n') + sys.stdout.write("[INFO] No TOP sub-PE restarts found\n") # We found no passive tracer restart sub-domain files let's # look for a full domain file. - top_rst_source = '%s/%so_%s_restart_trc.nc' % \ - (top_init_dir, runid, top_dump_time) + top_rst_source = "%s/%so_%s_restart_trc.nc" % ( + top_init_dir, + runid, + top_dump_time, + ) if os.path.isfile(top_rst_source): - sys.stdout.write('[INFO] Using rebuilt TOP restart '\ - 'file: %s\n' % top_rst_source) - top_rst_link = 'restart_trc.nc' + sys.stdout.write( + "[INFO] Using rebuilt TOP restart " "file: %s\n" % top_rst_source + ) + top_rst_link = "restart_trc.nc" common.remove_file(top_rst_link) os.symlink(top_rst_source, top_rst_link) @@ -309,8 +325,10 @@ def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid, # during a CRUN seems pretty slim. 
else: - sys.stderr.write('[FAIL] top_controller: No restart data available in ' - 'TOP restart directory:\n %s\n' % top_rst) + sys.stderr.write( + "[FAIL] top_controller: No restart data available in " + "TOP restart directory:\n %s\n" % top_rst + ) sys.exit(error.MISSING_MODEL_FILE_ERROR) # ln_trcdta appears to always be the opposite of ln_restart, so we @@ -324,74 +342,72 @@ def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid, elif ln_restart == ".false.": ln_trcdta = ".true." else: - sys.stderr.write('[FAIL] top_controller: invalid ln_restart value: ' - '%s\n' % ln_restart) + sys.stderr.write( + "[FAIL] top_controller: invalid ln_restart value: " "%s\n" % ln_restart + ) sys.exit(error.INVALID_LOCAL_ERROR) # Update the TOP namelist. - mod_topnl = common.ModNamelist(top_envar['TOP_NL']) - mod_topnl.var_val('ln_rsttr', ln_restart) - mod_topnl.var_val('nn_rsttr', restart_ctl) - mod_topnl.var_val('ln_trcdta', ln_trcdta) + mod_topnl = common.ModNamelist(top_envar["TOP_NL"]) + mod_topnl.var_val("ln_rsttr", ln_restart) + mod_topnl.var_val("nn_rsttr", restart_ctl) + mod_topnl.var_val("ln_trcdta", ln_trcdta) mod_topnl.replace() # Write details of our namelist settings - sys.stdout.write('[INFO] top_controller: Start of TOP namelist settings:\n') - sys.stdout.write('[INFO] Namelist file: %s \n' % top_envar['TOP_NL']) - sys.stdout.write('[INFO] ln_rsttr = %s \n' % ln_restart) - sys.stdout.write('[INFO] nn_rsttr = %d \n' % restart_ctl) - sys.stdout.write('[INFO] ln_trcdta = %s \n' % ln_trcdta) - sys.stdout.write('[INFO] top_controller: End of TOP namelist settings\n\n') + sys.stdout.write("[INFO] top_controller: Start of TOP namelist settings:\n") + sys.stdout.write("[INFO] Namelist file: %s \n" % top_envar["TOP_NL"]) + sys.stdout.write("[INFO] ln_rsttr = %s \n" % ln_restart) + sys.stdout.write("[INFO] nn_rsttr = %d \n" % restart_ctl) + sys.stdout.write("[INFO] ln_trcdta = %s \n" % ln_trcdta) + sys.stdout.write("[INFO] top_controller: End of TOP namelist 
settings\n\n") return top_envar + def _set_launcher_command(_): - ''' + """ Setup the launcher command for the executable - ''' - sys.stdout.write('[INFO] top_controller: MEDUSA/TOP uses the same launch ' - 'command as NEMO\n\n') - launch_cmd = '' + """ + sys.stdout.write( + "[INFO] top_controller: MEDUSA/TOP uses the same launch " "command as NEMO\n\n" + ) + launch_cmd = "" return launch_cmd + def _finalize_top_controller(): - ''' + """ Finalize the passive tracer set-up, copy the TOP namelist to the restart directory for the next cycle. - ''' - sys.stdout.write('[INFO] finalizing Ocean Passive Tracers \n') - sys.stdout.write('[INFO] running finalize in %s \n' % os.getcwd()) + """ + sys.stdout.write("[INFO] finalizing Ocean Passive Tracers \n") + sys.stdout.write("[INFO] running finalize in %s \n" % os.getcwd()) # Move the TOP namelist to the restart directory to allow the next cycle # to pick it up top_envar_fin = dr_env_lib.env_lib.LoadEnvar() top_envar_fin = dr_env_lib.env_lib.load_envar_from_definition( - top_envar_fin, dr_env_lib.ocn_cont_def.TOP_ENVIRONMENT_VARS_FINAL) - top_rst = _get_toprst_dir(top_envar_fin['TOP_NL']) - if os.path.isdir(top_rst) and \ - os.path.isfile(top_envar_fin['TOP_NL']): - shutil.copy(top_envar_fin['TOP_NL'], top_rst) - - -def run_controller(common_env, - restart_ctl, - nemo_nproc, - runid, - verify_restart, - nemo_dump_time, mode): - ''' + top_envar_fin, dr_env_lib.ocn_cont_def.TOP_ENVIRONMENT_VARS_FINAL + ) + top_rst = _get_toprst_dir(top_envar_fin["TOP_NL"]) + if os.path.isdir(top_rst) and os.path.isfile(top_envar_fin["TOP_NL"]): + shutil.copy(top_envar_fin["TOP_NL"], top_rst) + + +def run_controller( + common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time, mode +): + """ Run the passive tracer controller. 
- ''' - if mode == 'run_controller': - exe_envar = _setup_top_controller(common_env, - restart_ctl, - nemo_nproc, - runid, - verify_restart, - nemo_dump_time) + """ + if mode == "run_controller": + exe_envar = _setup_top_controller( + common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time + ) launch_cmd = _set_launcher_command(exe_envar) - elif mode == 'finalize': + elif mode == "finalize": _finalize_top_controller() exe_envar = None launch_cmd = None diff --git a/Coupled_Drivers/unittests/test_cpmip_utils.py b/Coupled_Drivers/unittests/test_cpmip_utils.py index c1a35a1..1e55d36 100644 --- a/Coupled_Drivers/unittests/test_cpmip_utils.py +++ b/Coupled_Drivers/unittests/test_cpmip_utils.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -11,9 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** -''' +""" import unittest + try: # mock is integrated into unittest as of Python 3.3 import unittest.mock as mock @@ -25,114 +26,123 @@ import os import cpmip_utils + class TestGetComponentResolution(unittest.TestCase): - ''' + """ Test the construction of component resolution from namelist - ''' - @mock.patch('cpmip_utils.shellout._exec_subprocess') + """ + + @mock.patch("cpmip_utils.shellout._exec_subprocess") def test_get_component_resolution(self, mock_subproc): - ''' + """ Test construction of total resolution - ''' - res_vars = ('i_dim', 'j_dim', 'k_dim') - subproc_return_values = [(0, 'i_dim=10'), - (0, 'j_dim=20'), - (0, 'k_dim=30')] + """ + res_vars = ("i_dim", "j_dim", "k_dim") + subproc_return_values = [(0, "i_dim=10"), (0, "j_dim=20"), (0, "k_dim=30")] mock_subproc.side_effect = subproc_return_values - self.assertEqual(cpmip_utils.get_component_resolution('NEMO_NL', - res_vars), - 6000) + self.assertEqual( + 
cpmip_utils.get_component_resolution("NEMO_NL", res_vars), 6000 + ) subproc_calls = [] for res_var in res_vars: - subproc_calls.append(mock.call('grep %s NEMO_NL' % res_var, - verbose=True)) + subproc_calls.append(mock.call("grep %s NEMO_NL" % res_var, verbose=True)) mock_subproc.assert_has_calls(subproc_calls) class TestGlobUsage(unittest.TestCase): - ''' + """ Test the determination of disk usage using globs - ''' - @mock.patch('cpmip_utils.glob.glob', return_value=[]) + """ + + @mock.patch("cpmip_utils.glob.glob", return_value=[]) def test_get_glob_usage_nofile(self, mock_glob): - ''' + """ Test glob usage if there are no files - ''' - expected_output = '[WARN] Attepting to find the size of files' \ - ' described by glob expression a*b*. There are' \ - ' no files found' - with mock.patch('sys.stderr', new=io.StringIO()) as patch_output: - dusize = cpmip_utils.get_glob_usage('a*b*') + """ + expected_output = ( + "[WARN] Attepting to find the size of files" + " described by glob expression a*b*. 
There are" + " no files found" + ) + with mock.patch("sys.stderr", new=io.StringIO()) as patch_output: + dusize = cpmip_utils.get_glob_usage("a*b*") self.assertEqual(dusize, 0.0) self.assertEqual(patch_output.getvalue(), expected_output) - @mock.patch('cpmip_utils.glob.glob', return_value=['file1', 'file2']) - @mock.patch('cpmip_utils.shellout._exec_subprocess', - return_value=(0, '\n128 file1\n128 file2\n256 total\n')) + @mock.patch("cpmip_utils.glob.glob", return_value=["file1", "file2"]) + @mock.patch( + "cpmip_utils.shellout._exec_subprocess", + return_value=(0, "\n128 file1\n128 file2\n256 total\n"), + ) def test_get_glob_usage(self, mock_subproc, mock_glob): - ''' + """ Test file size from glob - ''' - self.assertEqual(cpmip_utils.get_glob_usage('a*b*'), - 256.0) + """ + self.assertEqual(cpmip_utils.get_glob_usage("a*b*"), 256.0) + class TestNCDFOutput(unittest.TestCase): - ''' + """ Test measurment of NCDF file sizes - ''' - @mock.patch('cpmip_utils.os.listdir', return_value=[]) - @mock.patch('cpmip_utils.shellout._exec_subprocess', - return_value=(1, None)) + """ + + @mock.patch("cpmip_utils.os.listdir", return_value=[]) + @mock.patch("cpmip_utils.shellout._exec_subprocess", return_value=(1, None)) def test_no_files_output(self, mock_subproc, mock_ncdffiles): - ''' + """ Test what happens if we cant find any files - ''' + """ self.assertEqual(cpmip_utils.get_workdir_netcdf_output(), -1.0) - @mock.patch('cpmip_utils.os.listdir', return_value=['file1.nc', - 'file2.nc']) - @mock.patch('cpmip_utils.shellout._exec_subprocess', - return_value=(0, '\n128 file1.nc\n128 file2.nc\n256 total\n')) + @mock.patch("cpmip_utils.os.listdir", return_value=["file1.nc", "file2.nc"]) + @mock.patch( + "cpmip_utils.shellout._exec_subprocess", + return_value=(0, "\n128 file1.nc\n128 file2.nc\n256 total\n"), + ) def test_files_output(self, mock_subproc, mock_ncdffiles): - ''' + """ Check to see if the function can get the correct total value - ''' + """ 
self.assertEqual(cpmip_utils.get_workdir_netcdf_output(), 256.0) class TestTimeFunctions(unittest.TestCase): - ''' + """ Test the time related functions - ''' + """ + def test_seconds_to_days_halfday(self): - ''' + """ Test half a day of seconds - ''' + """ self.assertEqual(cpmip_utils.seconds_to_days(43200.0), 0.5) def test_seconds_to_days_twodays(self): - ''' + """ Test two full days of seconds - ''' + """ self.assertEqual(cpmip_utils.seconds_to_days(172800.0), 2.0) def test_tasklength_to_years(self): - ''' + """ Test tasklength to years, pass in all ones, to check everything - ''' - self.assertEqual(cpmip_utils.tasklength_to_years( - '0001,01,01,01,01,01'), 1.0362288130144035) + """ + self.assertEqual( + cpmip_utils.tasklength_to_years("0001,01,01,01,01,01"), 1.0362288130144035 + ) + class TestPBSJobFileXc40Case(unittest.TestCase): - ''' + """ Test the reading of an example PBS job file - ''' + """ + def setUp(self): - ''' + """ Create an example jobfile - ''' - self.jobfile_name = 'test_jobfile' - example_input = ''' + """ + self.jobfile_name = "test_jobfile" + example_input = """ # DIRECTIVES: #PBS -N coupled.19600101T0000Z.mi-ba962_compiler_upgrade_877 #PBS -o cylc-run/mi-ba962_compiler_upgrade_877/log/job/19600101T0000Z/coupled/01/job.out @@ -143,45 +153,48 @@ def setUp(self): #PBS -q high # N.B. CYLC_DIR has been updated on the remote host export CYLC_DIR='/common/fcm/cylc-7.8.6' - ''' - with open(self.jobfile_name, 'w') as test_jobfile: + """ + with open(self.jobfile_name, "w") as test_jobfile: test_jobfile.write(example_input) def tearDown(self): - ''' + """ Remove the example job file at end of test - ''' + """ try: os.remove(self.jobfile_name) except OSError: pass def test_jobfile(self): - ''' + """ Test the retrival of the pbs -l directives is correct. 
As this is a double underscore function, we need to apply the name mangling rule - ''' - expected_result = {'walltime': '02:30:00', - 'select': '36', - 'subproject': 'ukesmdev', - 'funding': 'hccp', - 'coretype': 'broadwell'} + """ + expected_result = { + "walltime": "02:30:00", + "select": "36", + "subproject": "ukesmdev", + "funding": "hccp", + "coretype": "broadwell", + } result = cpmip_utils.get_jobfile_info(self.jobfile_name) self.assertEqual(result, expected_result) class TestPBSJobFileExCase(unittest.TestCase): - ''' + """ Test the reading of an example PBS job file - ''' + """ + def setUp(self): - ''' + """ Create example jobfiles - ''' + """ # A fully fledged jobfile - self.jobfile_name = 'test_jobfile' - example_input = '''# DIRECTIVES: + self.jobfile_name = "test_jobfile" + example_input = """# DIRECTIVES: #PBS -N coupled.19780901T0000Z.mi-bd155_add_cpmip_metrics #PBS -o cylc-run/mi-bd155_add_cpmip_metrics/log/job/19780901T0000Z/coupled/01/job.out #PBS -e cylc-run/mi-bd155_add_cpmip_metrics/log/job/19780901T0000Z/coupled/01/job.err @@ -189,20 +202,20 @@ def setUp(self): #PBS -q normal #PBS -l select=2:ncpus=256:mpiprocs=90+5:ncpus=256:mpiprocs=120+1:ncpus=256:mpiprocs=6 # N.B. 
CYLC_DIR has been updated on the remote host - ''' - with open(self.jobfile_name, 'w') as test_jobfile: + """ + with open(self.jobfile_name, "w") as test_jobfile: test_jobfile.write(example_input) # A jobfile with one model - self.onemodel_jobfile_name = 'test_onemodel_jobfile' - example_input='''#PBS -l select=24:ncpus=256''' - with open(self.onemodel_jobfile_name, 'w') as test_jobfile: + self.onemodel_jobfile_name = "test_onemodel_jobfile" + example_input = """#PBS -l select=24:ncpus=256""" + with open(self.onemodel_jobfile_name, "w") as test_jobfile: test_jobfile.write(example_input) def tearDown(self): - ''' + """ Remove the example job file at end of test - ''' + """ jobfiles = (self.jobfile_name, self.onemodel_jobfile_name) for jobfile in jobfiles: try: @@ -211,58 +224,55 @@ def tearDown(self): pass def test_jobfile(self): - ''' + """ Test the retrival of the pbs -l select directive for nodes for each model in MPMD mode is correct - ''' - expected_result = ([2, 5, 1], 'milan') + """ + expected_result = ([2, 5, 1], "milan") result = cpmip_utils.get_select_nodes(self.jobfile_name) self.assertEqual(result, expected_result) def test_jobfile_one_model(self): - ''' + """ Test the correct retrieval for a single model in the -l select directive - ''' - expected_result = ([24], 'milan') + """ + expected_result = ([24], "milan") result = cpmip_utils.get_select_nodes(self.onemodel_jobfile_name) self.assertEqual(result, expected_result) + class TestIncrementDump(unittest.TestCase): - ''' + """ Test the increment of the dump date to the end of cycle - ''' + """ + def test_one_day_increment(self): - ''' + """ Test increment of one day - ''' - self.assertEqual(cpmip_utils.increment_dump('20200115', 1, 'd'), - '20200116') + """ + self.assertEqual(cpmip_utils.increment_dump("20200115", 1, "d"), "20200116") def test_thirty_five_day_increment(self): - ''' + """ Test increment of 35 days, to ensure that the month rolls over - ''' - 
self.assertEqual(cpmip_utils.increment_dump('20200101', 35, 'd'), - '20200206') + """ + self.assertEqual(cpmip_utils.increment_dump("20200101", 35, "d"), "20200206") def test_one_month_increment(self): - ''' + """ Test increment of one month - from not the first of the month testing a month rollover - ''' - self.assertEqual(cpmip_utils.increment_dump('20200115', 1, 'm'), - '20200215') + """ + self.assertEqual(cpmip_utils.increment_dump("20200115", 1, "m"), "20200215") def test_three_month_increment(self): - ''' + """ Test increment of three months - ''' - self.assertEqual(cpmip_utils.increment_dump('20200115', 3, 'M'), - '20200415') + """ + self.assertEqual(cpmip_utils.increment_dump("20200115", 3, "M"), "20200415") def test_thirteen_month_increment(self): - ''' + """ Test increment of three months - testing a month and a year rollover - ''' - self.assertEqual(cpmip_utils.increment_dump('20200115', 13, 'M'), - '20210215') + """ + self.assertEqual(cpmip_utils.increment_dump("20200115", 13, "M"), "20210215") diff --git a/Coupled_Drivers/unittests/test_cpmip_xios.py b/Coupled_Drivers/unittests/test_cpmip_xios.py index 58df7c7..b8855f0 100644 --- a/Coupled_Drivers/unittests/test_cpmip_xios.py +++ b/Coupled_Drivers/unittests/test_cpmip_xios.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. 
@@ -11,9 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** -''' +""" import unittest + try: # mock is integrated into unittest as of Python 3.3 import unittest.mock as mock @@ -25,111 +26,134 @@ import os import cpmip_xios + class TestDataMetricSetupNemo(unittest.TestCase): - ''' + """ Check the setting up of XIOS for NEMO data metrics - ''' + """ + def setUp(self): - ''' + """ Create an iodef file for test - ''' - self.xml_file_name = 'iodef.xml' - input_contents = 'iodef line 1\n' \ - 'variable id="using_server"\n' \ - '\t false\n' \ - 'iodef line 4\n' - with open(self.xml_file_name, 'w') as iodef_fh: + """ + self.xml_file_name = "iodef.xml" + input_contents = ( + "iodef line 1\n" + 'variable id="using_server"\n' + '\t false\n' + "iodef line 4\n" + ) + with open(self.xml_file_name, "w") as iodef_fh: iodef_fh.write(input_contents) def tearDown(self): - ''' + """ Remove the iodef file at the end of test - ''' + """ try: os.remove(self.xml_file_name) except FileNotFoundError: pass def test_update_iodef(self): - ''' + """ Test the iodef file is updated correctly - ''' - expected_output = 'iodef line 1\n' \ - 'variable id="using_server"\n' \ - '\t true\n' \ - 'iodef line 4\n' + """ + expected_output = ( + "iodef line 1\n" + 'variable id="using_server"\n' + '\t true\n' + "iodef line 4\n" + ) cpmip_xios.data_metrics_setup_nemo() # check contents of new iodef.xml file - with open(self.xml_file_name, 'r') as new_iodef_fh: + with open(self.xml_file_name, "r") as new_iodef_fh: new_iodef_contents = new_iodef_fh.read() self.assertEqual(new_iodef_contents, expected_output) - self.assertFalse('iodef_out.xml' in os.listdir('.')) + self.assertFalse("iodef_out.xml" in os.listdir(".")) class TestMeasureXIOSClient(unittest.TestCase): - ''' + """ Test measurement of timings from XIOS client files - ''' - @mock.patch('cpmip_xios.os.listdir', return_value=[]) + """ + + 
@mock.patch("cpmip_xios.os.listdir", return_value=[]) def test_no_files(self, mock_listdir): - ''' + """ Test that the correct output and error messages are produced when no XIOS client files can be found - ''' - expected_output = '[INFO] Measured timings for (0/0) XIOS clients\n' - expected_error = '[WARN] Unable to find any XIOS client output files\n' - - with mock.patch('sys.stdout', new=io.StringIO()) as patch_out: - with mock.patch('sys.stderr', new=io.StringIO()) as patch_err: - self.assertEqual(cpmip_xios.measure_xios_client_times(), - (0.0, 0.0)) + """ + expected_output = "[INFO] Measured timings for (0/0) XIOS clients\n" + expected_error = "[WARN] Unable to find any XIOS client output files\n" + + with mock.patch("sys.stdout", new=io.StringIO()) as patch_out: + with mock.patch("sys.stderr", new=io.StringIO()) as patch_err: + self.assertEqual(cpmip_xios.measure_xios_client_times(), (0.0, 0.0)) self.assertEqual(patch_out.getvalue(), expected_output) self.assertEqual(patch_err.getvalue(), expected_error) - @mock.patch('cpmip_xios.os.listdir', return_value= - ['xios_client0.out', - 'xios_client1.out', - 'xios_client2.out']) - @mock.patch('cpmip_xios.shellout._exec_subprocess') + @mock.patch( + "cpmip_xios.os.listdir", + return_value=["xios_client0.out", "xios_client1.out", "xios_client2.out"], + ) + @mock.patch("cpmip_xios.shellout._exec_subprocess") def test_three_files(self, mock_exec_subproc, mock_listdir): - ''' + """ Test that three files with no timeout give mean and max - ''' + """ mock_exec_subproc.side_effect = [ - (0, '-> report : Performance report : total time spent for XIOS' - ' : 100.0 s'), - (0, '-> report : Performance report : total time spent for XIOS' - ' : 10.0 s'), - (0, '-> report : Performance report : total time spent for XIOS' - ' : 1000.0 s')] + ( + 0, + "-> report : Performance report : total time spent for XIOS" + " : 100.0 s", + ), + ( + 0, + "-> report : Performance report : total time spent for XIOS" + " : 10.0 s", + ), + ( + 0, 
+ "-> report : Performance report : total time spent for XIOS" + " : 1000.0 s", + ), + ] expected_output = (370.0, 1000.0) - with mock.patch('sys.stdout', new=io.StringIO()) as patch_output: - self.assertEqual(cpmip_xios.measure_xios_client_times(), - expected_output) - self.assertEqual(patch_output.getvalue(), - '[INFO] Measured timings for (3/3) XIOS clients\n') - - @mock.patch('cpmip_xios.os.listdir', return_value= - ['xios_client0.out', - 'xios_client1.out', - 'xios_client2.out']) - @mock.patch('cpmip_xios.shellout._exec_subprocess') + with mock.patch("sys.stdout", new=io.StringIO()) as patch_output: + self.assertEqual(cpmip_xios.measure_xios_client_times(), expected_output) + self.assertEqual( + patch_output.getvalue(), "[INFO] Measured timings for (3/3) XIOS clients\n" + ) + + @mock.patch( + "cpmip_xios.os.listdir", + return_value=["xios_client0.out", "xios_client1.out", "xios_client2.out"], + ) + @mock.patch("cpmip_xios.shellout._exec_subprocess") def test_one_timeout(self, mock_exec_subproc, mock_listdir): - ''' + """ Test what happens if there is a timeout - ''' + """ mock_exec_subproc.side_effect = [ - (0, '-> report : Performance report : total time spent for XIOS' - ' : 100.0 s'), + ( + 0, + "-> report : Performance report : total time spent for XIOS" + " : 100.0 s", + ), (1, None), - (0, '-> report : Performance report : total time spent for XIOS' - ' : 1000.0 s')] + ( + 0, + "-> report : Performance report : total time spent for XIOS" + " : 1000.0 s", + ), + ] expected_output = (550.0, 1000.0) - with mock.patch('sys.stdout', new=io.StringIO()) as patch_output: - self.assertEqual(cpmip_xios.measure_xios_client_times(), - expected_output) - self.assertEqual(patch_output.getvalue(), - '[INFO] Measured timings for (2/3) XIOS clients\n') + with mock.patch("sys.stdout", new=io.StringIO()) as patch_output: + self.assertEqual(cpmip_xios.measure_xios_client_times(), expected_output) + self.assertEqual( + patch_output.getvalue(), "[INFO] Measured timings for 
(2/3) XIOS clients\n" + ) diff --git a/Coupled_Drivers/unittests/test_rivers_driver.py b/Coupled_Drivers/unittests/test_rivers_driver.py index 2f25233..1d935b2 100644 --- a/Coupled_Drivers/unittests/test_rivers_driver.py +++ b/Coupled_Drivers/unittests/test_rivers_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2025 Met Office. All rights reserved. @@ -11,206 +11,209 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** -''' +""" import sys import unittest import unittest.mock as mock import rivers_driver -COMMON_ENV = { 'CALENDAR': 'gregorian', - 'TASKSTART': '1979,09,01,00,0', - 'TASKLENGTH': '1,4,10,0,0', } +COMMON_ENV = { + "CALENDAR": "gregorian", + "TASKSTART": "1979,09,01,00,0", + "TASKLENGTH": "1,4,10,0,0", +} nml = rivers_driver.dr_env_lib.rivers_def.RIVERS_ENVIRONMENT_VARS_INITIAL -RIVER_ENV = { k: nml[k].get('default_val', None) for k in nml.keys()} +RIVER_ENV = {k: nml[k].get("default_val", None) for k in nml.keys()} class TestPrivateMethods(unittest.TestCase): - ''' + """ Test the private methods of the JULES river standalone driver - ''' + """ - @mock.patch('rivers_driver.shellout._exec_subprocess', return_value=[0, 'output']) + @mock.patch("rivers_driver.shellout._exec_subprocess", return_value=[0, "output"]) def test_setup_dates(self, mock_exec): - ''' Test the _setup_dates method ''' + """Test the _setup_dates method""" start, end = rivers_driver._setup_dates(COMMON_ENV) - self.assertIn(mock.call('isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S"'), - mock_exec.mock_calls) - self.assertIn(mock.call('isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S" -s P1Y4M10DT0H0M --calendar gregorian'), - mock_exec.mock_calls) + self.assertIn( + mock.call('isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S"'), + mock_exec.mock_calls, + ) + self.assertIn( + mock.call( + 'isodatetime 
19790901T0000Z -f "%Y-%m-%d %H:%M:%S" -s P1Y4M10DT0H0M --calendar gregorian' + ), + mock_exec.mock_calls, + ) self.assertEqual(len(mock_exec.mock_calls), 2) - @mock.patch('rivers_driver.common') - @mock.patch('rivers_driver.shellout') - @mock.patch('rivers_driver.os.path.isfile') - @mock.patch('rivers_driver.pathlib') + @mock.patch("rivers_driver.common") + @mock.patch("rivers_driver.shellout") + @mock.patch("rivers_driver.os.path.isfile") + @mock.patch("rivers_driver.pathlib") def test_update_river_nl(self, mock_lib, mock_path, mock_shellout, mock_common): - ''' Test the _update_river_nl method ''' + """Test the _update_river_nl method""" mock_shellout._exec_subprocess.returnvalue = (0, 'dir="this/path/"') - rivers_driver._update_river_nl(RIVER_ENV, - '19790901T0000Z', '19810121T0000Z') + rivers_driver._update_river_nl(RIVER_ENV, "19790901T0000Z", "19810121T0000Z") path_calls = mock_path.mock_calls - self.assertIn(mock.call('output.nml'), path_calls) - self.assertIn(mock.call('timesteps.nml'), path_calls) + self.assertIn(mock.call("output.nml"), path_calls) + self.assertIn(mock.call("timesteps.nml"), path_calls) nml_calls = mock_common.ModNamelist.mock_calls - self.assertIn(mock.call('output.nml'), nml_calls) - self.assertIn(mock.call('timesteps.nml'), nml_calls) - self.assertIn(mock.call().var_val('output_start', '19790901T0000Z'), - nml_calls) + self.assertIn(mock.call("output.nml"), nml_calls) + self.assertIn(mock.call("timesteps.nml"), nml_calls) + self.assertIn(mock.call().var_val("output_start", "19790901T0000Z"), nml_calls) self.assertIn(mock.call().replace(), nml_calls) - self.assertIn(mock.call().var_val('main_run_start', '19790901T0000Z'), - nml_calls) - self.assertIn(mock.call().var_val('main_run_end', '19810121T0000Z'), - nml_calls) + self.assertIn( + mock.call().var_val("main_run_start", "19790901T0000Z"), nml_calls + ) + self.assertIn(mock.call().var_val("main_run_end", "19810121T0000Z"), nml_calls) 
mock_shellout._exec_subprocess.assert_called_once_with( - 'grep output_dir output.nml' + "grep output_dir output.nml" ) - mock_lib.Path.assert_called_once_with('this/path') - mock_lib.Path().mkdir.assert_called_once_with(parents=True, - exist_ok=True) + mock_lib.Path.assert_called_once_with("this/path") + mock_lib.Path().mkdir.assert_called_once_with(parents=True, exist_ok=True) - @mock.patch('rivers_driver.os.path.isfile') - @mock.patch('rivers_driver.f90nml.read') + @mock.patch("rivers_driver.os.path.isfile") + @mock.patch("rivers_driver.f90nml.read") def test_get_river_resol(self, mock_read, mock_path): - '''Test the _get_river_resol function''' - mock_read.return_value = {'jules_input_grid': {'nx': 10, 'ny': 20}} + """Test the _get_river_resol function""" + mock_read.return_value = {"jules_input_grid": {"nx": 10, "ny": 20}} - out_info = rivers_driver._get_river_resol('riv_nl', {}) - self.assertIn(mock.call('riv_nl'), mock_path.mock_calls) - self.assertEqual(out_info, {'RIV_resol': [10, 20]}) + out_info = rivers_driver._get_river_resol("riv_nl", {}) + self.assertIn(mock.call("riv_nl"), mock_path.mock_calls) + self.assertEqual(out_info, {"RIV_resol": [10, 20]}) - @mock.patch('rivers_driver.os.path') - @mock.patch('rivers_driver._get_river_resol') - @mock.patch('rivers_driver.f90nml') + @mock.patch("rivers_driver.os.path") + @mock.patch("rivers_driver._get_river_resol") + @mock.patch("rivers_driver.f90nml") def test_sent_coupling_fields(self, mock_nml, mock_res, mock_path): - '''Run info should pass through, and should also return None as a - second value''' - run_info = {'exec_list': ['toyriv'], - 'river_nl': 'rivers_coupling.nml'} + """Run info should pass through, and should also return None as a + second value""" + run_info = {"exec_list": ["toyriv"], "river_nl": "rivers_coupling.nml"} mock_res.return_value = run_info mock_nml.read.return_value = { - 'oasis_riv_send_nml': {'oasis_riv_send': 'fields'} + "oasis_riv_send_nml": {"oasis_riv_send": "fields"} } 
mock_namcpl = mock.Mock(add_to_cpl_list=mock.MagicMock()) - mock_namcpl.add_to_cpl_list.return_value = 'send_list' + mock_namcpl.add_to_cpl_list.return_value = "send_list" - with mock.patch.dict(sys.modules, {'write_namcouple': mock_namcpl}): + with mock.patch.dict(sys.modules, {"write_namcouple": mock_namcpl}): rtn = rivers_driver._sent_coupling_fields(RIVER_ENV, {}) - self.assertEqual(rtn, (run_info, 'send_list')) + self.assertEqual(rtn, (run_info, "send_list")) path_calls = mock_path.mock_calls - self.assertIn(mock.call.exists('OASIS_RIV_SEND'), path_calls) - self.assertIn(mock.call.isfile('rivers_coupling.nml'), path_calls) - mock_res.assert_called_once_with('model_grid.nml', - {'exec_list': ['toyriv']}) - mock_nml.read.assert_called_once_with('OASIS_RIV_SEND') - mock_namcpl.add_to_cpl_list.assert_called_once_with( - 'RIV', False, 0, 'fields' - ) + self.assertIn(mock.call.exists("OASIS_RIV_SEND"), path_calls) + self.assertIn(mock.call.isfile("rivers_coupling.nml"), path_calls) + mock_res.assert_called_once_with("model_grid.nml", {"exec_list": ["toyriv"]}) + mock_nml.read.assert_called_once_with("OASIS_RIV_SEND") + mock_namcpl.add_to_cpl_list.assert_called_once_with("RIV", False, 0, "fields") class TestSetupExecutable(unittest.TestCase): - ''' + """ Test the loading of environment variables and file copies - ''' - @mock.patch('rivers_driver.dr_env_lib.env_lib') - @mock.patch('rivers_driver.common') - @mock.patch('rivers_driver.os.symlink') - @mock.patch('rivers_driver._setup_dates', return_value=('start', 'end')) - @mock.patch('rivers_driver._update_river_nl') - def test_setup_executable(self, mock_upd, mock_date, - mock_link, mock_cmn, mock_env): - '''Test the _setup_executable function''' + """ + + @mock.patch("rivers_driver.dr_env_lib.env_lib") + @mock.patch("rivers_driver.common") + @mock.patch("rivers_driver.os.symlink") + @mock.patch("rivers_driver._setup_dates", return_value=("start", "end")) + @mock.patch("rivers_driver._update_river_nl") + def 
test_setup_executable(self, mock_upd, mock_date, mock_link, mock_cmn, mock_env): + """Test the _setup_executable function""" rivers_envar = RIVER_ENV - rivers_envar['RIVER_EXEC'] = 'executable' + rivers_envar["RIVER_EXEC"] = "executable" mock_env.load_envar_from_definition.return_value = rivers_envar return_rivers_envar = rivers_driver._setup_executable(COMMON_ENV) mock_env.LoadEnvar.assert_called_once_with() - mock_cmn.remove_file.assert_called_once_with('river.exe') - mock_link.assert_called_once_with('executable', 'river.exe') + mock_cmn.remove_file.assert_called_once_with("river.exe") + mock_link.assert_called_once_with("executable", "river.exe") mock_date.assert_called_once_with(COMMON_ENV) - mock_upd.assert_called_once_with(rivers_envar, 'start', 'end') + mock_upd.assert_called_once_with(rivers_envar, "start", "end") self.assertEqual(return_rivers_envar, rivers_envar) def test_launcher_command(self): - '''Test the _set_launcher_command function''' + """Test the _set_launcher_command function""" env = RIVER_ENV - env['ROSE_LAUNCHER_PREOPTS_RIVER'] = 'river pre-opts' + env["ROSE_LAUNCHER_PREOPTS_RIVER"] = "river pre-opts" cmd = rivers_driver._set_launcher_command(env) - self.assertEqual(cmd, 'river pre-opts ./river.exe') - self.assertEqual(env['ROSE_LAUNCHER_PREOPTS_RIVER'], - "'river pre-opts'") + self.assertEqual(cmd, "river pre-opts ./river.exe") + self.assertEqual(env["ROSE_LAUNCHER_PREOPTS_RIVER"], "'river pre-opts'") class TestFinalizeExecutable(unittest.TestCase): - ''' + """ Test the finalizing of the executable - ''' + """ + def test_finalize_executable(self): - '''Test the finalize. It does nothing at the moment''' + """Test the finalize. 
It does nothing at the moment""" self.assertIsNone(rivers_driver._finalize_executable()) class TestRunDriver(unittest.TestCase): - ''' + """ Test the interface to run the driver - ''' - @mock.patch('rivers_driver._setup_executable') - @mock.patch('rivers_driver._finalize_executable') + """ + + @mock.patch("rivers_driver._setup_executable") + @mock.patch("rivers_driver._finalize_executable") def test_run_driver_finalize(self, mock_finalize, mock_setup): - '''Test finalise mode''' - rvalue = rivers_driver.run_driver('common_env', 'finalize', 'run_info') - self.assertEqual(rvalue, (None, None, 'run_info', None)) + """Test finalise mode""" + rvalue = rivers_driver.run_driver("common_env", "finalize", "run_info") + self.assertEqual(rvalue, (None, None, "run_info", None)) mock_setup.assert_not_called() mock_finalize.assert_called_once_with() - @mock.patch('rivers_driver._setup_executable') - @mock.patch('rivers_driver._set_launcher_command') - @mock.patch('rivers_driver._sent_coupling_fields') - @mock.patch('rivers_driver._finalize_executable') + @mock.patch("rivers_driver._setup_executable") + @mock.patch("rivers_driver._set_launcher_command") + @mock.patch("rivers_driver._sent_coupling_fields") + @mock.patch("rivers_driver._finalize_executable") def test_run_driver_l_namcouple( - self, mock_finalize, mock_namc, mock_launcher_cmd, mock_setup + self, mock_finalize, mock_namc, mock_launcher_cmd, mock_setup ): - '''Test run mode with l_namcouple set in run info''' - run_info = {'l_namcouple': True} - common_env = {'ROSE_LAUNCHER': 'launcher'} - mock_setup.return_value = 'exe_envar' - mock_launcher_cmd.return_value = 'launch_cmd' - rvalue = rivers_driver.run_driver(common_env, 'run_driver', run_info) - self.assertEqual(rvalue, ('exe_envar', 'launch_cmd', run_info, None)) + """Test run mode with l_namcouple set in run info""" + run_info = {"l_namcouple": True} + common_env = {"ROSE_LAUNCHER": "launcher"} + mock_setup.return_value = "exe_envar" + 
mock_launcher_cmd.return_value = "launch_cmd" + rvalue = rivers_driver.run_driver(common_env, "run_driver", run_info) + self.assertEqual(rvalue, ("exe_envar", "launch_cmd", run_info, None)) mock_setup.assert_called_once_with(common_env) - mock_launcher_cmd.assert_called_once_with('exe_envar') + mock_launcher_cmd.assert_called_once_with("exe_envar") mock_namc.assert_not_called() mock_finalize.assert_not_called() - @mock.patch('rivers_driver._setup_executable') - @mock.patch('rivers_driver._set_launcher_command') - @mock.patch('rivers_driver._sent_coupling_fields') - @mock.patch('rivers_driver._finalize_executable') + @mock.patch("rivers_driver._setup_executable") + @mock.patch("rivers_driver._set_launcher_command") + @mock.patch("rivers_driver._sent_coupling_fields") + @mock.patch("rivers_driver._finalize_executable") def test_run_driver_no_l_namcouple( - self, mock_finalize, mock_namc, mock_launcher_cmd, mock_setup + self, mock_finalize, mock_namc, mock_launcher_cmd, mock_setup ): - '''Test run mode with l_namcouple set to False in run info''' - run_info = {'l_namcouple': False} - common_env = {'ROSE_LAUNCHER': 'launcher'} - mock_setup.return_value = 'exe_envar' - mock_launcher_cmd.return_value = 'launch_cmd' - mock_namc.return_value = ('run_info', 'model_snd_list') + """Test run mode with l_namcouple set to False in run info""" + run_info = {"l_namcouple": False} + common_env = {"ROSE_LAUNCHER": "launcher"} + mock_setup.return_value = "exe_envar" + mock_launcher_cmd.return_value = "launch_cmd" + mock_namc.return_value = ("run_info", "model_snd_list") - rvalue = rivers_driver.run_driver(common_env, 'run_driver', run_info) + rvalue = rivers_driver.run_driver(common_env, "run_driver", run_info) self.assertEqual( - rvalue, ('exe_envar', 'launch_cmd', 'run_info', 'model_snd_list')) + rvalue, ("exe_envar", "launch_cmd", "run_info", "model_snd_list") + ) mock_setup.assert_called_once_with(common_env) - mock_launcher_cmd.assert_called_once_with('exe_envar') - 
mock_namc.assert_called_once_with('exe_envar', run_info) + mock_launcher_cmd.assert_called_once_with("exe_envar") + mock_namc.assert_called_once_with("exe_envar", run_info) mock_finalize.assert_not_called() diff --git a/Coupled_Drivers/write_namcouple.py b/Coupled_Drivers/write_namcouple.py index 4328497..621dcbb 100644 --- a/Coupled_Drivers/write_namcouple.py +++ b/Coupled_Drivers/write_namcouple.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions @@ -15,7 +15,7 @@ DESCRIPTION Write namcouple file at run time. -''' +""" import sys import itertools import common @@ -27,23 +27,39 @@ import write_namcouple_header # Dictionary containing the RMP mappings -RMP_MAPPING = {'Bc':'BICUBIC', - 'Bi':'BILINEA', - 'CD':'CONSERV_DESTAREA', - 'CF':'CONSERV_FRACAREA', - '0D':'OneVal', - '1D':'OneD', - 'NB':'nomask_BILINEA', - 'remove':'remove'} - -class NamcoupleEntry(): - ''' +RMP_MAPPING = { + "Bc": "BICUBIC", + "Bi": "BILINEA", + "CD": "CONSERV_DESTAREA", + "CF": "CONSERV_FRACAREA", + "0D": "OneVal", + "1D": "OneD", + "NB": "nomask_BILINEA", + "remove": "remove", +} + + +class NamcoupleEntry: + """ Container to hold the information for one namcouple entry - ''' + """ - def __init__(self, name_out, field_id, grid, origin, dest, nlev, l_soil, - mapping, mapping_type, weight, l_hybrid, n_cpl_freq, - override_cpl_freq): + def __init__( + self, + name_out, + field_id, + grid, + origin, + dest, + nlev, + l_soil, + mapping, + mapping_type, + weight, + l_hybrid, + n_cpl_freq, + override_cpl_freq, + ): self.name_out = name_out self.field_id = field_id self.grid = grid @@ -59,148 +75,185 @@ def __init__(self, name_out, field_id, grid, origin, dest, nlev, l_soil, self.override_cpl_freq = override_cpl_freq def __repr__(self): - return repr((self.name_out, self.field_id, self.grid, 
self.origin, - self.dest, self.nlev, self.l_soil, self.mapping, - self.mapping_type, self.weight, self.l_hybrid, - self.n_cpl_freq, self.override_cpl_freq)) + return repr( + ( + self.name_out, + self.field_id, + self.grid, + self.origin, + self.dest, + self.nlev, + self.l_soil, + self.mapping, + self.mapping_type, + self.weight, + self.l_hybrid, + self.n_cpl_freq, + self.override_cpl_freq, + ) + ) + def _print_run_info(run_info): - ''' + """ Print the information in run_info - ''' - sys.stdout.write('[INFO] Display the contents of run_info:\n') - sys.stdout.write('[INFO] -------- Resolutions -------- \n') - if 'ATM_grid' in run_info: - sys.stdout.write('[INFO] Atmosphere: %s (%d, %d)\n' % - (run_info['ATM_grid'], run_info['ATM_resol'][0], - run_info['ATM_resol'][1])) - if 'JNR_grid' in run_info: - sys.stdout.write('[INFO] Junior atmosphere: %s (%d, %d)\n' % - (run_info['JNR_grid'], run_info['JNR_resol'][0], - run_info['JNR_resol'][1])) - if 'OCN_grid' in run_info: + """ + sys.stdout.write("[INFO] Display the contents of run_info:\n") + sys.stdout.write("[INFO] -------- Resolutions -------- \n") + if "ATM_grid" in run_info: + sys.stdout.write( + "[INFO] Atmosphere: %s (%d, %d)\n" + % (run_info["ATM_grid"], run_info["ATM_resol"][0], run_info["ATM_resol"][1]) + ) + if "JNR_grid" in run_info: + sys.stdout.write( + "[INFO] Junior atmosphere: %s (%d, %d)\n" + % (run_info["JNR_grid"], run_info["JNR_resol"][0], run_info["JNR_resol"][1]) + ) + if "OCN_grid" in run_info: # If running ATM<->JNR coupling we can have an ocean resolution # without an ocean. 
- if 'OCN_resol' in run_info: - sys.stdout.write('[INFO] Ocean: %s (%d, %d)' % - (run_info['OCN_grid'], run_info['OCN_resol'][0], - run_info['OCN_resol'][1])) + if "OCN_resol" in run_info: + sys.stdout.write( + "[INFO] Ocean: %s (%d, %d)" + % ( + run_info["OCN_grid"], + run_info["OCN_resol"][0], + run_info["OCN_resol"][1], + ) + ) else: - sys.stdout.write('[INFO] Ocean: %s' % - run_info['OCN_grid']) - if 'NEMO_VERSION' in run_info: - sys.stdout.write(' (NEMO version: %s)\n' % - run_info['NEMO_VERSION']) + sys.stdout.write("[INFO] Ocean: %s" % run_info["OCN_grid"]) + if "NEMO_VERSION" in run_info: + sys.stdout.write(" (NEMO version: %s)\n" % run_info["NEMO_VERSION"]) else: - sys.stdout.write('\n') - if 'riv3' in run_info: - if run_info['riv3'] > 0: - sys.stdout.write('[INFO] Number of rivers: %d\n' % - run_info['riv3']) - - sys.stdout.write('[INFO] -------- Coupling frequencies (in mins) ' - '-------- \n') - comp_order = ['ATM', 'JNR', 'OCN'] - comp_list = [comp for comp in comp_order if '{}_resol'.format(comp) in - run_info] + sys.stdout.write("\n") + if "riv3" in run_info: + if run_info["riv3"] > 0: + sys.stdout.write("[INFO] Number of rivers: %d\n" % run_info["riv3"]) + + sys.stdout.write("[INFO] -------- Coupling frequencies (in mins) " "-------- \n") + comp_order = ["ATM", "JNR", "OCN"] + comp_list = [comp for comp in comp_order if "{}_resol".format(comp) in run_info] for component1, component2 in itertools.permutations(comp_list, r=2): - key = component2 + '2' + component1 + '_freq' - sys.stdout.write('[INFO] %s -> %s: %0.1f\n' % - (component2, component1, - (run_info[key][0]/60.0))) - key2 = 'l_hyb_stats_' + component2 + '2' + component1 + key = component2 + "2" + component1 + "_freq" + sys.stdout.write( + "[INFO] %s -> %s: %0.1f\n" + % (component2, component1, (run_info[key][0] / 60.0)) + ) + key2 = "l_hyb_stats_" + component2 + "2" + component1 if key2 in run_info: if run_info[key2]: - sys.stdout.write('[INFO] %s -> %s for stats: %0.1f\n' % - (component2, 
component1, - (run_info[key][1]/60.0))) - key = component1 + '2' + component2 + '_freq' - sys.stdout.write('[INFO] %s -> %s: %0.1f\n' % - (component1, component2, - (run_info[key][0]/60.0))) - key2 = 'l_hyb_stats_' + component1 + '2' + component2 + sys.stdout.write( + "[INFO] %s -> %s for stats: %0.1f\n" + % (component2, component1, (run_info[key][1] / 60.0)) + ) + key = component1 + "2" + component2 + "_freq" + sys.stdout.write( + "[INFO] %s -> %s: %0.1f\n" + % (component1, component2, (run_info[key][0] / 60.0)) + ) + key2 = "l_hyb_stats_" + component1 + "2" + component2 if key2 in run_info: if run_info[key2]: - sys.stdout.write('[INFO] %s -> %s for stats: %0.1f\n' % - (component2, component1, - (run_info[key][1]/60.0))) - if 'ATM_grid' in run_info: - sys.stdout.write('[INFO] -------- Atmosphere information -------- \n') - sys.stdout.write('[INFO] Atmosphere levels: %d\n' % - run_info['ATM_model_levels']) - sys.stdout.write('[INFO] Soil levels: %d\n' % - run_info['ATM_soil_levels']) - sys.stdout.write('[INFO] Number of vegetation tiles: %d\n' % - run_info['ATM_veg_tiles']) - sys.stdout.write('[INFO] Number of non-vegetation tiles: %d\n' % - run_info['ATM_non_veg_tiles']) - sys.stdout.write('[INFO] STASHmaster directory: %s\n' % - run_info['STASHMASTER']) - sys.stdout.write('[INFO] -------- Namcouple settings -------- \n') - sys.stdout.write('[INFO] nlogprt: %d' % run_info['nlogprt'][0]) - if len(run_info['nlogprt']) == 2: - sys.stdout.write(' %d\n' % run_info['nlogprt'][1]) + sys.stdout.write( + "[INFO] %s -> %s for stats: %0.1f\n" + % (component2, component1, (run_info[key][1] / 60.0)) + ) + if "ATM_grid" in run_info: + sys.stdout.write("[INFO] -------- Atmosphere information -------- \n") + sys.stdout.write( + "[INFO] Atmosphere levels: %d\n" % run_info["ATM_model_levels"] + ) + sys.stdout.write( + "[INFO] Soil levels: %d\n" % run_info["ATM_soil_levels"] + ) + sys.stdout.write( + "[INFO] Number of vegetation tiles: %d\n" % run_info["ATM_veg_tiles"] + ) + 
sys.stdout.write( + "[INFO] Number of non-vegetation tiles: %d\n" + % run_info["ATM_non_veg_tiles"] + ) + sys.stdout.write( + "[INFO] STASHmaster directory: %s\n" % run_info["STASHMASTER"] + ) + sys.stdout.write("[INFO] -------- Namcouple settings -------- \n") + sys.stdout.write("[INFO] nlogprt: %d" % run_info["nlogprt"][0]) + if len(run_info["nlogprt"]) == 2: + sys.stdout.write(" %d\n" % run_info["nlogprt"][1]) else: - sys.stdout.write('\n') - sys.stdout.write('[INFO] Executable list:\n') - for execut in run_info['exec_list']: - sys.stdout.write('[INFO] - %s\n' % execut) - if 'expout' in run_info: - sys.stdout.write('[INFO] Fields with EXPOUT argument:\n') - for field in run_info['expout']: - sys.stdout.write('[INFO] - %s\n' % field) - if 'rmp_create' in run_info: - sys.stdout.write('[INFO] Fields where remapping files will be ' - 'created are:\n') - for field in run_info['rmp_create']: - sys.stdout.write('[INFO] - %s\n' % field) - sys.stdout.write('[INFO] -------- Files -------- \n') - sys.stdout.write('[INFO] File containing coupling frequencies: %s\n' % - run_info['SHARED_FILE']) - if 'nemo_nl' in run_info: - sys.stdout.write('[INFO] Default couplings determined from: %s\n' % - run_info['nemo_nl']) + sys.stdout.write("\n") + sys.stdout.write("[INFO] Executable list:\n") + for execut in run_info["exec_list"]: + sys.stdout.write("[INFO] - %s\n" % execut) + if "expout" in run_info: + sys.stdout.write("[INFO] Fields with EXPOUT argument:\n") + for field in run_info["expout"]: + sys.stdout.write("[INFO] - %s\n" % field) + if "rmp_create" in run_info: + sys.stdout.write( + "[INFO] Fields where remapping files will be " "created are:\n" + ) + for field in run_info["rmp_create"]: + sys.stdout.write("[INFO] - %s\n" % field) + sys.stdout.write("[INFO] -------- Files -------- \n") + sys.stdout.write( + "[INFO] File containing coupling frequencies: %s\n" % run_info["SHARED_FILE"] + ) + if "nemo_nl" in run_info: + sys.stdout.write( + "[INFO] Default couplings determined 
from: %s\n" % run_info["nemo_nl"] + ) + def _checks_on_run_info(run_info): - ''' + """ Run some checks on the data in run_info - ''' + """ # If coupling contains both hybrid components, check the model_levels # are the same for both. - if 'ATM_model_levels' in run_info and 'JNR_model_levels' in run_info: - if run_info['ATM_model_levels'] != run_info['JNR_model_levels']: - sys.stderr.write('[FAIL] model_levels for Snr (=%d) and Jnr ' - '(=%d) are different.\n' % - (run_info['ATM_model_levels'], - run_info['JNR_model_levels'])) + if "ATM_model_levels" in run_info and "JNR_model_levels" in run_info: + if run_info["ATM_model_levels"] != run_info["JNR_model_levels"]: + sys.stderr.write( + "[FAIL] model_levels for Snr (=%d) and Jnr " + "(=%d) are different.\n" + % (run_info["ATM_model_levels"], run_info["JNR_model_levels"]) + ) sys.exit(error.DIFFERENT_MODEL_LEVELS) - if 'ATM_soil_levels' in run_info and 'JNR_soil_levels' in run_info: - if run_info['ATM_soil_levels'] != run_info['JNR_soil_levels']: - sys.stderr.write('[FAIL] soil levels for Snr (=%d) and Jnr ' - '(=%d) are different.\n' % - (run_info['ATM_soil_levels'], - run_info['JNR_soil_levels'])) + if "ATM_soil_levels" in run_info and "JNR_soil_levels" in run_info: + if run_info["ATM_soil_levels"] != run_info["JNR_soil_levels"]: + sys.stderr.write( + "[FAIL] soil levels for Snr (=%d) and Jnr " + "(=%d) are different.\n" + % (run_info["ATM_soil_levels"], run_info["JNR_soil_levels"]) + ) sys.exit(error.DIFFERENT_SOIL_LEVELS) # If stats are turned on, we want to sample the core fields for # stats at least as often as the stat fields. 
- if 'l_hyb_stats_ATM2JNR' in run_info: - if run_info['ATM2JNR_freq'][0] > run_info['ATM2JNR_freq'][1]: - sys.stdout.write('[INFO] matching the main coupling frequency ' - 'between ATM->JNR to the stats frequency\n') - run_info['ATM2JNR_freq'][0] = run_info['ATM2JNR_freq'][1] - if 'l_hyb_stats_JNR2ATM' in run_info: - if run_info['JNR2ATM_freq'][0] > run_info['JNR2ATM_freq'][1]: - sys.stdout.write('[INFO] matching the main coupling frequency ' - 'between JNR->ATM to the stats frequency\n') - run_info['JNR2ATM_freq'][0] = run_info['JNR2ATM_freq'][1] + if "l_hyb_stats_ATM2JNR" in run_info: + if run_info["ATM2JNR_freq"][0] > run_info["ATM2JNR_freq"][1]: + sys.stdout.write( + "[INFO] matching the main coupling frequency " + "between ATM->JNR to the stats frequency\n" + ) + run_info["ATM2JNR_freq"][0] = run_info["ATM2JNR_freq"][1] + if "l_hyb_stats_JNR2ATM" in run_info: + if run_info["JNR2ATM_freq"][0] > run_info["JNR2ATM_freq"][1]: + sys.stdout.write( + "[INFO] matching the main coupling frequency " + "between JNR->ATM to the stats frequency\n" + ) + run_info["JNR2ATM_freq"][0] = run_info["JNR2ATM_freq"][1] return run_info + def add_to_cpl_list(origin, l_hybrid, n_cpl_freq, send_list_raw): - ''' + """ Add a new set of couplings to model_snd_list - ''' + """ mapping = None weighting = None model_snd_list = [] @@ -209,11 +262,25 @@ def add_to_cpl_list(origin, l_hybrid, n_cpl_freq, send_list_raw): # Loop across the raw entries for cpl_entry_raw in send_list_raw: - if cpl_entry_raw == 'default': + if cpl_entry_raw == "default": # Entry will later be filled with the default options model_snd_list.append( - NamcoupleEntry('default', '?', '?', origin, '?', '?', '?', - '?', '?', '?', l_hybrid, n_cpl_freq, None)) + NamcoupleEntry( + "default", + "?", + "?", + origin, + "?", + "?", + "?", + "?", + "?", + "?", + l_hybrid, + n_cpl_freq, + None, + ) + ) else: # A raw coupling entry can have up to 7 arguments: # ;;;;; @@ -226,68 +293,87 @@ def add_to_cpl_list(origin, l_hybrid, 
n_cpl_freq, send_list_raw): nlev = 1 mapping_type = -99 - if isinstance(cpl_entry_raw, str) and ';' in cpl_entry_raw: + if isinstance(cpl_entry_raw, str) and ";" in cpl_entry_raw: # Split the input - parts = cpl_entry_raw.split(';') + parts = cpl_entry_raw.split(";") # Check we have enough compulsory input if len(parts) < 6: - sys.stderr.write('[FAIL] Insufficent information in %s\n' % - cpl_entry_raw) + sys.stderr.write( + "[FAIL] Insufficent information in %s\n" % cpl_entry_raw + ) sys.exit(error.MISSING_NAMCOUPLE_INPUT) # Set the fields we should know name_out = parts[0] field_id = int(parts[1]) grid = parts[2] - dests = parts[3].split('&') + dests = parts[3].split("&") # Check if number of level are provided if len(parts) > 4: nlev = int(parts[4]) # Check if a mapping is provided if len(parts) > 5: - sub_parts = parts[5].split('&') + sub_parts = parts[5].split("&") if sub_parts[0] in RMP_MAPPING: mapping = RMP_MAPPING[sub_parts[0]] else: - sys.stderr.write("[FAIL] Don't recognise this " - "mapping: %s.\n" % parts[5]) + sys.stderr.write( + "[FAIL] Don't recognise this " "mapping: %s.\n" % parts[5] + ) sys.exit(error.UNRECOGNISED_MAPPING) if len(sub_parts) > 1: mapping_type = int(sub_parts[1]) else: if not mapping: - sys.stderr.write('[FAIL] Need to specify a mapping ' - 'for first entry %s\n' % - cpl_entry_raw) + sys.stderr.write( + "[FAIL] Need to specify a mapping " + "for first entry %s\n" % cpl_entry_raw + ) sys.exit(error.MISSING_MAPPING) # Check if we have a coupling weighting if len(parts) > 6: weighting = int(parts[6]) else: if not weighting: - sys.stderr.write('[FAIL] Need to specify a weighting ' - 'for first entry.\n') + sys.stderr.write( + "[FAIL] Need to specify a weighting " "for first entry.\n" + ) sys.exit(error.MISSING_WEIGHTING) # Loop over the destinations for dest in dests: model_snd_list.append( - NamcoupleEntry(name_out, field_id, grid, origin, - dest, nlev, '?', mapping, mapping_type, - weighting, l_hybrid, n_cpl_freq, None)) + 
NamcoupleEntry( + name_out, + field_id, + grid, + origin, + dest, + nlev, + "?", + mapping, + mapping_type, + weighting, + l_hybrid, + n_cpl_freq, + None, + ) + ) # Just add to the default weighting weighting += 2 else: - sys.stderr.write('[FAIL] the following coupling entry looks ' - 'to be in the wrong format: %s\n' % - cpl_entry_raw) + sys.stderr.write( + "[FAIL] the following coupling entry looks " + "to be in the wrong format: %s\n" % cpl_entry_raw + ) sys.exit(error.WRONG_CPL_FORMAT) return model_snd_list + def write_namcouple(common_env, run_info, coupling_list): - ''' + """ Write the namcouple file - ''' + """ # Key information is contained in run_info _print_run_info(run_info) @@ -296,36 +382,37 @@ def write_namcouple(common_env, run_info, coupling_list): # See if any default couplings need adding for nam_entry in coupling_list: - if 'default' in nam_entry.name_out: - coupling_list = \ - default_couplings.add_default_couplings(run_info, - coupling_list) + if "default" in nam_entry.name_out: + coupling_list = default_couplings.add_default_couplings( + run_info, coupling_list + ) break # See if any couplings need removing store_coupling_list = [] for nam_entry in coupling_list: - if nam_entry.mapping != 'remove': + if nam_entry.mapping != "remove": store_coupling_list.append(nam_entry) coupling_list = store_coupling_list # Open the file - nam_file = common.open_text_file('namcouple', 'w') + nam_file = common.open_text_file("namcouple", "w") # Create the header - write_namcouple_header.write_namcouple_header(common_env, nam_file, - run_info, len(coupling_list)) + write_namcouple_header.write_namcouple_header( + common_env, nam_file, run_info, len(coupling_list) + ) # Sort the coupling_list by weighting - coupling_list = sorted(coupling_list, - key=lambda nam_entry: nam_entry.weight) + coupling_list = sorted(coupling_list, key=lambda nam_entry: nam_entry.weight) # Write the coupling fields cf_names = write_namcouple_fields.write_namcouple_fields( - nam_file, 
run_info, coupling_list) + nam_file, run_info, coupling_list + ) # Close file - nam_file.write('#\n$END\n') + nam_file.write("#\n$END\n") nam_file.close() # Write cf_name_table.txt file @@ -333,4 +420,4 @@ def write_namcouple(common_env, run_info, coupling_list): # Now that namcouple has been created, we can create the transient # field namelist - _, _ = shellout._exec_subprocess('./OASIS_fields') + _, _ = shellout._exec_subprocess("./OASIS_fields") diff --git a/Coupled_Drivers/xios_driver.py b/Coupled_Drivers/xios_driver.py index b08cd4d..b7331c5 100644 --- a/Coupled_Drivers/xios_driver.py +++ b/Coupled_Drivers/xios_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -17,8 +17,7 @@ DESCRIPTION Driver for the XIOS component, called from link_drivers. Can cater for XIOS running in either attatched or detatched mode -''' - +""" import os @@ -28,47 +27,53 @@ import dr_env_lib.xios_def import dr_env_lib.env_lib + def _copy_iodef_custom(xios_evar): - ''' + """ If a custom iodef file exists, copy this to the required input filename - ''' - if xios_evar['IODEF_CUSTOM']: - shutil.copy(xios_evar['IODEF_CUSTOM'], xios_evar['IODEF_FILENAME']) + """ + if xios_evar["IODEF_CUSTOM"]: + shutil.copy(xios_evar["IODEF_CUSTOM"], xios_evar["IODEF_FILENAME"]) + -def _update_iodef( - is_server_mode, is_coupled_mode, oasis_components, iodef_fname): - ''' +def _update_iodef(is_server_mode, is_coupled_mode, oasis_components, iodef_fname): + """ Update the iodef.xml file for server/attatched mode and couplng mode. is_server_mode and is_coupled_mode are boolean. (true when each option is activated, false otherwise). 
- ''' + """ # Work-around in lieu of viable multi component iodef.xml handling - _, _ = shellout._exec_subprocess('cp mydef.xml %s' % iodef_fname) + _, _ = shellout._exec_subprocess("cp mydef.xml %s" % iodef_fname) # Note we do not use python's xml module for this job, as the comment # line prevalent in the first line of the GO5 iodef.xml files renders # the file invalid as far as the xml module is concerned. - swapfile_name = 'swap_iodef' - iodef_file = common.open_text_file(iodef_fname, 'r') - iodef_swap = common.open_text_file(swapfile_name, 'w') - text_bool = ['false', 'true'] + swapfile_name = "swap_iodef" + iodef_file = common.open_text_file(iodef_fname, "r") + iodef_swap = common.open_text_file(swapfile_name, "w") + text_bool = ["false", "true"] for line in iodef_file.readlines(): # Update the server_mode if the current setting is not what we want - if '' \ - + oasis_components+'' + line = ( + '' + + oasis_components + + "" + ) else: - line = '' + line = "" iodef_swap.write(line) @@ -78,7 +83,7 @@ def _update_iodef( def _setup_coupling_components(xios_envar): - ''' + """ Set up the coupling components for the iodef file. This is less straightforward than you might imagine, since the names of the componenets are hard coded in the component source code. 
Nemo becomes toyoce and @@ -86,28 +91,29 @@ def _setup_coupling_components(xios_envar): We use the COUPLING_COMPONENTS environment variable to determine this, however it is borrowed from MCT, do we must delete it from the xios_envar container after use - ''' + """ oasis_components = [] - if 'lfric' in xios_envar['COUPLING_COMPONENTS']: - oasis_components.append('lfric') - if 'nemo' in xios_envar['COUPLING_COMPONENTS']: - oasis_components.append('toyoce') - xios_envar.remove('COUPLING_COMPONENTS') - oasis_components = ','.join(oasis_components) + if "lfric" in xios_envar["COUPLING_COMPONENTS"]: + oasis_components.append("lfric") + if "nemo" in xios_envar["COUPLING_COMPONENTS"]: + oasis_components.append("toyoce") + xios_envar.remove("COUPLING_COMPONENTS") + oasis_components = ",".join(oasis_components) return oasis_components, xios_envar def _setup_executable(common_env): - ''' + """ Setup the environment and any files required by the executable and/or by the iodef file update procedure. - ''' + """ # Load the environment variables required xios_envar = dr_env_lib.env_lib.LoadEnvar() xios_envar = dr_env_lib.env_lib.load_envar_from_definition( - xios_envar, dr_env_lib.xios_def.XIOS_ENVIRONMENT_VARS_INITIAL) + xios_envar, dr_env_lib.xios_def.XIOS_ENVIRONMENT_VARS_INITIAL + ) - if xios_envar['XIOS_NPROC'] == '0': + if xios_envar["XIOS_NPROC"] == "0": # Running in attached mode using_server = False else: @@ -115,13 +121,12 @@ def _setup_executable(common_env): # The following environment variables are only relevant for this # mode using_server = True - common.remove_file(xios_envar['XIOS_LINK']) - os.symlink(xios_envar['XIOS_EXEC'], - xios_envar['XIOS_LINK']) + common.remove_file(xios_envar["XIOS_LINK"]) + os.symlink(xios_envar["XIOS_EXEC"], xios_envar["XIOS_LINK"]) # Check our list of component drivers to see if MCT is active. If it is, # then this is a coupled model. Set the coupler flag accordingly. 
- using_coupler = 'mct' in common_env['models'] + using_coupler = "mct" in common_env["models"] # Copy the custom IO file if required _copy_iodef_custom(xios_envar) @@ -129,75 +134,85 @@ def _setup_executable(common_env): # Get the list of coupled componenets oasis_components, xios_envar = _setup_coupling_components(xios_envar) # Update the iodef file - _update_iodef(using_server, using_coupler, oasis_components, - xios_envar['IODEF_FILENAME']) + _update_iodef( + using_server, using_coupler, oasis_components, xios_envar["IODEF_FILENAME"] + ) return xios_envar def _set_launcher_command(launcher, xios_envar): - ''' + """ Setup the launcher command for the executable, bearing in mind that XIOS can run attached. If this is so, this function will return an empty string - ''' - if xios_envar['XIOS_NPROC'] != '0': - if xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'] == 'unset': + """ + if xios_envar["XIOS_NPROC"] != "0": + if xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"] == "unset": ompthr = 1 hyperthreads = 1 ss = True - xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'] = \ - common.set_aprun_options(xios_envar['XIOS_NPROC'], \ - xios_envar['XIOS_NODES'], ompthr, \ - hyperthreads, ss) \ - if launcher == 'aprun' else '' - - launch_cmd = '%s ./%s' % \ - (xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'], \ - xios_envar['XIOS_LINK']) + xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"] = ( + common.set_aprun_options( + xios_envar["XIOS_NPROC"], + xios_envar["XIOS_NODES"], + ompthr, + hyperthreads, + ss, + ) + if launcher == "aprun" + else "" + ) + + launch_cmd = "%s ./%s" % ( + xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"], + xios_envar["XIOS_LINK"], + ) # Put in quotes to allow this environment variable to be exported as it # contains (or can contain) spaces - xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'] = "'%s'" % \ - xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'] + xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"] = ( + "'%s'" % xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"] + ) else: - launch_cmd = '' + launch_cmd = "" return launch_cmd + 
def _sent_coupling_fields(run_info): - ''' + """ Add XIOS executable to list of executables. This function is only used when creating the namcouple at run time. - ''' + """ # Add xios to our list of executables - if not 'exec_list' in run_info: - run_info['exec_list'] = [] - run_info['exec_list'].append('xios.x') + if not "exec_list" in run_info: + run_info["exec_list"] = [] + run_info["exec_list"].append("xios.x") return run_info + def _finalize_executable(_): - ''' + """ There is no finalization required for XIOS - ''' + """ pass def run_driver(common_env, mode, run_info): - ''' + """ Run the driver, and return an instance of LoadEnvar and as string containing the launcher command for the XIOS component - ''' - if mode == 'run_driver': + """ + if mode == "run_driver": exe_envar = _setup_executable(common_env) - launch_cmd = _set_launcher_command(common_env['ROSE_LAUNCHER'], - exe_envar) + launch_cmd = _set_launcher_command(common_env["ROSE_LAUNCHER"], exe_envar) model_snd_list = None - if not run_info['l_namcouple']: + if not run_info["l_namcouple"]: run_info = _sent_coupling_fields(run_info) - elif mode == 'finalize': + elif mode == "finalize": _finalize_executable(common_env) exe_envar = None launch_cmd = None diff --git a/Postprocessing/common/utils.py b/Postprocessing/common/utils.py index 159d664..d9274a1 100644 --- a/Postprocessing/common/utils.py +++ b/Postprocessing/common/utils.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -''' +""" *****************************COPYRIGHT****************************** (C) Crown copyright 2015-2025 Met Office. All rights reserved. 
@@ -17,7 +17,7 @@ DESCRIPTION Common utilities for post-processing methods -''' +""" import sys import re import os @@ -27,15 +27,16 @@ import timer -globals()['debug_mode'] = None -globals()['debug_ok'] = True +globals()["debug_mode"] = None +globals()["debug_ok"] = True + class Variables(object): - '''Object to hold a group of variables''' + """Object to hold a group of variables""" def load_env(varname, default_value=None, required=False): - ''' + """ Load requested environment variable Arguments: varname - Name of environment variable @@ -45,96 +46,97 @@ def load_env(varname, default_value=None, required=False): required - Default=False Exit with system failure if True and no default_value is specified. - ''' + """ try: envar = os.environ[varname] except KeyError: envar = default_value if required is True and default_value is None: - msg = 'REQUIRED variable not found in the environment: ' - log_msg(msg + varname, level='FAIL') + msg = "REQUIRED variable not found in the environment: " + log_msg(msg + varname, level="FAIL") return envar class CylcCycle(object): - ''' Object representing the current Cylc cycle point ''' + """Object representing the current Cylc cycle point""" + def __init__(self, cyclepoint=None, cycleperiod=None): - ''' + """ Optional argument: cyclepoint - ISOformat datestring OR list/tuple of digits - ''' + """ if cyclepoint is None: # Load optional cycle point override environment - cyclepoint = load_env('CYCLEPOINT_OVERRIDE') + cyclepoint = load_env("CYCLEPOINT_OVERRIDE") if cyclepoint is None: - cyclepoint = load_env('CYLC_TASK_CYCLE_POINT', required=True) + cyclepoint = load_env("CYLC_TASK_CYCLE_POINT", required=True) self.startcycle = self._cyclepoint(cyclepoint) if cycleperiod is None: - cycleperiod = load_env('CYCLEPERIOD', required=True) + cycleperiod = load_env("CYCLEPERIOD", required=True) try: # Split period into list of integers if possible - cycleperiod = [int(x) for x in cycleperiod.split(',')] + cycleperiod = [int(x) for x in 
cycleperiod.split(",")] except ValueError: # Period provided is intended as a string pass self._period = cycleperiod - enddate = add_period_to_date(self.startcycle['intlist'], - cycleperiod) + enddate = add_period_to_date(self.startcycle["intlist"], cycleperiod) self.endcycle = self._cyclepoint(enddate) @property def period(self): - ''' Return the cycle period for the cycle point ''' + """Return the cycle period for the cycle point""" return self._period @staticmethod def isoformat(cpoint): - ''' Return cycle point as ISO format datestring ''' + """Return cycle point as ISO format datestring""" if isinstance(cpoint, (list, tuple)): cyclepoint = list(cpoint) while len(cyclepoint) < 5: cyclepoint.append(0) - cpoint = '{:0>4}{:0>2}{:0>2}T{:0>2}{:0>2}Z'.format(*cyclepoint) + cpoint = "{:0>4}{:0>2}{:0>2}T{:0>2}{:0>2}Z".format(*cyclepoint) - if re.match(r'\d{8}T\d{4}Z', cpoint): + if re.match(r"\d{8}T\d{4}Z", cpoint): return cpoint else: - msg = 'Unable to determine cycle point in ISO format: ' - log_msg(msg + str(cpoint), level='FAIL') + msg = "Unable to determine cycle point in ISO format: " + log_msg(msg + str(cpoint), level="FAIL") def _cyclepoint(self, cpoint): - ''' + """ Return a dictionary representing a cycle point in 3 formats: iso = ISO format datestring intlist = List of 5 values: [Y,M,D,hh,mm] strlist = List of 5 values: ['Y','M','D','hh','mm'] - ''' - cycle_repr = {'iso': self.isoformat(cpoint)} - cycle_repr['strlist'] = list(re.match( - r'(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})Z', cycle_repr['iso'] - ).groups()) - cycle_repr['intlist'] = [int(x) for x in cycle_repr['strlist']] + """ + cycle_repr = {"iso": self.isoformat(cpoint)} + cycle_repr["strlist"] = list( + re.match( + r"(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})Z", cycle_repr["iso"] + ).groups() + ) + cycle_repr["intlist"] = [int(x) for x in cycle_repr["strlist"]] return cycle_repr def finalcycle(): - ''' + """ Determine whether this cycle is the final cycle for the running suite. 
Return True/False - ''' - arch_final = load_env('ARCHIVE_FINAL', 'false') - if ('true' in arch_final.lower()): + """ + arch_final = load_env("ARCHIVE_FINAL", "false") + if "true" in arch_final.lower(): fcycle = True - log_msg('ARCHIVE_FINAL=true. End-of-run data will be archived.', - level='INFO') + log_msg("ARCHIVE_FINAL=true. End-of-run data will be archived.", level="INFO") else: - finalpoint = load_env('FINALCYCLE_OVERRIDE') + finalpoint = load_env("FINALCYCLE_OVERRIDE") if finalpoint is None: - finalpoint = load_env('CYLC_SUITE_FINAL_CYCLE_POINT') - if finalpoint == 'None': + finalpoint = load_env("CYLC_SUITE_FINAL_CYCLE_POINT") + if finalpoint == "None": # Convert from string. finalpoint = None @@ -144,8 +146,10 @@ def finalcycle(): # Cylc will not trigger further cycles beyond this point. # Set fcycle=True in this instance. if finalpoint: - fcycle = (CylcCycle().endcycle['intlist'] > - CylcCycle(cyclepoint=finalpoint).startcycle['intlist']) + fcycle = ( + CylcCycle().endcycle["intlist"] + > CylcCycle(cyclepoint=finalpoint).startcycle["intlist"] + ) else: # Cylc8 no longer requires a final cycle point to be set at all fcycle = False @@ -154,23 +158,23 @@ def finalcycle(): def get_utility_avail(utility): - '''Return True/False if shell command is available''' + """Return True/False if shell command is available""" try: status = shutil.which(utility) except AttributeError: # subprocess.getstatusoutput does not exist at Python2.7 - status, _ = shellout.exec_subprocess(utility + ' --help') + status, _ = shellout.exec_subprocess(utility + " --help") return bool(status) def get_subset(datadir, pattern): - '''Returns a list of files matching a given regex''' + """Returns a list of files matching a given regex""" datadir = check_directory(datadir) try: patt = re.compile(pattern) except TypeError: - log_msg('get_subset: Incompatible pattern supplied.', level='WARN') + log_msg("get_subset: Incompatible pattern supplied.", level="WARN") files = [] else: files = [fn for 
fn in sorted(os.listdir(datadir)) if patt.search(fn)] @@ -178,31 +182,30 @@ def get_subset(datadir, pattern): def check_directory(datadir): - ''' + """ Ensure that a given directory actually exists. Program will exit with an error if the test is unsuccessful. - ''' + """ try: datadir = os.path.expandvars(datadir) except TypeError: - log_msg('check_directory: Exiting - No directory provided', - level='FAIL') + log_msg("check_directory: Exiting - No directory provided", level="FAIL") if not os.path.isdir(datadir): - msg = 'check_directory: Exiting - Directory does not exist: ' - log_msg(msg + str(datadir), level='FAIL') + msg = "check_directory: Exiting - Directory does not exist: " + log_msg(msg + str(datadir), level="FAIL") return datadir def compare_mod_times(pathlist, last_mod=True): - ''' + """ Compare the modification time of files. Return the last modified file, or first listed of multiple files modified last. Optional arguments: last_mod Set to False to return the oldest file - ''' + """ mod_times = [] pathlist = ensure_list(pathlist) for path in pathlist: @@ -221,11 +224,11 @@ def compare_mod_times(pathlist, last_mod=True): def ensure_list(value, listnone=False): - ''' + """ Return a list for a given input. 
Optional argument: listnone - True=Return [''] or [None] False=Return [] - ''' + """ if value or listnone: if not isinstance(value, (list, tuple, type({}.keys()))): value = [value] @@ -236,7 +239,7 @@ def ensure_list(value, listnone=False): def add_path(files, path): - ''' Add a given path to a file or list of files provided ''' + """Add a given path to a file or list of files provided""" path = check_directory(path) files = ensure_list(files) @@ -244,7 +247,7 @@ def add_path(files, path): def create_dir(dirname, path=None): - ''' Create a directory ''' + """Create a directory""" if path: dirname = os.path.join(path, dirname) try: @@ -253,13 +256,12 @@ def create_dir(dirname, path=None): if exc.errno == errno.EEXIST and os.path.isdir(dirname): pass else: - log_msg('create_dir: Unable to create directory: ' + dirname, - level='FAIL') + log_msg("create_dir: Unable to create directory: " + dirname, level="FAIL") @timer.run_timer -def copy_files(cpfiles, destination=None, tmp_ext='.tmp'): - ''' +def copy_files(cpfiles, destination=None, tmp_ext=".tmp"): + """ Copy file(s). 
Optional arguments: destination - Where provided destination must be a writable @@ -268,7 +270,7 @@ def copy_files(cpfiles, destination=None, tmp_ext='.tmp'): copied to the original directory (os.path.dirname(filename)) with a "tmp_ext" extension tmp_ext - Extension used when copying to the same directory - ''' + """ if destination: destination = check_directory(destination) @@ -281,16 +283,16 @@ def copy_files(cpfiles, destination=None, tmp_ext='.tmp'): output = srcfile + tmp_ext try: - src = open(srcfile, 'rb') + src = open(srcfile, "rb") except IOError as exc: - msg = 'copy_files: Failed to read from source file: ' + srcfile - log_msg(' - '.join([msg, exc.strerror]), level='ERROR') + msg = "copy_files: Failed to read from source file: " + srcfile + log_msg(" - ".join([msg, exc.strerror]), level="ERROR") try: - out = open(output, 'wb') + out = open(output, "wb") except IOError as exc: - msg = 'copy_files: Failed to write to target file: ' + output - log_msg(' - '.join([msg, exc.strerror]), level='ERROR') + msg = "copy_files: Failed to write to target file: " + output + log_msg(" - ".join([msg, exc.strerror]), level="ERROR") shutil.copyfileobj(src, out) src.close() @@ -299,16 +301,17 @@ def copy_files(cpfiles, destination=None, tmp_ext='.tmp'): return outputfiles + @timer.run_timer def remove_files(delfiles, path=None, ignore_non_exist=False): - ''' + """ Delete files. Optional arguments: path - if not provided full path is assumed to have been provided in the filename. ignore_non_exist - flag to allow a non-existent file to be ignored. Default behaviour is to provide a warning and continue. 
- ''' + """ if path: path = check_directory(path) delfiles = add_path(delfiles, path) @@ -319,13 +322,12 @@ def remove_files(delfiles, path=None, ignore_non_exist=False): os.remove(fname) except OSError: if not ignore_non_exist: - log_msg('remove_files: File does not exist: ' + fname, - level='WARN') + log_msg("remove_files: File does not exist: " + fname, level="WARN") @timer.run_timer def move_files(mvfiles, destination, originpath=None, fail_on_err=False): - ''' + """ Move a single file or list of files to a given directory. Optionally a directory of origin may be specified. Arguments: @@ -337,8 +339,8 @@ def move_files(mvfiles, destination, originpath=None, fail_on_err=False): fail_on_err - Failure to move the file results in app failure. Primary cause of failure is a non-existent target file. Default=False - ''' - msglevel = 'ERROR' if fail_on_err else 'WARN' + """ + msglevel = "ERROR" if fail_on_err else "WARN" destination = check_directory(destination) if originpath: @@ -350,53 +352,55 @@ def move_files(mvfiles, destination, originpath=None, fail_on_err=False): shutil.move(fname, destination) except shutil.Error: if os.path.dirname(fname) == destination: - msg = 'move_files: Attempted to overwrite original file: ' + msg = "move_files: Attempted to overwrite original file: " log_msg(msg + fname, level=msglevel) else: dest_file = os.path.join(destination, os.path.basename(fname)) remove_files(dest_file) - msg = 'move_files: Deleted pre-existing file with same name ' \ - 'prior to move: ' + dest_file - log_msg(msg, level='WARN') + msg = ( + "move_files: Deleted pre-existing file with same name " + "prior to move: " + dest_file + ) + log_msg(msg, level="WARN") shutil.move(fname, destination) except IOError: # Exception changes in Python 3: # IOError has been merged into OSError # shutil.Error is now a child of IOError, therefore exception # order is important here for compatibility with both 2.7 and 3+ - log_msg('move_files: File does not exist: ' + fname, 
level=msglevel) + log_msg("move_files: File does not exist: " + fname, level=msglevel) def calendar(): - ''' Return the calendar based on the suite environment ''' - cal = load_env('CYLC_CYCLING_MODE', default_value='360day') - if cal.lower() == 'integer': + """Return the calendar based on the suite environment""" + cal = load_env("CYLC_CYCLING_MODE", default_value="360day") + if cal.lower() == "integer": # Non-Cycling suites should export the CALENDAR environment # variable. DEFAULT VALUE: 360day - cal = load_env('CALENDAR', default_value='360day') + cal = load_env("CALENDAR", default_value="360day") return cal def monthlength(month, year): - '''Returns length of given month in days - calendar dependent''' + """Returns length of given month in days - calendar dependent""" days_per_month = { # Days list runs from Dec -> Nov - '360day': [30]*12, - '365day': [31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30], - 'gregorian': [31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30], - } + "360day": [30] * 12, + "365day": [31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30], + "gregorian": [31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30], + } year = int(year) + (int(month) // 12) month = int(month) % 12 if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0): - days_per_month['gregorian'][2] = 29 + days_per_month["gregorian"][2] = 29 return days_per_month[calendar()][month % 12] def add_period_to_date(indate, delta): - ''' + """ Add a delta (list of integers) to a given date (list of integers). For 360day calendar, add period with simple arithmetic for speed For other calendars, call one of @@ -405,7 +409,7 @@ def add_period_to_date(indate, delta): with calendar argument - taken from environment variable CYLC_CYCLING_MODE. If no indate is provided ([0,0,0,0,0]) then delta is returned. 
- ''' + """ if isinstance(delta, str): delta = get_frequency(delta, rtn_delta=True) @@ -413,7 +417,7 @@ def add_period_to_date(indate, delta): outdate = delta else: cal = calendar() - if cal == '360day': + if cal == "360day": outdate = _mod_360day_calendar_date(indate, delta) else: outdate = _mod_all_calendars_date(indate, delta, cal) @@ -423,7 +427,7 @@ def add_period_to_date(indate, delta): @timer.run_timer def _mod_all_calendars_date(indate, delta, cal): - ''' Call `isodatetime` or `rose date` to return a date ''' + """Call `isodatetime` or `rose date` to return a date""" outdate = [int(d) for d in indate] while len(outdate) < 5: # ISOdatetime format string requires outdate list of length=5 @@ -431,34 +435,39 @@ def _mod_all_calendars_date(indate, delta, cal): outdate.append(val) # Check whether `isodatetime` command exists, or default to `rose date` - datecmd = 'isodatetime' if get_utility_avail('isodatetime') else 'rose date' + datecmd = "isodatetime" if get_utility_avail("isodatetime") else "rose date" for elem in delta: if elem != 0: - offset = ('-' if elem < 0 else '') + 'P' + offset = ("-" if elem < 0 else "") + "P" try: - offset += str(abs(elem)) + ['Y', 'M', 'D'][delta.index(elem)] + offset += str(abs(elem)) + ["Y", "M", "D"][delta.index(elem)] except IndexError: - if 'T' not in offset: - offset += 'T' - offset += str(abs(elem)) + ['M', 'H'][delta.index(elem)-4] + if "T" not in offset: + offset += "T" + offset += str(abs(elem)) + ["M", "H"][delta.index(elem) - 4] - dateinput = '{0:0>4}{1:0>2}{2:0>2}T{3:0>2}{4:0>2}'.format(*outdate) - if re.match(r'^\d{8}T\d{4}$', dateinput): - cmd = '{} {} --calendar {} --offset {} --print-format ' \ - '%Y,%m,%d,%H,%M'.format(datecmd, dateinput, cal, offset) + dateinput = "{0:0>4}{1:0>2}{2:0>2}T{3:0>2}{4:0>2}".format(*outdate) + if re.match(r"^\d{8}T\d{4}$", dateinput): + cmd = ( + "{} {} --calendar {} --offset {} --print-format " + "%Y,%m,%d,%H,%M".format(datecmd, dateinput, cal, offset) + ) rcode, output = 
shellout._exec_subprocess(cmd) else: - log_msg('add_period_to_date: Invalid date for conversion to ' - 'ISO 8601 date representation: ' + str(outdate), - level='ERROR') + log_msg( + "add_period_to_date: Invalid date for conversion to " + "ISO 8601 date representation: " + str(outdate), + level="ERROR", + ) if rcode == 0: - outdate = [int(x) for x in output.split(',')] + outdate = [int(x) for x in output.split(",")] else: - log_msg('`{}` command failed:\n{}'.format(datecmd, output), - level='ERROR') + log_msg( + "`{}` command failed:\n{}".format(datecmd, output), level="ERROR" + ) outdate = None break @@ -467,18 +476,20 @@ def _mod_all_calendars_date(indate, delta, cal): @timer.run_timer def _mod_360day_calendar_date(indate, delta): - ''' + """ Simple arithmetic calculation of new date for 360 day calendar. Use of `isodatetime`, while possible is inefficient. - ''' + """ try: outdate = [int(x) for x in indate] except ValueError: - log_msg('add_period_to_date: Invalid date representation: ' + - str(indate), level='FAIL') + log_msg( + "add_period_to_date: Invalid date representation: " + str(indate), + level="FAIL", + ) diff_hours = 0 # multiplier to convert the delta list to a total number of hours - multiplier = [360*24, 30*24, 24, 1, 1./60, 1./60/60] + multiplier = [360 * 24, 30 * 24, 24, 1, 1.0 / 60, 1.0 / 60 / 60] for i, val in enumerate(delta): diff_hours += multiplier[i] * val if len(outdate) <= i: @@ -515,33 +526,34 @@ def _mod_360day_calendar_date(indate, delta): def get_frequency(delta, rtn_delta=False): - r''' + r""" Extract the frequency and base period from a delta string in the form '\d+\w+' or an ISO period e.g. 
P1Y2M Optional argument: rtn_delta = True - return a delta in the form of a list = False - return the frequency and base period - ''' + """ # all_targets dictionary: key=base period, val=date list index - all_targets = {'h': 3, 'd': 2, 'm': 1, 's': 1, 'y': 0, 'a': 0, 'x': 0} - regex = r'(-?\d+)([{}])'.format(''.join(all_targets.keys())) - rval = [0]*5 + all_targets = {"h": 3, "d": 2, "m": 1, "s": 1, "y": 0, "a": 0, "x": 0} + regex = r"(-?\d+)([{}])".format("".join(all_targets.keys())) + rval = [0] * 5 preserve_neg = None while delta: - neg, iso, subdaily, delta = re.match(r'(-?)(p?)(t?)([\w\-]*)', - delta.lower()).groups() + neg, iso, subdaily, delta = re.match( + r"(-?)(p?)(t?)([\w\-]*)", delta.lower() + ).groups() if subdaily: # Redefine "m" to "minutes" (date index 4) - all_targets['m'] = 4 + all_targets["m"] = 4 if iso: # `delta` prefix is [-]P indicating an ISO period. # Any negative should be preserved such that it is applied # to each frequency in the whole string. Examples: # -P1Y3M is "-1 year and -1 month" # PT1H30M is "+1 hour and +30 minutes" - preserve_neg = (neg == '-') + preserve_neg = neg == "-" multiplier = -1 if (preserve_neg or neg) else 1 try: @@ -554,91 +566,91 @@ def get_frequency(delta, rtn_delta=False): try: index = [all_targets[t] for t in all_targets if t == base][0] except IndexError: - concatdelta = ''.join([neg, subdaily, delta]) - log_msg('get_frequency - Invalid target provided: ' + concatdelta, - level='FAIL') + concatdelta = "".join([neg, subdaily, delta]) + log_msg( + "get_frequency - Invalid target provided: " + concatdelta, level="FAIL" + ) if rtn_delta: # Strip freq/base from the start of the delta string for next pass delta = delta.lstrip(str(freq)) delta = delta.lstrip(base) - if not re.search(r'\d+', delta): + if not re.search(r"\d+", delta): # Remaining delta string cannot be a period - pass complete - delta = '' + delta = "" # Return delta in the form of an integer list - if base == 's': + if base == "s": freq = freq * 3 - 
elif base == 'x': + elif base == "x": freq = freq * 10 rval[index] = freq else: # Return an integer frequency and string base rval = [freq, base] - delta = '' + delta = "" return rval -def log_msg(msg, level='INFO'): - ''' +def log_msg(msg, level="INFO"): + """ Produce a message to the appropriate output stream. Messages tagged with 'ERROR' and 'FAIL' will result in the program exiting, unless model is running in debug_mode, in which case only 'FAIL' will exit. - ''' + """ out = sys.stdout err = sys.stderr level = str(level).upper() output = { - 'DEBUG': (err, '[DEBUG] '), - 'INFO': (out, '[INFO] '), - 'OK': (out, '[ OK ] '), - 'WARN': (err, '[WARN] '), - 'ERROR': (err, '[ERROR] '), - 'FAIL': (err, '[FAIL] '), + "DEBUG": (err, "[DEBUG] "), + "INFO": (out, "[INFO] "), + "OK": (out, "[ OK ] "), + "WARN": (err, "[WARN] "), + "ERROR": (err, "[ERROR] "), + "FAIL": (err, "[FAIL] "), } try: - output[level][0].write('{} {}\n'.format(output[level][1], msg)) + output[level][0].write("{} {}\n".format(output[level][1], msg)) except KeyError: - level = 'WARN' - msg = 'log_msg: Unknown severity level for log message.' - output[level][0].write('{} {}\n'.format(output[level][1], msg)) + level = "WARN" + msg = "log_msg: Unknown severity level for log message." + output[level][0].write("{} {}\n".format(output[level][1], msg)) - if level == 'ERROR': + if level == "ERROR": # If in debug mode, terminate at the end of the task. # Otherwise terminate now. 
catch_failure() - elif level == 'FAIL': - sys.exit(output[level][1] + 'Terminating PostProc...') + elif level == "FAIL": + sys.exit(output[level][1] + "Terminating PostProc...") def set_debugmode(debug): - '''Set method for the debug_mode global variable''' - globals()['debug_mode'] = debug - globals()['debug_ok'] = True + """Set method for the debug_mode global variable""" + globals()["debug_mode"] = debug + globals()["debug_ok"] = True def get_debugmode(): - '''Get method for the debug_mode global variable''' - return globals()['debug_mode'] + """Get method for the debug_mode global variable""" + return globals()["debug_mode"] def get_debugok(): - '''Get method for the debug_ok global variable''' - return globals()['debug_ok'] + """Get method for the debug_ok global variable""" + return globals()["debug_ok"] def catch_failure(): - ''' + """ Ignore errors in external subprocess commands or other failures, allowing the task to continue to completion. Ultimately causes the task to fail due to the global debug_ok setting. - ''' + """ if get_debugmode(): - log_msg('Ignoring failed external command. Continuing...', - level='DEBUG') - globals()['debug_ok'] = False + log_msg("Ignoring failed external command. Continuing...", level="DEBUG") + globals()["debug_ok"] = False else: - log_msg('Command Terminated', level='FAIL') + log_msg("Command Terminated", level="FAIL") From 665b2fa8b712b33ac8c6c79b0aae4f9c2dbfee47 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 14:55:27 +0000 Subject: [PATCH 12/14] Revert "Apply black to all modified files" This reverts commit 787b5b50158cd53823580d55eb436e8503c91e19. 
--- Coupled_Drivers/cice_driver.py | 494 ++++---- Coupled_Drivers/common.py | 221 ++-- Coupled_Drivers/cpmip_utils.py | 158 ++- Coupled_Drivers/cpmip_xios.py | 43 +- Coupled_Drivers/mct_driver.py | 303 +++-- Coupled_Drivers/nemo_driver.py | 1089 ++++++++--------- Coupled_Drivers/rivers_driver.py | 214 ++-- Coupled_Drivers/si3_controller.py | 255 ++-- Coupled_Drivers/top_controller.py | 290 +++-- Coupled_Drivers/unittests/test_cpmip_utils.py | 238 ++-- Coupled_Drivers/unittests/test_cpmip_xios.py | 166 ++- .../unittests/test_rivers_driver.py | 239 ++-- Coupled_Drivers/write_namcouple.py | 423 +++---- Coupled_Drivers/xios_driver.py | 163 ++- Postprocessing/common/utils.py | 336 +++-- 15 files changed, 2103 insertions(+), 2529 deletions(-) diff --git a/Coupled_Drivers/cice_driver.py b/Coupled_Drivers/cice_driver.py index a9da184..176b8a4 100644 --- a/Coupled_Drivers/cice_driver.py +++ b/Coupled_Drivers/cice_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -18,7 +18,7 @@ Driver for the CICE model, called from link_drivers. 
Currently this does not cater for stand alone CICE and therefore must be run in conjuction with the NEMO driver -""" +''' import os @@ -37,263 +37,235 @@ def __expand_array(short_array): - """ + ''' Expand a shortened array containing n*m entries into a full list - """ - long_array = "" - for group in short_array.split(","): - if "*" not in group: - long_array += "%s," % group + ''' + long_array = '' + for group in short_array.split(','): + if '*' not in group: + long_array += '%s,' % group else: - multiplier = int(group.split("*")[0]) - value = group.split("*")[1] - long_array += ("%s," % value) * multiplier - if long_array[-1] == ",": + multiplier = int(group.split('*')[0]) + value = group.split('*')[1] + long_array += ('%s,' % value) * multiplier + if long_array[-1] == ',': long_array = long_array[:-1] return long_array - def _verify_fix_rst(pointerfile, task_start): - """ + ''' Verify the restart file for cice is at the time associated with the TASKSTART variable. The pointerfile contains a string of the path to the restart file. If the dates dont match, fix the date in the pointerfile. - """ + ''' # Convert the format of the task start time. Seasonal forecasting # uses a date format that includes seconds, so account for this in # the choice of date formatting. 
try: task_start_datetime = datetime.datetime.strptime( - task_start, "%Y,%m,%d,%H,%M,%S" - ) + task_start, "%Y,%m,%d,%H,%M,%S") except ValueError: - task_start_datetime = datetime.datetime.strptime(task_start, "%Y,%m,%d,%H,%M") - task_start = task_start_datetime.strftime("%Y%m%d") + task_start_datetime = datetime.datetime.strptime( + task_start, "%Y,%m,%d,%H,%M") + task_start = task_start_datetime.strftime('%Y%m%d') # deal with the pointer file - with common.open_text_file(pointerfile, "r") as pointer_handle: + with common.open_text_file(pointerfile, 'r') as pointer_handle: restart_path = pointer_handle.readlines()[0].strip() if not os.path.isfile(restart_path): - sys.stderr.write( - "[INFO] The CICE restart file %s can not be found\n" % restart_path - ) + sys.stderr.write('[INFO] The CICE restart file %s can not be found\n' % + restart_path) sys.exit(error.MISSING_MODEL_FILE_ERROR) - # grab the date from the restart file name. It has form yyyy-mm-dd, to - # match cyclepoint strip the -'s. - restartmatch = re.search(r"\d{4}-\d{2}-\d{2}", os.path.basename(restart_path)) - restartdate = restartmatch.group(0).replace("-", "") + #grab the date from the restart file name. It has form yyyy-mm-dd, to + #match cyclepoint strip the -'s. + restartmatch = re.search(r'\d{4}-\d{2}-\d{2}', + os.path.basename(restart_path)) + restartdate = restartmatch.group(0).replace('-', '') if restartdate != task_start: # write the message to both standard out and standard error - msg = ( - "[WARN ]The CICE restart data does not match the " - " current task start time\n." - " Task start time is %s\n" - " CICE restart time is %s\n" % (task_start, restartdate) - ) + msg = '[WARN ]The CICE restart data does not match the ' \ + ' current task start time\n.' 
\ + ' Task start time is %s\n' \ + ' CICE restart time is %s\n' % (task_start, restartdate) sys.stdout.write(msg) sys.stderr.write(msg) - # Turn the task_start variable into form yyyy-mm-dd - fixed_restart_date = "%s-%s-%s" % ( - task_start[:4], - task_start[4:6], - task_start[6:8], - ) - # Swap the date in the restart path - restart_path_fixed = restart_path.replace( - restartmatch.group(0), fixed_restart_date - ) - new_pointerfile = "%s.tmp" % (pointerfile) - with common.open_text_file(new_pointerfile, "w") as new_pointer_handle: - # The restart path line should be padded to 256 characters + #Turn the task_start variable into form yyyy-mm-dd + fixed_restart_date = '%s-%s-%s' % (task_start[:4], + task_start[4:6], + task_start[6:8]) + #Swap the date in the restart path + restart_path_fixed = restart_path.replace(restartmatch.group(0), + fixed_restart_date) + new_pointerfile = '%s.tmp' % (pointerfile) + with common.open_text_file(new_pointerfile, 'w') as new_pointer_handle: + #The restart path line should be padded to 256 characters new_pointer_handle.write("{:<256}".format(restart_path_fixed)) os.rename(new_pointerfile, pointerfile) - sys.stdout.write("%s\n" % ("*" * 42,)) - sys.stdout.write("[WARN] Automatically fixing CICE restart\n") - sys.stdout.write( - "[WARN] Update pointer file %s to replace \n" - "[WARN] restart file %s\n" - "[WARN] with\n" - "[WARN] restart file %s\n" % (pointerfile, restart_path, restart_path_fixed) - ) - sys.stdout.write("%s\n" % ("*" * 42,)) + sys.stdout.write('%s\n' % ('*'*42,)) + sys.stdout.write('[WARN] Automatically fixing CICE restart\n') + sys.stdout.write('[WARN] Update pointer file %s to replace \n' + '[WARN] restart file %s\n' + '[WARN] with\n' + '[WARN] restart file %s\n' % + (pointerfile, restart_path, restart_path_fixed)) + sys.stdout.write('%s\n' % ('*'*42,)) else: - sys.stdout.write("[INFO] Validated CICE restart date\n") + sys.stdout.write('[INFO] Validated CICE restart date\n') def 
_load_environment_variables(cice_envar): - """ + ''' Load the CICE environment variables required for the model run into the cice_envar container - """ + ''' cice_envar = dr_env_lib.env_lib.load_envar_from_definition( - cice_envar, dr_env_lib.cice_def.CICE_ENVIRONMENT_VARS_INITIAL - ) - - cice_envar["ATM_DATA_DIR"] = "%s:%s" % ( - cice_envar["ATM_DATA_DIR"], - cice_envar["CICE_ATMOS_DATA"], - ) - cice_envar["OCN_DATA_DIR"] = "%s:%s" % ( - cice_envar["OCN_DATA_DIR"], - cice_envar["CICE_OCEAN_DATA"], - ) + cice_envar, dr_env_lib.cice_def.CICE_ENVIRONMENT_VARS_INITIAL) + + cice_envar['ATM_DATA_DIR'] = '%s:%s' % \ + (cice_envar['ATM_DATA_DIR'], cice_envar['CICE_ATMOS_DATA']) + cice_envar['OCN_DATA_DIR'] = '%s:%s' % \ + (cice_envar['OCN_DATA_DIR'], cice_envar['CICE_OCEAN_DATA']) return cice_envar def _setup_executable(common_env): - """ + ''' Setup the environment and any files required by the executable - """ + ''' # Create the environment variable container cice_envar = dr_env_lib.env_lib.LoadEnvar() # Load the ice namelist path. Information will be retrieved from this file # druing the running of the driver, so check if it exists. - _ = cice_envar.load_envar("CICE_IN", "ice_in") - cice_nl = cice_envar["CICE_IN"] + _ = cice_envar.load_envar('CICE_IN', 'ice_in') + cice_nl = cice_envar['CICE_IN'] if not os.path.isfile(cice_nl): - sys.stderr.write("[FAIL] Can not find the cice namelist file %s\n" % cice_nl) + sys.stderr.write('[FAIL] Can not find the cice namelist file %s\n' % + cice_nl) sys.exit(error.MISSING_DRIVER_FILE_ERROR) # load the remaining environment variables cice_envar = _load_environment_variables(cice_envar) - calendar = common_env["CALENDAR"] - if calendar == "360day": - calendar = "360" + calendar = common_env['CALENDAR'] + if calendar == '360day': + calendar = '360' caldays = 360 cice_leap_years = ".false." - elif calendar == "365day": - calendar = "365" + elif calendar == '365day': + calendar = '365' caldays = 365 cice_leap_years = ".false." 
else: caldays = 365 cice_leap_years = ".true." - # turn our times into lists of integers - model_basis = [int(i) for i in common_env["MODELBASIS"].split(",")] - run_start = [int(i) for i in common_env["TASKSTART"].split(",")] - run_length = [int(i) for i in common_env["TASKLENGTH"].split(",")] - - run_days = inc_days.inc_days( - run_start[0], - run_start[1], - run_start[2], - run_length[0], - run_length[1], - run_length[2], - calendar, - ) - days_to_start = time2days.time2days( - run_start[0], run_start[1], run_start[2], calendar - ) - - tot_runlen_sec = ( - run_days * 86400 + run_length[3] * 3600 + run_length[4] * 60 + run_length[5] - ) + #turn our times into lists of integers + model_basis = [int(i) for i in common_env['MODELBASIS'].split(',')] + run_start = [int(i) for i in common_env['TASKSTART'].split(',')] + run_length = [int(i) for i in common_env['TASKLENGTH'].split(',')] + + run_days = inc_days.inc_days(run_start[0], run_start[1], run_start[2], + run_length[0], run_length[1], run_length[2], + calendar) + days_to_start = time2days.time2days(run_start[0], run_start[1], + run_start[2], calendar) + + tot_runlen_sec = run_days * 86400 + run_length[3]*3600 + run_length[4]*60 \ + + run_length[5] # These variables default to zero except in operational NWP suite where # a run can be restarted part way through after a failure. 
# In this case CONTINUE_FROM_FAIL should also be true - last_dump_hours = int(common_env["LAST_DUMP_HOURS"]) - last_dump_seconds = last_dump_hours * 3600 - - # any variables containing things that can be globbed will start with gl_ - gl_step_int_match = "^dt=" - _, step_int_val = shellout._exec_subprocess( - "grep %s %s" % (gl_step_int_match, cice_nl) - ) - cice_step_int = int(re.findall(r"^dt=(\d*)\.?", step_int_val)[0]) + last_dump_hours = int(common_env['LAST_DUMP_HOURS']) + last_dump_seconds = last_dump_hours*3600 + + #any variables containing things that can be globbed will start with gl_ + gl_step_int_match = '^dt=' + _, step_int_val = shellout._exec_subprocess('grep %s %s' % (gl_step_int_match, cice_nl)) + cice_step_int = int(re.findall(r'^dt=(\d*)\.?', step_int_val)[0]) cice_steps = (tot_runlen_sec - last_dump_seconds) // cice_step_int - _, cice_histfreq_val = shellout._exec_subprocess("grep histfreq %s" % cice_nl) - cice_histfreq_val = re.findall(r"histfreq\s*=\s*(.*)", cice_histfreq_val)[0] + _, cice_histfreq_val = shellout._exec_subprocess('grep histfreq %s' % cice_nl) + cice_histfreq_val = re.findall(r'histfreq\s*=\s*(.*)', cice_histfreq_val)[0] cice_histfreq = __expand_array(cice_histfreq_val)[1] - _, cice_histfreq_n_val = shellout._exec_subprocess("grep histfreq_n %s" % cice_nl) - cice_histfreq_n_val = re.findall(r"histfreq_n\s*=\s*(.*)", cice_histfreq_n_val)[0] + _, cice_histfreq_n_val = shellout._exec_subprocess('grep histfreq_n %s' % cice_nl) + cice_histfreq_n_val = re.findall(r'histfreq_n\s*=\s*(.*)', + cice_histfreq_n_val)[0] cice_histfreq_n = __expand_array(cice_histfreq_n_val) - cice_histfreq_n = int(cice_histfreq_n.split(",")[0]) + cice_histfreq_n = int(cice_histfreq_n.split(',')[0]) - _, cice_age_rest_val = shellout._exec_subprocess("grep ^restart_age %s" % cice_nl) - cice_age_rest = re.findall(r"restart_age\s*=\s*(.*)", cice_age_rest_val)[0] + _, cice_age_rest_val = shellout._exec_subprocess('grep ^restart_age %s' % cice_nl) + cice_age_rest 
= re.findall(r'restart_age\s*=\s*(.*)', + cice_age_rest_val)[0] # If the variables MODELBASIS, TASKSTART, TASKLENGTH are unset from the # environment then read from the shared namelist file - if False in ( - common_env["MODELBASIS"], - common_env["TASKSTART"], - common_env["TASKLENGTH"], - ): + if False in (common_env['MODELBASIS'], + common_env['TASKSTART'], + common_env['TASKLENGTH']): # at least one variable has to be read from the shared namelist file - if not os.path.ispath(cice_envar["SHARED_FNAME"]): - sys.stderr.write( - "[FAIL] Can not find shared namelist file %s\n" - % cice_envar["SHARED_FNAME"] - ) + if not os.path.ispath(cice_envar['SHARED_FNAME']): + sys.stderr.write('[FAIL] Can not find shared namelist file %s\n' % + cice_envar['SHARED_FNAME']) sys.exit(error.MISSING_DRIVER_FILE_ERROR) - if not common_env["MODELBASIS"]: - _, modelbasis_val = shellout._exec_subprocess( - "grep model_basis_time %s" % cice_envar["SHARED_FNAME"] - ) - modelbasis_val = re.findall(r"model_basis_time\s*=\s*(.*)", modelbasis_val) + if not common_env['MODELBASIS']: + _, modelbasis_val = shellout._exec_subprocess('grep model_basis_time %s' % + cice_envar['SHARED_FNAME']) + modelbasis_val = re.findall(r'model_basis_time\s*=\s*(.*)', + modelbasis_val) modelbasis = [int(i) for i in __expand_array(modelbasis_val)] - common_env.add("MODELBASIS", modelbasis) - if not common_env["TASKSTART"]: - common_env.add("TASKSTART", common_env["MODELBASIS"]) - if not common_env["TASKLENGTH"]: - _, tasklength_val = shellout._exec_subprocess( - "grep run_target_end %s" % cice_envar["SHARED_FNAME"] - ) - tasklength_val = re.findall(r"run_target_end\s*=\s*(.*)", tasklength_val) + common_env.add('MODELBASIS', modelbasis) + if not common_env['TASKSTART']: + common_env.add('TASKSTART', common_env['MODELBASIS']) + if not common_env['TASKLENGTH']: + _, tasklength_val = shellout._exec_subprocess('grep run_target_end %s' % + cice_envar['SHARED_FNAME']) + tasklength_val = 
re.findall(r'run_target_end\s*=\s*(.*)', + tasklength_val) tasklength = [int(i) for i in __expand_array(tasklength_val)] - common_env.add("TASKLENGTH", tasklength) + common_env.add('TASKLENGTH', tasklength) - if cice_envar["TASK_START_TIME"] == "unavaliable": + if cice_envar['TASK_START_TIME'] == 'unavaliable': # This is probably a climate suite days_to_year_init = time2days.time2days(model_basis[0], 1, 1, calendar) - days_to_start = time2days.time2days( - run_start[0], run_start[1], run_start[2], calendar - ) - cice_istep0 = (days_to_start - days_to_year_init) * 86400 // cice_step_int + days_to_start = time2days.time2days(run_start[0], run_start[1], + run_start[2], calendar) + cice_istep0 = (days_to_start - days_to_year_init) * 86400 \ + // cice_step_int else: # This is probably a coupled NWP suite - cmd = "rose date %s0101T0000Z %s" % ( - str(run_start[0]), - cice_envar["TASK_START_TIME"], - ) + cmd = 'rose date %s0101T0000Z %s' % (str(run_start[0]), cice_envar['TASK_START_TIME']) _, time_since_year_start = shellout._exec_subprocess(cmd) - # The next command works because rose date assumes + #The next command works because rose date assumes # 19700101T0000Z is second 0 - cmd = ( - "rose date --print-format=%%s 19700101T00Z --offset=%s" - % time_since_year_start - ) + cmd = 'rose date --print-format=%%s 19700101T00Z --offset=%s' % time_since_year_start # Account for restarting from a failure in next line # shellout._exec_subprocess returns a tuple containing (return_code, output) - seconds_since_year_start = ( - int(shellout._exec_subprocess(cmd)[1]) + last_dump_seconds - ) - cice_istep0 = seconds_since_year_start / cice_step_int - - _, cice_rst_val = shellout._exec_subprocess("grep restart_dir %s" % cice_nl) - cice_rst = re.findall(r"restart_dir\s*=\s*\'(.*)\',", cice_rst_val)[0] - if cice_rst[-1] == "/": + seconds_since_year_start = int(shellout._exec_subprocess(cmd)[1]) \ + + last_dump_seconds + cice_istep0 = seconds_since_year_start/cice_step_int + + _, 
cice_rst_val = shellout._exec_subprocess('grep restart_dir %s' % cice_nl) + cice_rst = re.findall(r'restart_dir\s*=\s*\'(.*)\',', cice_rst_val)[0] + if cice_rst[-1] == '/': cice_rst = cice_rst[:-1] - if cice_rst in (os.getcwd(), "."): - cice_restart = os.path.join(common_env["DATAM"], cice_envar["CICE_RESTART"]) + if cice_rst in (os.getcwd(), '.'): + cice_restart = os.path.join(common_env['DATAM'], + cice_envar['CICE_RESTART']) else: - cice_restart = os.path.join(cice_rst, cice_envar["CICE_RESTART"]) + cice_restart = os.path.join(cice_rst, + cice_envar['CICE_RESTART']) - _, cice_hist_val = shellout._exec_subprocess("grep history_dir %s" % cice_nl) - cice_hist = re.findall(r"history_dir\s*=\s*\'(.*)\',", cice_hist_val)[0] - _, cice_incond_val = shellout._exec_subprocess("grep incond_dir %s" % cice_nl) - cice_incond = re.findall(r"incond_dir\s*=\s*\'(.*)\',", cice_incond_val)[0] + _, cice_hist_val = shellout._exec_subprocess('grep history_dir %s' % cice_nl) + cice_hist = re.findall(r'history_dir\s*=\s*\'(.*)\',', cice_hist_val)[0] + _, cice_incond_val = shellout._exec_subprocess('grep incond_dir %s' % cice_nl) + cice_incond = re.findall(r'incond_dir\s*=\s*\'(.*)\',', cice_incond_val)[0] for direc in (cice_rst, cice_hist, cice_incond): # Strip white space @@ -301,158 +273,148 @@ def _setup_executable(common_env): # Check for trailing slashes in directory names and strip them # out if they're present. - if direc.endswith("/"): - direc = direc.rstrip("/") - - if ( - os.path.isdir(direc) - and (direc not in ("./", ".")) - and common_env["CONTINUE"] == "false" - ): - sys.stdout.write("[INFO] directory is %s\n" % direc) - sys.stdout.write( - "[INFO] This is a New Run. Renaming old CICE" " history directory\n" - ) + if direc.endswith('/'): + direc = direc.rstrip('/') + + if os.path.isdir(direc) and (direc not in ('./', '.')) and \ + common_env['CONTINUE'] == 'false': + sys.stdout.write('[INFO] directory is %s\n' % direc) + sys.stdout.write('[INFO] This is a New Run. 
Renaming old CICE' + ' history directory\n') # In seasonal forecasting, we automatically apply # short-stepping to re-try the model. Before re-attempting # it, remove the associated CICE history directory. old_hist_dir = "%s.%s" % (direc, time.strftime("%Y%m%d%H%M")) - if ( - common_env["SEASONAL"] == "True" - and int(common_env["CYLC_TASK_TRY_NUMBER"]) > 1 - ): + if (common_env['SEASONAL'] == 'True' and + int(common_env['CYLC_TASK_TRY_NUMBER']) > 1): common.remove_latest_hist_dir(old_hist_dir) os.rename(direc, old_hist_dir) os.makedirs(direc) elif not os.path.isdir(direc): - sys.stdout.write("[INFO] Creating CICE output directory %s\n" % direc) + sys.stdout.write('[INFO] Creating CICE output directory %s\n' % + direc) os.makedirs(direc) - cice_restart_files = [ - f for f in os.listdir(cice_rst) if re.findall(r".*i\.restart\..*", f) - ] + cice_restart_files = [f for f in os.listdir(cice_rst) if + re.findall(r'.*i\.restart\..*', f)] if not cice_restart_files: - cice_restart_files = ["nofile"] + cice_restart_files = ['nofile'] if not os.path.isfile(os.path.join(cice_rst, cice_restart_files[-1])): - if cice_envar["CICE_START"]: - if cice_age_rest == "true": - cice_runtype = "continue" - ice_ic = "set in pointer file" - _, _ = shellout._exec_subprocess( - "%s > %s" % (cice_envar["CICE_START"], cice_restart) - ) - sys.stdout.write( - "[INFO] %s > %s" % (cice_envar["CICE_START"], cice_restart) - ) + if cice_envar['CICE_START']: + if cice_age_rest == 'true': + cice_runtype = 'continue' + ice_ic = 'set in pointer file' + _, _ = shellout._exec_subprocess('%s > %s' % (cice_envar['CICE_START'], cice_restart)) + sys.stdout.write('[INFO] %s > %s' % + (cice_envar['CICE_START'], + cice_restart)) else: - cice_runtype = "initial" - ice_ic = cice_envar["CICE_START"] - restart = ".true." + cice_runtype = 'initial' + ice_ic = cice_envar['CICE_START'] + restart = '.true.' else: - ice_ic = "default" - cice_runtype = "initial" - restart = ".false." 
+ ice_ic = 'default' + cice_runtype = 'initial' + restart = '.false.' else: - cice_runtype = "continue" - restart = ".true." - if cice_envar["CICE_START"]: - ice_ic = "set_in_pointer_file" + cice_runtype = 'continue' + restart = '.true.' + if cice_envar['CICE_START']: + ice_ic = 'set_in_pointer_file' else: - ice_ic = "default" + ice_ic = 'default' # if this is a continuation verify the restart file date - if cice_runtype == "continue" and common_env["DRIVERS_VERIFY_RST"] == "True": - _verify_fix_rst(cice_restart, common_env["TASKSTART"]) + if cice_runtype == 'continue' and \ + common_env['DRIVERS_VERIFY_RST'] == 'True': + _verify_fix_rst(cice_restart, common_env['TASKSTART']) # if this is a continuation from a failed NWP job we check that the last # CICE dump matches the time of LAST_DUMP_HOURS - if common_env["CONTINUE_FROM_FAIL"] == "true": - # Read the filename from pointer file + if common_env['CONTINUE_FROM_FAIL'] == 'true': + #Read the filename from pointer file with open(cice_restart) as fid: rst_file = fid.readline() - rst_file = rst_file.rstrip("\n").strip() + rst_file = rst_file.rstrip('\n').strip() rst_file = os.path.basename(rst_file) - ymds = [int(f) for f in rst_file[-19:-3].split("-")] - since_start = datetime.datetime( - ymds[0], - ymds[1], - ymds[2], - ymds[3] // 3600, - (ymds[3] % 3600) // 60, - (ymds[3] % 3600) % 60, - ) - datetime.datetime( - run_start[0], run_start[1], run_start[2], run_start[3], run_start[4] - ) + ymds = [int(f) for f in rst_file[-19:-3].split('-')] + since_start = datetime.datetime(ymds[0], ymds[1], ymds[2], \ + ymds[3]//3600, (ymds[3]%3600)//60, \ + (ymds[3]%3600)%60) \ + - datetime.datetime(run_start[0], run_start[1], run_start[2], + run_start[3], run_start[4]) if int(since_start.total_seconds()) != last_dump_seconds: - sys.stderr.write("[FAIL] Last CICE restart not at correct time") - sys.stderr.write("since_start=" + since_start.total_seconds()) - sys.stderr.write("last_dump_seconds=" + last_dump_seconds) + 
sys.stderr.write('[FAIL] Last CICE restart not at correct time') + sys.stderr.write('since_start='+since_start.total_seconds()) + sys.stderr.write('last_dump_seconds='+last_dump_seconds) sys.exit(error.RESTART_FILE_ERROR) - # block of code to modify the main CICE namelist + #block of code to modify the main CICE namelist mod_cicenl = common.ModNamelist(cice_nl) - mod_cicenl.var_val("days_per_year", caldays) - mod_cicenl.var_val( - "history_file", - "%si.%i%s" % (common_env["RUNID"], cice_histfreq_n, cice_histfreq), - ) - mod_cicenl.var_val("ice_ic", ice_ic) - mod_cicenl.var_val("incond_file", "%si_ic" % common_env["RUNID"]) - mod_cicenl.var_val("istep0", int(cice_istep0)) - mod_cicenl.var_val("npt", int(cice_steps)) - mod_cicenl.var_val("pointer_file", cice_restart) - mod_cicenl.var_val("restart", restart) - mod_cicenl.var_val("restart_file", "%si.restart" % common_env["RUNID"]) - mod_cicenl.var_val("runtype", cice_runtype) - mod_cicenl.var_val("use_leap_years", cice_leap_years) - mod_cicenl.var_val("year_init", int(model_basis[0])) - mod_cicenl.var_val("grid_file", cice_envar["CICE_GRID"]) - mod_cicenl.var_val("kmt_file", cice_envar["CICE_KMT"]) - mod_cicenl.var_val("nprocs", int(cice_envar["CICE_NPROC"])) - mod_cicenl.var_val("atm_data_dir", cice_envar["ATM_DATA_DIR"]) - mod_cicenl.var_val("ocn_data_dir", cice_envar["OCN_DATA_DIR"]) + mod_cicenl.var_val('days_per_year', caldays) + mod_cicenl.var_val('history_file', '%si.%i%s' % + (common_env['RUNID'], + cice_histfreq_n, + cice_histfreq)) + mod_cicenl.var_val('ice_ic', ice_ic) + mod_cicenl.var_val('incond_file', '%si_ic' % common_env['RUNID']) + mod_cicenl.var_val('istep0', int(cice_istep0)) + mod_cicenl.var_val('npt', int(cice_steps)) + mod_cicenl.var_val('pointer_file', cice_restart) + mod_cicenl.var_val('restart', restart) + mod_cicenl.var_val('restart_file', '%si.restart' % + common_env['RUNID']) + mod_cicenl.var_val('runtype', cice_runtype) + mod_cicenl.var_val('use_leap_years', cice_leap_years) + 
mod_cicenl.var_val('year_init', int(model_basis[0])) + mod_cicenl.var_val('grid_file', cice_envar['CICE_GRID']) + mod_cicenl.var_val('kmt_file', cice_envar['CICE_KMT']) + mod_cicenl.var_val('nprocs', int(cice_envar['CICE_NPROC'])) + mod_cicenl.var_val('atm_data_dir', cice_envar['ATM_DATA_DIR']) + mod_cicenl.var_val('ocn_data_dir', cice_envar['OCN_DATA_DIR']) mod_cicenl.replace() + return cice_envar def _set_launcher_command(_): - """ + ''' Setup the launcher command for the executable - """ - sys.stdout.write("[INFO] CICE uses the same launch command as NEMO\n") - launch_cmd = "" + ''' + sys.stdout.write('[INFO] CICE uses the same launch command as NEMO\n') + launch_cmd = '' return launch_cmd - def _finalize_executable(_): - """ + ''' Write the Ice output to stdout - """ - ice_out_file = "ice_diag.d" + ''' + ice_out_file = 'ice_diag.d' if os.path.isfile(ice_out_file): - sys.stdout.write("[INFO] CICE output from file %s\n" % ice_out_file) - with open(ice_out_file, "r") as i_out: + sys.stdout.write('[INFO] CICE output from file %s\n' % ice_out_file) + with open(ice_out_file, 'r') as i_out: for line in i_out: sys.stdout.write(line) else: - sys.stdout.write("[INFO] CICE output file %s not avaliable\n" % ice_out_file) + sys.stdout.write('[INFO] CICE output file %s not avaliable\n' + % ice_out_file) def run_driver(common_env, mode, run_info): - """ + ''' Run the driver, and return an instance of dr_env_lib.env_lib.LoadEnvar and as string containing the launcher command for the CICE model - """ - if mode == "run_driver": + ''' + if mode == 'run_driver': exe_envar = _setup_executable(common_env) launch_cmd = _set_launcher_command(exe_envar) model_snd_list = None - elif mode == "finalize" or "failure": + elif mode == 'finalize' or 'failure': _finalize_executable(common_env) exe_envar = None launch_cmd = None diff --git a/Coupled_Drivers/common.py b/Coupled_Drivers/common.py index 2c4266f..fdcb0be 100644 --- a/Coupled_Drivers/common.py +++ b/Coupled_Drivers/common.py @@ -1,5 
+1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2022-2025 Met Office. All rights reserved. @@ -16,7 +16,8 @@ DESCRIPTION Common functions and classes required by multiple model drivers -""" +''' + import datetime @@ -33,78 +34,75 @@ class ModNamelist(object): - """ + ''' Modify a fortran namelist. This will not add any new variables, only modify existing ones - """ + ''' def __init__(self, filename): - """ + ''' Initialise the container, with the name of file to be updated - """ + ''' self.filename = filename self.replace_vars = {} def var_val(self, variable, value): - """ + ''' Create a container of variable name, value pairs to be updated. Note that if a variable doesn't exist in the namelist file, then it will be ignored - """ + ''' if isinstance(value, str): - if value.lower() not in (".true.", ".false."): - value = "'%s'" % value + if value.lower() not in ('.true.', '.false.'): + value = '\'%s\'' % value self.replace_vars[variable] = value def replace(self): - """ + ''' Do the update - """ - output_file = open_text_file(self.filename + "out", "w") - input_file = open_text_file(self.filename, "r") + ''' + output_file = open_text_file(self.filename+'out', 'w') + input_file = open_text_file(self.filename, 'r') for line in input_file.readlines(): - variable_name = re.findall(r"\s*(\S*)\s*=\s*", line) + variable_name = re.findall(r'\s*(\S*)\s*=\s*', line) if variable_name: variable_name = variable_name[0] if variable_name in list(self.replace_vars.keys()): - output_file.write( - "%s=%s,\n" % (variable_name, self.replace_vars[variable_name]) - ) + output_file.write('%s=%s,\n' % + (variable_name, + self.replace_vars[variable_name])) else: output_file.write(line) input_file.close() output_file.close() os.remove(self.filename) - os.rename(self.filename + "out", self.filename) - + os.rename(self.filename+'out', self.filename) def find_previous_workdir(cyclepoint, workdir, taskname, 
task_param_run=None): - """ + ''' Find the work directory for the previous cycle. Takes as argument the current cyclepoint, the path to the current work directory, and the current taskname, a value specifying multiple tasks within same cycle (e.g. coupled_run1, coupled_run2) as used in coupled NWP and returns an absolute path. - """ + ''' if task_param_run: stem = workdir.rstrip(task_param_run) nchars = len(task_param_run) - prev_param_run = "{:0{}d}".format(int(task_param_run) - 1, nchars) + prev_param_run = '{:0{}d}'.format(int(task_param_run) - 1, nchars) previous_workdir = stem + prev_param_run if not os.path.isdir(previous_workdir): - sys.stderr.write( - "[FAIL] Can not find previous work directory for" - " task %s\n" % taskname - ) + sys.stderr.write('[FAIL] Can not find previous work directory for' + ' task %s\n' % taskname) sys.exit(error.MISSING_DRIVER_FILE_ERROR) return previous_workdir else: cyclesdir = os.sep.join(workdir.split(os.sep)[:-2]) - # find the work directory for the previous cycle + #find the work directory for the previous cycle work_cycles = os.listdir(cyclesdir) work_cycles.sort() try: @@ -120,21 +118,19 @@ def find_previous_workdir(cyclepoint, workdir, taskname, task_param_run=None): break if not previous_task_cycle: - sys.stderr.write( - "[FAIL] Can not find previous work directory for" - " task %s\n" % taskname - ) + sys.stderr.write('[FAIL] Can not find previous work directory for' + ' task %s\n' % taskname) sys.exit(error.MISSING_DRIVER_FILE_ERROR) return os.path.join(cyclesdir, previous_task_cycle, taskname) def get_filepaths(directory): - """ + ''' Equivilant to ls -d Provides an absolute path to every file in directory including subdirectorys - """ + ''' file_paths = [] for root, _, files in os.walk(directory): for filename in files: @@ -144,138 +140,118 @@ def get_filepaths(directory): def open_text_file(name, mode): - """ + ''' Provide a common function to open a file and provide a suitiable error should this not be possible - 
""" - modes = { - "r": "reading", - "w": "writing", - "a": "appending", - "r+": "updating (reading)", - "w+": "updating (writing)", - "a+": "updating (appending)", - } + ''' + modes = {'r':'reading', + 'w':'writing', + 'a':'appending', + 'r+':'updating (reading)', + 'w+':'updating (writing)', + 'a+':'updating (appending)'} if mode not in list(modes.keys()): - options = "" + options = '' for k in modes: - options += " %s: %s\n" % (k, modes[k]) - sys.stderr.write( - "[FAIL] Attempting to open file %s, do not recognise" - " mode %s. Please use one of the following modes:\n%s" - % (name, mode, options) - ) + options += ' %s: %s\n' % (k, modes[k]) + sys.stderr.write('[FAIL] Attempting to open file %s, do not recognise' + ' mode %s. Please use one of the following modes:\n%s' + % (name, mode, options)) sys.exit(error.IOERROR) try: handle = open(name, mode) except IOError: - sys.stderr.write( - "[FAIL] Unable to open file %s using mode %s (%s)\n" - % (name, mode, modes[mode]) - ) + sys.stderr.write('[FAIL] Unable to open file %s using mode %s (%s)\n' + % (name, mode, modes[mode])) sys.exit(error.IOERROR) return handle - def is_non_zero_file(path): - """ + ''' Check to see if a file 'path' exists and has non zero length. Returns True if that is the case. If the file a) doesn't exist, or b) has zero length, returns False - """ + ''' if os.path.isfile(path) and os.path.getsize(path) > 0: return True else: return False - def remove_file(filename): - """ + ''' Check to see if a file or a link exists and if it does, remove it. Return True if a file/link was removed, False otherwise. - """ + ''' if os.path.isfile(filename) or os.path.islink(filename): os.remove(filename) return True else: return False - def setup_runtime(common_env): - """ + ''' Set up model run length in seconds based on the model suite env vars (rather than in the manner of the old UM control scripts by interrogating NEMO namelists!) 
- """ - if not common_env["CALENDAR"]: - sys.stderr.write( - "[WARN] setup_runtime: Environment variable" - " CALENDAR not set. Assuming 360 day calendar.\n" - ) - calendar = "360" + ''' + if not common_env['CALENDAR']: + sys.stderr.write('[WARN] setup_runtime: Environment variable' \ + ' CALENDAR not set. Assuming 360 day calendar.\n') + calendar = '360' else: - calendar = common_env["CALENDAR"] - if calendar == "360day": - calendar = "360" - elif calendar == "365day": - calendar = "365" - elif calendar == "gregorian": + calendar = common_env['CALENDAR'] + if calendar == '360day': + calendar = '360' + elif calendar == '365day': + calendar = '365' + elif calendar == 'gregorian': pass else: - sys.stderr.write( - "[FAIL] setup_runtime: Calendar type %s not" " recognised\n" % calendar - ) + sys.stderr.write('[FAIL] setup_runtime: Calendar type %s not' \ + ' recognised\n' % calendar) sys.exit(error.INVALID_EVAR_ERROR) + # Turn our times into lists of integers - run_start = [int(i) for i in common_env["TASKSTART"].split(",")] - run_length = [int(i) for i in common_env["TASKLENGTH"].split(",")] - - run_days = inc_days.inc_days( - run_start[0], - run_start[1], - run_start[2], - run_length[0], - run_length[1], - run_length[2], - calendar, - ) + run_start = [int(i) for i in common_env['TASKSTART'].split(',')] + run_length = [int(i) for i in common_env['TASKLENGTH'].split(',')] + + run_days = inc_days.inc_days(run_start[0], run_start[1], run_start[2], + run_length[0], run_length[1], run_length[2], + calendar) # Work out the total run length in seconds - runlen_sec = ( - (run_days * 86400) - + (run_length[3] * 3600) - + (run_length[4] * 60) - + run_length[5] - ) + runlen_sec = (run_days * 86400) \ + + (run_length[3]*3600) \ + + (run_length[4]*60) \ + + run_length[5] return runlen_sec def _calculate_ppn_values(nproc, nodes): - """ + ''' Calculates number of processes per node and numa node for launch command options - """ + ''' nproc = int(nproc) nodes = float(nodes) 
numa_nodes = 2 - ppnu = int(math.ceil(nproc / nodes / numa_nodes)) + ppnu = int(math.ceil(nproc/nodes/numa_nodes)) ppn = (ppnu * numa_nodes) if nproc > 1 else nproc return ppnu, ppn def set_aprun_options(nproc, nodes, ompthr, hyperthreads, ss): - """ + ''' Setup the aprun options for the launcher command - """ + ''' ppnu, ppn = _calculate_ppn_values(nproc, nodes) - rose_launcher_preopts = ( - "-n %s -N %s -S %s -d %s -j %s env OMP_NUM_THREADS=%s env HYPERTHREADS=%s" - % (nproc, ppn, ppnu, ompthr, hyperthreads, ompthr, hyperthreads) - ) + rose_launcher_preopts = \ + '-n %s -N %s -S %s -d %s -j %s env OMP_NUM_THREADS=%s env HYPERTHREADS=%s' \ + % (nproc, ppn, ppnu, ompthr, hyperthreads, ompthr, hyperthreads) if ss: rose_launcher_preopts = "-ss " + rose_launcher_preopts @@ -284,21 +260,18 @@ def set_aprun_options(nproc, nodes, ompthr, hyperthreads, ss): def _sort_hist_dirs_by_date(dir_list): - """ + ''' Sort a list of history directories by date - """ + ''' # Pattern that defines the name of the history directories, # which contain a date of the form YYYYmmddHHMM. - pattern = r"\.(\d{12})" + pattern = r'\.(\d{12})' try: - dir_list.sort( - key=lambda dname: datetime.datetime.strptime( - re.search(pattern, dname).group(1), "%Y%m%d%H%M" - ) - ) + dir_list.sort(key=lambda dname: datetime.datetime.strptime( + re.search(pattern, dname).group(1), '%Y%m%d%H%M')) except AttributeError: - msg = "[FAIL] Cannot order directories: %s" % " ".join(dir_list) + msg = '[FAIL] Cannot order directories: %s' % " ".join(dir_list) sys.stderr.write(msg) sys.exit(error.IOERROR) @@ -306,15 +279,16 @@ def _sort_hist_dirs_by_date(dir_list): def remove_latest_hist_dir(old_hist_dir): - """ + ''' If a model task has failed, then removed the last created history directory, before a new one is created, associated with the re-attempt. 
- """ + ''' # Replace the regex pattern that defines the history directory # name (that contains a date of the format YYYYmmddHHMM) with a # generic pattern so that we can perform the directory glob. - history_pattern = re.sub(r"\.\d{12}", ".????????????", old_hist_dir) + history_pattern = re.sub( + r'\.\d{12}', '.????????????', old_hist_dir) # Find and sort the history directories, and delete # the latest one, corresponding to the last entry in @@ -322,15 +296,14 @@ def remove_latest_hist_dir(old_hist_dir): history_dirs = glob.glob(history_pattern) history_dirs = _sort_hist_dirs_by_date(history_dirs) - msg = "[INFO] Found history directories: %s \n" % " ".join(history_dirs) + msg = '[INFO] Found history directories: %s \n' % ' '.join( + history_dirs) sys.stdout.write(msg) latest_hist_dir = history_dirs[-1] - msg = ( - "[WARN] Re-attempting failed model step. \n" - "[WARN] Clearing out latest history \n" - "[WARN] directory %s. \n" % latest_hist_dir - ) + msg = ("[WARN] Re-attempting failed model step. \n" + "[WARN] Clearing out latest history \n" + "[WARN] directory %s. \n" % latest_hist_dir) sys.stdout.write(msg) shutil.rmtree(latest_hist_dir) diff --git a/Coupled_Drivers/cpmip_utils.py b/Coupled_Drivers/cpmip_utils.py index 5ca2779..946be5c 100644 --- a/Coupled_Drivers/cpmip_utils.py +++ b/Coupled_Drivers/cpmip_utils.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -16,7 +16,7 @@ DESCRIPTION Utility functions for the CPMIP controller -""" +''' import glob import os import re @@ -25,220 +25,200 @@ import common import shellout - def get_component_resolution(nlist_file, resolution_variables): - """ + ''' Get the total componenet resolution nx x ny x nz from a given namelist file. The arguments are a namelist file, and a list of the resolution variables within that namelist. 
Returns a single value - """ + ''' resolution = 1 for res_var in resolution_variables: - _, out = shellout._exec_subprocess( - "grep %s %s" % (res_var, nlist_file), verbose=True - ) + _, out = shellout._exec_subprocess('grep %s %s' % (res_var, nlist_file), + verbose=True) try: - i_res = int(re.search(r"(\d+)", out).group(0)) + i_res = int(re.search(r'(\d+)', out).group(0)) resolution *= i_res except AttributeError: - msg = "[WARN] Failed to find resolution %s in file %s.\n" % ( - res_var, - nlist_file, - ) + msg = '[WARN] Failed to find resolution %s in file %s.\n' % \ + (res_var, nlist_file) sys.stdout.write(msg) return resolution - def get_glob_usage(glob_path, timeout=60): - """ + ''' Get the total data from a list of files produced by a glob expression, using the du -c command. This command takes two arguments, a glob expression, and a timeout in seconds. This timeout is required as some filesystems (notably Lustre) can take a long time to respond to metadata queries - """ + ''' size_k = -1.0 filelist = glob.glob(glob_path) if filelist: - du_command = ["du", "-c"] + filelist + du_command = ['du', '-c'] + filelist rcode, output = shellout._exec_subprocess(du_command, timeout) if rcode == 0: size_k = float(output.split()[-2]) else: - sys.stderr.write( - "[WARN] Attepting to find the size of files described" - " by glob expression %s. There are no files found" % glob_path - ) + sys.stderr.write('[WARN] Attepting to find the size of files described' + ' by glob expression %s. There are no files found' + % glob_path) size_k = 0.0 return size_k - def get_datam_output_runonly(common_envar, cpmip_envar, timeout): - """ + ''' Grab the data of interest within the datam directory. We only want the contents of NEMOhist and CICEhist, and the output files labelled with the runid. We must avoid any directories containing lrun/nrun/crun restart tests etc. 
- """ + ''' total_usage = 0.0 failed_components = [] # um files - if "um" in common_envar["models"]: - um_path_gl = os.path.join(common_envar["DATAM"], "%s*" % common_envar["RUNID"]) + if 'um' in common_envar['models']: + um_path_gl = os.path.join(common_envar['DATAM'], + '%s*' % common_envar['RUNID']) um_usage = get_glob_usage(um_path_gl, timeout) if um_usage >= 0.0: total_usage += um_usage else: - failed_components.append("UM") + failed_components.append('UM') # Jnr UM files - if "jnr" in common_envar["models"]: - jnr_path_gl = os.path.join( - common_envar["DATAM"], "%s*" % cpmip_envar["RUNID_JNR"] - ) + if 'jnr' in common_envar['models']: + jnr_path_gl = os.path.join(common_envar['DATAM'], + '%s*' % cpmip_envar['RUNID_JNR']) jnr_usage = get_glob_usage(jnr_path_gl, timeout) if jnr_usage >= 0.0: total_usage += jnr_usage else: - failed_components.append("Jnr") + failed_components.append('Jnr') # nemo files - if "nemo" in common_envar["models"]: - nemo_path_gl = os.path.join(common_envar["DATAM"], "NEMOhist", "*") + if 'nemo' in common_envar['models']: + nemo_path_gl = os.path.join(common_envar['DATAM'], 'NEMOhist', '*') nemo_usage = get_glob_usage(nemo_path_gl, timeout) if nemo_usage >= 0.0: total_usage += nemo_usage else: - failed_components.append("NEMO") + failed_components.append('NEMO') # cice file - if "cice" in common_envar["models"]: - cice_path_gl = os.path.join(common_envar["DATAM"], "CICEhist", "*") + if 'cice' in common_envar['models']: + cice_path_gl = os.path.join(common_envar['DATAM'], 'CICEhist', '*') cice_usage = get_glob_usage(cice_path_gl, timeout) if cice_usage >= 0.0: total_usage += cice_usage else: - failed_components.append("CICE") + failed_components.append('CICE') if failed_components: for failed_component in failed_components: - sys.stderr.write( - "[FAIL] Unable to determine the usage in DATAM" - " for the %s component\n" % failed_component - ) + sys.stderr.write('[FAIL] Unable to determine the usage in DATAM' + ' for the %s component\n' 
% + failed_component) sys.exit(error.MISSING_MODEL_FILE_ERROR) else: return total_usage def get_workdir_netcdf_output(timeout=60): - """ + ''' Gather any netcdf output files written to the work directory - """ - output_files = [ - i_f - for i_f in os.listdir(".") - if i_f.split(".")[-1] == "nc" and not os.path.islink(i_f) - ] + ''' + output_files = [i_f for i_f in os.listdir('.') if \ + i_f.split('.')[-1] == 'nc' and not os.path.islink(i_f)] size_k = -1.0 - du_command = ["du", "-c"] + output_files + du_command = ['du', '-c'] + output_files rcode, output = shellout._exec_subprocess(du_command, timeout) if rcode == 0: size_k = float(output.split()[-2]) return size_k - def tasklength_to_years(tasklength): - """ + ''' Takes in a tasklength variable (string of form Y,M,D,h,m,s) and returns an integer value of the equivalent number of years for a 360 day calendar. - """ - length = [int(i) for i in tasklength.split(",")] - to_years = ( - 1, - 1.0 / 30.0, - 1.0 / 360.0, - 1.0 / (360.0 * 24.0), - 1.0 / (360.0 * 24.0 * 60.0), - 1.0 / (360.0 * 24.0 * 3600.0), - ) - years = sum([x * y for x, y in zip(to_years, length)]) + ''' + length = [int(i) for i in tasklength.split(',')] + to_years = (1, 1./30., 1./360., 1./(360.*24.), + 1./(360.*24.*60.), 1./(360.*24.*3600.)) + years = sum([x*y for x, y in zip(to_years, length)]) return years - def seconds_to_days(time_secs): - """ + ''' Takes in an integer value in units of seconds, and returns a floating point value of that time in days - """ - time_days = time_secs / (24.0 * 3600.0) + ''' + time_days = time_secs / (24.*3600.) return time_days - def get_jobfile_info(jobfile): - """ + ''' Takes in a path to the jobfile and returns a dictionary containing all the directives set by PBS -l. 
This code is specific to the PBS load scheduler present on the Cray systems - """ - job_f = common.open_text_file(jobfile, "r") + ''' + job_f = common.open_text_file(jobfile, 'r') pbs_l_dict = {} for line in job_f.readlines(): # Grab key value pairs of the PBS variables. The pairs are delimited # by colons in the PBS directive. Times are also however defined using # colons (for example on hour is 01:00:00). - if line.strip().startswith("#PBS -l"): - for item in re.findall(r"(\w+)=(\w+(:\d+)*)", line): + if line.strip().startswith('#PBS -l'): + for item in re.findall(r'(\w+)=(\w+(:\d+)*)', line): pbs_l_dict[item[0]] = item[1] job_f.close() return pbs_l_dict def get_select_nodes(jobfile): - """ + ''' Takes in a path to the jobfile and returns a dictionary containing the selected nodes for each component MPMD model - """ - pbs_line = "" + ''' + pbs_line = '' model_nodes = [] - with common.open_text_file(jobfile, "r") as job_handle: + with common.open_text_file(jobfile, 'r') as job_handle: for line in job_handle.readlines(): # Grab the line containing the -l select command - if line[:14] == "#PBS -l select": + if line[:14] == '#PBS -l select': pbs_line = line break - # break up the line - # First model - first_model_nodes = re.match(r"#PBS -l select=(\d+)", pbs_line).group(1) + #break up the line + #First model + first_model_nodes = re.match(r'#PBS -l select=(\d+)', pbs_line).group(1) model_nodes.append(int(first_model_nodes)) # Any additional models - split_pbs_line = pbs_line.split("+")[1:] + split_pbs_line = pbs_line.split('+')[1:] for i_model in split_pbs_line: - i_model_node = re.match(r"(\d+):", i_model).group(1) + i_model_node = re.match(r'(\d+):', i_model).group(1) model_nodes.append(int(i_model_node)) # Check for a coretype try: - coretype = re.match(r".+coretype=([a-z]+)", line).group(1) + coretype = re.match(r'.+coretype=([a-z]+)', line).group(1) except AttributeError: # As chip not specified assume milan chip - coretype = "milan" + coretype = 'milan' return 
model_nodes, coretype def increment_dump(datestr, resub, resub_units): - """ + ''' Increment the dump date to end of cycle, so it can be found to calculate complexity - """ + ''' year = int(datestr[:4]) month = int(datestr[4:6]) day = int(datestr[6:8]) resub = int(resub) - if "m" in resub_units.lower(): + if 'm' in resub_units.lower(): resub *= 30 if resub >= 360: i_years = resub // 360 @@ -261,4 +241,4 @@ def increment_dump(datestr, resub, resub_units): output_month -= 12 i_years += 12 output_year = year + i_years - return "%04d%02d%02d" % (output_year, output_month, output_day) + return '%04d%02d%02d' % (output_year, output_month, output_day) diff --git a/Coupled_Drivers/cpmip_xios.py b/Coupled_Drivers/cpmip_xios.py index 171a45c..848ff2a 100644 --- a/Coupled_Drivers/cpmip_xios.py +++ b/Coupled_Drivers/cpmip_xios.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. @@ -16,28 +16,26 @@ DESCRIPTION CPMIP functions for XIOS -""" +''' import os import shutil import sys import common import shellout - def data_metrics_setup_nemo(): - """ + ''' Set up IODEF file to produce XIOS timing files - """ - with open("iodef.xml", "r") as f_in, open("iodef_out.xml", "w") as f_out: + ''' + with open('iodef.xml', 'r') as f_in, \ + open('iodef_out.xml', 'w') as f_out: update = False for line in f_in.readlines(): if 'variable id="print_file"' in line: continue if update: - updated_line = ( - '\t true\n' - ) + updated_line = '\t true\n' f_out.write(updated_line) f_out.write(line) update = False @@ -45,35 +43,34 @@ def data_metrics_setup_nemo(): f_out.write(line) if 'variable id="using_server"' in line: update = True - shutil.move("iodef_out.xml", "iodef.xml") - + shutil.move('iodef_out.xml', 'iodef.xml') def measure_xios_client_times(timeout=120): - """ + ''' Gather the output from XIOS client files. 
Takes in an optional value of timeout in seconds, as there may be a lot of files and we don't want to hang around forever if there is a problem opening them all. Returns the mean time and high watermark time - """ + ''' total_measured = 0 - total_time = 0.0 - max_time = 0.0 - files = [i_f for i_f in os.listdir(".") if "xios_client" in i_f and "out" in i_f] + total_time = 0. + max_time = 0. + files = [i_f for i_f in os.listdir('.') if \ + 'xios_client' in i_f and 'out' in i_f] total_files = len(files) for i_f in files: - rcode, out = shellout._exec_subprocess('grep "total time" %s' % i_f, timeout) + rcode, out = shellout._exec_subprocess( + 'grep "total time" %s' % i_f, timeout) if rcode == 0: meas_time = float(out.split()[-2]) total_measured += 1 total_time += meas_time if meas_time > max_time: max_time = meas_time - sys.stdout.write( - "[INFO] Measured timings for (%s/%s) XIOS clients\n" - % (total_measured, total_files) - ) + sys.stdout.write('[INFO] Measured timings for (%s/%s) XIOS clients\n' % + (total_measured, total_files)) if total_measured == 0: - sys.stderr.write("[WARN] Unable to find any XIOS client output files\n") + sys.stderr.write('[WARN] Unable to find any XIOS client output files\n') mean_time = 0.0 max_time = 0.0 else: diff --git a/Coupled_Drivers/mct_driver.py b/Coupled_Drivers/mct_driver.py index f80b809..b25dc66 100644 --- a/Coupled_Drivers/mct_driver.py +++ b/Coupled_Drivers/mct_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. 
Use, duplication or disclosure of this code is subject to the restrictions @@ -15,7 +15,8 @@ DESCRIPTION Driver for OASIS3-MCT -""" +''' + import os @@ -28,160 +29,153 @@ import dr_env_lib.mct_def import dr_env_lib.env_lib import cpmip_controller - try: import f90nml except ImportError: pass - def _multiglob(*args): - """ + ''' Takes in a list of globbable strings, and returns a single list of filenames matching those strings - """ + ''' filenames = [] for arg in args: filenames += glob.glob(arg) return filenames - def _setup_river_cpld(common_envar, mct_envar, river_envar): - """ + ''' Setup JULES rivers for coupled configurations - """ - river_debug_files = glob.glob("*%s*.nc" % river_envar["RIVER_LINK"]) + ''' + river_debug_files = glob.glob('*%s*.nc' % river_envar['RIVER_LINK']) for river_debug_file in river_debug_files: common.remove_file(river_debug_file) - def _setup_nemo_cpld(common_envar, mct_envar, nemo_envar): - """ + ''' Setup NEMO for coupled configurations - """ - nemo_debug_files = glob.glob("*%s*.nc" % nemo_envar["OCEAN_LINK"]) + ''' + nemo_debug_files = glob.glob('*%s*.nc' % nemo_envar['OCEAN_LINK']) for nemo_debug_file in nemo_debug_files: common.remove_file(nemo_debug_file) def _setup_lfric_cpld(common_envar, mct_envar, lfric_envar): - """ + ''' Setup LFRIC for coupled configurations - """ + ''' # Remove potential LFRIC debug netcdf files. If this isn't done MCT will # just append details to existing files - lfric_debug_files = glob.glob("*%s*.nc" % lfric_envar["LFRIC_LINK"]) + lfric_debug_files = glob.glob('*%s*.nc' % lfric_envar['LFRIC_LINK']) for lfric_debug_file in lfric_debug_files: common.remove_file(lfric_debug_file) def _setup_um_cpld(common_env, mct_envar, um_envar): - """ + ''' Setup UM for coupled configurations - """ + ''' # Remove potential UM debug netcdf files. 
If this isn't done MCT will # just append details to existing files - um_debug_files = glob.glob("*%s*.nc" % um_envar["ATMOS_LINK"]) + um_debug_files = glob.glob('*%s*.nc' % um_envar['ATMOS_LINK']) for um_debug_file in um_debug_files: common.remove_file(um_debug_file) def _setup_jnr_cpld(common_env, mct_envar, jnr_envar): - """ + ''' Setup Jnr UM for coupled configurations. This function is only used when creating the namcouple at run time. - """ + ''' # Remove potential UM debug netcdf files. If this isn't done MCT will # just append details to existing files - um_debug_files = glob.glob("*%s*.nc" % jnr_envar["ATMOS_LINK_JNR"]) + um_debug_files = glob.glob('*%s*.nc' % jnr_envar['ATMOS_LINK_JNR']) for um_debug_file in um_debug_files: common.remove_file(um_debug_file) def _generate_ngms_namcouple(): - """ + ''' Generate the namcouple files for ngms coupled models. This function should only be called if we request it - """ + ''' # This requires access to the MOCI namcouple generation library, test to # see if we can access this try: import generate_nam except ModuleNotFoundError: - sys.stderr.write( - "This run requires access to the MOCI namcouple" - " generation library\n. Please ensure this is" - " available\n" - ) + sys.stderr.write('This run requires access to the MOCI namcouple' + ' generation library\n. Please ensure this is' + ' available\n') sys.exit(error.IMPORT_ERROR) # First remove any existing namcouple files. - files_to_tidy = _multiglob("namcouple*") + files_to_tidy = _multiglob('namcouple*') for f_to_tidy in files_to_tidy: # some driver files may have namcouple in the filename - if f_to_tidy.split(".")[-1] != "py": + if f_to_tidy.split('.')[-1] != 'py': common.remove_file(f_to_tidy) # Set up input and output file names for namcouple # creation and select namelist reading mode for input file. 
- file_in = "cpl_configuration.nml" - file_out = "namcouple" - file_mode = "namelist" + file_in = 'cpl_configuration.nml' + file_out = 'namcouple' + file_mode = 'namelist' generate_nam.generate_nam(file_in, file_out, file_mode) def _setup_rmp_dir(mct_envar, run_info): - """ + ''' Set up link to the remapping weights files. This function is only used when creating the namcouple at run time. - """ + ''' # It's only when the namcouple file doesn't exist that we're # anticipating needing more than one remapping directory. - if run_info["l_namcouple"]: + if run_info['l_namcouple']: # Organise the remapping files - remap_files = glob.glob("%s/rmp_*" % mct_envar["RMP_DIR"]) + remap_files = glob.glob('%s/rmp_*' % mct_envar['RMP_DIR']) for remap_file in remap_files: linkname = os.path.split(remap_file)[-1] os.symlink(remap_file, linkname) else: # Need to be precise about order of components - comp_names = {"um": "ATM", "jnr": "JNR", "nemo": "OCN"} - comp_order = ["um", "jnr", "nemo"] + comp_names = {'um':'ATM', 'jnr':'JNR', 'nemo':'OCN'} + comp_order = ['um', 'jnr', 'nemo'] comp_list = [] for component in comp_order: - if component in mct_envar["COUPLING_COMPONENTS"].split(): + if component in mct_envar['COUPLING_COMPONENTS'].split(): comp_list.append(comp_names[component]) # Links to areas.nc, grids.nc and masks.nc core_dir_str = None for comp in comp_order: - grid = comp_names[comp] + "_grid" + grid = comp_names[comp] + '_grid' if grid in run_info: grid_name = run_info[grid] else: grid_name = "*" if core_dir_str: - core_dir_str = "%s_%s" % (core_dir_str, grid_name) + core_dir_str = ('%s_%s' % (core_dir_str, grid_name)) else: core_dir_str = grid_name - core_dir_str = mct_envar["RMP_DIR"] + "/" + core_dir_str + core_dir_str = mct_envar['RMP_DIR'] + '/' + core_dir_str # Find the core remapping directory and link the core # remapping files core_dirs = glob.glob(core_dir_str) if len(core_dirs) < 1: - sys.stderr.write( - "[FAIL] failed to find core remapping " "directory %s\n" 
% core_dir_str - ) + sys.stderr.write('[FAIL] failed to find core remapping ' + 'directory %s\n' % core_dir_str) sys.exit(error.MISSING_CORE_RMP_DIR) - for core_file in ["areas.nc", "grids.nc", "masks.nc"]: - core_file2 = core_dirs[0] + "/" + core_file + for core_file in ['areas.nc', 'grids.nc', 'masks.nc']: + core_file2 = core_dirs[0] + '/' + core_file if os.path.isfile(core_file2): # Remove link if it already exists common.remove_file(core_file) # Create symbolic link os.symlink(core_file2, core_file) else: - sys.stderr.write("[FAIL] failed to find %s" % core_file2) + sys.stderr.write('[FAIL] failed to find %s' % core_file2) sys.exit(error.MISSING_CORE_RMP_FILE) # Links to the remapping weight files @@ -191,44 +185,38 @@ def _setup_rmp_dir(mct_envar, run_info): break # Create the links for remapping file between these # components - grid1 = comp1 + "_grid" - grid2 = comp2 + "_grid" + grid1 = comp1 + '_grid' + grid2 = comp2 + '_grid' if not grid1 in run_info or not grid2 in run_info: - sys.stderr.write( - "[FAIL] either %s or %s is missing " - "from run_info.\n" % (grid1, grid2) - ) + sys.stderr.write('[FAIL] either %s or %s is missing ' + 'from run_info.\n' % (grid1, grid2)) sys.exit(error.MISSING_GRID_IN_RUN_INFO) - rmp_dir = ( - mct_envar["RMP_DIR"] + "/" + run_info[grid2] + "_" + run_info[grid1] - ) + rmp_dir = mct_envar['RMP_DIR'] + '/' + run_info[grid2] +\ + '_' + run_info[grid1] # Check that directory exists if not os.path.isdir(rmp_dir): - sys.stderr.write( - "[FAIL] failed to find remapping " "directory %s\n." % rmp_dir - ) + sys.stderr.write('[FAIL] failed to find remapping ' + 'directory %s\n.' 
% rmp_dir) sys.exit(error.MISSING_RMP_DIR) # Create the links - remap_files = glob.glob("%s/rmp_*" % rmp_dir) + remap_files = glob.glob('%s/rmp_*' % rmp_dir) for remap_file in remap_files: linkname = os.path.split(remap_file)[-1] os.symlink(remap_file, linkname) def _setup_executable(common_env, envarinsts, run_info): - """ + ''' Setup the environment and any files required by the executable - """ + ''' # Load the environment variables required mct_envar = dr_env_lib.env_lib.LoadEnvar() mct_envar = dr_env_lib.env_lib.load_envar_from_definition( - mct_envar, dr_env_lib.mct_def.MCT_ENVIRONMENT_VARS_INITIAL - ) + mct_envar, dr_env_lib.mct_def.MCT_ENVIRONMENT_VARS_INITIAL) # Tidyup our OASIS files before the setup process is started - files_to_tidy = _multiglob( - "nout.*", "debug.*root.*", "debug.??.*", "debug.???.*", "*fort*", "rmp_*" - ) + files_to_tidy = _multiglob('nout.*', 'debug.*root.*', 'debug.??.*', + 'debug.???.*', '*fort*', 'rmp_*') for f_to_tidy in files_to_tidy: common.remove_file(f_to_tidy) @@ -236,194 +224,177 @@ def _setup_executable(common_env, envarinsts, run_info): _setup_rmp_dir(mct_envar, run_info) # Are we using automatic namcouple generation for NG-Coupling? - if mct_envar["NAMCOUPLE_STATIC"] == ".false.": + if mct_envar['NAMCOUPLE_STATIC'] == '.false.': _generate_ngms_namcouple() # Are we expecting a namcouple file - if run_info["l_namcouple"]: + if run_info['l_namcouple']: # Does the namcouple file exist - if not os.path.exists("namcouple"): - sys.stderr.write( - "[FAIL] Could not find a namcouple file in the" - " working directory. This file should originate" - " in the Rose app's file directory\n" - ) + if not os.path.exists('namcouple'): + sys.stderr.write('[FAIL] Could not find a namcouple file in the' + ' working directory. 
This file should originate' + ' in the Rose app\'s file directory\n') sys.exit(error.MISSING_MODEL_FILE_ERROR) # Create transient field namelist (note if we're creating a # namcouple on the fly, this will have to wait until after # the namcouple have been created). - _, _ = shellout._exec_subprocess("./OASIS_fields") - - for component in mct_envar["COUPLING_COMPONENTS"].split(): - if not component in common_env["models"]: - sys.stderr.write( - "[FAIL] Attempting to couple component %s," - " however this component is not being run in" - " this configuration\n" % component - ) + _, _ = shellout._exec_subprocess('./OASIS_fields') + + for component in mct_envar['COUPLING_COMPONENTS'].split(): + if not component in common_env['models']: + sys.stderr.write('[FAIL] Attempting to couple component %s,' + ' however this component is not being run in' + ' this configuration\n' % component) sys.exit(999) if not component in list(SUPPORTED_MODELS.keys()): - sys.stderr.write( - "[FAIL] The component %s is not supported by the" - " mct driver\n" % component - ) + sys.stderr.write('[FAIL] The component %s is not supported by the' + ' mct driver\n' % component) sys.exit(999) # Setup coupling for individual component - sys.stdout.write("[INFO] MCT driver setting up %s component\n" % component) - SUPPORTED_MODELS[component](common_env, mct_envar, envarinsts[component]) + sys.stdout.write('[INFO] MCT driver setting up %s component\n' % + component) + SUPPORTED_MODELS[component](common_env, mct_envar, + envarinsts[component]) # Update the general, non-component specific namcouple details - if run_info["l_namcouple"]: - update_namcouple.update("mct", common_env) + if run_info['l_namcouple']: + update_namcouple.update('mct', common_env) # Run the CPMIP controller if appropriate # Check for the presence of t (as in TRUE, True, or true) in the # CPMIP_ANALYSIS value - if mct_envar["CPMIP_ANALYSIS"].lower().startswith("t"): + if mct_envar['CPMIP_ANALYSIS'].lower().startswith('t'): 
controller_mode = "run_controller" - sys.stdout.write("[INFO] mct_driver: CPMIP analyis will be performed\n") + sys.stdout.write('[INFO] mct_driver: CPMIP analyis will be performed\n') cpmip_controller.run_controller(controller_mode, common_env) return mct_envar def _set_launcher_command(_): - """ + ''' Setup the launcher command for the executable. MCT does not require a call to the launcher as it runs as a library - """ - launch_cmd = "" + ''' + launch_cmd = '' return launch_cmd def _sent_coupling_fields(mct_envar, run_info): - """ + ''' Read the SHARED file to get the coupling frequencies. This function is only used when creating the namcouple at run time. - """ + ''' # Dictionary for the component names - component_names = {"um": "ATM", "nemo": "OCN", "jnr": "JNR"} + component_names = {'um':'ATM', 'nemo':'OCN', 'jnr':'JNR'} # Dictionary for the coupling frequencies # (Note that for now, we're assuming that coupling frequencies # for JNR<->OCN are the same as ATM<->OCN) - couple_freqs = { - "ATM2OCN_freq": ["oasis_couple_freq_ao"], - "OCN2ATM_freq": ["oasis_couple_freq_oa"], - "ATM2JNR_freq": ["oasis_couple_freq_aj", "oasis_couple_freq_aj_stats"], - "JNR2ATM_freq": ["oasis_couple_freq_ja", "oasis_couple_freq_ja_stats"], - "JNR2OCN_freq": ["oasis_couple_freq_ao"], - "OCN2JNR_freq": ["oasis_couple_freq_oa"], - } + couple_freqs = {'ATM2OCN_freq': ['oasis_couple_freq_ao'], + 'OCN2ATM_freq': ['oasis_couple_freq_oa'], + 'ATM2JNR_freq': ['oasis_couple_freq_aj', + 'oasis_couple_freq_aj_stats'], + 'JNR2ATM_freq': ['oasis_couple_freq_ja', + 'oasis_couple_freq_ja_stats'], + 'JNR2OCN_freq': ['oasis_couple_freq_ao'], + 'OCN2JNR_freq': ['oasis_couple_freq_oa']} # Check that SHARED exists - if not os.path.isfile(run_info["SHARED_FILE"]): - sys.stderr.write("[FAIL] not found SHARED file.\n") + if not os.path.isfile(run_info['SHARED_FILE']): + sys.stderr.write('[FAIL] not found SHARED file.\n') sys.exit(error.NOT_FOUND_SHARED) # Read the namelist file SHARED - shared_nml = 
f90nml.read(run_info["SHARED_FILE"]) - for component1 in mct_envar["COUPLING_COMPONENTS"].split(): - for component2 in mct_envar["COUPLING_COMPONENTS"].split(): + shared_nml = f90nml.read(run_info['SHARED_FILE']) + for component1 in mct_envar['COUPLING_COMPONENTS'].split(): + for component2 in mct_envar['COUPLING_COMPONENTS'].split(): if component2 != component1: # Check component names exist - if ( - not component1 in component_names - or not component2 in component_names - ): - sys.stderr.write( - "[FAIL] %s or %s is unrecognised as " - "a component name\n" % (component1, component2) - ) + if not component1 in component_names or \ + not component2 in component_names: + sys.stderr.write('[FAIL] %s or %s is unrecognised as ' + 'a component name\n' % (component1, + component2)) sys.exit(error.UNRECOGNISED_COMP) # Determine the variable which stores the coupling frequency - cpl_var = ( - component_names[component1] - + "2" - + component_names[component2] - + "_freq" - ) + cpl_var = component_names[component1] + '2' + \ + component_names[component2] + '_freq' # Check the coupling frequency/ies exist if not cpl_var in couple_freqs: - sys.stderr.write("[FAIL] %s is not recognised\n" % cpl_var) + sys.stderr.write('[FAIL] %s is not recognised\n' % + cpl_var) sys.exit(error.UNRECOGNISED_CPL_VAR) nml_cpl_vars = couple_freqs[cpl_var] - if "coupling_control" not in shared_nml: - sys.stderr.write( - "[FAIL] failed to find coupling_control " - "in SHARED namelist.\n" - ) + if 'coupling_control' not in shared_nml: + sys.stderr.write('[FAIL] failed to find coupling_control ' + 'in SHARED namelist.\n') sys.exit(error.MISSING_CPL_CONTROL) # Loop across the coupling variables for nml_cpl_entry in nml_cpl_vars: - if not nml_cpl_entry in shared_nml["coupling_control"]: - sys.stderr.write( - "[FAIL] failed to find %s in " - "namelist coupling_control\n" % nml_cpl_entry - ) + if not nml_cpl_entry in shared_nml['coupling_control']: + sys.stderr.write('[FAIL] failed to find %s in ' + 
'namelist coupling_control\n' % + nml_cpl_entry) sys.exit(error.MISSING_CPL_FREQ) # Store coupling frequency if not cpl_var in run_info: run_info[cpl_var] = [] - cpl_freq = ( - 3600 * shared_nml["coupling_control"][nml_cpl_entry][0] - + 60 * shared_nml["coupling_control"][nml_cpl_entry][1] - ) + cpl_freq = 3600 * \ + shared_nml['coupling_control'][nml_cpl_entry][0] + \ + 60 * shared_nml['coupling_control'][nml_cpl_entry][1] run_info[cpl_var].append(cpl_freq) return run_info def _finalize_executable(common_env): - """ + ''' Perform any tasks required after completion of model run - """ + ''' # Load the environment variables required mct_envar = dr_env_lib.env_lib.LoadEnvar() mct_envar = dr_env_lib.env_lib.load_envar_from_definition( - mct_envar, dr_env_lib.mct_def.MCT_ENVIRONMENT_VARS_FINAL - ) + mct_envar, dr_env_lib.mct_def.MCT_ENVIRONMENT_VARS_FINAL) # run the cpmip controller if appropriate # check for the presence of t (as in TRUE, True, or true) in the # CPMIP_ANALYSIS value - if mct_envar["CPMIP_ANALYSIS"].lower().startswith("t"): + if mct_envar['CPMIP_ANALYSIS'].lower().startswith('t'): controller_mode = "finalize" - sys.stdout.write("[INFO] mct_driver: CPMIP analyis is being performed\n") + sys.stdout.write( + '[INFO] mct_driver: CPMIP analyis is being performed\n') cpmip_controller.run_controller(controller_mode, common_env) def run_driver(envar_insts, mode, run_info): - """ + ''' Run the driver, and return an instance of LoadEnvar and as string containing the launcher command for the MCT component - """ - common_env = envar_insts["common"] - if mode == "run_driver": + ''' + common_env = envar_insts['common'] + if mode == 'run_driver': exe_envar = _setup_executable(common_env, envar_insts, run_info) launch_cmd = _set_launcher_command(exe_envar) model_snd_list = None - if not run_info["l_namcouple"]: + if not run_info['l_namcouple']: run_info = _sent_coupling_fields(exe_envar, run_info) - elif mode == "finalize": + elif mode == 'finalize': 
_finalize_executable(common_env) exe_envar = None launch_cmd = None model_snd_list = None return exe_envar, launch_cmd, run_info, model_snd_list - # Dictionary containing the supported models and their assosicated setup # function within the driver -SUPPORTED_MODELS = { - "rivers": _setup_river_cpld, - "nemo": _setup_nemo_cpld, - "um": _setup_um_cpld, - "jnr": _setup_jnr_cpld, - "lfric": _setup_lfric_cpld, -} +SUPPORTED_MODELS = {'rivers': _setup_river_cpld, + 'nemo': _setup_nemo_cpld, + 'um': _setup_um_cpld, + 'jnr': _setup_jnr_cpld, + 'lfric': _setup_lfric_cpld} diff --git a/Coupled_Drivers/nemo_driver.py b/Coupled_Drivers/nemo_driver.py index c880cbf..0a6cf37 100644 --- a/Coupled_Drivers/nemo_driver.py +++ b/Coupled_Drivers/nemo_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -17,7 +17,7 @@ DESCRIPTION Driver for the NEMO 3.6 model, called from link_drivers. Note that this does not cater for any earlier versions of NEMO -""" +''' import re import os import time @@ -33,14 +33,12 @@ try: import cf_units except ImportError: - IMPORT_ERROR_MSG = ( - "Unable to import cf_units. Ensure the scitools module" "has been loaded first." - ) + IMPORT_ERROR_MSG = ('Unable to import cf_units. 
Ensure the scitools module' + 'has been loaded first.') sys.exit(IMPORT_ERROR_MSG) import dr_env_lib.nemo_def import dr_env_lib.env_lib - try: import f90nml except ImportError: @@ -57,101 +55,95 @@ SERIAL_MODE_ERROR = 99 # Ocean resolutions -OCEAN_RESOLS = { - "orca2": [182, 149], - "orca1": [362, 332], - "orca025": [1442, 1021], - "orca12": [4322, 3059], - "orca36": [12960, 10850], -} - +OCEAN_RESOLS = {'orca2': [182, 149], + 'orca1': [362, 332], + 'orca025': [1442, 1021], + 'orca12': [4322, 3059], + 'orca36': [12960, 10850]} def _check_nemonl(envar_container): - """ + ''' As the environment variable NEMO_NL is required by both the setup and finalise functions, this will be encapsulated here - """ + ''' # Information will be retrieved from this file during the running of the # driver, so check it exists - if not os.path.isfile(envar_container["NEMO_NL"]): - sys.stderr.write( - "[FAIL] Can not find the nemo namelist file %s\n" - % envar_container["NEMO_NL"] - ) + if not os.path.isfile(envar_container['NEMO_NL']): + sys.stderr.write('[FAIL] Can not find the nemo namelist file %s\n' % + envar_container['NEMO_NL']) sys.exit(error.MISSING_DRIVER_FILE_ERROR) else: return 0 - def _get_nemorst(nemo_nl_file): - """ + ''' Retrieve the nemo restart directory from the nemo namelist file - """ + ''' ocerst_rcode, ocerst_val = shellout._exec_subprocess( - "grep cn_ocerst_outdir %s" % nemo_nl_file - ) + 'grep cn_ocerst_outdir %s' % nemo_nl_file) if ocerst_rcode == 0: - nemo_rst = re.findall(r"[\"\'](.*?)[\"\']", ocerst_val)[0] - if nemo_rst[-1] == "/": + nemo_rst = re.findall(r'[\"\'](.*?)[\"\']', ocerst_val)[0] + if nemo_rst[-1] == '/': nemo_rst = nemo_rst[:-1] return nemo_rst return None - def _get_ln_icebergs(nemo_nl_file): - """ + ''' Interrogate the nemo namelist to see if we are running with icebergs, Returns boolean, True if icebergs are used, False if not - """ - icb_rcode, icb_val = shellout._exec_subprocess("grep ln_icebergs %s" % nemo_nl_file) + ''' + icb_rcode, 
icb_val = shellout._exec_subprocess( + 'grep ln_icebergs %s' % nemo_nl_file) if icb_rcode != 0: - sys.stderr.write( - "Unable to read ln_icebergs in &namberg namelist" - " in the NEMO namelist file %s\n" % nemo_nl_file - ) + sys.stderr.write('Unable to read ln_icebergs in &namberg namelist' + ' in the NEMO namelist file %s\n' + % nemo_nl_file) sys.exit(error.SUBPROC_ERROR) else: - if "true" in icb_val.lower(): + if 'true' in icb_val.lower(): return True return False def _verify_nemo_rst(cyclepointstr, nemo_rst, nemo_nl, nemo_nproc, nemo_version): - """ + ''' Verify that the full set of nemo restart files match. Currently this is limited to the icebergs restart file. We require either a single restart file, or a number of restart files equal to the number of nemo processors. - """ - restart_files = [f for f in os.listdir(nemo_rst) if "restart" in f] + ''' + restart_files = [f for f in os.listdir(nemo_rst) if + 'restart' in f] + if _get_ln_icebergs(nemo_nl): if nemo_version < 402: - # Pre nemo 4.2 compatibility - nemo_icb_regex = r"_icebergs_%s_restart(_\d+)?\.nc" % cyclepointstr + # Pre nemo 4.2 compatibility + nemo_icb_regex = r'_icebergs_%s_restart(_\d+)?\.nc' % cyclepointstr else: - # Post nemo 4.2 compatibility - nemo_icb_regex = r"_%s_restart_icb(_\d+)?\.nc" % cyclepointstr + # Post nemo 4.2 compatibility + nemo_icb_regex = r'_%s_restart_icb(_\d+)?\.nc' % cyclepointstr - icb_restart_files = [f for f in restart_files if re.findall(nemo_icb_regex, f)] + icb_restart_files = [f for f in restart_files if + re.findall(nemo_icb_regex, f)] # we can have a single rebuilt file, number of files equal to # number of nemo processors, or rebuilt file and processor files. - if len(icb_restart_files) not in (1, nemo_nproc, nemo_nproc + 1): - sys.stderr.write( - "[FAIL] Unable to find iceberg restart files for" - " this cycle. Must either have one rebuilt file," - " as many as there are nemo processors (%i) or" - " both rebuilt and processor files." 
- "[FAIL] Found %i iceberg restart files\n" - % (nemo_nproc, len(icb_restart_files)) - ) + if len(icb_restart_files) not in (1, nemo_nproc, nemo_nproc+1): + sys.stderr.write('[FAIL] Unable to find iceberg restart files for' + ' this cycle. Must either have one rebuilt file,' + ' as many as there are nemo processors (%i) or' + ' both rebuilt and processor files.' + '[FAIL] Found %i iceberg restart files\n' + % (nemo_nproc, len(icb_restart_files))) sys.exit(error.MISSING_MODEL_FILE_ERROR) -def _calc_current_model_date(model_basis_time, time_step, num_steps, calendar): - """ +def _calc_current_model_date(model_basis_time, time_step, num_steps, + calendar): + ''' Calculate the current model date using the basis time, and the number of time-steps covered in a given model run. @@ -161,8 +153,8 @@ def _calc_current_model_date(model_basis_time, time_step, num_steps, calendar): model run :arg: string calendar : Calendar used in the model run - """ - ref_date_format = "seconds since %Y-%m-%d %H:%M:%S" + ''' + ref_date_format = 'seconds since %Y-%m-%d %H:%M:%S' # modify the calendar names for compatability with cf_units module if calendar == "360day": @@ -174,20 +166,17 @@ def _calc_current_model_date(model_basis_time, time_step, num_steps, calendar): ref_time = model_basis_time.strftime(ref_date_format) model_progress_secs = cf_units.date2num( - model_basis_time, ref_time, calendar=calendar - ) + (time_step * num_steps) + model_basis_time, ref_time, calendar=calendar) + (time_step * num_steps) current_model_date = cf_units.num2date( - model_progress_secs, ref_time, calendar=calendar - ) + model_progress_secs, ref_time, calendar=calendar) return current_model_date -def _verify_fix_rst( - restartdate, nemo_rst, model_basis_time, time_step, num_steps, calendar -): - """ +def _verify_fix_rst(restartdate, nemo_rst, model_basis_time, time_step, + num_steps, calendar): + ''' Verify that the restart file for nemo corresponds to the model time reached within a given model run. 
If they don't match, then make sure that nemo restarts from the correct restart date @@ -199,44 +188,41 @@ def _verify_fix_rst( :arg: int num_steps : Num. of time-steps covered :arg: string calendar : Calendar used in model - """ + ''' # Calculate the model restart time based on the start date of the # last calculated model step, the time-step and the number of # steps. Then convert the date format. - model_basis_datetime = datetime.datetime.strptime(model_basis_time, "%Y%m%d") + model_basis_datetime = datetime.datetime.strptime( + model_basis_time, "%Y%m%d") model_restart_date = _calc_current_model_date( - model_basis_datetime, time_step, num_steps, calendar - ) + model_basis_datetime, time_step, num_steps, calendar) - model_restart_date = model_restart_date.strftime("%Y%m%d") + model_restart_date = model_restart_date.strftime('%Y%m%d') if restartdate == model_restart_date: - sys.stdout.write("[INFO] Validated NEMO restart date\n") + sys.stdout.write('[INFO] Validated NEMO restart date\n') else: # Write the message to both standard out and standard error - msg = ( - "[WARN] The NEMO restart data does not match the " - " current model time\n." - " Current model date is %s\n" - " NEMO restart time is %s\n" - "[WARN] Automatically removing NEMO dumps ahead of " - "the current model date, and pick up the dump at " - "this time\n" % (model_restart_date, restartdate) - ) + msg = '[WARN] The NEMO restart data does not match the ' \ + ' current model time\n.' 
\ + ' Current model date is %s\n' \ + ' NEMO restart time is %s\n' \ + '[WARN] Automatically removing NEMO dumps ahead of ' \ + 'the current model date, and pick up the dump at ' \ + 'this time\n' % (model_restart_date, restartdate) sys.stdout.write(msg) sys.stderr.write(msg) - # Remove all nemo restart files that are later than the correct - # cycle times - # Make our generic restart regular expression, to cover normal NEMO - # restart, and potential iceberg, SI3 or passive tracer restart files, - # for both the rebuilt and non rebuilt cases - generic_rst_regex = r"(icebergs)?.*restart(_trc)?(_ice)?(_icb)?(_\d+)?\.nc" - all_restart_files = [ - f for f in os.listdir(nemo_rst) if re.findall(generic_rst_regex, f) - ] + #Remove all nemo restart files that are later than the correct + #cycle times + #Make our generic restart regular expression, to cover normal NEMO + #restart, and potential iceberg, SI3 or passive tracer restart files, + #for both the rebuilt and non rebuilt cases + generic_rst_regex = r'(icebergs)?.*restart(_trc)?(_ice)?(_icb)?(_\d+)?\.nc' + all_restart_files = [f for f in os.listdir(nemo_rst) if + re.findall(generic_rst_regex, f)] for restart_file in all_restart_files: - fname_date = re.findall(r"\d{8}", restart_file)[0] + fname_date = re.findall(r'\d{8}', restart_file)[0] if fname_date > model_restart_date: common.remove_file(os.path.join(nemo_rst, restart_file)) restartdate = model_restart_date @@ -244,90 +230,85 @@ def _verify_fix_rst( def _load_environment_variables(nemo_envar): - """ + ''' Load the NEMO environment variables required for the model run into the nemo_envar container - """ + ''' # Load the nemo namelist environment variable nemo_envar = dr_env_lib.env_lib.load_envar_from_definition( - nemo_envar, dr_env_lib.nemo_def.NEMO_ENVIRONMENT_VARS_INITIAL - ) + nemo_envar, dr_env_lib.nemo_def.NEMO_ENVIRONMENT_VARS_INITIAL) _ = _check_nemonl(nemo_envar) return nemo_envar def _setup_dates(common_env): - """ + ''' Setup the dates for the NEMO 
model run - """ - calendar = common_env["CALENDAR"] + ''' + calendar = common_env['CALENDAR'] - sys.stdout.write("[INFO] NEMO calendar= %s " % calendar) - if calendar == "360day": - calendar = "360" + sys.stdout.write('[INFO] NEMO calendar= %s ' % calendar) + if calendar == '360day': + calendar = '360' nleapy = 30 - elif calendar == "365day": - calendar = "365" + elif calendar == '365day': + calendar = '365' nleapy = 0 - elif calendar == "gregorian": + elif calendar == 'gregorian': nleapy = 1 else: - sys.stderr.write("[FAIL] Calendar type %s not recognised\n" % calendar) + sys.stderr.write('[FAIL] Calendar type %s not recognised\n' % + calendar) sys.exit(error.INVALID_EVAR_ERROR) - # turn our times into lists of integers - model_basis = [int(i) for i in common_env["MODELBASIS"].split(",")] - run_start = [int(i) for i in common_env["TASKSTART"].split(",")] - run_length = [int(i) for i in common_env["TASKLENGTH"].split(",")] - - run_days = inc_days.inc_days( - run_start[0], - run_start[1], - run_start[2], - run_length[0], - run_length[1], - run_length[2], - calendar, - ) + #turn our times into lists of integers + model_basis = [int(i) for i in common_env['MODELBASIS'].split(',')] + run_start = [int(i) for i in common_env['TASKSTART'].split(',')] + run_length = [int(i) for i in common_env['TASKLENGTH'].split(',')] + + run_days = inc_days.inc_days(run_start[0], run_start[1], run_start[2], + run_length[0], run_length[1], run_length[2], + calendar) return nleapy, model_basis, run_start, run_length, run_days + def _setup_executable(common_env): - """ + ''' Setup the environment and any files required by the executable - """ + ''' # Create the environment variable container nemo_envar = dr_env_lib.env_lib.LoadEnvar() # Load the environment variables required nemo_envar = _load_environment_variables(nemo_envar) - # Link the ocean executable - common.remove_file(nemo_envar["OCEAN_LINK"]) - os.symlink(nemo_envar["OCEAN_EXEC"], nemo_envar["OCEAN_LINK"]) + + #Link the ocean 
executable + common.remove_file(nemo_envar['OCEAN_LINK']) + os.symlink(nemo_envar['OCEAN_EXEC'], + nemo_envar['OCEAN_LINK']) # Setup date variables - nleapy, model_basis, run_start, run_length, run_days = _setup_dates(common_env) + nleapy, model_basis, run_start, \ + run_length, run_days = _setup_dates(common_env) # NEMO model setup - if int(nemo_envar["NEMO_VERSION"]) < 306: - sys.stderr.write( - "[FAIL] The python drivers are only valid for nemo" - " versions greater than 3.6" - ) + if int(nemo_envar['NEMO_VERSION']) < 306: + sys.stderr.write('[FAIL] The python drivers are only valid for nemo' + ' versions greater than 3.6') sys.exit(error.INVALID_COMPONENT_VER_ERROR) # Read restart from nemo namelist restart_direcs = [] - nemo_rst = _get_nemorst(nemo_envar["NEMO_NL"]) + nemo_rst = _get_nemorst(nemo_envar['NEMO_NL']) if nemo_rst: restart_direcs.append(nemo_rst) icerst_rcode, icerst_val = shellout._exec_subprocess( - "grep cn_icerst_dir %s" % nemo_envar["NEMO_NL"] - ) + 'grep cn_icerst_dir %s' % nemo_envar['NEMO_NL']) if icerst_rcode == 0: - ice_rst = re.findall(r"[\"\'](.*?)[\"\']", icerst_val)[0] - if ice_rst[-1] == "/": + ice_rst = re.findall(r'[\"\'](.*?)[\"\']', icerst_val)[0] + if ice_rst[-1] == '/': ice_rst = ice_rst[:-1] restart_direcs.append(ice_rst) @@ -337,34 +318,29 @@ def _setup_executable(common_env): # Check for trailing slashes in directory names and strip them # out if they're present. - if direc.endswith("/"): - direc = direc.rstrip("/") - - if ( - os.path.isdir(direc) - and (direc not in ("./", ".")) - and common_env["CONTINUE"] == "false" - ): - sys.stdout.write("[INFO] directory is %s\n" % direc) - sys.stdout.write( - "[INFO] This is a New Run. Renaming old NEMO" " history directory\n" - ) + if direc.endswith('/'): + direc = direc.rstrip('/') + + if os.path.isdir(direc) and (direc not in ('./', '.')) and \ + common_env['CONTINUE'] == 'false': + sys.stdout.write('[INFO] directory is %s\n' % direc) + sys.stdout.write('[INFO] This is a New Run. 
Renaming old NEMO' + ' history directory\n') # In seasonal forecasting, we automatically apply # short-stepping to re-try the model. Before re-attempting # it, remove the associated NEMO history directory. old_hist_dir = "%s.%s" % (direc, time.strftime("%Y%m%d%H%M")) - if ( - common_env["SEASONAL"] == "True" - and int(common_env["CYLC_TASK_TRY_NUMBER"]) > 1 - ): + if (common_env['SEASONAL'] == 'True' and + int(common_env['CYLC_TASK_TRY_NUMBER']) > 1): common.remove_latest_hist_dir(old_hist_dir) os.rename(direc, old_hist_dir) os.makedirs(direc) elif not os.path.isdir(direc): - sys.stdout.write("[INFO] Creating NEMO restart directory:\n %s" % direc) + sys.stdout.write('[INFO] Creating NEMO restart directory:\n %s' % + direc) os.makedirs(direc) # Compile a list of NEMO, seaice and iceberg restart files, if any exist. @@ -373,95 +349,85 @@ def _setup_executable(common_env): # _yyyymmdd_restart_icb_.nc where # may itself contain underscores, hence we # do not parse details based on counting the number of underscores. - nemo_restart_files = [ - f - for f in os.listdir(nemo_rst) - if re.findall(r".+_\d{8}_restart(_\d+)?\.nc", f) - or re.findall(r".+_\d{8}_restart_icb(_\d+)?\.nc", f) - ] + nemo_restart_files = [f for f in os.listdir(nemo_rst) if + re.findall(r'.+_\d{8}_restart(_\d+)?\.nc', f) or + re.findall(r'.+_\d{8}_restart_icb(_\d+)?\.nc', f)] nemo_restart_files.sort() if nemo_restart_files: - latest_nemo_dump = nemo_rst + "/" + nemo_restart_files[-1] + latest_nemo_dump = nemo_rst + '/' + nemo_restart_files[-1] else: - latest_nemo_dump = "unset" + latest_nemo_dump = 'unset' - nemo_init_dir = "." + nemo_init_dir = '.' # We need to ensure any lingering NEMO or iceberg retarts from # previous runs are removed to ensure they're not accidentally # picked up if we're starting from climatology on this occasion. 
- common.remove_file("restart.nc") - common.remove_file("restart_icebergs.nc") - common.remove_file("restart_icb.nc") + common.remove_file('restart.nc') + common.remove_file('restart_icebergs.nc') + common.remove_file('restart_icb.nc') - if common_env["CONTINUE"] == "false": + if common_env['CONTINUE'] == 'false': # This is a new run - sys.stdout.write("[INFO] New nemo run\n") + sys.stdout.write('[INFO] New nemo run\n') if os.path.isfile(latest_nemo_dump): - # os.path.isfile will return true for symbolic links aswell - sys.stdout.write("[INFO] Removing old NEMO restart data\n") - for file_path in glob.glob(nemo_rst + "/*restart*"): + #os.path.isfile will return true for symbolic links aswell + sys.stdout.write('[INFO] Removing old NEMO restart data\n') + for file_path in glob.glob(nemo_rst+'/*restart*'): common.remove_file(file_path) - for file_path in glob.glob(ice_rst + "/*restart*"): + for file_path in glob.glob(ice_rst+'/*restart*'): common.remove_file(file_path) - for file_path in glob.glob(nemo_rst + "/*trajectory*"): + for file_path in glob.glob(nemo_rst+'/*trajectory*'): common.remove_file(file_path) # source our history namelist file from the current directory in case # of first cycle - history_nemo_nl = os.path.join(nemo_init_dir, nemo_envar["NEMO_NL"]) + history_nemo_nl = os.path.join(nemo_init_dir, nemo_envar['NEMO_NL']) elif os.path.isfile(latest_nemo_dump): - sys.stdout.write( - "[INFO] Restart data available in NEMO restart " - "directory %s. Restarting from previous task output\n" % nemo_rst - ) - sys.stdout.write( - "[INFO] Sourcing namelist file from the work " - "directory of the previous cycle\n" - ) + sys.stdout.write('[INFO] Restart data available in NEMO restart ' + 'directory %s. 
Restarting from previous task output\n' + % nemo_rst) + sys.stdout.write('[INFO] Sourcing namelist file from the work ' + 'directory of the previous cycle\n') # find the previous work directory if there is one - if common_env["CONTINUE_FROM_FAIL"] == "false": - if common_env["CNWP_SUB_CYCLING"] == "True": - prev_workdir = common.find_previous_workdir( - common_env["CYLC_TASK_CYCLE_POINT"], - common_env["CYLC_TASK_WORK_DIR"], - common_env["CYLC_TASK_NAME"], - common_env["CYLC_TASK_PARAM_run"], - ) + if common_env['CONTINUE_FROM_FAIL'] == 'false': + if common_env['CNWP_SUB_CYCLING'] == 'True': + prev_workdir = common.find_previous_workdir( \ + common_env['CYLC_TASK_CYCLE_POINT'], + common_env['CYLC_TASK_WORK_DIR'], + common_env['CYLC_TASK_NAME'], + common_env['CYLC_TASK_PARAM_run']) else: - prev_workdir = common.find_previous_workdir( - common_env["CYLC_TASK_CYCLE_POINT"], - common_env["CYLC_TASK_WORK_DIR"], - common_env["CYLC_TASK_NAME"], - ) - history_nemo_nl = os.path.join(prev_workdir, nemo_envar["NEMO_NL"]) + prev_workdir = common.find_previous_workdir( \ + common_env['CYLC_TASK_CYCLE_POINT'], + common_env['CYLC_TASK_WORK_DIR'], + common_env['CYLC_TASK_NAME']) + history_nemo_nl = os.path.join(prev_workdir, nemo_envar['NEMO_NL']) else: - history_nemo_nl = nemo_envar["NEMO_NL"] + history_nemo_nl = nemo_envar['NEMO_NL'] nemo_init_dir = nemo_rst else: - sys.stderr.write( - "[FAIL] No restart data available in NEMO restart " - "directory:\n %s\n" % nemo_rst - ) + sys.stderr.write('[FAIL] No restart data available in NEMO restart ' + 'directory:\n %s\n' % nemo_rst) sys.exit(error.MISSING_MODEL_FILE_ERROR) # Strings which are different pre-NEMO4.2 and at NEMO4.2 - if int(nemo_envar["NEMO_VERSION"]) < 402: - gl_step_int_match = "rn_rdt=" - iceberg_rst_part1 = "_icebergs_" - iceberg_rst_part2 = "_restart" - iceberg_link_name = "restart_icebergs" + if int(nemo_envar['NEMO_VERSION']) < 402: + gl_step_int_match = 'rn_rdt=' + iceberg_rst_part1 = '_icebergs_' + 
iceberg_rst_part2 = '_restart' + iceberg_link_name = 'restart_icebergs' else: - gl_step_int_match = "rn_dt=" - iceberg_rst_part1 = "_" - iceberg_rst_part2 = "_restart_icb" - iceberg_link_name = "restart_icb" + gl_step_int_match = 'rn_dt=' + iceberg_rst_part1 = '_' + iceberg_rst_part2 = '_restart_icb' + iceberg_link_name = 'restart_icb' # Any variables containing things that can be globbed will start with gl_ - gl_first_step_match = "nn_it000=" - gl_last_step_match = "nn_itend=" + gl_first_step_match = 'nn_it000=' + gl_last_step_match = 'nn_itend=' - gl_nemo_restart_date_match = "ln_rstdate" - gl_model_basis_time = "nn_date0=" + gl_nemo_restart_date_match = 'ln_rstdate' + gl_model_basis_time = 'nn_date0=' # Read values from the nemo namelist file used by the previous cycle # (if appropriate), or the configuration namelist if this is the initial @@ -470,24 +436,20 @@ def _setup_executable(common_env): # Make sure this file exists before trying to read it since restarted models # may have had old work directories removed for numerous reasons. 
if not os.path.isfile(history_nemo_nl): - sys.stderr.write( - "[FAIL] Cannot find namelist file %s to extract " - "timestep data.\n" % history_nemo_nl - ) + sys.stderr.write('[FAIL] Cannot find namelist file %s to extract ' + 'timestep data.\n' % history_nemo_nl) sys.exit(error.MISSING_MODEL_FILE_ERROR) # First timestep of the previous cycle - _, first_step_val = shellout._exec_subprocess( - "grep %s %s" % (gl_first_step_match, history_nemo_nl) - ) + _, first_step_val = shellout._exec_subprocess('grep %s %s' % (gl_first_step_match, + history_nemo_nl)) - nemo_first_step = int(re.findall(r".+=(.+),", first_step_val)[0]) + nemo_first_step = int(re.findall(r'.+=(.+),', first_step_val)[0]) # Last timestep of the previous cycle - _, last_step_val = shellout._exec_subprocess( - "grep %s %s" % (gl_last_step_match, history_nemo_nl) - ) - nemo_last_step = re.findall(r".+=(.+),", last_step_val)[0] + _, last_step_val = shellout._exec_subprocess('grep %s %s' % (gl_last_step_match, + history_nemo_nl)) + nemo_last_step = re.findall(r'.+=(.+),', last_step_val)[0] # The string in the nemo time step field might have any one of # a number of variants. e.g. 
"set_by_rose", "set_by_system", @@ -499,65 +461,53 @@ def _setup_executable(common_env): nemo_last_step = 0 # Determine (as an integer) the number of seconds per model timestep - _, nemo_step_int_val = shellout._exec_subprocess( - "grep %s %s" % (gl_step_int_match, nemo_envar["NEMO_NL"]) - ) - nemo_step_int = int(re.findall(r".+=(\d*)", nemo_step_int_val)[0]) + _, nemo_step_int_val = shellout._exec_subprocess('grep %s %s' % (gl_step_int_match, + nemo_envar['NEMO_NL'])) + nemo_step_int = int(re.findall(r'.+=(\d*)', nemo_step_int_val)[0]) # If the value for nemo_rst_date_value is true then the model uses # absolute date convention, otherwise the dump times are relative to the # start of the model run and have an integer representation _, nemo_rst_date_value = shellout._exec_subprocess( - "grep %s %s" % (gl_nemo_restart_date_match, history_nemo_nl) - ) - if "true" in nemo_rst_date_value: + 'grep %s %s' % (gl_nemo_restart_date_match, history_nemo_nl)) + if 'true' in nemo_rst_date_value: nemo_rst_date_bool = True else: nemo_rst_date_bool = False # The initial date of the model run (YYYYMMDD) - nemo_ndate0 = "%04d%02d%02d" % tuple(model_basis[:3]) + nemo_ndate0 = '%04d%02d%02d' % tuple(model_basis[:3]) nemo_dump_time = "00000000" # Get the model basis time for this run (YYYYMMDD) _, model_basis_val = shellout._exec_subprocess( - "grep %s %s" % (gl_model_basis_time, history_nemo_nl) - ) - nemo_model_basis = re.findall(r".+=(.+),", model_basis_val)[0] + 'grep %s %s' % (gl_model_basis_time, history_nemo_nl)) + nemo_model_basis = re.findall(r'.+=(.+),', model_basis_val)[0] if os.path.isfile(latest_nemo_dump): - nemo_dump_time = re.findall(r"_(\d*)_restart", latest_nemo_dump)[0] + nemo_dump_time = re.findall(r'_(\d*)_restart', latest_nemo_dump)[0] # Verify the dump time against cycle time if appropriate, do the # automatic fix, and check all other restart files match - if common_env["DRIVERS_VERIFY_RST"] == "True": + if common_env['DRIVERS_VERIFY_RST'] == 'True': 
nemo_dump_time = _verify_fix_rst( nemo_dump_time, nemo_rst, - nemo_model_basis, - nemo_step_int, - nemo_last_step, - common_env["CALENDAR"], - ) + nemo_model_basis, nemo_step_int, nemo_last_step, + common_env['CALENDAR']) - _verify_nemo_rst( - nemo_dump_time, - nemo_rst, - nemo_envar["NEMO_NL"], - int(nemo_envar["NEMO_NPROC"]), - int(nemo_envar["NEMO_VERSION"]), - ) + _verify_nemo_rst(nemo_dump_time, nemo_rst, nemo_envar['NEMO_NL'], + int(nemo_envar['NEMO_NPROC']), + int(nemo_envar['NEMO_VERSION'])) # link restart files no that the last output one becomes next input one - common.remove_file("restart.nc") + common.remove_file('restart.nc') - common.remove_file("restart_ice_in.nc") + common.remove_file('restart_ice_in.nc') # Sort out the processor restart files - if int(nemo_envar["NEMO_NPROC"]) == 1: - sys.stderr.write( - "[FAIL] NEMO driver does not support the running" - " of NEMO in serial mode\n" - ) + if int(nemo_envar['NEMO_NPROC']) == 1: + sys.stderr.write('[FAIL] NEMO driver does not support the running' + ' of NEMO in serial mode\n') sys.exit(SERIAL_MODE_ERROR) else: @@ -566,598 +516,535 @@ def _setup_executable(common_env): iceberg_restart_count = 0 # Nemo has multiple processors - for i_proc in range(int(nemo_envar["NEMO_NPROC"])): + for i_proc in range(int(nemo_envar['NEMO_NPROC'])): tag = str(i_proc).zfill(4) - nemo_rst_source = "%s/%so_%s_restart_%s.nc" % ( - nemo_init_dir, - common_env["RUNID"], - nemo_dump_time, - tag, - ) - nemo_rst_link = "restart_%s.nc" % tag + nemo_rst_source = '%s/%so_%s_restart_%s.nc' % \ + (nemo_init_dir, common_env['RUNID'], \ + nemo_dump_time, tag) + nemo_rst_link = 'restart_%s.nc' % tag common.remove_file(nemo_rst_link) if os.path.isfile(nemo_rst_source): os.symlink(nemo_rst_source, nemo_rst_link) nemo_restart_count += 1 - ice_rst_source = "%s/%so_%s_restart_ice_%s.nc" % ( - nemo_init_dir, - common_env["RUNID"], - nemo_dump_time, - tag, - ) + ice_rst_source = '%s/%so_%s_restart_ice_%s.nc' % \ + (nemo_init_dir, 
common_env['RUNID'], \ + nemo_dump_time, tag) if os.path.isfile(ice_rst_source): - ice_rst_link = "restart_ice_in_%s.nc" % tag + ice_rst_link = 'restart_ice_in_%s.nc' % tag common.remove_file(ice_rst_link) os.symlink(ice_rst_source, ice_rst_link) ice_restart_count += 1 - iceberg_rst_source = "%s/%so%s%s%s_%s.nc" % ( - nemo_init_dir, - common_env["RUNID"], - iceberg_rst_part1, - nemo_dump_time, - iceberg_rst_part2, - tag, - ) + iceberg_rst_source = '%s/%so%s%s%s_%s.nc' % \ + (nemo_init_dir, common_env['RUNID'], iceberg_rst_part1, + nemo_dump_time, iceberg_rst_part2, tag) if os.path.isfile(iceberg_rst_source): - iceberg_rst_link = "%s_%s.nc" % (iceberg_link_name, tag) + iceberg_rst_link = '%s_%s.nc' % (iceberg_link_name, tag) common.remove_file(iceberg_rst_link) os.symlink(iceberg_rst_source, iceberg_rst_link) iceberg_restart_count += 1 - # endfor + #endfor if nemo_restart_count < 1: - sys.stdout.write("[INFO] No NEMO sub-PE restarts found\n") + sys.stdout.write('[INFO] No NEMO sub-PE restarts found\n') # We found no nemo restart sub-domain files let's # look for a global file. - nemo_rst_source = "%s/%so_%s_restart.nc" % ( - nemo_init_dir, - common_env["RUNID"], - nemo_dump_time, - ) + nemo_rst_source = '%s/%so_%s_restart.nc' % \ + (nemo_init_dir, common_env['RUNID'], \ + nemo_dump_time) if os.path.isfile(nemo_rst_source): - sys.stdout.write( - "[INFO] Using rebuilt NEMO restart " - "file: %s\n" % nemo_rst_source - ) - nemo_rst_link = "restart.nc" + sys.stdout.write('[INFO] Using rebuilt NEMO restart '\ + 'file: %s\n' % nemo_rst_source) + nemo_rst_link = 'restart.nc' common.remove_file(nemo_rst_link) os.symlink(nemo_rst_source, nemo_rst_link) if ice_restart_count < 1: - sys.stdout.write("[INFO] No ice sub-PE restarts found\n") + sys.stdout.write('[INFO] No ice sub-PE restarts found\n') # We found no ice restart sub-domain files let's # look for a global file. 
- ice_rst_source = "%s/%so_%s_restart_ice.nc" % ( - nemo_init_dir, - common_env["RUNID"], - nemo_dump_time, - ) + ice_rst_source = '%s/%so_%s_restart_ice.nc' % \ + (nemo_init_dir, common_env['RUNID'], \ + nemo_dump_time) if os.path.isfile(ice_rst_source): - sys.stdout.write( - "[INFO] Using rebuilt ice restart " - "file: %s\n" % ice_rst_source - ) - ice_rst_link = "restart_ice_in.nc" + sys.stdout.write('[INFO] Using rebuilt ice restart '\ + 'file: %s\n' % ice_rst_source) + ice_rst_link = 'restart_ice_in.nc' common.remove_file(ice_rst_link) os.symlink(ice_rst_source, ice_rst_link) if iceberg_restart_count < 1: - sys.stdout.write("[INFO] No iceberg sub-PE restarts found\n") + sys.stdout.write('[INFO] No iceberg sub-PE restarts found\n') # We found no iceberg restart sub-domain files let's # look for a global file. - iceberg_rst_source = "%s/%so%s%s%s.nc" % ( - nemo_init_dir, - common_env["RUNID"], - iceberg_rst_part1, - nemo_dump_time, - iceberg_rst_part2, - ) + iceberg_rst_source = '%s/%so%s%s%s.nc' % \ + (nemo_init_dir, common_env['RUNID'], iceberg_rst_part1, + nemo_dump_time, iceberg_rst_part2) if os.path.isfile(iceberg_rst_source): - sys.stdout.write( - "[INFO] Using rebuilt iceberg restart" - "file: %s\n" % iceberg_rst_source - ) - iceberg_rst_nc = iceberg_link_name + ".nc" + sys.stdout.write('[INFO] Using rebuilt iceberg restart'\ + 'file: %s\n' % iceberg_rst_source) + iceberg_rst_nc = iceberg_link_name + '.nc' common.remove_file(iceberg_rst_nc) os.symlink(iceberg_rst_source, iceberg_rst_link) - # endif (nemo_envar(NEMO_NPROC) == 1) + #endif (nemo_envar(NEMO_NPROC) == 1) if nemo_rst_date_bool: - # Then nemo_dump_time has the form YYYYMMDD + #Then nemo_dump_time has the form YYYYMMDD pass else: - # nemo_dump_time is relative to start of model run and is an - # integer + #nemo_dump_time is relative to start of model run and is an + #integer nemo_dump_time = int(nemo_dump_time) completed_days = nemo_dump_time * (nemo_step_int // 86400) - sys.stdout.write( - 
"[INFO] Nemo has previously completed %i days\n" % completed_days - ) + sys.stdout.write('[INFO] Nemo has previously completed %i days\n' % + completed_days) ln_restart = ".true." restart_ctl = 2 - if common_env["CONTINUE_FROM_FAIL"] == "true": + if common_env['CONTINUE_FROM_FAIL'] == 'true': # This is only used for coupled NWP where we don't have dates in # NEMO restart file names - nemo_next_step = int(nemo_dump_time) + 1 + nemo_next_step = int(nemo_dump_time)+1 else: nemo_next_step = nemo_last_step + 1 else: # This is an NRUN - if nemo_envar["NEMO_START"] != "": - if os.path.isfile(nemo_envar["NEMO_START"]): + if nemo_envar['NEMO_START'] != '': + if os.path.isfile(nemo_envar['NEMO_START']): - os.symlink(nemo_envar["NEMO_START"], "restart.nc") + os.symlink(nemo_envar['NEMO_START'], 'restart.nc') ln_restart = ".true." - elif os.path.isfile("%s_0000.nc" % nemo_envar["NEMO_START"]): - for fname in glob.glob("%s_????.nc" % nemo_envar["NEMO_START"]): - proc_number = fname.split(".")[-2][-4:] + elif os.path.isfile('%s_0000.nc' % + nemo_envar['NEMO_START']): + for fname in glob.glob('%s_????.nc' % + nemo_envar['NEMO_START']): + proc_number = fname.split('.')[-2][-4:] # We need to make sure there isn't already # a restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! - common.remove_file("restart_%s.nc" % proc_number) + common.remove_file('restart_%s.nc' % proc_number) - os.symlink(fname, "restart_%s.nc" % proc_number) + os.symlink(fname, 'restart_%s.nc' % proc_number) ln_restart = ".true." 
- elif os.path.isfile("%s_0000.nc" % nemo_envar["NEMO_START"][:-3]): - for fname in glob.glob("%s_????.nc" % nemo_envar["NEMO_START"][:-3]): - proc_number = fname.split(".")[-2][-4:] + elif os.path.isfile('%s_0000.nc' % + nemo_envar['NEMO_START'][:-3]): + for fname in glob.glob('%s_????.nc' % + nemo_envar['NEMO_START'][:-3]): + proc_number = fname.split('.')[-2][-4:] # We need to make sure there isn't already # a restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! - common.remove_file("restart_%s.nc" % proc_number) + common.remove_file('restart_%s.nc' % proc_number) - os.symlink(fname, "restart_%s.nc" % proc_number) + os.symlink(fname, 'restart_%s.nc' % proc_number) ln_restart = ".true." else: - sys.stderr.write( - "[FAIL] file %s not found\n" % nemo_envar["NEMO_START"] - ) + sys.stderr.write('[FAIL] file %s not found\n' % + nemo_envar['NEMO_START']) sys.exit(error.MISSING_MODEL_FILE_ERROR) else: - # NEMO_START is unset - sys.stdout.write( - "[WARN] NEMO_START not set\n" "NEMO will use climatology\n" - ) + #NEMO_START is unset + sys.stdout.write('[WARN] NEMO_START not set\n' + 'NEMO will use climatology\n') ln_restart = ".false." - if nemo_envar["NEMO_ICEBERGS_START"] != "": + if nemo_envar['NEMO_ICEBERGS_START'] != '': - if os.path.isfile(nemo_envar["NEMO_ICEBERGS_START"]): + if os.path.isfile(nemo_envar['NEMO_ICEBERGS_START']): # We need to make sure there isn't already # an iceberg restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! 
- iceberg_rst_file = iceberg_link_name + ".nc" + iceberg_rst_file = iceberg_link_name + '.nc' common.remove_file(iceberg_rst_file) - os.symlink(nemo_envar["NEMO_ICEBERGS_START"], iceberg_rst_file) - elif os.path.isfile("%s_0000.nc" % nemo_envar["NEMO_ICEBERGS_START"]): - for fname in glob.glob( - "%s_????.nc" % nemo_envar["NEMO_ICEBERGS_START"] - ): - proc_number = fname.split(".")[-2][-4:] + os.symlink(nemo_envar['NEMO_ICEBERGS_START'], + iceberg_rst_file) + elif os.path.isfile('%s_0000.nc' % + nemo_envar['NEMO_ICEBERGS_START']): + for fname in glob.glob('%s_????.nc' % + nemo_envar['NEMO_ICEBERGS_START']): + proc_number = fname.split('.')[-2][-4:] # We need to make sure there isn't already # an iceberg restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! - common.remove_file("%s_%s.nc" % (iceberg_rst_link, proc_number)) + common.remove_file('%s_%s.nc' % + (iceberg_rst_link, proc_number)) - os.symlink(fname, "%s_%s.nc" % (iceberg_rst_file, proc_number)) - elif os.path.isfile("%s_0000.nc" % nemo_envar["NEMO_ICEBERGS_START"][:-3]): - for fname in glob.glob( - "%s_????.nc" % nemo_envar["NEMO_ICEBERGS_START"][:-3] - ): - proc_number = fname.split(".")[-2][-4:] + os.symlink(fname, '%s_%s.nc' % + (iceberg_rst_file, proc_number)) + elif os.path.isfile('%s_0000.nc' % + nemo_envar['NEMO_ICEBERGS_START'][:-3]): + for fname in glob.glob('%s_????.nc' % + nemo_envar['NEMO_ICEBERGS_START'][:-3]): + proc_number = fname.split('.')[-2][-4:] # We need to make sure there isn't already # an iceberg restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! 
- common.remove_file("%s_%s.nc" % (iceberg_rst_link, proc_number)) + common.remove_file('%s_%s.nc' % + (iceberg_rst_link, proc_number)) - os.symlink(fname, "%s_%s.nc" % (iceberg_rst_link, proc_number)) + os.symlink(fname, '%s_%s.nc' % + (iceberg_rst_link, proc_number)) else: - sys.stderr.write( - "[FAIL] file %s not found\n" % nemo_envar["NEMO_ICEBERGS_START"] - ) + sys.stderr.write('[FAIL] file %s not found\n' % + nemo_envar['NEMO_ICEBERGS_START']) sys.exit(error.MISSING_MODEL_FILE_ERROR) else: - # NEMO_ICEBERGS_START unset - sys.stdout.write( - "[WARN] NEMO_ICEBERGS_START not set or file(s)" - " not found. Icebergs (if switched on) will start" - " from a state of zero icebergs\n" - ) + #NEMO_ICEBERGS_START unset + sys.stdout.write('[WARN] NEMO_ICEBERGS_START not set or file(s)' + ' not found. Icebergs (if switched on) will start' + ' from a state of zero icebergs\n') restart_ctl = 0 nemo_next_step = nemo_first_step nemo_last_step = nemo_first_step - 1 - if common_env["CONTINUE_FROM_FAIL"] == "true": - # Check that the length of run is correct - # (it won't be if this is the wrong restart file) - run_start_dt = datetime.datetime( - run_start[0], run_start[1], run_start[2], run_start[3] - ) - model_basis_dt = datetime.datetime( - model_basis[0], model_basis[1], model_basis[2], model_basis[3] - ) - nemo_init_step = (run_start_dt - model_basis_dt).total_seconds() / nemo_step_int - tot_runlen_sec = ( - run_days * 86400 + run_length[3] * 3600 + run_length[4] * 60 + run_length[5] - ) - nemo_final_step = int((tot_runlen_sec // nemo_step_int) + nemo_init_step) + if common_env['CONTINUE_FROM_FAIL'] == 'true': + #Check that the length of run is correct + #(it won't be if this is the wrong restart file) + run_start_dt = datetime.datetime(run_start[0], run_start[1], + run_start[2], run_start[3]) + model_basis_dt = datetime.datetime(model_basis[0], model_basis[1], + model_basis[2], model_basis[3]) + nemo_init_step = (run_start_dt-model_basis_dt).total_seconds() \ + 
/nemo_step_int + tot_runlen_sec = run_days * 86400 + run_length[3]*3600 \ + + run_length[4]*60 + run_length[5] + nemo_final_step = int((tot_runlen_sec//nemo_step_int) + nemo_init_step) # Check that nemo_next_step is the correct number of hours to # match LAST_DUMP_HOURS variable - steps_per_hour = 3600.0 / nemo_step_int - last_dump_hrs = int(common_env["LAST_DUMP_HOURS"]) - last_dump_step = int(nemo_init_step + last_dump_hrs * steps_per_hour) - if nemo_next_step - 1 != last_dump_step: - sys.stderr.write("[FAIL] Last NEMO restarts not at correct time") + steps_per_hour = 3600./nemo_step_int + last_dump_hrs = int(common_env['LAST_DUMP_HOURS']) + last_dump_step = int(nemo_init_step + last_dump_hrs*steps_per_hour) + if nemo_next_step-1 != last_dump_step: + sys.stderr.write('[FAIL] Last NEMO restarts not at correct time') sys.exit(error.RESTART_FILE_ERROR) else: - tot_runlen_sec = ( - run_days * 86400 + run_length[3] * 3600 + run_length[4] * 60 + run_length[5] - ) + tot_runlen_sec = run_days * 86400 + run_length[3]*3600 \ + + run_length[4]*60 + run_length[5] nemo_final_step = (tot_runlen_sec // nemo_step_int) + nemo_last_step - # Make our call to update the nemo namelist. First generate the list - # of commands - if int(nemo_envar["NEMO_VERSION"]) >= 400: + + #Make our call to update the nemo namelist. 
First generate the list + #of commands + if int(nemo_envar['NEMO_VERSION']) >= 400: # from NEMO 4.0 onwards we don't have jpnij in the namelist - update_nl_cmd = ( - "--file %s --runid %so --restart %s --restart_ctl %s" - " --next_step %i --final_step %s --start_date %s --leapyear %i" - " --iproc %s --jproc %s --cpl_river_count %s --verbose" - % ( - nemo_envar["NEMO_NL"], - common_env["RUNID"], - ln_restart, - restart_ctl, - nemo_next_step, - nemo_final_step, - nemo_ndate0, - nleapy, - nemo_envar["NEMO_IPROC"], - nemo_envar["NEMO_JPROC"], - common_env["CPL_RIVER_COUNT"], - ) - ) + update_nl_cmd = '--file %s --runid %so --restart %s --restart_ctl %s' \ + ' --next_step %i --final_step %s --start_date %s --leapyear %i' \ + ' --iproc %s --jproc %s --cpl_river_count %s --verbose' % \ + (nemo_envar['NEMO_NL'], \ + common_env['RUNID'], \ + ln_restart, \ + restart_ctl, \ + nemo_next_step, \ + nemo_final_step, \ + nemo_ndate0, \ + nleapy, \ + nemo_envar['NEMO_IPROC'], \ + nemo_envar['NEMO_JPROC'], \ + common_env['CPL_RIVER_COUNT']) else: - update_nl_cmd = ( - "--file %s --runid %so --restart %s --restart_ctl %s" - " --next_step %i --final_step %s --start_date %s" - " --leapyear %i --iproc %s --jproc %s --ijproc %s" - " --cpl_river_count %s --verbose" - % ( - nemo_envar["NEMO_NL"], - common_env["RUNID"], - ln_restart, - restart_ctl, - nemo_next_step, - nemo_final_step, - nemo_ndate0, - nleapy, - nemo_envar["NEMO_IPROC"], - nemo_envar["NEMO_JPROC"], - nemo_envar["NEMO_NPROC"], - common_env["CPL_RIVER_COUNT"], - ) - ) - - update_nl_cmd = "./update_nemo_nl %s" % update_nl_cmd + update_nl_cmd = '--file %s --runid %so --restart %s --restart_ctl %s' \ + ' --next_step %i --final_step %s --start_date %s' \ + ' --leapyear %i --iproc %s --jproc %s --ijproc %s' \ + ' --cpl_river_count %s --verbose' % \ + (nemo_envar['NEMO_NL'], \ + common_env['RUNID'], \ + ln_restart, \ + restart_ctl, \ + nemo_next_step, \ + nemo_final_step, \ + nemo_ndate0, \ + nleapy, \ + nemo_envar['NEMO_IPROC'], \ 
+ nemo_envar['NEMO_JPROC'], \ + nemo_envar['NEMO_NPROC'], \ + common_env['CPL_RIVER_COUNT']) + + update_nl_cmd = './update_nemo_nl %s' % update_nl_cmd # REFACTOR TO USE THE SAFE EXEC SUBPROC update_nl_rcode, _ = shellout._exec_subprocess(update_nl_cmd) if update_nl_rcode != 0: - sys.stderr.write("[FAIL] Error updating nemo namelist\n") + sys.stderr.write('[FAIL] Error updating nemo namelist\n') sys.exit(error.SUBPROC_ERROR) # We just check for the presence of T or t (as in TRUE, True or true) # in the L_OCN_PASS_TRC value. - if ("T" in nemo_envar["L_OCN_PASS_TRC"]) or ("t" in nemo_envar["L_OCN_PASS_TRC"]): + if ('T' in nemo_envar['L_OCN_PASS_TRC']) or \ + ('t' in nemo_envar['L_OCN_PASS_TRC']): - sys.stdout.write("[INFO] nemo_driver: Passive tracer code is " "active.\n") + sys.stdout.write('[INFO] nemo_driver: Passive tracer code is ' + 'active.\n') controller_mode = "run_controller" - top_controller.run_controller( - common_env, - restart_ctl, - int(nemo_envar["NEMO_NPROC"]), - common_env["RUNID"], - common_env["DRIVERS_VERIFY_RST"], - nemo_dump_time, - controller_mode, - ) + top_controller.run_controller(common_env, + restart_ctl, + int(nemo_envar['NEMO_NPROC']), + common_env['RUNID'], + common_env['DRIVERS_VERIFY_RST'], + nemo_dump_time, + controller_mode) else: - sys.stdout.write("[INFO] nemo_driver: " "Passive tracer code not active\n.") + sys.stdout.write('[INFO] nemo_driver: ' + 'Passive tracer code not active\n.') - use_si3 = "si3" in common_env["models"] + use_si3 = 'si3' in common_env['models'] if use_si3: - sys.stdout.write("[INFO] nemo_driver: SI3 code is active.\n") + sys.stdout.write('[INFO] nemo_driver: SI3 code is active.\n') controller_mode = "run_controller" - si3_controller.run_controller( - common_env, - restart_ctl, - int(nemo_envar["NEMO_NPROC"]), - common_env["RUNID"], - common_env["DRIVERS_VERIFY_RST"], - nemo_dump_time, - controller_mode, - ) + si3_controller.run_controller(common_env, + restart_ctl, + int(nemo_envar['NEMO_NPROC']), + 
common_env['RUNID'], + common_env['DRIVERS_VERIFY_RST'], + nemo_dump_time, + controller_mode) return nemo_envar def _set_launcher_command(launcher, nemo_envar): - """ + ''' Setup the launcher command for the executable - """ - if nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"] == "unset": + ''' + if nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'] == 'unset': ss = False - nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"] = ( - common.set_aprun_options( - nemo_envar["NEMO_NPROC"], - nemo_envar["OCEAN_NODES"], - nemo_envar["OMPTHR_OCN"], - nemo_envar["OHYPERTHREADS"], - ss, - ) - if launcher == "aprun" - else "" - ) - - launch_cmd = "%s ./%s" % ( - nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"], - nemo_envar["OCEAN_LINK"], - ) + nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'] = \ + common.set_aprun_options(nemo_envar['NEMO_NPROC'], \ + nemo_envar['OCEAN_NODES'], nemo_envar['OMPTHR_OCN'], \ + nemo_envar['OHYPERTHREADS'], ss) \ + if launcher == 'aprun' else '' + + launch_cmd = '%s ./%s' % \ + (nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'], \ + nemo_envar['OCEAN_LINK']) # Put in quotes to allow this environment variable to be exported as it # contains (or can contain) spaces - nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"] = ( - "'%s'" % nemo_envar["ROSE_LAUNCHER_PREOPTS_NEMO"] - ) + nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'] = "'%s'" % \ + nemo_envar['ROSE_LAUNCHER_PREOPTS_NEMO'] return launch_cmd - def get_ocean_resol(nemo_nl_file, run_info): - """ + ''' Determine the ocean resolution. This function is only used when creating the namcouple at run time. 
- """ + ''' # See if resolution is contained within namelists (existent of # namelist_cfg has already been checked) ocean_nml = f90nml.read(nemo_nl_file) # Check the required entries exist - if "namcfg" not in ocean_nml: - sys.stderr.write("[FAIL] namcfg not found in namelist_cfg\n") + if 'namcfg' not in ocean_nml: + sys.stderr.write('[FAIL] namcfg not found in namelist_cfg\n') sys.exit(error.MISSING_OCN_RESOL_NML) - if "jpiglo" in ocean_nml["namcfg"]: + if 'jpiglo' in ocean_nml['namcfg']: # Resolution is contained within namelists - if ( - "jpiglo" not in ocean_nml["namcfg"] - or "jpjglo" not in ocean_nml["namcfg"] - or "cp_cfg" not in ocean_nml["namcfg"] - or "jp_cfg" not in ocean_nml["namcfg"] - ): - sys.stderr.write( - "[FAIL] cp_cfg, jp_cfg, jpiglo or jpjglo are " - "missing from namelist namcf in namelist_cfg\n" - ) + if 'jpiglo' not in ocean_nml['namcfg'] or \ + 'jpjglo' not in ocean_nml['namcfg'] or \ + 'cp_cfg' not in ocean_nml['namcfg'] or \ + 'jp_cfg' not in ocean_nml['namcfg']: + sys.stderr.write('[FAIL] cp_cfg, jp_cfg, jpiglo or jpjglo are ' + 'missing from namelist namcf in namelist_cfg\n') sys.exit(error.MISSING_OCN_RESOL) # Check it is on orca grid - if ocean_nml["namcfg"]["cp_cfg"] != "orca": - sys.stderr.write("[FAIL] we can currently only handle the " "ORCA grid\n") + if ocean_nml['namcfg']['cp_cfg'] != 'orca': + sys.stderr.write('[FAIL] we can currently only handle the ' + 'ORCA grid\n') sys.exit(error.NOT_ORCA_GRID) # Check this is a grid we recognise - if ocean_nml["namcfg"]["jp_cfg"] == 25: - run_info["OCN_grid"] = "orca025" + if ocean_nml['namcfg']['jp_cfg'] == 25: + run_info['OCN_grid'] = 'orca025' else: - run_info["OCN_grid"] = "orca" + str(ocean_nml["namcfg"]["jp_cfg"]) + run_info['OCN_grid'] = 'orca' + str(ocean_nml['namcfg']['jp_cfg']) # Store the ocean resolution - run_info["OCN_resol"] = [ - ocean_nml["namcfg"]["jpiglo"], - ocean_nml["namcfg"]["jpjglo"], - ] + run_info['OCN_resol'] = [ocean_nml['namcfg']['jpiglo'], + 
ocean_nml['namcfg']['jpjglo']] else: # Resolution should be contained within a domain_cfg netCDF file. # Rather than read this file, assume resolution is declared. - if "OCN_grid" not in run_info: - sys.stderr.write( - "[FAIL] it is necessary to declare the ocean " - "resolution by setting the OCN_RES environment " - "variable." - ) + if 'OCN_grid' not in run_info: + sys.stderr.write('[FAIL] it is necessary to declare the ocean ' + 'resolution by setting the OCN_RES environment ' + 'variable.') sys.exit(error.NOT_DECLARE_OCN_RES) else: # Determine the ocean resolution - if run_info["OCN_grid"] not in OCEAN_RESOLS: - sys.stderr.write( - "[FAIL] the ocean resolution for %s is " - "unknown" % run_info["OCN_grid"] - ) + if run_info['OCN_grid'] not in OCEAN_RESOLS: + sys.stderr.write('[FAIL] the ocean resolution for %s is ' + 'unknown' % run_info['OCN_grid']) sys.exit(error.UNKNOWN_OCN_RESOL) else: - run_info["OCN_resol"] = [ - OCEAN_RESOLS[run_info["OCN_grid"]][0], - OCEAN_RESOLS[run_info["OCN_grid"]][1], - ] + run_info['OCN_resol'] = [OCEAN_RESOLS[run_info['OCN_grid']][0], + OCEAN_RESOLS[run_info['OCN_grid']][1]] return run_info - def _sent_coupling_fields(nemo_envar, run_info): - """ + ''' Write the coupling fields sent from NEMO into model_snd_list. This function is only used when creating the namcouple at run time. 
- """ + ''' # Check that file specifying the coupling fields sent from # NEMO is present - if not os.path.exists("OASIS_OCN_SEND"): - sys.stderr.write("[FAIL] OASIS_OCN_SEND is missing.\n") + if not os.path.exists('OASIS_OCN_SEND'): + sys.stderr.write('[FAIL] OASIS_OCN_SEND is missing.\n') sys.exit(error.MISSING_OASIS_OCN_SEND) # Add toyatm to our list of executables - if not "exec_list" in run_info: - run_info["exec_list"] = [] - run_info["exec_list"].append("toyoce") + if not 'exec_list' in run_info: + run_info['exec_list'] = [] + run_info['exec_list'].append('toyoce') # Store ocean resolution if it is provided - if nemo_envar["OCN_RES"]: - run_info["OCN_grid"] = nemo_envar["OCN_RES"] + if nemo_envar['OCN_RES']: + run_info['OCN_grid'] = nemo_envar['OCN_RES'] # Store the nemo version - if nemo_envar["NEMO_VERSION"]: - run_info["NEMO_VERSION"] = nemo_envar["NEMO_VERSION"] + if nemo_envar['NEMO_VERSION']: + run_info['NEMO_VERSION'] = nemo_envar['NEMO_VERSION'] # Determine the ocean resolution - run_info = get_ocean_resol(nemo_envar["NEMO_NL"], run_info) + run_info = get_ocean_resol(nemo_envar['NEMO_NL'], run_info) # If using the default coupling option, we'll need to read the # NEMO namelist later - run_info["nemo_nl"] = nemo_envar["NEMO_NL"] + run_info['nemo_nl'] = nemo_envar['NEMO_NL'] # Read the namelist - oasis_nml = f90nml.read("OASIS_OCN_SEND") + oasis_nml = f90nml.read('OASIS_OCN_SEND') # Check we have the expected information - if "oasis_ocn_send_nml" not in oasis_nml: - sys.stderr.write( - "[FAIL] namelist oasis_ocn_send_nml is " "missing from OASIS_OCN_SEND.\n" - ) + if 'oasis_ocn_send_nml' not in oasis_nml: + sys.stderr.write('[FAIL] namelist oasis_ocn_send_nml is ' + 'missing from OASIS_OCN_SEND.\n') sys.exit(error.MISSING_OASIS_OCN_SEND_NML) - if "oasis_ocn_send" not in oasis_nml["oasis_ocn_send_nml"]: - sys.stderr.write( - "[FAIL] entry oasis_ocn_send is missing " - "from namelist oasis_ocn_send_nml in " - "OASIS_OCN_SEND.\n" - ) + if 'oasis_ocn_send' 
not in oasis_nml['oasis_ocn_send_nml']: + sys.stderr.write('[FAIL] entry oasis_ocn_send is missing ' + 'from namelist oasis_ocn_send_nml in ' + 'OASIS_OCN_SEND.\n') sys.exit(error.MISSING_OASIS_OCN_SEND) # Create a list of fields sent from OCN import write_namcouple - - model_snd_list = write_namcouple.add_to_cpl_list( - "OCN", False, 0, oasis_nml["oasis_ocn_send_nml"]["oasis_ocn_send"] - ) + model_snd_list = \ + write_namcouple.add_to_cpl_list( \ + 'OCN', False, 0, + oasis_nml['oasis_ocn_send_nml']['oasis_ocn_send']) return run_info, model_snd_list - def write_ocean_out_to_stdout(): - """ + ''' Write the contents of ocean.output to stnadard out - """ + ''' # append the ocean output and solver stat file to standard out. Use an # iterator to read the files, incase they are too large to fit into # memory. Try to find both the NEMO 3.6 and NEMO 4.0 solver files for # compatiblilty reasons - nemo_stdout_file = "ocean.output" - nemo36_solver_file = "solver.stat" - nemo40_solver_file = "run.stat" - icebergs_stat_file = "icebergs.stat" - for nemo_output_file in ( - nemo_stdout_file, - nemo36_solver_file, - nemo40_solver_file, - icebergs_stat_file, - ): + nemo_stdout_file = 'ocean.output' + nemo36_solver_file = 'solver.stat' + nemo40_solver_file = 'run.stat' + icebergs_stat_file = 'icebergs.stat' + for nemo_output_file in (nemo_stdout_file, + nemo36_solver_file, nemo40_solver_file, + icebergs_stat_file): # The output file from NEMO4.0 has some suspect utf8 encoding, # this try/except will handle it if os.path.isfile(nemo_output_file): - sys.stdout.write("[INFO] Ocean output from file %s\n" % nemo_output_file) - with open(nemo_output_file, "r", encoding="utf-8") as n_out: + sys.stdout.write('[INFO] Ocean output from file %s\n' % + nemo_output_file) + with open(nemo_output_file, 'r', encoding='utf-8') as n_out: for line in n_out: try: sys.stdout.write(line) except UnicodeEncodeError: pass else: - sys.stdout.write( - "[INFO] Nemo output file %s not avaliable\n" % 
nemo_output_file - ) - + sys.stdout.write('[INFO] Nemo output file %s not avaliable\n' + % nemo_output_file) def _finalize_executable(common_env): - """ + ''' Finalize the NEMO run, copy the nemo namelist to the restart directory for the next cycle, update standard out, and ensure that no errors have been found in the NEMO execution. - """ - sys.stdout.write("[INFO] finalizing NEMO\n") - sys.stdout.write("[INFO] running finalize in %s\n" % os.getcwd()) + ''' + sys.stdout.write('[INFO] finalizing NEMO\n') + sys.stdout.write('[INFO] running finalize in %s\n' % os.getcwd()) write_ocean_out_to_stdout() - _, error_count = shellout._exec_subprocess('grep "E R R O R" ocean.output | wc -l') + _, error_count = shellout._exec_subprocess( + 'grep "E R R O R" ocean.output | wc -l') if int(error_count) >= 1: - sys.stderr.write( - "[FAIL] An error has been found with the NEMO run." - " Please investigate the ocean.output file for more" - " details\n" - ) + sys.stderr.write('[FAIL] An error has been found with the NEMO run.' + ' Please investigate the ocean.output file for more' + ' details\n') sys.exit(error.COMPONENT_MODEL_ERROR) # move the nemo namelist to the restart directory to allow the next cycle # to pick it up nemo_envar_fin = dr_env_lib.env_lib.LoadEnvar() nemo_envar_fin = dr_env_lib.env_lib.load_envar_from_definition( - nemo_envar_fin, dr_env_lib.nemo_def.NEMO_ENVIRONMENT_VARS_FINAL - ) - nemo_rst = _get_nemorst(nemo_envar_fin["NEMO_NL"]) - if os.path.isdir(nemo_rst) and os.path.isfile(nemo_envar_fin["NEMO_NL"]): - shutil.copy(nemo_envar_fin["NEMO_NL"], nemo_rst) + nemo_envar_fin, dr_env_lib.nemo_def.NEMO_ENVIRONMENT_VARS_FINAL) + nemo_rst = _get_nemorst(nemo_envar_fin['NEMO_NL']) + if os.path.isdir(nemo_rst) and \ + os.path.isfile(nemo_envar_fin['NEMO_NL']): + shutil.copy(nemo_envar_fin['NEMO_NL'], nemo_rst) # The only way to check if TOP is active is by checking the # passive tracer env var. 
# Check whether we need to finalize the TOP controller - if ("T" in nemo_envar_fin["L_OCN_PASS_TRC"]) or ( - "t" in nemo_envar_fin["L_OCN_PASS_TRC"] - ): + if ('T' in nemo_envar_fin['L_OCN_PASS_TRC']) or \ + ('t' in nemo_envar_fin['L_OCN_PASS_TRC']): - sys.stdout.write("[INFO] nemo_driver: Finalize TOP controller.") + sys.stdout.write('[INFO] nemo_driver: Finalize TOP controller.') controller_mode = "finalize" top_controller.run_controller([], [], [], [], [], [], controller_mode) - use_si3 = "si3" in common_env["models"] + use_si3 = 'si3' in common_env['models'] if use_si3: - sys.stdout.write("[INFO] nemo_driver: Finalise SI3 controller\n") + sys.stdout.write('[INFO] nemo_driver: Finalise SI3 controller\n') controller_mode = "finalize" si3_controller.run_controller([], [], [], [], [], [], controller_mode) - def run_driver(common_env, mode, run_info): - """ + ''' Run the driver, and return an instance of LoadEnvar and as string containing the launcher command for the NEMO model - """ - if mode == "run_driver": + ''' + if mode == 'run_driver': exe_envar = _setup_executable(common_env) - launch_cmd = _set_launcher_command(common_env["ROSE_LAUNCHER"], exe_envar) - if run_info["l_namcouple"]: + launch_cmd = _set_launcher_command(common_env['ROSE_LAUNCHER'], + exe_envar) + if run_info['l_namcouple']: model_snd_list = None else: - run_info, model_snd_list = _sent_coupling_fields(exe_envar, run_info) - elif mode == "finalize": + run_info, model_snd_list = \ + _sent_coupling_fields(exe_envar, run_info) + elif mode == 'finalize': _finalize_executable(common_env) exe_envar = None launch_cmd = None model_snd_list = None - elif mode == "failure": + elif mode == 'failure': # subset of operations of the model fails write_ocean_out_to_stdout() exe_envar = None diff --git a/Coupled_Drivers/rivers_driver.py b/Coupled_Drivers/rivers_driver.py index f959bd9..42ed0f7 100644 --- a/Coupled_Drivers/rivers_driver.py +++ b/Coupled_Drivers/rivers_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env 
python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2025 Met Office. All rights reserved. @@ -16,7 +16,7 @@ DESCRIPTION Driver for the JULES river standalone model, called from link_drivers. -""" +''' import os import sys @@ -27,7 +27,6 @@ import error import dr_env_lib.rivers_def import dr_env_lib.env_lib - try: import f90nml except ImportError: @@ -35,82 +34,74 @@ def _setup_dates(common_envar): - """ + ''' Setup the dates for the JULES river model run - """ - calendar = common_envar["CALENDAR"] + ''' + calendar = common_envar['CALENDAR'] - sys.stdout.write("[INFO] River calendar= %s " % calendar) - if calendar not in ("360day", "365day", "gregorian"): - sys.stderr.write("[FAIL] Calendar type %s not recognised\n" % calendar) + sys.stdout.write('[INFO] River calendar= %s ' % calendar) + if calendar not in ('360day', '365day', 'gregorian'): + sys.stderr.write('[FAIL] Calendar type %s not recognised\n' % + calendar) sys.exit(error.INVALID_EVAR_ERROR) # Find the start and end times in the right format - task_start = common_envar["TASKSTART"].split(",") - task_length = common_envar["TASKLENGTH"].split(",") - - start_date = "%s%s%sT%s%sZ" % ( - task_start[0].zfill(4), - task_start[1].zfill(2), - task_start[2].zfill(2), - task_start[3].zfill(2), - task_start[4].zfill(2), - ) - format_date = "%Y-%m-%d %H:%M:%S" - length_date = "P%sY%sM%sDT%sH%sM" % ( - task_length[0], - task_length[1], - task_length[2], - task_length[3], - task_length[4], - ) + task_start = common_envar['TASKSTART'].split(',') + task_length = common_envar['TASKLENGTH'].split(',') + + start_date = '%s%s%sT%s%sZ' % (task_start[0].zfill(4), + task_start[1].zfill(2), + task_start[2].zfill(2), + task_start[3].zfill(2), + task_start[4].zfill(2)) + format_date = '%Y-%m-%d %H:%M:%S' + length_date = 'P%sY%sM%sDT%sH%sM' % (task_length[0], task_length[1], + task_length[2], task_length[3], + task_length[4]) start_cmd = 'isodatetime %s -f "%s"' % (start_date, 
format_date) - end_cmd = 'isodatetime %s -f "%s" -s %s --calendar %s' % ( - start_date, - format_date, - length_date, - calendar, - ) + end_cmd = 'isodatetime %s -f "%s" -s %s --calendar %s' % (start_date, format_date, + length_date, calendar) _, run_start = shellout._exec_subprocess(start_cmd) _, run_end = shellout._exec_subprocess(end_cmd) return run_start.strip(), run_end.strip() - def _update_river_nl(river_envar, run_start, run_end): - """ + ''' Check that the JULES river namelist files exist, update the start and end dates, and create the output directory - """ + ''' # Check that the namelist files exist - output_nl = river_envar["OUTPUT_NLIST"] - time_nl = river_envar["TIME_NLIST"] + output_nl = river_envar['OUTPUT_NLIST'] + time_nl = river_envar['TIME_NLIST'] if not os.path.isfile(output_nl): - sys.stderr.write("[FAIL] Can not find the river namelist file %s\n" % output_nl) + sys.stderr.write('[FAIL] Can not find the river namelist file %s\n' % + output_nl) sys.exit(error.MISSING_DRIVER_FILE_ERROR) if not os.path.isfile(time_nl): - sys.stderr.write("[FAIL] Can not find the river namelist file %s\n" % time_nl) + sys.stderr.write('[FAIL] Can not find the river namelist file %s\n' % + time_nl) sys.exit(error.MISSING_DRIVER_FILE_ERROR) # Update the start and end dates mod_outputnl = common.ModNamelist(output_nl) - mod_outputnl.var_val("output_start", run_start) - mod_outputnl.var_val("output_end", run_end) + mod_outputnl.var_val('output_start', run_start) + mod_outputnl.var_val('output_end', run_end) mod_outputnl.replace() mod_timenl = common.ModNamelist(time_nl) - mod_timenl.var_val("main_run_start", run_start) - mod_timenl.var_val("main_run_end", run_end) + mod_timenl.var_val('main_run_start', run_start) + mod_timenl.var_val('main_run_end', run_end) mod_timenl.replace() # Create the output directory, do not rely on f90nml - rcode, val = shellout._exec_subprocess("grep output_dir %s" % output_nl) + rcode, val = shellout._exec_subprocess('grep output_dir %s' % 
output_nl) if rcode == 0: try: - output_dir = re.findall(r"[\"\'](.*?)[\"\']", val)[0].rstrip("/") + output_dir = re.findall(r'[\"\'](.*?)[\"\']', val)[0].rstrip('/') pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True) except IndexError: # No path found @@ -118,19 +109,19 @@ def _update_river_nl(river_envar, run_start, run_end): def _setup_executable(common_envar): - """ + ''' Setup the environment and any files required by the executable - """ + ''' # Create the environment variable container river_envar = dr_env_lib.env_lib.LoadEnvar() # Load the environment variables required river_envar = dr_env_lib.env_lib.load_envar_from_definition( - river_envar, dr_env_lib.rivers_def.RIVERS_ENVIRONMENT_VARS_INITIAL - ) + river_envar, dr_env_lib.rivers_def.RIVERS_ENVIRONMENT_VARS_INITIAL) - # Link the ocean executable - common.remove_file(river_envar["RIVER_LINK"]) - os.symlink(river_envar["RIVER_EXEC"], river_envar["RIVER_LINK"]) + #Link the ocean executable + common.remove_file(river_envar['RIVER_LINK']) + os.symlink(river_envar['RIVER_EXEC'], + river_envar['RIVER_LINK']) # Setup date variables run_start, run_end = _setup_dates(common_envar) @@ -142,144 +133,133 @@ def _setup_executable(common_envar): def _set_launcher_command(river_envar): - """ + ''' Setup the launcher command for the executable - """ - launch_cmd = river_envar["ROSE_LAUNCHER_PREOPTS_RIVER"] + ''' + launch_cmd = river_envar['ROSE_LAUNCHER_PREOPTS_RIVER'] - launch_cmd = "%s ./%s" % ( - river_envar["ROSE_LAUNCHER_PREOPTS_RIVER"], - river_envar["RIVER_LINK"], - ) + launch_cmd = '%s ./%s' % \ + (river_envar['ROSE_LAUNCHER_PREOPTS_RIVER'], + river_envar['RIVER_LINK']) # Put in quotes to allow this environment variable to be exported as it # contains (or can contain) spaces - river_envar["ROSE_LAUNCHER_PREOPTS_RIVER"] = ( - "'%s'" % river_envar["ROSE_LAUNCHER_PREOPTS_RIVER"] - ) + river_envar['ROSE_LAUNCHER_PREOPTS_RIVER'] = "'%s'" % \ + river_envar['ROSE_LAUNCHER_PREOPTS_RIVER'] return launch_cmd def 
_get_river_resol(river_nl_file, run_info): - """ + ''' Determine the JULES river resolution. This function is only used when creating the namcouple at run time. - """ + ''' # Check if the namelist file exists if not os.path.isfile(river_nl_file): - sys.stderr.write( - "[FAIL] Can not find the river namelist file %s\n" % river_nl_file - ) + sys.stderr.write('[FAIL] Can not find the river namelist file %s\n' % + river_nl_file) sys.exit(error.MISSING_DRIVER_FILE_ERROR) # Read in the resolution of JULES river river_nml = f90nml.read(river_nl_file) # Check the required entries exist - if "jules_input_grid" not in river_nml: - sys.stderr.write("[FAIL] jules_input_grid not found in %s\n" % river_nl_file) + if 'jules_input_grid' not in river_nml: + sys.stderr.write('[FAIL] jules_input_grid not found in %s\n' % + river_nl_file) sys.exit(error.MISSING_RIVER_RESOL_NML) - if ( - "nx" not in river_nml["jules_input_grid"] - or "ny" not in river_nml["jules_input_grid"] - ): - sys.stderr.write( - "[FAIL] nx or ny are missing from namelist" - "jules_input_grid in %s\n" % river_nl_file - ) + if 'nx' not in river_nml['jules_input_grid'] or \ + 'ny' not in river_nml['jules_input_grid']: + sys.stderr.write('[FAIL] nx or ny are missing from namelist' + 'jules_input_grid in %s\n' % river_nl_file) sys.exit(error.MISSING_RIVER_RESOL) # Store the ocean resolution - run_info["RIV_resol"] = [ - river_nml["jules_input_grid"]["nx"], - river_nml["jules_input_grid"]["ny"], - ] + run_info['RIV_resol'] = [river_nml['jules_input_grid']['nx'], + river_nml['jules_input_grid']['ny']] return run_info def _sent_coupling_fields(river_envar, run_info): - """ + ''' Write the coupling fields sent from JULES river into model_snd_list. This function is only used when creating the namcouple at run time. 
- """ + ''' from write_namcouple import add_to_cpl_list # Check that file specifying the coupling fields sent from # JULES river is present - if not os.path.exists("OASIS_RIV_SEND"): - sys.stderr.write("[FAIL] OASIS_RIV_SEND is missing.\n") + if not os.path.exists('OASIS_RIV_SEND'): + sys.stderr.write('[FAIL] OASIS_RIV_SEND is missing.\n') sys.exit(error.MISSING_OASIS_RIV_SEND) # Add toyatm to our list of executables - if not "exec_list" in run_info: - run_info["exec_list"] = [] - run_info["exec_list"].append("toyriv") + if not 'exec_list' in run_info: + run_info['exec_list'] = [] + run_info['exec_list'].append('toyriv') # Determine the ocean resolution - run_info = _get_river_resol(river_envar["MODEL_NLIST"], run_info) + run_info = _get_river_resol(river_envar['MODEL_NLIST'], run_info) # If using the default coupling option, we'll need to read the # JULES river namelist later - river_nl = river_envar["COUPLE_NLIST"] + river_nl = river_envar['COUPLE_NLIST'] if not os.path.isfile(river_nl): - sys.stderr.write("[FAIL] Can not find the river namelist file %s\n" % river_nl) + sys.stderr.write('[FAIL] Can not find the river namelist file %s\n' % + river_nl) sys.exit(error.MISSING_DRIVER_FILE_ERROR) - run_info["river_nl"] = river_nl + run_info['river_nl'] = river_nl # Read the namelist - oasis_nml = f90nml.read("OASIS_RIV_SEND") + oasis_nml = f90nml.read('OASIS_RIV_SEND') # Check we have the expected information - if "oasis_riv_send_nml" not in oasis_nml: - sys.stderr.write( - "[FAIL] namelist oasis_riv_send_nml is " "missing from OASIS_RIV_SEND.\n" - ) + if 'oasis_riv_send_nml' not in oasis_nml: + sys.stderr.write('[FAIL] namelist oasis_riv_send_nml is ' + 'missing from OASIS_RIV_SEND.\n') sys.exit(error.MISSING_OASIS_RIV_SEND_NML) - if "oasis_riv_send" not in oasis_nml["oasis_riv_send_nml"]: - sys.stderr.write( - "[FAIL] entry oasis_riv_send is missing " - "from namelist oasis_riv_send_nml in " - "OASIS_RIV_SEND.\n" - ) + if 'oasis_riv_send' not in 
oasis_nml['oasis_riv_send_nml']: + sys.stderr.write('[FAIL] entry oasis_riv_send is missing ' + 'from namelist oasis_riv_send_nml in ' + 'OASIS_RIV_SEND.\n') sys.exit(error.MISSING_OASIS_RIV_SEND) # Create a list of fields sent from RIV model_snd_list = add_to_cpl_list( - "RIV", False, 0, oasis_nml["oasis_riv_send_nml"]["oasis_riv_send"] + 'RIV', False, 0, oasis_nml['oasis_riv_send_nml']['oasis_riv_send'] ) return run_info, model_snd_list - def _finalize_executable(): - """ + ''' Finalize the JULES river run, copy the nemo namelist to the restart directory for the next cycle, update standard out, and ensure that no errors have been found in the NEMO execution. - """ - sys.stdout.write("[INFO] finalizing JULES river") - sys.stdout.write("[INFO] running finalize in %s" % os.getcwd()) + ''' + sys.stdout.write('[INFO] finalizing JULES river') + sys.stdout.write('[INFO] running finalize in %s' % os.getcwd()) # The JULES river output is written by default to the standard output # JULES river does not produce a restart file yet - def run_driver(common_env, mode, run_info): - """ + ''' Run the driver, and return an instance of common.LoadEnvar and as string containing the launcher command for the JULES river model - """ - if mode == "run_driver": + ''' + if mode == 'run_driver': exe_envar = _setup_executable(common_env) launch_cmd = _set_launcher_command(exe_envar) - if run_info["l_namcouple"]: + if run_info['l_namcouple']: model_snd_list = None else: - run_info, model_snd_list = _sent_coupling_fields(exe_envar, run_info) - elif mode == "finalize": + run_info, model_snd_list = \ + _sent_coupling_fields(exe_envar, run_info) + elif mode == 'finalize': _finalize_executable() exe_envar = None launch_cmd = None diff --git a/Coupled_Drivers/si3_controller.py b/Coupled_Drivers/si3_controller.py index 740ca6c..48c6ce8 100644 --- a/Coupled_Drivers/si3_controller.py +++ b/Coupled_Drivers/si3_controller.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' 
*****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions @@ -14,7 +14,7 @@ si3_controller.py DESCRIPTION -""" +''' import re @@ -27,77 +27,73 @@ import dr_env_lib.ocn_cont_def import dr_env_lib.env_lib - def _check_si3nl_envar(envar_container): - """ + ''' Get the si3 namelist file exists - """ + ''' - # Information will be retrieved from this file during the running of the - # controller, so check it exists. + #Information will be retrieved from this file during the running of the + #controller, so check it exists. - if not os.path.isfile(envar_container["SI3_NL"]): - sys.stderr.write( - "[FAIL] si3_controller: Can not find the SI3 namelist " - "file %s\n" % envar_container["SI3_NL"] - ) + if not os.path.isfile(envar_container['SI3_NL']): + sys.stderr.write('[FAIL] si3_controller: Can not find the SI3 namelist ' + 'file %s\n' % envar_container['SI3_NL']) sys.exit(error.MISSING_CONTROLLER_FILE_ERROR) return 0 - def _get_si3rst(si3_nl_file): - """ + ''' Retrieve the SI3 restart directory from the nemo namelist file - """ + ''' si3rst_rcode, si3rst_val = shellout._exec_subprocess( - "grep cn_icerst_outdir %s" % si3_nl_file - ) + 'grep cn_icerst_outdir %s' % si3_nl_file) if si3rst_rcode == 0: - si3_rst = re.findall("[\"'](.*?)[\"']", si3rst_val)[0] - if si3_rst[-1] == "/": + si3_rst = re.findall('[\"\'](.*?)[\"\']', si3rst_val)[0] + if si3_rst[-1] == '/': si3_rst = si3_rst[:-1] return si3_rst return None def _verify_si3_rst(cyclepointstr, nemo_nproc, si3_restart_files): - """ + ''' Verify that the SI3 restart files match what we expect from the number of NEMO processors. 
- """ - si3_rst_regex = r"%s_restart_ice(_\d+)?\.nc" % cyclepointstr - current_rst_files = [f for f in si3_restart_files if re.findall(si3_rst_regex, f)] - - if len(current_rst_files) not in (1, nemo_nproc, nemo_nproc + 1): - sys.stderr.write( - "[FAIL] Unable to find SI3 restart files for" - " this cycle. Must either have one rebuilt file," - " as many as there are nemo processors (%i) or" - " both rebuilt and processor files." - "[FAIL] Found %i SI3 restart files\n" % (nemo_nproc, len(current_rst_files)) - ) + ''' + si3_rst_regex = r'%s_restart_ice(_\d+)?\.nc' % cyclepointstr + current_rst_files = [f for f in si3_restart_files if + re.findall(si3_rst_regex, f)] + + if len(current_rst_files) not in (1, nemo_nproc, nemo_nproc+1): + sys.stderr.write('[FAIL] Unable to find SI3 restart files for' + ' this cycle. Must either have one rebuilt file,' + ' as many as there are nemo processors (%i) or' + ' both rebuilt and processor files.' + '[FAIL] Found %i SI3 restart files\n' + % (nemo_nproc, len(current_rst_files))) sys.exit(error.MISSING_MODEL_FILE_ERROR) def _load_environment_variables(si3_envar): - """ + ''' Load the SI3 environment variables required for the model run into the si3_envar container - """ + ''' si3_envar = dr_env_lib.env_lib.load_envar_from_definition( - si3_envar, dr_env_lib.ocn_cont_def.SI3_ENVIRONMENT_VARS_INITIAL - ) + si3_envar, dr_env_lib.ocn_cont_def.SI3_ENVIRONMENT_VARS_INITIAL) return si3_envar - -def _setup_si3_controller( - common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time -): - """ +def _setup_si3_controller(common_env, + restart_ctl, + nemo_nproc, + runid, + verify_restart, + nemo_dump_time): + ''' Setup the environment and any files required by the executable - """ + ''' # Create the environment variable container si3_envar = dr_env_lib.env_lib.LoadEnvar() @@ -107,14 +103,13 @@ def _setup_si3_controller( # SI3 hasn't been set up to use CONTINUE_FROM_FAIL yet # Raise an error if it's set to prevent unexpected 
behaviour in future - if common_env["CONTINUE_FROM_FAIL"] == "true": - sys.stderr.write( - "[FAIL] si3_controller is not coded to work with" "CONTINUE_FROM_FAIL=true" - ) + if common_env['CONTINUE_FROM_FAIL'] == 'true': + sys.stderr.write('[FAIL] si3_controller is not coded to work with' + 'CONTINUE_FROM_FAIL=true') sys.exit(error.INVALID_EVAR_ERROR) restart_direcs = [] - si3_rst = _get_si3rst(si3_envar["SI3_NL"]) + si3_rst = _get_si3rst(si3_envar['SI3_NL']) if si3_rst: restart_direcs.append(si3_rst) @@ -128,9 +123,8 @@ def _setup_si3_controller( # _yyyymmdd_restart_ice.nc" or # _yyyymmdd_restart_ice.nc" in the case # of the restart file having been rebuilt. - si3_restart_files = [ - f for f in os.listdir(si3_rst) if re.findall(r".+_\d{8}_restart_ice", f) - ] + si3_restart_files = [f for f in os.listdir(si3_rst) if + re.findall(r'.+_\d{8}_restart_ice', f)] si3_restart_files.sort() # Default position is that we're starting from a restart file and @@ -144,84 +138,86 @@ def _setup_si3_controller( else: # If we didn't find any restart files in the suite data directory, # check the SI3_START env var. - if common_env["CONTINUE"] == "false": - latest_si3_dump = si3_envar["SI3_START"] + if common_env['CONTINUE'] == 'false': + latest_si3_dump = si3_envar['SI3_START'] else: # We don't have a restart file, which implies we must be # starting from climatology. - latest_si3_dump = "unset" + latest_si3_dump = 'unset' # If we have a link to restart_ice.nc left over from a previous run, # remove it for both NRUNs and CRUNs - common.remove_file("restart_ice.nc") + common.remove_file('restart_ice.nc') # Is this a CRUN or an NRUN? 
- if common_env["CONTINUE"] == "false": + if common_env['CONTINUE'] == 'false': # This is definitely a new run - sys.stdout.write("[INFO] si3_controller: New SI3 run\n\n") + sys.stdout.write('[INFO] si3_controller: New SI3 run\n\n') if os.path.isfile(latest_si3_dump): - sys.stdout.write( - "[INFO] si3_controller: Removing old SI3 " "restart data\n\n" - ) + sys.stdout.write('[INFO] si3_controller: Removing old SI3 ' + 'restart data\n\n') # For NRUNS, get rid of any existing restart files from # previous runs. - for file_path in glob.glob(si3_rst + "/*restart_ice*"): + for file_path in glob.glob(si3_rst+'/*restart_ice*'): # os.path.isfile will return true for symbolic links as well # as physical files. common.remove_file(file_path) # If we do have a SI3 start dump. - if si3_envar["SI3_START"] != "": - if os.path.isfile(si3_envar["SI3_START"]): - os.symlink(si3_envar["SI3_START"], "restart_ice.nc") - elif os.path.isfile("%s_0000.nc" % si3_envar["SI3_START"]): - for fname in glob.glob("%s_????.nc" % si3_envar["SI3_START"]): - proc_number = fname.split(".")[-2][-4:] - common.remove_file("restart_ice_%s.nc" % proc_number) - os.symlink(fname, "restart_ice_%s.nc" % proc_number) - elif os.path.isfile("%s_0000.nc" % si3_envar["SI3_START"][:-3]): - for fname in glob.glob("%s_????.nc" % si3_envar["SI3_START"][:-3]): - proc_number = fname.split(".")[-2][-4:] + if si3_envar['SI3_START'] != '': + if os.path.isfile(si3_envar['SI3_START']): + os.symlink(si3_envar['SI3_START'], 'restart_ice.nc') + elif os.path.isfile('%s_0000.nc' % + si3_envar['SI3_START']): + for fname in glob.glob('%s_????.nc' % + si3_envar['SI3_START']): + proc_number = fname.split('.')[-2][-4:] + common.remove_file('restart_ice_%s.nc' % proc_number) + os.symlink(fname, 'restart_ice_%s.nc' % proc_number) + elif os.path.isfile('%s_0000.nc' % + si3_envar['SI3_START'][:-3]): + for fname in glob.glob('%s_????.nc' % + si3_envar['SI3_START'][:-3]): + proc_number = fname.split('.')[-2][-4:] # We need to make sure there 
isn't already # a restart file link set up, and if there is, get # rid of it because symlink wont work otherwise! - common.remove_file("restart_ice_%s.nc" % proc_number) + common.remove_file('restart_ice_%s.nc' % proc_number) - os.symlink(fname, "restart_ice_%s.nc" % proc_number) + os.symlink(fname, 'restart_ice_%s.nc' % proc_number) else: - sys.stderr.write("[FAIL] file %s not found\n" % si3_envar["SI3_START"]) + sys.stderr.write('[FAIL] file %s not found\n' % + si3_envar['SI3_START']) sys.exit(error.MISSING_MODEL_FILE_ERROR) else: # If there's no SI3 restart we must be starting from climatology. - sys.stdout.write( - "[INFO] si3_controller: SI3 is starting from" " climatology.\n\n" - ) + sys.stdout.write('[INFO] si3_controller: SI3 is starting from' + ' climatology.\n\n') + elif os.path.isfile(latest_si3_dump): # We have a valid restart file so we're not starting from climatology # This could be a new run or a continutaion run. - si3_dump_time = re.findall(r"_(\d*)_restart_ice", latest_si3_dump)[0] + si3_dump_time = re.findall(r'_(\d*)_restart_ice', latest_si3_dump)[0] - if verify_restart == "True": + if verify_restart == 'True': _verify_si3_rst(nemo_dump_time, nemo_nproc, si3_restart_files) if si3_dump_time != nemo_dump_time: - sys.stderr.write( - "[FAIL] si3_controller: Mismatch in SI3 restart " - "file date %s and NEMO restart file date %s\n" - % (si3_dump_time, nemo_dump_time) - ) + sys.stderr.write('[FAIL] si3_controller: Mismatch in SI3 restart ' + 'file date %s and NEMO restart file date %s\n' + % (si3_dump_time, nemo_dump_time)) sys.exit(error.MISMATCH_RESTART_DATE_ERROR) + # This could be a new run (the first NRUN of a cycle) or # a CRUN. - sys.stdout.write( - "[INFO] si3_controller: Restart data avaliable in " - "SI3 restart directory %s. Restarting from previous " - "task output\n\n" % si3_rst - ) + sys.stdout.write('[INFO] si3_controller: Restart data avaliable in ' + 'SI3 restart directory %s. 
Restarting from previous ' + 'task output\n\n' + % si3_rst) # For each PE, set up a link to the appropriate sub-domain # restart file. @@ -229,33 +225,25 @@ def _setup_si3_controller( for i_proc in range(nemo_nproc): tag = str(i_proc).zfill(4) - si3_rst_source = "%s/%so_%s_restart_ice_%s.nc" % ( - si3_rst, - runid, - si3_dump_time, - tag, - ) - si3_rst_link = "restart_ice_%s.nc" % tag + si3_rst_source = '%s/%so_%s_restart_ice_%s.nc' % \ + (si3_rst, runid, si3_dump_time, tag) + si3_rst_link = 'restart_ice_%s.nc' % tag common.remove_file(si3_rst_link) if os.path.isfile(si3_rst_source): os.symlink(si3_rst_source, si3_rst_link) si3_restart_count += 1 if si3_restart_count < 1: - sys.stdout.write("[INFO] No SI3 sub-PE restarts found\n") + sys.stdout.write('[INFO] No SI3 sub-PE restarts found\n') # We found no passive tracer restart sub-domain files let's # look for a full domain file. - si3_rst_source = "%s/%so_%s_restart_ice.nc" % ( - si3_rst, - runid, - si3_dump_time, - ) + si3_rst_source = '%s/%so_%s_restart_ice.nc' % \ + (si3_rst, runid, si3_dump_time) if os.path.isfile(si3_rst_source): - sys.stdout.write( - "[INFO] Using rebuilt SI3 restart " "file: %s\n" % si3_rst_source - ) - si3_rst_link = "restart_ice.nc" + sys.stdout.write('[INFO] Using rebuilt SI3 restart '\ + 'file: %s\n' % si3_rst_source) + si3_rst_link = 'restart_ice.nc' common.remove_file(si3_rst_link) os.symlink(si3_rst_source, si3_rst_link) @@ -265,45 +253,48 @@ def _setup_si3_controller( # during a CRUN seems pretty slim. 
else: - sys.stderr.write( - "[FAIL] si3_controller: No restart data avaliable in " - "SI3 restart directory:\n %s\n" % si3_rst - ) + sys.stderr.write('[FAIL] si3_controller: No restart data avaliable in ' + 'SI3 restart directory:\n %s\n' % si3_rst) sys.exit(error.MISSING_MODEL_FILE_ERROR) - return si3_envar + return si3_envar + def _set_launcher_command(_): - """ + ''' Setup the launcher command for the executable - """ - sys.stdout.write( - "[INFO] si3_controller: SI3 uses the same launch " "command as NEMO\n\n" - ) - launch_cmd = "" + ''' + sys.stdout.write('[INFO] si3_controller: SI3 uses the same launch ' + 'command as NEMO\n\n') + launch_cmd = '' return launch_cmd - def _finalize_si3_controller(): - """ + ''' Finalize the passive SI3 setup - """ - - -def run_controller( - common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time, mode -): - """ + ''' + +def run_controller(common_env, + restart_ctl, + nemo_nproc, + runid, + verify_restart, + nemo_dump_time, + mode): + ''' Run the passive tracer controller. - """ - if mode == "run_controller": - exe_envar = _setup_si3_controller( - common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time - ) + ''' + if mode == 'run_controller': + exe_envar = _setup_si3_controller(common_env, + restart_ctl, + nemo_nproc, + runid, + verify_restart, + nemo_dump_time) launch_cmd = _set_launcher_command(exe_envar) - elif mode == "finalize": + elif mode == 'finalize': _finalize_si3_controller() exe_envar = None launch_cmd = None diff --git a/Coupled_Drivers/top_controller.py b/Coupled_Drivers/top_controller.py index 518299d..833834a 100644 --- a/Coupled_Drivers/top_controller.py +++ b/Coupled_Drivers/top_controller.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. @@ -59,7 +59,8 @@ all pre- and post-proccessing code. 
Version compatibility: NEMO vn3.6 -""" +''' + import re @@ -76,82 +77,73 @@ # Define errors for the TOP controller only SERIAL_MODE_ERROR = 99 - def _check_topnl_envar(envar_container): - """ + ''' As the environment variable TOP_NL is required by both the setup and finalize functions, this will be encapsulated here. - """ - # Information will be retrieved from this file during the running of the - # controller, so check it exists. - - if not os.path.isfile(envar_container["TOP_NL"]): - sys.stderr.write( - "[FAIL] top_controller: Can not find the TOP namelist " - "file %s\n" % envar_container["TOP_NL"] - ) + ''' + #Information will be retrieved from this file during the running of the + #controller, so check it exists. + + if not os.path.isfile(envar_container['TOP_NL']): + sys.stderr.write('[FAIL] top_controller: Can not find the TOP namelist ' + 'file %s\n' % envar_container['TOP_NL']) sys.exit(error.MISSING_CONTROLLER_FILE_ERROR) return 0 - def _get_toprst_dir(top_nl_file): - """ + ''' Retrieve the restart directory from the TOP namelist file. Currently TOP/MEDUSA uses the same restart directory as the main NEMO component so we could in principle get this from the NEMO namelist. However, for complete flexibility we interrogate the TOP namelist in case this is ever defined as something different. - """ + ''' toprst_rcode, toprst_val = shellout._exec_subprocess( - "grep cn_trcrst_outdir %s" % top_nl_file - ) + 'grep cn_trcrst_outdir %s' % top_nl_file) if toprst_rcode == 0: - top_rst_dir = re.findall("[\"'](.*?)[\"']", toprst_val)[0] - if top_rst_dir[-1] == "/": + top_rst_dir = re.findall('[\"\'](.*?)[\"\']', toprst_val)[0] + if top_rst_dir[-1] == '/': top_rst_dir = top_rst_dir[:-1] return top_rst_dir - def _verify_top_rst(cyclepointstr, nemo_nproc, top_restart_files): - """ + ''' Verify that the top restart files match what we expect from the number of NEMO processors. 
- """ - top_rst_regex = r"%s_restart_trc(_\d+)?\.nc" % cyclepointstr - current_rst_files = [f for f in top_restart_files if re.findall(top_rst_regex, f)] - if len(current_rst_files) not in (1, nemo_nproc, nemo_nproc + 1): - sys.stderr.write( - "[FAIL] Unable to find top restart files for" - " this cycle. Must either have one rebuilt file," - " as many as there are nemo processors (%i) or" - " both rebuilt and processor files." - "[FAIL] Found %i top restart files\n" % (nemo_nproc, len(current_rst_files)) - ) + ''' + top_rst_regex = r'%s_restart_trc(_\d+)?\.nc' % cyclepointstr + current_rst_files = [f for f in top_restart_files if + re.findall(top_rst_regex, f)] + if len(current_rst_files) not in (1, nemo_nproc, nemo_nproc+1): + sys.stderr.write('[FAIL] Unable to find top restart files for' + ' this cycle. Must either have one rebuilt file,' + ' as many as there are nemo processors (%i) or' + ' both rebuilt and processor files.' + '[FAIL] Found %i top restart files\n' + % (nemo_nproc, len(current_rst_files))) sys.exit(error.MISSING_MODEL_FILE_ERROR) def _load_environment_variables(top_envar): - """ + ''' Load the TOP environment variables required for the model run into the top_envar container - """ + ''' top_envar = dr_env_lib.env_lib.load_envar_from_definition( - top_envar, dr_env_lib.ocn_cont_def.TOP_ENVIRONMENT_VARS_INITIAL - ) + top_envar, dr_env_lib.ocn_cont_def.TOP_ENVIRONMENT_VARS_INITIAL) _ = _check_topnl_envar(top_envar) return top_envar - -def _setup_top_controller( - common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time -): - """ +def _setup_top_controller(common_env, restart_ctl, nemo_nproc, runid, + verify_restart, nemo_dump_time): + ''' Setup the environment and any files required by the executable - """ + ''' # Create the environment variable container top_envar = dr_env_lib.env_lib.LoadEnvar() @@ -160,17 +152,17 @@ def _setup_top_controller( # TOP controller hasn't been set up to use CONTINUE_FROM_FAIL yet # Raise an error if 
it's set to prevent unexpected behaviour in future - if common_env["CONTINUE_FROM_FAIL"] == "true": - sys.stderr.write( - "[FAIL] top_controller is not coded to work with" "CONTINUE_FROM_FAIL=true" - ) + if common_env['CONTINUE_FROM_FAIL'] == 'true': + sys.stderr.write('[FAIL] top_controller is not coded to work with' + 'CONTINUE_FROM_FAIL=true') sys.exit(error.INVALID_EVAR_ERROR) + # Read restart from TOP namelist restart_direcs = [] # Find the TOP restart location - top_rst = _get_toprst_dir(top_envar["TOP_NL"]) + top_rst = _get_toprst_dir(top_envar['TOP_NL']) if top_rst: restart_direcs.append(top_rst) @@ -185,9 +177,8 @@ def _setup_top_controller( # _yyyymmdd_restart_trc_.nc" or # _yyyymmdd_restart_trc.nc" in the case # of the restart file having been rebuilt. - top_restart_files = [ - f for f in os.listdir(top_rst) if re.findall(r".+_\d{8}_restart_trc", f) - ] + top_restart_files = [f for f in os.listdir(top_rst) if + re.findall(r'.+_\d{8}_restart_trc', f)] top_restart_files.sort() # Default position is that we're starting from a restart file and @@ -201,55 +192,57 @@ def _setup_top_controller( else: # If we didn't find any restart files in the suite data directory, # check the TOP_START env var. - if common_env["CONTINUE"] == "false": - latest_top_dump = top_envar["TOP_START"] + if common_env['CONTINUE'] == 'false': + latest_top_dump = top_envar['TOP_START'] else: # We don't have a restart file, which implies we must be # starting from climatology. - latest_top_dump = "unset" + latest_top_dump = 'unset' - top_init_dir = "." + top_init_dir = '.' # If we have a link to restart_trc.nc left over from a previous run, # remove it for both NRUNs and CRUNs - common.remove_file("restart_trc.nc") + common.remove_file('restart_trc.nc') # Is this a CRUN or an NRUN? 
- if common_env["CONTINUE"] == "false": + if common_env['CONTINUE'] == 'false': # This is definitely a new run - sys.stdout.write("[INFO] top_controller: New TOP/MEDUSA run\n\n") + sys.stdout.write('[INFO] top_controller: New TOP/MEDUSA run\n\n') if os.path.isfile(latest_top_dump): - sys.stdout.write( - "[INFO] top_controller: Removing old TOP " "restart data\n\n" - ) + sys.stdout.write('[INFO] top_controller: Removing old TOP ' + 'restart data\n\n') # For NRUNS, get rid of any existing restart files from # previous runs. - for file_path in glob.glob(top_rst + "/*restart_trc*"): + for file_path in glob.glob(top_rst+'/*restart_trc*'): # os.path.isfile will return true for symbolic links as well # as physical files. common.remove_file(file_path) # If we do have a passive tracer start dump. - if top_envar["TOP_START"] != "": - if os.path.isfile(top_envar["TOP_START"]): - os.symlink(top_envar["TOP_START"], "restart_trc.nc") - elif os.path.isfile("%s_0000.nc" % top_envar["TOP_START"]): - for fname in glob.glob("%s_????.nc" % top_envar["TOP_START"]): - proc_number = fname.split(".")[-2][-4:] - common.remove_file("restart_trc_%s.nc" % proc_number) - os.symlink(fname, "restart_trc_%s.nc" % proc_number) - elif os.path.isfile("%s_0000.nc" % top_envar["TOP_START"][:-3]): - for fname in glob.glob("%s_????.nc" % top_envar["TOP_START"][-3:]): - proc_number = fname.split(".")[-2][-4:] - common.remove_file("restart_trc_%s.nc" % proc_number) - os.symlink(fname, "restart_trc_%s.nc" % proc_number) + if top_envar['TOP_START'] != '': + if os.path.isfile(top_envar['TOP_START']): + os.symlink(top_envar['TOP_START'], 'restart_trc.nc') + elif os.path.isfile('%s_0000.nc' % + top_envar['TOP_START']): + for fname in glob.glob('%s_????.nc' % + top_envar['TOP_START']): + proc_number = fname.split('.')[-2][-4:] + common.remove_file('restart_trc_%s.nc' % proc_number) + os.symlink(fname, 'restart_trc_%s.nc' % proc_number) + elif os.path.isfile('%s_0000.nc' % + top_envar['TOP_START'][:-3]): + for 
fname in glob.glob('%s_????.nc' % + top_envar['TOP_START'][-3:]): + proc_number = fname.split('.')[-2][-4:] + common.remove_file('restart_trc_%s.nc' % proc_number) + os.symlink(fname, 'restart_trc_%s.nc' % proc_number) else: # If there's no TOP restart we must be starting from climatology. - sys.stdout.write( - "[INFO] top_controller: TOP is starting from" " climatology.\n\n" - ) + sys.stdout.write('[INFO] top_controller: TOP is starting from' + ' climatology.\n\n') # Set the restart flag accordingly ln_restart = ".false." @@ -259,28 +252,27 @@ def _setup_top_controller( # in all components. restart_ctl = 0 + elif os.path.isfile(latest_top_dump): # We have a valid restart file so we're not starting from climatology # This could be a new run or a continutaion run. - top_dump_time = re.findall(r"_(\d*)_restart_trc", latest_top_dump)[0] + top_dump_time = re.findall(r'_(\d*)_restart_trc', latest_top_dump)[0] - if verify_restart == "True": + if verify_restart == 'True': _verify_top_rst(nemo_dump_time, nemo_nproc, top_restart_files) if top_dump_time != nemo_dump_time: - sys.stderr.write( - "[FAIL] top_controller: Mismatch in TOP restart " - "file date %s and NEMO restart file date %s\n" - % (top_dump_time, nemo_dump_time) - ) + sys.stderr.write('[FAIL] top_controller: Mismatch in TOP restart ' + 'file date %s and NEMO restart file date %s\n' + % (top_dump_time, nemo_dump_time)) sys.exit(error.MISMATCH_RESTART_DATE_ERROR) + # This could be a new run (the first NRUN of a cycle) or # a CRUN. - sys.stdout.write( - "[INFO] top_controller: Restart data avaliable in " - "TOP restart directory %s. Restarting from previous " - "task output\n\n" % top_rst - ) + sys.stdout.write('[INFO] top_controller: Restart data avaliable in ' + 'TOP restart directory %s. 
Restarting from previous ' + 'task output\n\n' + % top_rst) top_init_dir = top_rst # For each PE, set up a link to the appropriate sub-domain @@ -289,33 +281,25 @@ def _setup_top_controller( for i_proc in range(nemo_nproc): tag = str(i_proc).zfill(4) - top_rst_source = "%s/%so_%s_restart_trc_%s.nc" % ( - top_init_dir, - runid, - top_dump_time, - tag, - ) - top_rst_link = "restart_trc_%s.nc" % tag + top_rst_source = '%s/%so_%s_restart_trc_%s.nc' % \ + (top_init_dir, runid, top_dump_time, tag) + top_rst_link = 'restart_trc_%s.nc' % tag common.remove_file(top_rst_link) if os.path.isfile(top_rst_source): os.symlink(top_rst_source, top_rst_link) top_restart_count += 1 if top_restart_count < 1: - sys.stdout.write("[INFO] No TOP sub-PE restarts found\n") + sys.stdout.write('[INFO] No TOP sub-PE restarts found\n') # We found no passive tracer restart sub-domain files let's # look for a full domain file. - top_rst_source = "%s/%so_%s_restart_trc.nc" % ( - top_init_dir, - runid, - top_dump_time, - ) + top_rst_source = '%s/%so_%s_restart_trc.nc' % \ + (top_init_dir, runid, top_dump_time) if os.path.isfile(top_rst_source): - sys.stdout.write( - "[INFO] Using rebuilt TOP restart " "file: %s\n" % top_rst_source - ) - top_rst_link = "restart_trc.nc" + sys.stdout.write('[INFO] Using rebuilt TOP restart '\ + 'file: %s\n' % top_rst_source) + top_rst_link = 'restart_trc.nc' common.remove_file(top_rst_link) os.symlink(top_rst_source, top_rst_link) @@ -325,10 +309,8 @@ def _setup_top_controller( # during a CRUN seems pretty slim. else: - sys.stderr.write( - "[FAIL] top_controller: No restart data available in " - "TOP restart directory:\n %s\n" % top_rst - ) + sys.stderr.write('[FAIL] top_controller: No restart data available in ' + 'TOP restart directory:\n %s\n' % top_rst) sys.exit(error.MISSING_MODEL_FILE_ERROR) # ln_trcdta appears to always be the opposite of ln_restart, so we @@ -342,72 +324,74 @@ def _setup_top_controller( elif ln_restart == ".false.": ln_trcdta = ".true." 
else: - sys.stderr.write( - "[FAIL] top_controller: invalid ln_restart value: " "%s\n" % ln_restart - ) + sys.stderr.write('[FAIL] top_controller: invalid ln_restart value: ' + '%s\n' % ln_restart) sys.exit(error.INVALID_LOCAL_ERROR) # Update the TOP namelist. - mod_topnl = common.ModNamelist(top_envar["TOP_NL"]) - mod_topnl.var_val("ln_rsttr", ln_restart) - mod_topnl.var_val("nn_rsttr", restart_ctl) - mod_topnl.var_val("ln_trcdta", ln_trcdta) + mod_topnl = common.ModNamelist(top_envar['TOP_NL']) + mod_topnl.var_val('ln_rsttr', ln_restart) + mod_topnl.var_val('nn_rsttr', restart_ctl) + mod_topnl.var_val('ln_trcdta', ln_trcdta) mod_topnl.replace() # Write details of our namelist settings - sys.stdout.write("[INFO] top_controller: Start of TOP namelist settings:\n") - sys.stdout.write("[INFO] Namelist file: %s \n" % top_envar["TOP_NL"]) - sys.stdout.write("[INFO] ln_rsttr = %s \n" % ln_restart) - sys.stdout.write("[INFO] nn_rsttr = %d \n" % restart_ctl) - sys.stdout.write("[INFO] ln_trcdta = %s \n" % ln_trcdta) - sys.stdout.write("[INFO] top_controller: End of TOP namelist settings\n\n") + sys.stdout.write('[INFO] top_controller: Start of TOP namelist settings:\n') + sys.stdout.write('[INFO] Namelist file: %s \n' % top_envar['TOP_NL']) + sys.stdout.write('[INFO] ln_rsttr = %s \n' % ln_restart) + sys.stdout.write('[INFO] nn_rsttr = %d \n' % restart_ctl) + sys.stdout.write('[INFO] ln_trcdta = %s \n' % ln_trcdta) + sys.stdout.write('[INFO] top_controller: End of TOP namelist settings\n\n') return top_envar - def _set_launcher_command(_): - """ + ''' Setup the launcher command for the executable - """ - sys.stdout.write( - "[INFO] top_controller: MEDUSA/TOP uses the same launch " "command as NEMO\n\n" - ) - launch_cmd = "" + ''' + sys.stdout.write('[INFO] top_controller: MEDUSA/TOP uses the same launch ' + 'command as NEMO\n\n') + launch_cmd = '' return launch_cmd - def _finalize_top_controller(): - """ + ''' Finalize the passive tracer set-up, copy the TOP namelist to 
the restart directory for the next cycle. - """ - sys.stdout.write("[INFO] finalizing Ocean Passive Tracers \n") - sys.stdout.write("[INFO] running finalize in %s \n" % os.getcwd()) + ''' + sys.stdout.write('[INFO] finalizing Ocean Passive Tracers \n') + sys.stdout.write('[INFO] running finalize in %s \n' % os.getcwd()) # Move the TOP namelist to the restart directory to allow the next cycle # to pick it up top_envar_fin = dr_env_lib.env_lib.LoadEnvar() top_envar_fin = dr_env_lib.env_lib.load_envar_from_definition( - top_envar_fin, dr_env_lib.ocn_cont_def.TOP_ENVIRONMENT_VARS_FINAL - ) - top_rst = _get_toprst_dir(top_envar_fin["TOP_NL"]) - if os.path.isdir(top_rst) and os.path.isfile(top_envar_fin["TOP_NL"]): - shutil.copy(top_envar_fin["TOP_NL"], top_rst) - - -def run_controller( - common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time, mode -): - """ + top_envar_fin, dr_env_lib.ocn_cont_def.TOP_ENVIRONMENT_VARS_FINAL) + top_rst = _get_toprst_dir(top_envar_fin['TOP_NL']) + if os.path.isdir(top_rst) and \ + os.path.isfile(top_envar_fin['TOP_NL']): + shutil.copy(top_envar_fin['TOP_NL'], top_rst) + + +def run_controller(common_env, + restart_ctl, + nemo_nproc, + runid, + verify_restart, + nemo_dump_time, mode): + ''' Run the passive tracer controller. 
- """ - if mode == "run_controller": - exe_envar = _setup_top_controller( - common_env, restart_ctl, nemo_nproc, runid, verify_restart, nemo_dump_time - ) + ''' + if mode == 'run_controller': + exe_envar = _setup_top_controller(common_env, + restart_ctl, + nemo_nproc, + runid, + verify_restart, + nemo_dump_time) launch_cmd = _set_launcher_command(exe_envar) - elif mode == "finalize": + elif mode == 'finalize': _finalize_top_controller() exe_envar = None launch_cmd = None diff --git a/Coupled_Drivers/unittests/test_cpmip_utils.py b/Coupled_Drivers/unittests/test_cpmip_utils.py index 1e55d36..c1a35a1 100644 --- a/Coupled_Drivers/unittests/test_cpmip_utils.py +++ b/Coupled_Drivers/unittests/test_cpmip_utils.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -11,10 +11,9 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** -""" +''' import unittest - try: # mock is integrated into unittest as of Python 3.3 import unittest.mock as mock @@ -26,123 +25,114 @@ import os import cpmip_utils - class TestGetComponentResolution(unittest.TestCase): - """ + ''' Test the construction of component resolution from namelist - """ - - @mock.patch("cpmip_utils.shellout._exec_subprocess") + ''' + @mock.patch('cpmip_utils.shellout._exec_subprocess') def test_get_component_resolution(self, mock_subproc): - """ + ''' Test construction of total resolution - """ - res_vars = ("i_dim", "j_dim", "k_dim") - subproc_return_values = [(0, "i_dim=10"), (0, "j_dim=20"), (0, "k_dim=30")] + ''' + res_vars = ('i_dim', 'j_dim', 'k_dim') + subproc_return_values = [(0, 'i_dim=10'), + (0, 'j_dim=20'), + (0, 'k_dim=30')] mock_subproc.side_effect = subproc_return_values - self.assertEqual( - cpmip_utils.get_component_resolution("NEMO_NL", res_vars), 6000 - ) + 
self.assertEqual(cpmip_utils.get_component_resolution('NEMO_NL', + res_vars), + 6000) subproc_calls = [] for res_var in res_vars: - subproc_calls.append(mock.call("grep %s NEMO_NL" % res_var, verbose=True)) + subproc_calls.append(mock.call('grep %s NEMO_NL' % res_var, + verbose=True)) mock_subproc.assert_has_calls(subproc_calls) class TestGlobUsage(unittest.TestCase): - """ + ''' Test the determination of disk usage using globs - """ - - @mock.patch("cpmip_utils.glob.glob", return_value=[]) + ''' + @mock.patch('cpmip_utils.glob.glob', return_value=[]) def test_get_glob_usage_nofile(self, mock_glob): - """ + ''' Test glob usage if there are no files - """ - expected_output = ( - "[WARN] Attepting to find the size of files" - " described by glob expression a*b*. There are" - " no files found" - ) - with mock.patch("sys.stderr", new=io.StringIO()) as patch_output: - dusize = cpmip_utils.get_glob_usage("a*b*") + ''' + expected_output = '[WARN] Attepting to find the size of files' \ + ' described by glob expression a*b*. 
There are' \ + ' no files found' + with mock.patch('sys.stderr', new=io.StringIO()) as patch_output: + dusize = cpmip_utils.get_glob_usage('a*b*') self.assertEqual(dusize, 0.0) self.assertEqual(patch_output.getvalue(), expected_output) - @mock.patch("cpmip_utils.glob.glob", return_value=["file1", "file2"]) - @mock.patch( - "cpmip_utils.shellout._exec_subprocess", - return_value=(0, "\n128 file1\n128 file2\n256 total\n"), - ) + @mock.patch('cpmip_utils.glob.glob', return_value=['file1', 'file2']) + @mock.patch('cpmip_utils.shellout._exec_subprocess', + return_value=(0, '\n128 file1\n128 file2\n256 total\n')) def test_get_glob_usage(self, mock_subproc, mock_glob): - """ + ''' Test file size from glob - """ - self.assertEqual(cpmip_utils.get_glob_usage("a*b*"), 256.0) - + ''' + self.assertEqual(cpmip_utils.get_glob_usage('a*b*'), + 256.0) class TestNCDFOutput(unittest.TestCase): - """ + ''' Test measurment of NCDF file sizes - """ - - @mock.patch("cpmip_utils.os.listdir", return_value=[]) - @mock.patch("cpmip_utils.shellout._exec_subprocess", return_value=(1, None)) + ''' + @mock.patch('cpmip_utils.os.listdir', return_value=[]) + @mock.patch('cpmip_utils.shellout._exec_subprocess', + return_value=(1, None)) def test_no_files_output(self, mock_subproc, mock_ncdffiles): - """ + ''' Test what happens if we cant find any files - """ + ''' self.assertEqual(cpmip_utils.get_workdir_netcdf_output(), -1.0) - @mock.patch("cpmip_utils.os.listdir", return_value=["file1.nc", "file2.nc"]) - @mock.patch( - "cpmip_utils.shellout._exec_subprocess", - return_value=(0, "\n128 file1.nc\n128 file2.nc\n256 total\n"), - ) + @mock.patch('cpmip_utils.os.listdir', return_value=['file1.nc', + 'file2.nc']) + @mock.patch('cpmip_utils.shellout._exec_subprocess', + return_value=(0, '\n128 file1.nc\n128 file2.nc\n256 total\n')) def test_files_output(self, mock_subproc, mock_ncdffiles): - """ + ''' Check to see if the function can get the correct total value - """ + ''' 
self.assertEqual(cpmip_utils.get_workdir_netcdf_output(), 256.0) class TestTimeFunctions(unittest.TestCase): - """ + ''' Test the time related functions - """ - + ''' def test_seconds_to_days_halfday(self): - """ + ''' Test half a day of seconds - """ + ''' self.assertEqual(cpmip_utils.seconds_to_days(43200.0), 0.5) def test_seconds_to_days_twodays(self): - """ + ''' Test two full days of seconds - """ + ''' self.assertEqual(cpmip_utils.seconds_to_days(172800.0), 2.0) def test_tasklength_to_years(self): - """ + ''' Test tasklength to years, pass in all ones, to check everything - """ - self.assertEqual( - cpmip_utils.tasklength_to_years("0001,01,01,01,01,01"), 1.0362288130144035 - ) - + ''' + self.assertEqual(cpmip_utils.tasklength_to_years( + '0001,01,01,01,01,01'), 1.0362288130144035) class TestPBSJobFileXc40Case(unittest.TestCase): - """ + ''' Test the reading of an example PBS job file - """ - + ''' def setUp(self): - """ + ''' Create an example jobfile - """ - self.jobfile_name = "test_jobfile" - example_input = """ + ''' + self.jobfile_name = 'test_jobfile' + example_input = ''' # DIRECTIVES: #PBS -N coupled.19600101T0000Z.mi-ba962_compiler_upgrade_877 #PBS -o cylc-run/mi-ba962_compiler_upgrade_877/log/job/19600101T0000Z/coupled/01/job.out @@ -153,48 +143,45 @@ def setUp(self): #PBS -q high # N.B. CYLC_DIR has been updated on the remote host export CYLC_DIR='/common/fcm/cylc-7.8.6' - """ - with open(self.jobfile_name, "w") as test_jobfile: + ''' + with open(self.jobfile_name, 'w') as test_jobfile: test_jobfile.write(example_input) def tearDown(self): - """ + ''' Remove the example job file at end of test - """ + ''' try: os.remove(self.jobfile_name) except OSError: pass def test_jobfile(self): - """ + ''' Test the retrival of the pbs -l directives is correct. 
As this is a double underscore function, we need to apply the name mangling rule - """ - expected_result = { - "walltime": "02:30:00", - "select": "36", - "subproject": "ukesmdev", - "funding": "hccp", - "coretype": "broadwell", - } + ''' + expected_result = {'walltime': '02:30:00', + 'select': '36', + 'subproject': 'ukesmdev', + 'funding': 'hccp', + 'coretype': 'broadwell'} result = cpmip_utils.get_jobfile_info(self.jobfile_name) self.assertEqual(result, expected_result) class TestPBSJobFileExCase(unittest.TestCase): - """ + ''' Test the reading of an example PBS job file - """ - + ''' def setUp(self): - """ + ''' Create example jobfiles - """ + ''' # A fully fledged jobfile - self.jobfile_name = "test_jobfile" - example_input = """# DIRECTIVES: + self.jobfile_name = 'test_jobfile' + example_input = '''# DIRECTIVES: #PBS -N coupled.19780901T0000Z.mi-bd155_add_cpmip_metrics #PBS -o cylc-run/mi-bd155_add_cpmip_metrics/log/job/19780901T0000Z/coupled/01/job.out #PBS -e cylc-run/mi-bd155_add_cpmip_metrics/log/job/19780901T0000Z/coupled/01/job.err @@ -202,20 +189,20 @@ def setUp(self): #PBS -q normal #PBS -l select=2:ncpus=256:mpiprocs=90+5:ncpus=256:mpiprocs=120+1:ncpus=256:mpiprocs=6 # N.B. 
CYLC_DIR has been updated on the remote host - """ - with open(self.jobfile_name, "w") as test_jobfile: + ''' + with open(self.jobfile_name, 'w') as test_jobfile: test_jobfile.write(example_input) # A jobfile with one model - self.onemodel_jobfile_name = "test_onemodel_jobfile" - example_input = """#PBS -l select=24:ncpus=256""" - with open(self.onemodel_jobfile_name, "w") as test_jobfile: + self.onemodel_jobfile_name = 'test_onemodel_jobfile' + example_input='''#PBS -l select=24:ncpus=256''' + with open(self.onemodel_jobfile_name, 'w') as test_jobfile: test_jobfile.write(example_input) def tearDown(self): - """ + ''' Remove the example job file at end of test - """ + ''' jobfiles = (self.jobfile_name, self.onemodel_jobfile_name) for jobfile in jobfiles: try: @@ -224,55 +211,58 @@ def tearDown(self): pass def test_jobfile(self): - """ + ''' Test the retrival of the pbs -l select directive for nodes for each model in MPMD mode is correct - """ - expected_result = ([2, 5, 1], "milan") + ''' + expected_result = ([2, 5, 1], 'milan') result = cpmip_utils.get_select_nodes(self.jobfile_name) self.assertEqual(result, expected_result) def test_jobfile_one_model(self): - """ + ''' Test the correct retrieval for a single model in the -l select directive - """ - expected_result = ([24], "milan") + ''' + expected_result = ([24], 'milan') result = cpmip_utils.get_select_nodes(self.onemodel_jobfile_name) self.assertEqual(result, expected_result) - class TestIncrementDump(unittest.TestCase): - """ + ''' Test the increment of the dump date to the end of cycle - """ - + ''' def test_one_day_increment(self): - """ + ''' Test increment of one day - """ - self.assertEqual(cpmip_utils.increment_dump("20200115", 1, "d"), "20200116") + ''' + self.assertEqual(cpmip_utils.increment_dump('20200115', 1, 'd'), + '20200116') def test_thirty_five_day_increment(self): - """ + ''' Test increment of 35 days, to ensure that the month rolls over - """ - 
self.assertEqual(cpmip_utils.increment_dump("20200101", 35, "d"), "20200206") + ''' + self.assertEqual(cpmip_utils.increment_dump('20200101', 35, 'd'), + '20200206') def test_one_month_increment(self): - """ + ''' Test increment of one month - from not the first of the month testing a month rollover - """ - self.assertEqual(cpmip_utils.increment_dump("20200115", 1, "m"), "20200215") + ''' + self.assertEqual(cpmip_utils.increment_dump('20200115', 1, 'm'), + '20200215') def test_three_month_increment(self): - """ + ''' Test increment of three months - """ - self.assertEqual(cpmip_utils.increment_dump("20200115", 3, "M"), "20200415") + ''' + self.assertEqual(cpmip_utils.increment_dump('20200115', 3, 'M'), + '20200415') def test_thirteen_month_increment(self): - """ + ''' Test increment of three months - testing a month and a year rollover - """ - self.assertEqual(cpmip_utils.increment_dump("20200115", 13, "M"), "20210215") + ''' + self.assertEqual(cpmip_utils.increment_dump('20200115', 13, 'M'), + '20210215') diff --git a/Coupled_Drivers/unittests/test_cpmip_xios.py b/Coupled_Drivers/unittests/test_cpmip_xios.py index b8855f0..58df7c7 100644 --- a/Coupled_Drivers/unittests/test_cpmip_xios.py +++ b/Coupled_Drivers/unittests/test_cpmip_xios.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. 
@@ -11,10 +11,9 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** -""" +''' import unittest - try: # mock is integrated into unittest as of Python 3.3 import unittest.mock as mock @@ -26,134 +25,111 @@ import os import cpmip_xios - class TestDataMetricSetupNemo(unittest.TestCase): - """ + ''' Check the setting up of XIOS for NEMO data metrics - """ - + ''' def setUp(self): - """ + ''' Create an iodef file for test - """ - self.xml_file_name = "iodef.xml" - input_contents = ( - "iodef line 1\n" - 'variable id="using_server"\n' - '\t false\n' - "iodef line 4\n" - ) - with open(self.xml_file_name, "w") as iodef_fh: + ''' + self.xml_file_name = 'iodef.xml' + input_contents = 'iodef line 1\n' \ + 'variable id="using_server"\n' \ + '\t false\n' \ + 'iodef line 4\n' + with open(self.xml_file_name, 'w') as iodef_fh: iodef_fh.write(input_contents) def tearDown(self): - """ + ''' Remove the iodef file at the end of test - """ + ''' try: os.remove(self.xml_file_name) except FileNotFoundError: pass def test_update_iodef(self): - """ + ''' Test the iodef file is updated correctly - """ - expected_output = ( - "iodef line 1\n" - 'variable id="using_server"\n' - '\t true\n' - "iodef line 4\n" - ) + ''' + expected_output = 'iodef line 1\n' \ + 'variable id="using_server"\n' \ + '\t true\n' \ + 'iodef line 4\n' cpmip_xios.data_metrics_setup_nemo() # check contents of new iodef.xml file - with open(self.xml_file_name, "r") as new_iodef_fh: + with open(self.xml_file_name, 'r') as new_iodef_fh: new_iodef_contents = new_iodef_fh.read() self.assertEqual(new_iodef_contents, expected_output) - self.assertFalse("iodef_out.xml" in os.listdir(".")) + self.assertFalse('iodef_out.xml' in os.listdir('.')) class TestMeasureXIOSClient(unittest.TestCase): - """ + ''' Test measurement of timings from XIOS client files - """ - - @mock.patch("cpmip_xios.os.listdir", return_value=[]) + ''' + 
@mock.patch('cpmip_xios.os.listdir', return_value=[]) def test_no_files(self, mock_listdir): - """ + ''' Test that the correct output and error messages are produced when no XIOS client files can be found - """ - expected_output = "[INFO] Measured timings for (0/0) XIOS clients\n" - expected_error = "[WARN] Unable to find any XIOS client output files\n" - - with mock.patch("sys.stdout", new=io.StringIO()) as patch_out: - with mock.patch("sys.stderr", new=io.StringIO()) as patch_err: - self.assertEqual(cpmip_xios.measure_xios_client_times(), (0.0, 0.0)) + ''' + expected_output = '[INFO] Measured timings for (0/0) XIOS clients\n' + expected_error = '[WARN] Unable to find any XIOS client output files\n' + + with mock.patch('sys.stdout', new=io.StringIO()) as patch_out: + with mock.patch('sys.stderr', new=io.StringIO()) as patch_err: + self.assertEqual(cpmip_xios.measure_xios_client_times(), + (0.0, 0.0)) self.assertEqual(patch_out.getvalue(), expected_output) self.assertEqual(patch_err.getvalue(), expected_error) - @mock.patch( - "cpmip_xios.os.listdir", - return_value=["xios_client0.out", "xios_client1.out", "xios_client2.out"], - ) - @mock.patch("cpmip_xios.shellout._exec_subprocess") + @mock.patch('cpmip_xios.os.listdir', return_value= + ['xios_client0.out', + 'xios_client1.out', + 'xios_client2.out']) + @mock.patch('cpmip_xios.shellout._exec_subprocess') def test_three_files(self, mock_exec_subproc, mock_listdir): - """ + ''' Test that three files with no timeout give mean and max - """ + ''' mock_exec_subproc.side_effect = [ - ( - 0, - "-> report : Performance report : total time spent for XIOS" - " : 100.0 s", - ), - ( - 0, - "-> report : Performance report : total time spent for XIOS" - " : 10.0 s", - ), - ( - 0, - "-> report : Performance report : total time spent for XIOS" - " : 1000.0 s", - ), - ] + (0, '-> report : Performance report : total time spent for XIOS' + ' : 100.0 s'), + (0, '-> report : Performance report : total time spent for XIOS' + ' : 10.0 
s'), + (0, '-> report : Performance report : total time spent for XIOS' + ' : 1000.0 s')] expected_output = (370.0, 1000.0) - with mock.patch("sys.stdout", new=io.StringIO()) as patch_output: - self.assertEqual(cpmip_xios.measure_xios_client_times(), expected_output) - self.assertEqual( - patch_output.getvalue(), "[INFO] Measured timings for (3/3) XIOS clients\n" - ) - - @mock.patch( - "cpmip_xios.os.listdir", - return_value=["xios_client0.out", "xios_client1.out", "xios_client2.out"], - ) - @mock.patch("cpmip_xios.shellout._exec_subprocess") + with mock.patch('sys.stdout', new=io.StringIO()) as patch_output: + self.assertEqual(cpmip_xios.measure_xios_client_times(), + expected_output) + self.assertEqual(patch_output.getvalue(), + '[INFO] Measured timings for (3/3) XIOS clients\n') + + @mock.patch('cpmip_xios.os.listdir', return_value= + ['xios_client0.out', + 'xios_client1.out', + 'xios_client2.out']) + @mock.patch('cpmip_xios.shellout._exec_subprocess') def test_one_timeout(self, mock_exec_subproc, mock_listdir): - """ + ''' Test what happens if there is a timeout - """ + ''' mock_exec_subproc.side_effect = [ - ( - 0, - "-> report : Performance report : total time spent for XIOS" - " : 100.0 s", - ), + (0, '-> report : Performance report : total time spent for XIOS' + ' : 100.0 s'), (1, None), - ( - 0, - "-> report : Performance report : total time spent for XIOS" - " : 1000.0 s", - ), - ] + (0, '-> report : Performance report : total time spent for XIOS' + ' : 1000.0 s')] expected_output = (550.0, 1000.0) - with mock.patch("sys.stdout", new=io.StringIO()) as patch_output: - self.assertEqual(cpmip_xios.measure_xios_client_times(), expected_output) - self.assertEqual( - patch_output.getvalue(), "[INFO] Measured timings for (2/3) XIOS clients\n" - ) + with mock.patch('sys.stdout', new=io.StringIO()) as patch_output: + self.assertEqual(cpmip_xios.measure_xios_client_times(), + expected_output) + self.assertEqual(patch_output.getvalue(), + '[INFO] Measured timings 
for (2/3) XIOS clients\n') diff --git a/Coupled_Drivers/unittests/test_rivers_driver.py b/Coupled_Drivers/unittests/test_rivers_driver.py index 1d935b2..2f25233 100644 --- a/Coupled_Drivers/unittests/test_rivers_driver.py +++ b/Coupled_Drivers/unittests/test_rivers_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2025 Met Office. All rights reserved. @@ -11,209 +11,206 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** -""" +''' import sys import unittest import unittest.mock as mock import rivers_driver -COMMON_ENV = { - "CALENDAR": "gregorian", - "TASKSTART": "1979,09,01,00,0", - "TASKLENGTH": "1,4,10,0,0", -} +COMMON_ENV = { 'CALENDAR': 'gregorian', + 'TASKSTART': '1979,09,01,00,0', + 'TASKLENGTH': '1,4,10,0,0', } nml = rivers_driver.dr_env_lib.rivers_def.RIVERS_ENVIRONMENT_VARS_INITIAL -RIVER_ENV = {k: nml[k].get("default_val", None) for k in nml.keys()} +RIVER_ENV = { k: nml[k].get('default_val', None) for k in nml.keys()} class TestPrivateMethods(unittest.TestCase): - """ + ''' Test the private methods of the JULES river standalone driver - """ + ''' - @mock.patch("rivers_driver.shellout._exec_subprocess", return_value=[0, "output"]) + @mock.patch('rivers_driver.shellout._exec_subprocess', return_value=[0, 'output']) def test_setup_dates(self, mock_exec): - """Test the _setup_dates method""" + ''' Test the _setup_dates method ''' start, end = rivers_driver._setup_dates(COMMON_ENV) - self.assertIn( - mock.call('isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S"'), - mock_exec.mock_calls, - ) - self.assertIn( - mock.call( - 'isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S" -s P1Y4M10DT0H0M --calendar gregorian' - ), - mock_exec.mock_calls, - ) + self.assertIn(mock.call('isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S"'), + mock_exec.mock_calls) + 
self.assertIn(mock.call('isodatetime 19790901T0000Z -f "%Y-%m-%d %H:%M:%S" -s P1Y4M10DT0H0M --calendar gregorian'), + mock_exec.mock_calls) self.assertEqual(len(mock_exec.mock_calls), 2) - @mock.patch("rivers_driver.common") - @mock.patch("rivers_driver.shellout") - @mock.patch("rivers_driver.os.path.isfile") - @mock.patch("rivers_driver.pathlib") + @mock.patch('rivers_driver.common') + @mock.patch('rivers_driver.shellout') + @mock.patch('rivers_driver.os.path.isfile') + @mock.patch('rivers_driver.pathlib') def test_update_river_nl(self, mock_lib, mock_path, mock_shellout, mock_common): - """Test the _update_river_nl method""" + ''' Test the _update_river_nl method ''' mock_shellout._exec_subprocess.returnvalue = (0, 'dir="this/path/"') - rivers_driver._update_river_nl(RIVER_ENV, "19790901T0000Z", "19810121T0000Z") + rivers_driver._update_river_nl(RIVER_ENV, + '19790901T0000Z', '19810121T0000Z') path_calls = mock_path.mock_calls - self.assertIn(mock.call("output.nml"), path_calls) - self.assertIn(mock.call("timesteps.nml"), path_calls) + self.assertIn(mock.call('output.nml'), path_calls) + self.assertIn(mock.call('timesteps.nml'), path_calls) nml_calls = mock_common.ModNamelist.mock_calls - self.assertIn(mock.call("output.nml"), nml_calls) - self.assertIn(mock.call("timesteps.nml"), nml_calls) - self.assertIn(mock.call().var_val("output_start", "19790901T0000Z"), nml_calls) + self.assertIn(mock.call('output.nml'), nml_calls) + self.assertIn(mock.call('timesteps.nml'), nml_calls) + self.assertIn(mock.call().var_val('output_start', '19790901T0000Z'), + nml_calls) self.assertIn(mock.call().replace(), nml_calls) - self.assertIn( - mock.call().var_val("main_run_start", "19790901T0000Z"), nml_calls - ) - self.assertIn(mock.call().var_val("main_run_end", "19810121T0000Z"), nml_calls) + self.assertIn(mock.call().var_val('main_run_start', '19790901T0000Z'), + nml_calls) + self.assertIn(mock.call().var_val('main_run_end', '19810121T0000Z'), + nml_calls) 
mock_shellout._exec_subprocess.assert_called_once_with( - "grep output_dir output.nml" + 'grep output_dir output.nml' ) - mock_lib.Path.assert_called_once_with("this/path") - mock_lib.Path().mkdir.assert_called_once_with(parents=True, exist_ok=True) + mock_lib.Path.assert_called_once_with('this/path') + mock_lib.Path().mkdir.assert_called_once_with(parents=True, + exist_ok=True) - @mock.patch("rivers_driver.os.path.isfile") - @mock.patch("rivers_driver.f90nml.read") + @mock.patch('rivers_driver.os.path.isfile') + @mock.patch('rivers_driver.f90nml.read') def test_get_river_resol(self, mock_read, mock_path): - """Test the _get_river_resol function""" - mock_read.return_value = {"jules_input_grid": {"nx": 10, "ny": 20}} + '''Test the _get_river_resol function''' + mock_read.return_value = {'jules_input_grid': {'nx': 10, 'ny': 20}} - out_info = rivers_driver._get_river_resol("riv_nl", {}) - self.assertIn(mock.call("riv_nl"), mock_path.mock_calls) - self.assertEqual(out_info, {"RIV_resol": [10, 20]}) + out_info = rivers_driver._get_river_resol('riv_nl', {}) + self.assertIn(mock.call('riv_nl'), mock_path.mock_calls) + self.assertEqual(out_info, {'RIV_resol': [10, 20]}) - @mock.patch("rivers_driver.os.path") - @mock.patch("rivers_driver._get_river_resol") - @mock.patch("rivers_driver.f90nml") + @mock.patch('rivers_driver.os.path') + @mock.patch('rivers_driver._get_river_resol') + @mock.patch('rivers_driver.f90nml') def test_sent_coupling_fields(self, mock_nml, mock_res, mock_path): - """Run info should pass through, and should also return None as a - second value""" - run_info = {"exec_list": ["toyriv"], "river_nl": "rivers_coupling.nml"} + '''Run info should pass through, and should also return None as a + second value''' + run_info = {'exec_list': ['toyriv'], + 'river_nl': 'rivers_coupling.nml'} mock_res.return_value = run_info mock_nml.read.return_value = { - "oasis_riv_send_nml": {"oasis_riv_send": "fields"} + 'oasis_riv_send_nml': {'oasis_riv_send': 'fields'} } 
mock_namcpl = mock.Mock(add_to_cpl_list=mock.MagicMock()) - mock_namcpl.add_to_cpl_list.return_value = "send_list" + mock_namcpl.add_to_cpl_list.return_value = 'send_list' - with mock.patch.dict(sys.modules, {"write_namcouple": mock_namcpl}): + with mock.patch.dict(sys.modules, {'write_namcouple': mock_namcpl}): rtn = rivers_driver._sent_coupling_fields(RIVER_ENV, {}) - self.assertEqual(rtn, (run_info, "send_list")) + self.assertEqual(rtn, (run_info, 'send_list')) path_calls = mock_path.mock_calls - self.assertIn(mock.call.exists("OASIS_RIV_SEND"), path_calls) - self.assertIn(mock.call.isfile("rivers_coupling.nml"), path_calls) - mock_res.assert_called_once_with("model_grid.nml", {"exec_list": ["toyriv"]}) - mock_nml.read.assert_called_once_with("OASIS_RIV_SEND") - mock_namcpl.add_to_cpl_list.assert_called_once_with("RIV", False, 0, "fields") + self.assertIn(mock.call.exists('OASIS_RIV_SEND'), path_calls) + self.assertIn(mock.call.isfile('rivers_coupling.nml'), path_calls) + mock_res.assert_called_once_with('model_grid.nml', + {'exec_list': ['toyriv']}) + mock_nml.read.assert_called_once_with('OASIS_RIV_SEND') + mock_namcpl.add_to_cpl_list.assert_called_once_with( + 'RIV', False, 0, 'fields' + ) class TestSetupExecutable(unittest.TestCase): - """ + ''' Test the loading of environment variables and file copies - """ - - @mock.patch("rivers_driver.dr_env_lib.env_lib") - @mock.patch("rivers_driver.common") - @mock.patch("rivers_driver.os.symlink") - @mock.patch("rivers_driver._setup_dates", return_value=("start", "end")) - @mock.patch("rivers_driver._update_river_nl") - def test_setup_executable(self, mock_upd, mock_date, mock_link, mock_cmn, mock_env): - """Test the _setup_executable function""" + ''' + @mock.patch('rivers_driver.dr_env_lib.env_lib') + @mock.patch('rivers_driver.common') + @mock.patch('rivers_driver.os.symlink') + @mock.patch('rivers_driver._setup_dates', return_value=('start', 'end')) + @mock.patch('rivers_driver._update_river_nl') + def 
test_setup_executable(self, mock_upd, mock_date, + mock_link, mock_cmn, mock_env): + '''Test the _setup_executable function''' rivers_envar = RIVER_ENV - rivers_envar["RIVER_EXEC"] = "executable" + rivers_envar['RIVER_EXEC'] = 'executable' mock_env.load_envar_from_definition.return_value = rivers_envar return_rivers_envar = rivers_driver._setup_executable(COMMON_ENV) mock_env.LoadEnvar.assert_called_once_with() - mock_cmn.remove_file.assert_called_once_with("river.exe") - mock_link.assert_called_once_with("executable", "river.exe") + mock_cmn.remove_file.assert_called_once_with('river.exe') + mock_link.assert_called_once_with('executable', 'river.exe') mock_date.assert_called_once_with(COMMON_ENV) - mock_upd.assert_called_once_with(rivers_envar, "start", "end") + mock_upd.assert_called_once_with(rivers_envar, 'start', 'end') self.assertEqual(return_rivers_envar, rivers_envar) def test_launcher_command(self): - """Test the _set_launcher_command function""" + '''Test the _set_launcher_command function''' env = RIVER_ENV - env["ROSE_LAUNCHER_PREOPTS_RIVER"] = "river pre-opts" + env['ROSE_LAUNCHER_PREOPTS_RIVER'] = 'river pre-opts' cmd = rivers_driver._set_launcher_command(env) - self.assertEqual(cmd, "river pre-opts ./river.exe") - self.assertEqual(env["ROSE_LAUNCHER_PREOPTS_RIVER"], "'river pre-opts'") + self.assertEqual(cmd, 'river pre-opts ./river.exe') + self.assertEqual(env['ROSE_LAUNCHER_PREOPTS_RIVER'], + "'river pre-opts'") class TestFinalizeExecutable(unittest.TestCase): - """ + ''' Test the finalizing of the executable - """ - + ''' def test_finalize_executable(self): - """Test the finalize. It does nothing at the moment""" + '''Test the finalize. 
It does nothing at the moment''' self.assertIsNone(rivers_driver._finalize_executable()) class TestRunDriver(unittest.TestCase): - """ + ''' Test the interface to run the driver - """ - - @mock.patch("rivers_driver._setup_executable") - @mock.patch("rivers_driver._finalize_executable") + ''' + @mock.patch('rivers_driver._setup_executable') + @mock.patch('rivers_driver._finalize_executable') def test_run_driver_finalize(self, mock_finalize, mock_setup): - """Test finalise mode""" - rvalue = rivers_driver.run_driver("common_env", "finalize", "run_info") - self.assertEqual(rvalue, (None, None, "run_info", None)) + '''Test finalise mode''' + rvalue = rivers_driver.run_driver('common_env', 'finalize', 'run_info') + self.assertEqual(rvalue, (None, None, 'run_info', None)) mock_setup.assert_not_called() mock_finalize.assert_called_once_with() - @mock.patch("rivers_driver._setup_executable") - @mock.patch("rivers_driver._set_launcher_command") - @mock.patch("rivers_driver._sent_coupling_fields") - @mock.patch("rivers_driver._finalize_executable") + @mock.patch('rivers_driver._setup_executable') + @mock.patch('rivers_driver._set_launcher_command') + @mock.patch('rivers_driver._sent_coupling_fields') + @mock.patch('rivers_driver._finalize_executable') def test_run_driver_l_namcouple( - self, mock_finalize, mock_namc, mock_launcher_cmd, mock_setup + self, mock_finalize, mock_namc, mock_launcher_cmd, mock_setup ): - """Test run mode with l_namcouple set in run info""" - run_info = {"l_namcouple": True} - common_env = {"ROSE_LAUNCHER": "launcher"} - mock_setup.return_value = "exe_envar" - mock_launcher_cmd.return_value = "launch_cmd" - rvalue = rivers_driver.run_driver(common_env, "run_driver", run_info) - self.assertEqual(rvalue, ("exe_envar", "launch_cmd", run_info, None)) + '''Test run mode with l_namcouple set in run info''' + run_info = {'l_namcouple': True} + common_env = {'ROSE_LAUNCHER': 'launcher'} + mock_setup.return_value = 'exe_envar' + 
mock_launcher_cmd.return_value = 'launch_cmd' + rvalue = rivers_driver.run_driver(common_env, 'run_driver', run_info) + self.assertEqual(rvalue, ('exe_envar', 'launch_cmd', run_info, None)) mock_setup.assert_called_once_with(common_env) - mock_launcher_cmd.assert_called_once_with("exe_envar") + mock_launcher_cmd.assert_called_once_with('exe_envar') mock_namc.assert_not_called() mock_finalize.assert_not_called() - @mock.patch("rivers_driver._setup_executable") - @mock.patch("rivers_driver._set_launcher_command") - @mock.patch("rivers_driver._sent_coupling_fields") - @mock.patch("rivers_driver._finalize_executable") + @mock.patch('rivers_driver._setup_executable') + @mock.patch('rivers_driver._set_launcher_command') + @mock.patch('rivers_driver._sent_coupling_fields') + @mock.patch('rivers_driver._finalize_executable') def test_run_driver_no_l_namcouple( - self, mock_finalize, mock_namc, mock_launcher_cmd, mock_setup + self, mock_finalize, mock_namc, mock_launcher_cmd, mock_setup ): - """Test run mode with l_namcouple set to False in run info""" - run_info = {"l_namcouple": False} - common_env = {"ROSE_LAUNCHER": "launcher"} - mock_setup.return_value = "exe_envar" - mock_launcher_cmd.return_value = "launch_cmd" - mock_namc.return_value = ("run_info", "model_snd_list") + '''Test run mode with l_namcouple set to False in run info''' + run_info = {'l_namcouple': False} + common_env = {'ROSE_LAUNCHER': 'launcher'} + mock_setup.return_value = 'exe_envar' + mock_launcher_cmd.return_value = 'launch_cmd' + mock_namc.return_value = ('run_info', 'model_snd_list') - rvalue = rivers_driver.run_driver(common_env, "run_driver", run_info) + rvalue = rivers_driver.run_driver(common_env, 'run_driver', run_info) self.assertEqual( - rvalue, ("exe_envar", "launch_cmd", "run_info", "model_snd_list") - ) + rvalue, ('exe_envar', 'launch_cmd', 'run_info', 'model_snd_list')) mock_setup.assert_called_once_with(common_env) - mock_launcher_cmd.assert_called_once_with("exe_envar") - 
mock_namc.assert_called_once_with("exe_envar", run_info) + mock_launcher_cmd.assert_called_once_with('exe_envar') + mock_namc.assert_called_once_with('exe_envar', run_info) mock_finalize.assert_not_called() diff --git a/Coupled_Drivers/write_namcouple.py b/Coupled_Drivers/write_namcouple.py index 621dcbb..4328497 100644 --- a/Coupled_Drivers/write_namcouple.py +++ b/Coupled_Drivers/write_namcouple.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2021-2025 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions @@ -15,7 +15,7 @@ DESCRIPTION Write namcouple file at run time. -""" +''' import sys import itertools import common @@ -27,39 +27,23 @@ import write_namcouple_header # Dictionary containing the RMP mappings -RMP_MAPPING = { - "Bc": "BICUBIC", - "Bi": "BILINEA", - "CD": "CONSERV_DESTAREA", - "CF": "CONSERV_FRACAREA", - "0D": "OneVal", - "1D": "OneD", - "NB": "nomask_BILINEA", - "remove": "remove", -} - - -class NamcoupleEntry: - """ +RMP_MAPPING = {'Bc':'BICUBIC', + 'Bi':'BILINEA', + 'CD':'CONSERV_DESTAREA', + 'CF':'CONSERV_FRACAREA', + '0D':'OneVal', + '1D':'OneD', + 'NB':'nomask_BILINEA', + 'remove':'remove'} + +class NamcoupleEntry(): + ''' Container to hold the information for one namcouple entry - """ + ''' - def __init__( - self, - name_out, - field_id, - grid, - origin, - dest, - nlev, - l_soil, - mapping, - mapping_type, - weight, - l_hybrid, - n_cpl_freq, - override_cpl_freq, - ): + def __init__(self, name_out, field_id, grid, origin, dest, nlev, l_soil, + mapping, mapping_type, weight, l_hybrid, n_cpl_freq, + override_cpl_freq): self.name_out = name_out self.field_id = field_id self.grid = grid @@ -75,185 +59,148 @@ def __init__( self.override_cpl_freq = override_cpl_freq def __repr__(self): - return repr( - ( - self.name_out, - self.field_id, - self.grid, - self.origin, - self.dest, - self.nlev, - 
self.l_soil, - self.mapping, - self.mapping_type, - self.weight, - self.l_hybrid, - self.n_cpl_freq, - self.override_cpl_freq, - ) - ) - + return repr((self.name_out, self.field_id, self.grid, self.origin, + self.dest, self.nlev, self.l_soil, self.mapping, + self.mapping_type, self.weight, self.l_hybrid, + self.n_cpl_freq, self.override_cpl_freq)) def _print_run_info(run_info): - """ + ''' Print the information in run_info - """ - sys.stdout.write("[INFO] Display the contents of run_info:\n") - sys.stdout.write("[INFO] -------- Resolutions -------- \n") - if "ATM_grid" in run_info: - sys.stdout.write( - "[INFO] Atmosphere: %s (%d, %d)\n" - % (run_info["ATM_grid"], run_info["ATM_resol"][0], run_info["ATM_resol"][1]) - ) - if "JNR_grid" in run_info: - sys.stdout.write( - "[INFO] Junior atmosphere: %s (%d, %d)\n" - % (run_info["JNR_grid"], run_info["JNR_resol"][0], run_info["JNR_resol"][1]) - ) - if "OCN_grid" in run_info: + ''' + sys.stdout.write('[INFO] Display the contents of run_info:\n') + sys.stdout.write('[INFO] -------- Resolutions -------- \n') + if 'ATM_grid' in run_info: + sys.stdout.write('[INFO] Atmosphere: %s (%d, %d)\n' % + (run_info['ATM_grid'], run_info['ATM_resol'][0], + run_info['ATM_resol'][1])) + if 'JNR_grid' in run_info: + sys.stdout.write('[INFO] Junior atmosphere: %s (%d, %d)\n' % + (run_info['JNR_grid'], run_info['JNR_resol'][0], + run_info['JNR_resol'][1])) + if 'OCN_grid' in run_info: # If running ATM<->JNR coupling we can have an ocean resolution # without an ocean. 
- if "OCN_resol" in run_info: - sys.stdout.write( - "[INFO] Ocean: %s (%d, %d)" - % ( - run_info["OCN_grid"], - run_info["OCN_resol"][0], - run_info["OCN_resol"][1], - ) - ) + if 'OCN_resol' in run_info: + sys.stdout.write('[INFO] Ocean: %s (%d, %d)' % + (run_info['OCN_grid'], run_info['OCN_resol'][0], + run_info['OCN_resol'][1])) else: - sys.stdout.write("[INFO] Ocean: %s" % run_info["OCN_grid"]) - if "NEMO_VERSION" in run_info: - sys.stdout.write(" (NEMO version: %s)\n" % run_info["NEMO_VERSION"]) + sys.stdout.write('[INFO] Ocean: %s' % + run_info['OCN_grid']) + if 'NEMO_VERSION' in run_info: + sys.stdout.write(' (NEMO version: %s)\n' % + run_info['NEMO_VERSION']) else: - sys.stdout.write("\n") - if "riv3" in run_info: - if run_info["riv3"] > 0: - sys.stdout.write("[INFO] Number of rivers: %d\n" % run_info["riv3"]) - - sys.stdout.write("[INFO] -------- Coupling frequencies (in mins) " "-------- \n") - comp_order = ["ATM", "JNR", "OCN"] - comp_list = [comp for comp in comp_order if "{}_resol".format(comp) in run_info] + sys.stdout.write('\n') + if 'riv3' in run_info: + if run_info['riv3'] > 0: + sys.stdout.write('[INFO] Number of rivers: %d\n' % + run_info['riv3']) + + sys.stdout.write('[INFO] -------- Coupling frequencies (in mins) ' + '-------- \n') + comp_order = ['ATM', 'JNR', 'OCN'] + comp_list = [comp for comp in comp_order if '{}_resol'.format(comp) in + run_info] for component1, component2 in itertools.permutations(comp_list, r=2): - key = component2 + "2" + component1 + "_freq" - sys.stdout.write( - "[INFO] %s -> %s: %0.1f\n" - % (component2, component1, (run_info[key][0] / 60.0)) - ) - key2 = "l_hyb_stats_" + component2 + "2" + component1 + key = component2 + '2' + component1 + '_freq' + sys.stdout.write('[INFO] %s -> %s: %0.1f\n' % + (component2, component1, + (run_info[key][0]/60.0))) + key2 = 'l_hyb_stats_' + component2 + '2' + component1 if key2 in run_info: if run_info[key2]: - sys.stdout.write( - "[INFO] %s -> %s for stats: %0.1f\n" - % 
(component2, component1, (run_info[key][1] / 60.0)) - ) - key = component1 + "2" + component2 + "_freq" - sys.stdout.write( - "[INFO] %s -> %s: %0.1f\n" - % (component1, component2, (run_info[key][0] / 60.0)) - ) - key2 = "l_hyb_stats_" + component1 + "2" + component2 + sys.stdout.write('[INFO] %s -> %s for stats: %0.1f\n' % + (component2, component1, + (run_info[key][1]/60.0))) + key = component1 + '2' + component2 + '_freq' + sys.stdout.write('[INFO] %s -> %s: %0.1f\n' % + (component1, component2, + (run_info[key][0]/60.0))) + key2 = 'l_hyb_stats_' + component1 + '2' + component2 if key2 in run_info: if run_info[key2]: - sys.stdout.write( - "[INFO] %s -> %s for stats: %0.1f\n" - % (component2, component1, (run_info[key][1] / 60.0)) - ) - if "ATM_grid" in run_info: - sys.stdout.write("[INFO] -------- Atmosphere information -------- \n") - sys.stdout.write( - "[INFO] Atmosphere levels: %d\n" % run_info["ATM_model_levels"] - ) - sys.stdout.write( - "[INFO] Soil levels: %d\n" % run_info["ATM_soil_levels"] - ) - sys.stdout.write( - "[INFO] Number of vegetation tiles: %d\n" % run_info["ATM_veg_tiles"] - ) - sys.stdout.write( - "[INFO] Number of non-vegetation tiles: %d\n" - % run_info["ATM_non_veg_tiles"] - ) - sys.stdout.write( - "[INFO] STASHmaster directory: %s\n" % run_info["STASHMASTER"] - ) - sys.stdout.write("[INFO] -------- Namcouple settings -------- \n") - sys.stdout.write("[INFO] nlogprt: %d" % run_info["nlogprt"][0]) - if len(run_info["nlogprt"]) == 2: - sys.stdout.write(" %d\n" % run_info["nlogprt"][1]) + sys.stdout.write('[INFO] %s -> %s for stats: %0.1f\n' % + (component2, component1, + (run_info[key][1]/60.0))) + if 'ATM_grid' in run_info: + sys.stdout.write('[INFO] -------- Atmosphere information -------- \n') + sys.stdout.write('[INFO] Atmosphere levels: %d\n' % + run_info['ATM_model_levels']) + sys.stdout.write('[INFO] Soil levels: %d\n' % + run_info['ATM_soil_levels']) + sys.stdout.write('[INFO] Number of vegetation tiles: %d\n' % + 
run_info['ATM_veg_tiles']) + sys.stdout.write('[INFO] Number of non-vegetation tiles: %d\n' % + run_info['ATM_non_veg_tiles']) + sys.stdout.write('[INFO] STASHmaster directory: %s\n' % + run_info['STASHMASTER']) + sys.stdout.write('[INFO] -------- Namcouple settings -------- \n') + sys.stdout.write('[INFO] nlogprt: %d' % run_info['nlogprt'][0]) + if len(run_info['nlogprt']) == 2: + sys.stdout.write(' %d\n' % run_info['nlogprt'][1]) else: - sys.stdout.write("\n") - sys.stdout.write("[INFO] Executable list:\n") - for execut in run_info["exec_list"]: - sys.stdout.write("[INFO] - %s\n" % execut) - if "expout" in run_info: - sys.stdout.write("[INFO] Fields with EXPOUT argument:\n") - for field in run_info["expout"]: - sys.stdout.write("[INFO] - %s\n" % field) - if "rmp_create" in run_info: - sys.stdout.write( - "[INFO] Fields where remapping files will be " "created are:\n" - ) - for field in run_info["rmp_create"]: - sys.stdout.write("[INFO] - %s\n" % field) - sys.stdout.write("[INFO] -------- Files -------- \n") - sys.stdout.write( - "[INFO] File containing coupling frequencies: %s\n" % run_info["SHARED_FILE"] - ) - if "nemo_nl" in run_info: - sys.stdout.write( - "[INFO] Default couplings determined from: %s\n" % run_info["nemo_nl"] - ) - + sys.stdout.write('\n') + sys.stdout.write('[INFO] Executable list:\n') + for execut in run_info['exec_list']: + sys.stdout.write('[INFO] - %s\n' % execut) + if 'expout' in run_info: + sys.stdout.write('[INFO] Fields with EXPOUT argument:\n') + for field in run_info['expout']: + sys.stdout.write('[INFO] - %s\n' % field) + if 'rmp_create' in run_info: + sys.stdout.write('[INFO] Fields where remapping files will be ' + 'created are:\n') + for field in run_info['rmp_create']: + sys.stdout.write('[INFO] - %s\n' % field) + sys.stdout.write('[INFO] -------- Files -------- \n') + sys.stdout.write('[INFO] File containing coupling frequencies: %s\n' % + run_info['SHARED_FILE']) + if 'nemo_nl' in run_info: + sys.stdout.write('[INFO] Default 
couplings determined from: %s\n' % + run_info['nemo_nl']) def _checks_on_run_info(run_info): - """ + ''' Run some checks on the data in run_info - """ + ''' # If coupling contains both hybrid components, check the model_levels # are the same for both. - if "ATM_model_levels" in run_info and "JNR_model_levels" in run_info: - if run_info["ATM_model_levels"] != run_info["JNR_model_levels"]: - sys.stderr.write( - "[FAIL] model_levels for Snr (=%d) and Jnr " - "(=%d) are different.\n" - % (run_info["ATM_model_levels"], run_info["JNR_model_levels"]) - ) + if 'ATM_model_levels' in run_info and 'JNR_model_levels' in run_info: + if run_info['ATM_model_levels'] != run_info['JNR_model_levels']: + sys.stderr.write('[FAIL] model_levels for Snr (=%d) and Jnr ' + '(=%d) are different.\n' % + (run_info['ATM_model_levels'], + run_info['JNR_model_levels'])) sys.exit(error.DIFFERENT_MODEL_LEVELS) - if "ATM_soil_levels" in run_info and "JNR_soil_levels" in run_info: - if run_info["ATM_soil_levels"] != run_info["JNR_soil_levels"]: - sys.stderr.write( - "[FAIL] soil levels for Snr (=%d) and Jnr " - "(=%d) are different.\n" - % (run_info["ATM_soil_levels"], run_info["JNR_soil_levels"]) - ) + if 'ATM_soil_levels' in run_info and 'JNR_soil_levels' in run_info: + if run_info['ATM_soil_levels'] != run_info['JNR_soil_levels']: + sys.stderr.write('[FAIL] soil levels for Snr (=%d) and Jnr ' + '(=%d) are different.\n' % + (run_info['ATM_soil_levels'], + run_info['JNR_soil_levels'])) sys.exit(error.DIFFERENT_SOIL_LEVELS) # If stats are turned on, we want to sample the core fields for # stats at least as often as the stat fields. 
- if "l_hyb_stats_ATM2JNR" in run_info: - if run_info["ATM2JNR_freq"][0] > run_info["ATM2JNR_freq"][1]: - sys.stdout.write( - "[INFO] matching the main coupling frequency " - "between ATM->JNR to the stats frequency\n" - ) - run_info["ATM2JNR_freq"][0] = run_info["ATM2JNR_freq"][1] - if "l_hyb_stats_JNR2ATM" in run_info: - if run_info["JNR2ATM_freq"][0] > run_info["JNR2ATM_freq"][1]: - sys.stdout.write( - "[INFO] matching the main coupling frequency " - "between JNR->ATM to the stats frequency\n" - ) - run_info["JNR2ATM_freq"][0] = run_info["JNR2ATM_freq"][1] + if 'l_hyb_stats_ATM2JNR' in run_info: + if run_info['ATM2JNR_freq'][0] > run_info['ATM2JNR_freq'][1]: + sys.stdout.write('[INFO] matching the main coupling frequency ' + 'between ATM->JNR to the stats frequency\n') + run_info['ATM2JNR_freq'][0] = run_info['ATM2JNR_freq'][1] + if 'l_hyb_stats_JNR2ATM' in run_info: + if run_info['JNR2ATM_freq'][0] > run_info['JNR2ATM_freq'][1]: + sys.stdout.write('[INFO] matching the main coupling frequency ' + 'between JNR->ATM to the stats frequency\n') + run_info['JNR2ATM_freq'][0] = run_info['JNR2ATM_freq'][1] return run_info - def add_to_cpl_list(origin, l_hybrid, n_cpl_freq, send_list_raw): - """ + ''' Add a new set of couplings to model_snd_list - """ + ''' mapping = None weighting = None model_snd_list = [] @@ -262,25 +209,11 @@ def add_to_cpl_list(origin, l_hybrid, n_cpl_freq, send_list_raw): # Loop across the raw entries for cpl_entry_raw in send_list_raw: - if cpl_entry_raw == "default": + if cpl_entry_raw == 'default': # Entry will later be filled with the default options model_snd_list.append( - NamcoupleEntry( - "default", - "?", - "?", - origin, - "?", - "?", - "?", - "?", - "?", - "?", - l_hybrid, - n_cpl_freq, - None, - ) - ) + NamcoupleEntry('default', '?', '?', origin, '?', '?', '?', + '?', '?', '?', l_hybrid, n_cpl_freq, None)) else: # A raw coupling entry can have up to 7 arguments: # ;;;;; @@ -293,87 +226,68 @@ def add_to_cpl_list(origin, l_hybrid, 
n_cpl_freq, send_list_raw): nlev = 1 mapping_type = -99 - if isinstance(cpl_entry_raw, str) and ";" in cpl_entry_raw: + if isinstance(cpl_entry_raw, str) and ';' in cpl_entry_raw: # Split the input - parts = cpl_entry_raw.split(";") + parts = cpl_entry_raw.split(';') # Check we have enough compulsory input if len(parts) < 6: - sys.stderr.write( - "[FAIL] Insufficent information in %s\n" % cpl_entry_raw - ) + sys.stderr.write('[FAIL] Insufficent information in %s\n' % + cpl_entry_raw) sys.exit(error.MISSING_NAMCOUPLE_INPUT) # Set the fields we should know name_out = parts[0] field_id = int(parts[1]) grid = parts[2] - dests = parts[3].split("&") + dests = parts[3].split('&') # Check if number of level are provided if len(parts) > 4: nlev = int(parts[4]) # Check if a mapping is provided if len(parts) > 5: - sub_parts = parts[5].split("&") + sub_parts = parts[5].split('&') if sub_parts[0] in RMP_MAPPING: mapping = RMP_MAPPING[sub_parts[0]] else: - sys.stderr.write( - "[FAIL] Don't recognise this " "mapping: %s.\n" % parts[5] - ) + sys.stderr.write("[FAIL] Don't recognise this " + "mapping: %s.\n" % parts[5]) sys.exit(error.UNRECOGNISED_MAPPING) if len(sub_parts) > 1: mapping_type = int(sub_parts[1]) else: if not mapping: - sys.stderr.write( - "[FAIL] Need to specify a mapping " - "for first entry %s\n" % cpl_entry_raw - ) + sys.stderr.write('[FAIL] Need to specify a mapping ' + 'for first entry %s\n' % + cpl_entry_raw) sys.exit(error.MISSING_MAPPING) # Check if we have a coupling weighting if len(parts) > 6: weighting = int(parts[6]) else: if not weighting: - sys.stderr.write( - "[FAIL] Need to specify a weighting " "for first entry.\n" - ) + sys.stderr.write('[FAIL] Need to specify a weighting ' + 'for first entry.\n') sys.exit(error.MISSING_WEIGHTING) # Loop over the destinations for dest in dests: model_snd_list.append( - NamcoupleEntry( - name_out, - field_id, - grid, - origin, - dest, - nlev, - "?", - mapping, - mapping_type, - weighting, - l_hybrid, - n_cpl_freq, 
- None, - ) - ) + NamcoupleEntry(name_out, field_id, grid, origin, + dest, nlev, '?', mapping, mapping_type, + weighting, l_hybrid, n_cpl_freq, None)) # Just add to the default weighting weighting += 2 else: - sys.stderr.write( - "[FAIL] the following coupling entry looks " - "to be in the wrong format: %s\n" % cpl_entry_raw - ) + sys.stderr.write('[FAIL] the following coupling entry looks ' + 'to be in the wrong format: %s\n' % + cpl_entry_raw) sys.exit(error.WRONG_CPL_FORMAT) return model_snd_list - def write_namcouple(common_env, run_info, coupling_list): - """ + ''' Write the namcouple file - """ + ''' # Key information is contained in run_info _print_run_info(run_info) @@ -382,37 +296,36 @@ def write_namcouple(common_env, run_info, coupling_list): # See if any default couplings need adding for nam_entry in coupling_list: - if "default" in nam_entry.name_out: - coupling_list = default_couplings.add_default_couplings( - run_info, coupling_list - ) + if 'default' in nam_entry.name_out: + coupling_list = \ + default_couplings.add_default_couplings(run_info, + coupling_list) break # See if any couplings need removing store_coupling_list = [] for nam_entry in coupling_list: - if nam_entry.mapping != "remove": + if nam_entry.mapping != 'remove': store_coupling_list.append(nam_entry) coupling_list = store_coupling_list # Open the file - nam_file = common.open_text_file("namcouple", "w") + nam_file = common.open_text_file('namcouple', 'w') # Create the header - write_namcouple_header.write_namcouple_header( - common_env, nam_file, run_info, len(coupling_list) - ) + write_namcouple_header.write_namcouple_header(common_env, nam_file, + run_info, len(coupling_list)) # Sort the coupling_list by weighting - coupling_list = sorted(coupling_list, key=lambda nam_entry: nam_entry.weight) + coupling_list = sorted(coupling_list, + key=lambda nam_entry: nam_entry.weight) # Write the coupling fields cf_names = write_namcouple_fields.write_namcouple_fields( - nam_file, run_info, 
coupling_list - ) + nam_file, run_info, coupling_list) # Close file - nam_file.write("#\n$END\n") + nam_file.write('#\n$END\n') nam_file.close() # Write cf_name_table.txt file @@ -420,4 +333,4 @@ def write_namcouple(common_env, run_info, coupling_list): # Now that namcouple has been created, we can create the transient # field namelist - _, _ = shellout._exec_subprocess("./OASIS_fields") + _, _ = shellout._exec_subprocess('./OASIS_fields') diff --git a/Coupled_Drivers/xios_driver.py b/Coupled_Drivers/xios_driver.py index b7331c5..b08cd4d 100644 --- a/Coupled_Drivers/xios_driver.py +++ b/Coupled_Drivers/xios_driver.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2023-2025 Met Office. All rights reserved. @@ -17,7 +17,8 @@ DESCRIPTION Driver for the XIOS component, called from link_drivers. Can cater for XIOS running in either attatched or detatched mode -""" +''' + import os @@ -27,53 +28,47 @@ import dr_env_lib.xios_def import dr_env_lib.env_lib - def _copy_iodef_custom(xios_evar): - """ + ''' If a custom iodef file exists, copy this to the required input filename - """ - if xios_evar["IODEF_CUSTOM"]: - shutil.copy(xios_evar["IODEF_CUSTOM"], xios_evar["IODEF_FILENAME"]) - + ''' + if xios_evar['IODEF_CUSTOM']: + shutil.copy(xios_evar['IODEF_CUSTOM'], xios_evar['IODEF_FILENAME']) -def _update_iodef(is_server_mode, is_coupled_mode, oasis_components, iodef_fname): - """ +def _update_iodef( + is_server_mode, is_coupled_mode, oasis_components, iodef_fname): + ''' Update the iodef.xml file for server/attatched mode and couplng mode. is_server_mode and is_coupled_mode are boolean. (true when each option is activated, false otherwise). 
- """ + ''' # Work-around in lieu of viable multi component iodef.xml handling - _, _ = shellout._exec_subprocess("cp mydef.xml %s" % iodef_fname) + _, _ = shellout._exec_subprocess('cp mydef.xml %s' % iodef_fname) # Note we do not use python's xml module for this job, as the comment # line prevalent in the first line of the GO5 iodef.xml files renders # the file invalid as far as the xml module is concerned. - swapfile_name = "swap_iodef" - iodef_file = common.open_text_file(iodef_fname, "r") - iodef_swap = common.open_text_file(swapfile_name, "w") - text_bool = ["false", "true"] + swapfile_name = 'swap_iodef' + iodef_file = common.open_text_file(iodef_fname, 'r') + iodef_swap = common.open_text_file(swapfile_name, 'w') + text_bool = ['false', 'true'] for line in iodef_file.readlines(): # Update the server_mode if the current setting is not what we want - if "' - + oasis_components - + "" - ) + line = '' \ + + oasis_components+'' else: - line = "" + line = '' iodef_swap.write(line) @@ -83,7 +78,7 @@ def _update_iodef(is_server_mode, is_coupled_mode, oasis_components, iodef_fname def _setup_coupling_components(xios_envar): - """ + ''' Set up the coupling components for the iodef file. This is less straightforward than you might imagine, since the names of the componenets are hard coded in the component source code. 
Nemo becomes toyoce and @@ -91,29 +86,28 @@ def _setup_coupling_components(xios_envar): We use the COUPLING_COMPONENTS environment variable to determine this, however it is borrowed from MCT, do we must delete it from the xios_envar container after use - """ + ''' oasis_components = [] - if "lfric" in xios_envar["COUPLING_COMPONENTS"]: - oasis_components.append("lfric") - if "nemo" in xios_envar["COUPLING_COMPONENTS"]: - oasis_components.append("toyoce") - xios_envar.remove("COUPLING_COMPONENTS") - oasis_components = ",".join(oasis_components) + if 'lfric' in xios_envar['COUPLING_COMPONENTS']: + oasis_components.append('lfric') + if 'nemo' in xios_envar['COUPLING_COMPONENTS']: + oasis_components.append('toyoce') + xios_envar.remove('COUPLING_COMPONENTS') + oasis_components = ','.join(oasis_components) return oasis_components, xios_envar def _setup_executable(common_env): - """ + ''' Setup the environment and any files required by the executable and/or by the iodef file update procedure. - """ + ''' # Load the environment variables required xios_envar = dr_env_lib.env_lib.LoadEnvar() xios_envar = dr_env_lib.env_lib.load_envar_from_definition( - xios_envar, dr_env_lib.xios_def.XIOS_ENVIRONMENT_VARS_INITIAL - ) + xios_envar, dr_env_lib.xios_def.XIOS_ENVIRONMENT_VARS_INITIAL) - if xios_envar["XIOS_NPROC"] == "0": + if xios_envar['XIOS_NPROC'] == '0': # Running in attached mode using_server = False else: @@ -121,12 +115,13 @@ def _setup_executable(common_env): # The following environment variables are only relevant for this # mode using_server = True - common.remove_file(xios_envar["XIOS_LINK"]) - os.symlink(xios_envar["XIOS_EXEC"], xios_envar["XIOS_LINK"]) + common.remove_file(xios_envar['XIOS_LINK']) + os.symlink(xios_envar['XIOS_EXEC'], + xios_envar['XIOS_LINK']) # Check our list of component drivers to see if MCT is active. If it is, # then this is a coupled model. Set the coupler flag accordingly. 
- using_coupler = "mct" in common_env["models"] + using_coupler = 'mct' in common_env['models'] # Copy the custom IO file if required _copy_iodef_custom(xios_envar) @@ -134,85 +129,75 @@ def _setup_executable(common_env): # Get the list of coupled componenets oasis_components, xios_envar = _setup_coupling_components(xios_envar) # Update the iodef file - _update_iodef( - using_server, using_coupler, oasis_components, xios_envar["IODEF_FILENAME"] - ) + _update_iodef(using_server, using_coupler, oasis_components, + xios_envar['IODEF_FILENAME']) return xios_envar def _set_launcher_command(launcher, xios_envar): - """ + ''' Setup the launcher command for the executable, bearing in mind that XIOS can run attached. If this is so, this function will return an empty string - """ - if xios_envar["XIOS_NPROC"] != "0": - if xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"] == "unset": + ''' + if xios_envar['XIOS_NPROC'] != '0': + if xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'] == 'unset': ompthr = 1 hyperthreads = 1 ss = True - xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"] = ( - common.set_aprun_options( - xios_envar["XIOS_NPROC"], - xios_envar["XIOS_NODES"], - ompthr, - hyperthreads, - ss, - ) - if launcher == "aprun" - else "" - ) - - launch_cmd = "%s ./%s" % ( - xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"], - xios_envar["XIOS_LINK"], - ) + xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'] = \ + common.set_aprun_options(xios_envar['XIOS_NPROC'], \ + xios_envar['XIOS_NODES'], ompthr, \ + hyperthreads, ss) \ + if launcher == 'aprun' else '' + + launch_cmd = '%s ./%s' % \ + (xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'], \ + xios_envar['XIOS_LINK']) # Put in quotes to allow this environment variable to be exported as it # contains (or can contain) spaces - xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"] = ( - "'%s'" % xios_envar["ROSE_LAUNCHER_PREOPTS_XIOS"] - ) + xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'] = "'%s'" % \ + xios_envar['ROSE_LAUNCHER_PREOPTS_XIOS'] else: - launch_cmd = "" + launch_cmd = '' return launch_cmd - 
def _sent_coupling_fields(run_info): - """ + ''' Add XIOS executable to list of executables. This function is only used when creating the namcouple at run time. - """ + ''' # Add xios to our list of executables - if not "exec_list" in run_info: - run_info["exec_list"] = [] - run_info["exec_list"].append("xios.x") + if not 'exec_list' in run_info: + run_info['exec_list'] = [] + run_info['exec_list'].append('xios.x') return run_info - def _finalize_executable(_): - """ + ''' There is no finalization required for XIOS - """ + ''' pass def run_driver(common_env, mode, run_info): - """ + ''' Run the driver, and return an instance of LoadEnvar and as string containing the launcher command for the XIOS component - """ - if mode == "run_driver": + ''' + if mode == 'run_driver': exe_envar = _setup_executable(common_env) - launch_cmd = _set_launcher_command(common_env["ROSE_LAUNCHER"], exe_envar) + launch_cmd = _set_launcher_command(common_env['ROSE_LAUNCHER'], + exe_envar) model_snd_list = None - if not run_info["l_namcouple"]: + if not run_info['l_namcouple']: run_info = _sent_coupling_fields(run_info) - elif mode == "finalize": + elif mode == 'finalize': _finalize_executable(common_env) exe_envar = None launch_cmd = None diff --git a/Postprocessing/common/utils.py b/Postprocessing/common/utils.py index d9274a1..159d664 100644 --- a/Postprocessing/common/utils.py +++ b/Postprocessing/common/utils.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -""" +''' *****************************COPYRIGHT****************************** (C) Crown copyright 2015-2025 Met Office. All rights reserved. 
@@ -17,7 +17,7 @@ DESCRIPTION Common utilities for post-processing methods -""" +''' import sys import re import os @@ -27,16 +27,15 @@ import timer -globals()["debug_mode"] = None -globals()["debug_ok"] = True - +globals()['debug_mode'] = None +globals()['debug_ok'] = True class Variables(object): - """Object to hold a group of variables""" + '''Object to hold a group of variables''' def load_env(varname, default_value=None, required=False): - """ + ''' Load requested environment variable Arguments: varname - Name of environment variable @@ -46,97 +45,96 @@ def load_env(varname, default_value=None, required=False): required - Default=False Exit with system failure if True and no default_value is specified. - """ + ''' try: envar = os.environ[varname] except KeyError: envar = default_value if required is True and default_value is None: - msg = "REQUIRED variable not found in the environment: " - log_msg(msg + varname, level="FAIL") + msg = 'REQUIRED variable not found in the environment: ' + log_msg(msg + varname, level='FAIL') return envar class CylcCycle(object): - """Object representing the current Cylc cycle point""" - + ''' Object representing the current Cylc cycle point ''' def __init__(self, cyclepoint=None, cycleperiod=None): - """ + ''' Optional argument: cyclepoint - ISOformat datestring OR list/tuple of digits - """ + ''' if cyclepoint is None: # Load optional cycle point override environment - cyclepoint = load_env("CYCLEPOINT_OVERRIDE") + cyclepoint = load_env('CYCLEPOINT_OVERRIDE') if cyclepoint is None: - cyclepoint = load_env("CYLC_TASK_CYCLE_POINT", required=True) + cyclepoint = load_env('CYLC_TASK_CYCLE_POINT', required=True) self.startcycle = self._cyclepoint(cyclepoint) if cycleperiod is None: - cycleperiod = load_env("CYCLEPERIOD", required=True) + cycleperiod = load_env('CYCLEPERIOD', required=True) try: # Split period into list of integers if possible - cycleperiod = [int(x) for x in cycleperiod.split(",")] + cycleperiod = [int(x) for x in 
cycleperiod.split(',')] except ValueError: # Period provided is intended as a string pass self._period = cycleperiod - enddate = add_period_to_date(self.startcycle["intlist"], cycleperiod) + enddate = add_period_to_date(self.startcycle['intlist'], + cycleperiod) self.endcycle = self._cyclepoint(enddate) @property def period(self): - """Return the cycle period for the cycle point""" + ''' Return the cycle period for the cycle point ''' return self._period @staticmethod def isoformat(cpoint): - """Return cycle point as ISO format datestring""" + ''' Return cycle point as ISO format datestring ''' if isinstance(cpoint, (list, tuple)): cyclepoint = list(cpoint) while len(cyclepoint) < 5: cyclepoint.append(0) - cpoint = "{:0>4}{:0>2}{:0>2}T{:0>2}{:0>2}Z".format(*cyclepoint) + cpoint = '{:0>4}{:0>2}{:0>2}T{:0>2}{:0>2}Z'.format(*cyclepoint) - if re.match(r"\d{8}T\d{4}Z", cpoint): + if re.match(r'\d{8}T\d{4}Z', cpoint): return cpoint else: - msg = "Unable to determine cycle point in ISO format: " - log_msg(msg + str(cpoint), level="FAIL") + msg = 'Unable to determine cycle point in ISO format: ' + log_msg(msg + str(cpoint), level='FAIL') def _cyclepoint(self, cpoint): - """ + ''' Return a dictionary representing a cycle point in 3 formats: iso = ISO format datestring intlist = List of 5 values: [Y,M,D,hh,mm] strlist = List of 5 values: ['Y','M','D','hh','mm'] - """ - cycle_repr = {"iso": self.isoformat(cpoint)} - cycle_repr["strlist"] = list( - re.match( - r"(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})Z", cycle_repr["iso"] - ).groups() - ) - cycle_repr["intlist"] = [int(x) for x in cycle_repr["strlist"]] + ''' + cycle_repr = {'iso': self.isoformat(cpoint)} + cycle_repr['strlist'] = list(re.match( + r'(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})Z', cycle_repr['iso'] + ).groups()) + cycle_repr['intlist'] = [int(x) for x in cycle_repr['strlist']] return cycle_repr def finalcycle(): - """ + ''' Determine whether this cycle is the final cycle for the running suite. 
Return True/False - """ - arch_final = load_env("ARCHIVE_FINAL", "false") - if "true" in arch_final.lower(): + ''' + arch_final = load_env('ARCHIVE_FINAL', 'false') + if ('true' in arch_final.lower()): fcycle = True - log_msg("ARCHIVE_FINAL=true. End-of-run data will be archived.", level="INFO") + log_msg('ARCHIVE_FINAL=true. End-of-run data will be archived.', + level='INFO') else: - finalpoint = load_env("FINALCYCLE_OVERRIDE") + finalpoint = load_env('FINALCYCLE_OVERRIDE') if finalpoint is None: - finalpoint = load_env("CYLC_SUITE_FINAL_CYCLE_POINT") - if finalpoint == "None": + finalpoint = load_env('CYLC_SUITE_FINAL_CYCLE_POINT') + if finalpoint == 'None': # Convert from string. finalpoint = None @@ -146,10 +144,8 @@ def finalcycle(): # Cylc will not trigger further cycles beyond this point. # Set fcycle=True in this instance. if finalpoint: - fcycle = ( - CylcCycle().endcycle["intlist"] - > CylcCycle(cyclepoint=finalpoint).startcycle["intlist"] - ) + fcycle = (CylcCycle().endcycle['intlist'] > + CylcCycle(cyclepoint=finalpoint).startcycle['intlist']) else: # Cylc8 no longer requires a final cycle point to be set at all fcycle = False @@ -158,23 +154,23 @@ def finalcycle(): def get_utility_avail(utility): - """Return True/False if shell command is available""" + '''Return True/False if shell command is available''' try: status = shutil.which(utility) except AttributeError: # subprocess.getstatusoutput does not exist at Python2.7 - status, _ = shellout.exec_subprocess(utility + " --help") + status, _ = shellout.exec_subprocess(utility + ' --help') return bool(status) def get_subset(datadir, pattern): - """Returns a list of files matching a given regex""" + '''Returns a list of files matching a given regex''' datadir = check_directory(datadir) try: patt = re.compile(pattern) except TypeError: - log_msg("get_subset: Incompatible pattern supplied.", level="WARN") + log_msg('get_subset: Incompatible pattern supplied.', level='WARN') files = [] else: files = [fn for 
fn in sorted(os.listdir(datadir)) if patt.search(fn)] @@ -182,30 +178,31 @@ def get_subset(datadir, pattern): def check_directory(datadir): - """ + ''' Ensure that a given directory actually exists. Program will exit with an error if the test is unsuccessful. - """ + ''' try: datadir = os.path.expandvars(datadir) except TypeError: - log_msg("check_directory: Exiting - No directory provided", level="FAIL") + log_msg('check_directory: Exiting - No directory provided', + level='FAIL') if not os.path.isdir(datadir): - msg = "check_directory: Exiting - Directory does not exist: " - log_msg(msg + str(datadir), level="FAIL") + msg = 'check_directory: Exiting - Directory does not exist: ' + log_msg(msg + str(datadir), level='FAIL') return datadir def compare_mod_times(pathlist, last_mod=True): - """ + ''' Compare the modification time of files. Return the last modified file, or first listed of multiple files modified last. Optional arguments: last_mod Set to False to return the oldest file - """ + ''' mod_times = [] pathlist = ensure_list(pathlist) for path in pathlist: @@ -224,11 +221,11 @@ def compare_mod_times(pathlist, last_mod=True): def ensure_list(value, listnone=False): - """ + ''' Return a list for a given input. 
Optional argument: listnone - True=Return [''] or [None] False=Return [] - """ + ''' if value or listnone: if not isinstance(value, (list, tuple, type({}.keys()))): value = [value] @@ -239,7 +236,7 @@ def ensure_list(value, listnone=False): def add_path(files, path): - """Add a given path to a file or list of files provided""" + ''' Add a given path to a file or list of files provided ''' path = check_directory(path) files = ensure_list(files) @@ -247,7 +244,7 @@ def add_path(files, path): def create_dir(dirname, path=None): - """Create a directory""" + ''' Create a directory ''' if path: dirname = os.path.join(path, dirname) try: @@ -256,12 +253,13 @@ def create_dir(dirname, path=None): if exc.errno == errno.EEXIST and os.path.isdir(dirname): pass else: - log_msg("create_dir: Unable to create directory: " + dirname, level="FAIL") + log_msg('create_dir: Unable to create directory: ' + dirname, + level='FAIL') @timer.run_timer -def copy_files(cpfiles, destination=None, tmp_ext=".tmp"): - """ +def copy_files(cpfiles, destination=None, tmp_ext='.tmp'): + ''' Copy file(s). 
Optional arguments: destination - Where provided destination must be a writable @@ -270,7 +268,7 @@ def copy_files(cpfiles, destination=None, tmp_ext=".tmp"): copied to the original directory (os.path.dirname(filename)) with a "tmp_ext" extension tmp_ext - Extension used when copying to the same directory - """ + ''' if destination: destination = check_directory(destination) @@ -283,16 +281,16 @@ def copy_files(cpfiles, destination=None, tmp_ext=".tmp"): output = srcfile + tmp_ext try: - src = open(srcfile, "rb") + src = open(srcfile, 'rb') except IOError as exc: - msg = "copy_files: Failed to read from source file: " + srcfile - log_msg(" - ".join([msg, exc.strerror]), level="ERROR") + msg = 'copy_files: Failed to read from source file: ' + srcfile + log_msg(' - '.join([msg, exc.strerror]), level='ERROR') try: - out = open(output, "wb") + out = open(output, 'wb') except IOError as exc: - msg = "copy_files: Failed to write to target file: " + output - log_msg(" - ".join([msg, exc.strerror]), level="ERROR") + msg = 'copy_files: Failed to write to target file: ' + output + log_msg(' - '.join([msg, exc.strerror]), level='ERROR') shutil.copyfileobj(src, out) src.close() @@ -301,17 +299,16 @@ def copy_files(cpfiles, destination=None, tmp_ext=".tmp"): return outputfiles - @timer.run_timer def remove_files(delfiles, path=None, ignore_non_exist=False): - """ + ''' Delete files. Optional arguments: path - if not provided full path is assumed to have been provided in the filename. ignore_non_exist - flag to allow a non-existent file to be ignored. Default behaviour is to provide a warning and continue. 
- """ + ''' if path: path = check_directory(path) delfiles = add_path(delfiles, path) @@ -322,12 +319,13 @@ def remove_files(delfiles, path=None, ignore_non_exist=False): os.remove(fname) except OSError: if not ignore_non_exist: - log_msg("remove_files: File does not exist: " + fname, level="WARN") + log_msg('remove_files: File does not exist: ' + fname, + level='WARN') @timer.run_timer def move_files(mvfiles, destination, originpath=None, fail_on_err=False): - """ + ''' Move a single file or list of files to a given directory. Optionally a directory of origin may be specified. Arguments: @@ -339,8 +337,8 @@ def move_files(mvfiles, destination, originpath=None, fail_on_err=False): fail_on_err - Failure to move the file results in app failure. Primary cause of failure is a non-existent target file. Default=False - """ - msglevel = "ERROR" if fail_on_err else "WARN" + ''' + msglevel = 'ERROR' if fail_on_err else 'WARN' destination = check_directory(destination) if originpath: @@ -352,55 +350,53 @@ def move_files(mvfiles, destination, originpath=None, fail_on_err=False): shutil.move(fname, destination) except shutil.Error: if os.path.dirname(fname) == destination: - msg = "move_files: Attempted to overwrite original file: " + msg = 'move_files: Attempted to overwrite original file: ' log_msg(msg + fname, level=msglevel) else: dest_file = os.path.join(destination, os.path.basename(fname)) remove_files(dest_file) - msg = ( - "move_files: Deleted pre-existing file with same name " - "prior to move: " + dest_file - ) - log_msg(msg, level="WARN") + msg = 'move_files: Deleted pre-existing file with same name ' \ + 'prior to move: ' + dest_file + log_msg(msg, level='WARN') shutil.move(fname, destination) except IOError: # Exception changes in Python 3: # IOError has been merged into OSError # shutil.Error is now a child of IOError, therefore exception # order is important here for compatibility with both 2.7 and 3+ - log_msg("move_files: File does not exist: " + fname, 
level=msglevel) + log_msg('move_files: File does not exist: ' + fname, level=msglevel) def calendar(): - """Return the calendar based on the suite environment""" - cal = load_env("CYLC_CYCLING_MODE", default_value="360day") - if cal.lower() == "integer": + ''' Return the calendar based on the suite environment ''' + cal = load_env('CYLC_CYCLING_MODE', default_value='360day') + if cal.lower() == 'integer': # Non-Cycling suites should export the CALENDAR environment # variable. DEFAULT VALUE: 360day - cal = load_env("CALENDAR", default_value="360day") + cal = load_env('CALENDAR', default_value='360day') return cal def monthlength(month, year): - """Returns length of given month in days - calendar dependent""" + '''Returns length of given month in days - calendar dependent''' days_per_month = { # Days list runs from Dec -> Nov - "360day": [30] * 12, - "365day": [31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30], - "gregorian": [31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30], - } + '360day': [30]*12, + '365day': [31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30], + 'gregorian': [31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30], + } year = int(year) + (int(month) // 12) month = int(month) % 12 if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0): - days_per_month["gregorian"][2] = 29 + days_per_month['gregorian'][2] = 29 return days_per_month[calendar()][month % 12] def add_period_to_date(indate, delta): - """ + ''' Add a delta (list of integers) to a given date (list of integers). For 360day calendar, add period with simple arithmetic for speed For other calendars, call one of @@ -409,7 +405,7 @@ def add_period_to_date(indate, delta): with calendar argument - taken from environment variable CYLC_CYCLING_MODE. If no indate is provided ([0,0,0,0,0]) then delta is returned. 
- """ + ''' if isinstance(delta, str): delta = get_frequency(delta, rtn_delta=True) @@ -417,7 +413,7 @@ def add_period_to_date(indate, delta): outdate = delta else: cal = calendar() - if cal == "360day": + if cal == '360day': outdate = _mod_360day_calendar_date(indate, delta) else: outdate = _mod_all_calendars_date(indate, delta, cal) @@ -427,7 +423,7 @@ def add_period_to_date(indate, delta): @timer.run_timer def _mod_all_calendars_date(indate, delta, cal): - """Call `isodatetime` or `rose date` to return a date""" + ''' Call `isodatetime` or `rose date` to return a date ''' outdate = [int(d) for d in indate] while len(outdate) < 5: # ISOdatetime format string requires outdate list of length=5 @@ -435,39 +431,34 @@ def _mod_all_calendars_date(indate, delta, cal): outdate.append(val) # Check whether `isodatetime` command exists, or default to `rose date` - datecmd = "isodatetime" if get_utility_avail("isodatetime") else "rose date" + datecmd = 'isodatetime' if get_utility_avail('isodatetime') else 'rose date' for elem in delta: if elem != 0: - offset = ("-" if elem < 0 else "") + "P" + offset = ('-' if elem < 0 else '') + 'P' try: - offset += str(abs(elem)) + ["Y", "M", "D"][delta.index(elem)] + offset += str(abs(elem)) + ['Y', 'M', 'D'][delta.index(elem)] except IndexError: - if "T" not in offset: - offset += "T" - offset += str(abs(elem)) + ["M", "H"][delta.index(elem) - 4] + if 'T' not in offset: + offset += 'T' + offset += str(abs(elem)) + ['M', 'H'][delta.index(elem)-4] - dateinput = "{0:0>4}{1:0>2}{2:0>2}T{3:0>2}{4:0>2}".format(*outdate) - if re.match(r"^\d{8}T\d{4}$", dateinput): - cmd = ( - "{} {} --calendar {} --offset {} --print-format " - "%Y,%m,%d,%H,%M".format(datecmd, dateinput, cal, offset) - ) + dateinput = '{0:0>4}{1:0>2}{2:0>2}T{3:0>2}{4:0>2}'.format(*outdate) + if re.match(r'^\d{8}T\d{4}$', dateinput): + cmd = '{} {} --calendar {} --offset {} --print-format ' \ + '%Y,%m,%d,%H,%M'.format(datecmd, dateinput, cal, offset) rcode, output = 
shellout._exec_subprocess(cmd) else: - log_msg( - "add_period_to_date: Invalid date for conversion to " - "ISO 8601 date representation: " + str(outdate), - level="ERROR", - ) + log_msg('add_period_to_date: Invalid date for conversion to ' + 'ISO 8601 date representation: ' + str(outdate), + level='ERROR') if rcode == 0: - outdate = [int(x) for x in output.split(",")] + outdate = [int(x) for x in output.split(',')] else: - log_msg( - "`{}` command failed:\n{}".format(datecmd, output), level="ERROR" - ) + log_msg('`{}` command failed:\n{}'.format(datecmd, output), + level='ERROR') outdate = None break @@ -476,20 +467,18 @@ def _mod_all_calendars_date(indate, delta, cal): @timer.run_timer def _mod_360day_calendar_date(indate, delta): - """ + ''' Simple arithmetic calculation of new date for 360 day calendar. Use of `isodatetime`, while possible is inefficient. - """ + ''' try: outdate = [int(x) for x in indate] except ValueError: - log_msg( - "add_period_to_date: Invalid date representation: " + str(indate), - level="FAIL", - ) + log_msg('add_period_to_date: Invalid date representation: ' + + str(indate), level='FAIL') diff_hours = 0 # multiplier to convert the delta list to a total number of hours - multiplier = [360 * 24, 30 * 24, 24, 1, 1.0 / 60, 1.0 / 60 / 60] + multiplier = [360*24, 30*24, 24, 1, 1./60, 1./60/60] for i, val in enumerate(delta): diff_hours += multiplier[i] * val if len(outdate) <= i: @@ -526,34 +515,33 @@ def _mod_360day_calendar_date(indate, delta): def get_frequency(delta, rtn_delta=False): - r""" + r''' Extract the frequency and base period from a delta string in the form '\d+\w+' or an ISO period e.g. 
P1Y2M Optional argument: rtn_delta = True - return a delta in the form of a list = False - return the frequency and base period - """ + ''' # all_targets dictionary: key=base period, val=date list index - all_targets = {"h": 3, "d": 2, "m": 1, "s": 1, "y": 0, "a": 0, "x": 0} - regex = r"(-?\d+)([{}])".format("".join(all_targets.keys())) - rval = [0] * 5 + all_targets = {'h': 3, 'd': 2, 'm': 1, 's': 1, 'y': 0, 'a': 0, 'x': 0} + regex = r'(-?\d+)([{}])'.format(''.join(all_targets.keys())) + rval = [0]*5 preserve_neg = None while delta: - neg, iso, subdaily, delta = re.match( - r"(-?)(p?)(t?)([\w\-]*)", delta.lower() - ).groups() + neg, iso, subdaily, delta = re.match(r'(-?)(p?)(t?)([\w\-]*)', + delta.lower()).groups() if subdaily: # Redefine "m" to "minutes" (date index 4) - all_targets["m"] = 4 + all_targets['m'] = 4 if iso: # `delta` prefix is [-]P indicating an ISO period. # Any negative should be preserved such that it is applied # to each frequency in the whole string. Examples: # -P1Y3M is "-1 year and -1 month" # PT1H30M is "+1 hour and +30 minutes" - preserve_neg = neg == "-" + preserve_neg = (neg == '-') multiplier = -1 if (preserve_neg or neg) else 1 try: @@ -566,91 +554,91 @@ def get_frequency(delta, rtn_delta=False): try: index = [all_targets[t] for t in all_targets if t == base][0] except IndexError: - concatdelta = "".join([neg, subdaily, delta]) - log_msg( - "get_frequency - Invalid target provided: " + concatdelta, level="FAIL" - ) + concatdelta = ''.join([neg, subdaily, delta]) + log_msg('get_frequency - Invalid target provided: ' + concatdelta, + level='FAIL') if rtn_delta: # Strip freq/base from the start of the delta string for next pass delta = delta.lstrip(str(freq)) delta = delta.lstrip(base) - if not re.search(r"\d+", delta): + if not re.search(r'\d+', delta): # Remaining delta string cannot be a period - pass complete - delta = "" + delta = '' # Return delta in the form of an integer list - if base == "s": + if base == 's': freq = freq * 3 - 
elif base == "x": + elif base == 'x': freq = freq * 10 rval[index] = freq else: # Return an integer frequency and string base rval = [freq, base] - delta = "" + delta = '' return rval -def log_msg(msg, level="INFO"): - """ +def log_msg(msg, level='INFO'): + ''' Produce a message to the appropriate output stream. Messages tagged with 'ERROR' and 'FAIL' will result in the program exiting, unless model is running in debug_mode, in which case only 'FAIL' will exit. - """ + ''' out = sys.stdout err = sys.stderr level = str(level).upper() output = { - "DEBUG": (err, "[DEBUG] "), - "INFO": (out, "[INFO] "), - "OK": (out, "[ OK ] "), - "WARN": (err, "[WARN] "), - "ERROR": (err, "[ERROR] "), - "FAIL": (err, "[FAIL] "), + 'DEBUG': (err, '[DEBUG] '), + 'INFO': (out, '[INFO] '), + 'OK': (out, '[ OK ] '), + 'WARN': (err, '[WARN] '), + 'ERROR': (err, '[ERROR] '), + 'FAIL': (err, '[FAIL] '), } try: - output[level][0].write("{} {}\n".format(output[level][1], msg)) + output[level][0].write('{} {}\n'.format(output[level][1], msg)) except KeyError: - level = "WARN" - msg = "log_msg: Unknown severity level for log message." - output[level][0].write("{} {}\n".format(output[level][1], msg)) + level = 'WARN' + msg = 'log_msg: Unknown severity level for log message.' + output[level][0].write('{} {}\n'.format(output[level][1], msg)) - if level == "ERROR": + if level == 'ERROR': # If in debug mode, terminate at the end of the task. # Otherwise terminate now. 
catch_failure() - elif level == "FAIL": - sys.exit(output[level][1] + "Terminating PostProc...") + elif level == 'FAIL': + sys.exit(output[level][1] + 'Terminating PostProc...') def set_debugmode(debug): - """Set method for the debug_mode global variable""" - globals()["debug_mode"] = debug - globals()["debug_ok"] = True + '''Set method for the debug_mode global variable''' + globals()['debug_mode'] = debug + globals()['debug_ok'] = True def get_debugmode(): - """Get method for the debug_mode global variable""" - return globals()["debug_mode"] + '''Get method for the debug_mode global variable''' + return globals()['debug_mode'] def get_debugok(): - """Get method for the debug_ok global variable""" - return globals()["debug_ok"] + '''Get method for the debug_ok global variable''' + return globals()['debug_ok'] def catch_failure(): - """ + ''' Ignore errors in external subprocess commands or other failures, allowing the task to continue to completion. Ultimately causes the task to fail due to the global debug_ok setting. - """ + ''' if get_debugmode(): - log_msg("Ignoring failed external command. Continuing...", level="DEBUG") - globals()["debug_ok"] = False + log_msg('Ignoring failed external command. 
Continuing...', + level='DEBUG') + globals()['debug_ok'] = False else: - log_msg("Command Terminated", level="FAIL") + log_msg('Command Terminated', level='FAIL') From abe99cd3cfd207cbe020a32179163a0c77e09802 Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 15:13:46 +0000 Subject: [PATCH 13/14] Add AI statement --- Coupled_Drivers/cice_driver.py | 4 ++++ Coupled_Drivers/cpmip_utils.py | 4 ++++ Coupled_Drivers/cpmip_xios.py | 4 ++++ Coupled_Drivers/mct_driver.py | 4 ++++ Coupled_Drivers/nemo_driver.py | 4 ++++ Coupled_Drivers/rivers_driver.py | 4 ++++ Coupled_Drivers/si3_controller.py | 4 ++++ Coupled_Drivers/top_controller.py | 4 ++++ Coupled_Drivers/unittests/test_cpmip_utils.py | 4 ++++ Coupled_Drivers/unittests/test_cpmip_xios.py | 4 ++++ Coupled_Drivers/unittests/test_rivers_driver.py | 4 ++++ Coupled_Drivers/write_namcouple.py | 4 ++++ Coupled_Drivers/xios_driver.py | 4 ++++ 13 files changed, 52 insertions(+) diff --git a/Coupled_Drivers/cice_driver.py b/Coupled_Drivers/cice_driver.py index 176b8a4..c432d30 100644 --- a/Coupled_Drivers/cice_driver.py +++ b/Coupled_Drivers/cice_driver.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + NAME cice_driver.py diff --git a/Coupled_Drivers/cpmip_utils.py b/Coupled_Drivers/cpmip_utils.py index 946be5c..23e48f2 100644 --- a/Coupled_Drivers/cpmip_utils.py +++ b/Coupled_Drivers/cpmip_utils.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. 
+ NAME cpmip_utils.py diff --git a/Coupled_Drivers/cpmip_xios.py b/Coupled_Drivers/cpmip_xios.py index 848ff2a..f46f5e8 100644 --- a/Coupled_Drivers/cpmip_xios.py +++ b/Coupled_Drivers/cpmip_xios.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + NAME cpmip_xios.py diff --git a/Coupled_Drivers/mct_driver.py b/Coupled_Drivers/mct_driver.py index b25dc66..27728a6 100644 --- a/Coupled_Drivers/mct_driver.py +++ b/Coupled_Drivers/mct_driver.py @@ -10,6 +10,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + NAME mct_driver.py diff --git a/Coupled_Drivers/nemo_driver.py b/Coupled_Drivers/nemo_driver.py index 0a6cf37..dba8c95 100644 --- a/Coupled_Drivers/nemo_driver.py +++ b/Coupled_Drivers/nemo_driver.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + NAME nemo_driver.py diff --git a/Coupled_Drivers/rivers_driver.py b/Coupled_Drivers/rivers_driver.py index 42ed0f7..bff5580 100644 --- a/Coupled_Drivers/rivers_driver.py +++ b/Coupled_Drivers/rivers_driver.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. 
+ NAME rivers_driver.py diff --git a/Coupled_Drivers/si3_controller.py b/Coupled_Drivers/si3_controller.py index 48c6ce8..5b96c23 100644 --- a/Coupled_Drivers/si3_controller.py +++ b/Coupled_Drivers/si3_controller.py @@ -10,6 +10,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + NAME si3_controller.py diff --git a/Coupled_Drivers/top_controller.py b/Coupled_Drivers/top_controller.py index 833834a..c09ed36 100644 --- a/Coupled_Drivers/top_controller.py +++ b/Coupled_Drivers/top_controller.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + NAME top_controller.py diff --git a/Coupled_Drivers/unittests/test_cpmip_utils.py b/Coupled_Drivers/unittests/test_cpmip_utils.py index c1a35a1..9a5c2e9 100644 --- a/Coupled_Drivers/unittests/test_cpmip_utils.py +++ b/Coupled_Drivers/unittests/test_cpmip_utils.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + ''' import unittest diff --git a/Coupled_Drivers/unittests/test_cpmip_xios.py b/Coupled_Drivers/unittests/test_cpmip_xios.py index 58df7c7..e9212c0 100644 --- a/Coupled_Drivers/unittests/test_cpmip_xios.py +++ b/Coupled_Drivers/unittests/test_cpmip_xios.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. 
+ ''' import unittest diff --git a/Coupled_Drivers/unittests/test_rivers_driver.py b/Coupled_Drivers/unittests/test_rivers_driver.py index 2f25233..0c81758 100644 --- a/Coupled_Drivers/unittests/test_rivers_driver.py +++ b/Coupled_Drivers/unittests/test_rivers_driver.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + ''' import sys import unittest diff --git a/Coupled_Drivers/write_namcouple.py b/Coupled_Drivers/write_namcouple.py index 4328497..b468237 100644 --- a/Coupled_Drivers/write_namcouple.py +++ b/Coupled_Drivers/write_namcouple.py @@ -10,6 +10,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. + NAME write_namcouple.py diff --git a/Coupled_Drivers/xios_driver.py b/Coupled_Drivers/xios_driver.py index b08cd4d..a5bb448 100644 --- a/Coupled_Drivers/xios_driver.py +++ b/Coupled_Drivers/xios_driver.py @@ -11,6 +11,10 @@ Met Office, FitzRoy Road, Exeter, Devon, EX1 3PB, United Kingdom *****************************COPYRIGHT****************************** + +# Some of the content of this file has been produced with the assistance of +# Claude Sonnet 4.5. 
+ NAME xios_driver.py From d3aafae76a3c3aebd833490fd539207b7b32bb5f Mon Sep 17 00:00:00 2001 From: Pierre Siddall <43399998+Pierre-siddall@users.noreply.github.com> Date: Tue, 20 Jan 2026 15:16:18 +0000 Subject: [PATCH 14/14] Update copyright statements --- Coupled_Drivers/cice_driver.py | 2 +- Coupled_Drivers/cpmip_utils.py | 2 +- Coupled_Drivers/cpmip_xios.py | 2 +- Coupled_Drivers/mct_driver.py | 2 +- Coupled_Drivers/nemo_driver.py | 2 +- Coupled_Drivers/rivers_driver.py | 2 +- Coupled_Drivers/si3_controller.py | 2 +- Coupled_Drivers/top_controller.py | 2 +- Coupled_Drivers/unittests/test_cpmip_utils.py | 2 +- Coupled_Drivers/unittests/test_cpmip_xios.py | 2 +- Coupled_Drivers/unittests/test_rivers_driver.py | 2 +- Coupled_Drivers/write_namcouple.py | 2 +- Coupled_Drivers/xios_driver.py | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Coupled_Drivers/cice_driver.py b/Coupled_Drivers/cice_driver.py index c432d30..304b858 100644 --- a/Coupled_Drivers/cice_driver.py +++ b/Coupled_Drivers/cice_driver.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2023-2025 Met Office. All rights reserved. + (C) Crown copyright 2023-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy diff --git a/Coupled_Drivers/cpmip_utils.py b/Coupled_Drivers/cpmip_utils.py index 23e48f2..ca0ab67 100644 --- a/Coupled_Drivers/cpmip_utils.py +++ b/Coupled_Drivers/cpmip_utils.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2023-2025 Met Office. All rights reserved. + (C) Crown copyright 2023-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. 
If no licence has been raised with this copy diff --git a/Coupled_Drivers/cpmip_xios.py b/Coupled_Drivers/cpmip_xios.py index f46f5e8..0c84e9f 100644 --- a/Coupled_Drivers/cpmip_xios.py +++ b/Coupled_Drivers/cpmip_xios.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2021-2025 Met Office. All rights reserved. + (C) Crown copyright 2021-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy diff --git a/Coupled_Drivers/mct_driver.py b/Coupled_Drivers/mct_driver.py index 27728a6..d6d4731 100644 --- a/Coupled_Drivers/mct_driver.py +++ b/Coupled_Drivers/mct_driver.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2021-2025 Met Office. All rights reserved. + (C) Crown copyright 2021-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy of the code, the use, duplication or disclosure of it is strictly diff --git a/Coupled_Drivers/nemo_driver.py b/Coupled_Drivers/nemo_driver.py index dba8c95..274f39a 100644 --- a/Coupled_Drivers/nemo_driver.py +++ b/Coupled_Drivers/nemo_driver.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2023-2025 Met Office. All rights reserved. + (C) Crown copyright 2023-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. 
If no licence has been raised with this copy diff --git a/Coupled_Drivers/rivers_driver.py b/Coupled_Drivers/rivers_driver.py index bff5580..2d526d2 100644 --- a/Coupled_Drivers/rivers_driver.py +++ b/Coupled_Drivers/rivers_driver.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2025 Met Office. All rights reserved. + (C) Crown copyright 2025-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy diff --git a/Coupled_Drivers/si3_controller.py b/Coupled_Drivers/si3_controller.py index 5b96c23..bd4087f 100644 --- a/Coupled_Drivers/si3_controller.py +++ b/Coupled_Drivers/si3_controller.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2021-2025 Met Office. All rights reserved. + (C) Crown copyright 2021-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy of the code, the use, duplication or disclosure of it is strictly diff --git a/Coupled_Drivers/top_controller.py b/Coupled_Drivers/top_controller.py index c09ed36..0b6a4ca 100644 --- a/Coupled_Drivers/top_controller.py +++ b/Coupled_Drivers/top_controller.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2021-2025 Met Office. All rights reserved. + (C) Crown copyright 2021-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. 
If no licence has been raised with this copy diff --git a/Coupled_Drivers/unittests/test_cpmip_utils.py b/Coupled_Drivers/unittests/test_cpmip_utils.py index 9a5c2e9..d82ab71 100644 --- a/Coupled_Drivers/unittests/test_cpmip_utils.py +++ b/Coupled_Drivers/unittests/test_cpmip_utils.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2023-2025 Met Office. All rights reserved. + (C) Crown copyright 2023-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy diff --git a/Coupled_Drivers/unittests/test_cpmip_xios.py b/Coupled_Drivers/unittests/test_cpmip_xios.py index e9212c0..ce361db 100644 --- a/Coupled_Drivers/unittests/test_cpmip_xios.py +++ b/Coupled_Drivers/unittests/test_cpmip_xios.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2021-2025 Met Office. All rights reserved. + (C) Crown copyright 2021-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy diff --git a/Coupled_Drivers/unittests/test_rivers_driver.py b/Coupled_Drivers/unittests/test_rivers_driver.py index 0c81758..1070ddb 100644 --- a/Coupled_Drivers/unittests/test_rivers_driver.py +++ b/Coupled_Drivers/unittests/test_rivers_driver.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2025 Met Office. All rights reserved. + (C) Crown copyright 2025-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. 
If no licence has been raised with this copy diff --git a/Coupled_Drivers/write_namcouple.py b/Coupled_Drivers/write_namcouple.py index b468237..23b1334 100644 --- a/Coupled_Drivers/write_namcouple.py +++ b/Coupled_Drivers/write_namcouple.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2021-2025 Met Office. All rights reserved. + (C) Crown copyright 2021-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy of the code, the use, duplication or disclosure of it is strictly diff --git a/Coupled_Drivers/xios_driver.py b/Coupled_Drivers/xios_driver.py index a5bb448..fd514c4 100644 --- a/Coupled_Drivers/xios_driver.py +++ b/Coupled_Drivers/xios_driver.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ''' *****************************COPYRIGHT****************************** - (C) Crown copyright 2023-2025 Met Office. All rights reserved. + (C) Crown copyright 2023-2026 Met Office. All rights reserved. Use, duplication or disclosure of this code is subject to the restrictions as set forth in the licence. If no licence has been raised with this copy