diff --git a/README.md b/README.md index af555f2e2..46c528f95 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ used those components. [https://mpas-dev.github.io/MPAS-Analysis/stable/](https://mpas-dev.github.io/MPAS-Analysis/stable/) -## Installation +## Installation for users MPAS-Analysis is available as an anaconda package via the `conda-forge` channel: @@ -40,6 +40,8 @@ conda create -n mpas-analysis mpas-analysis conda activate mpas-analysis ``` +## Installation for developers + To use the latest version for developers, get the code from: [https://github.com/MPAS-Dev/MPAS-Analysis](https://github.com/MPAS-Dev/MPAS-Analysis) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0e1931b92..d548902f4 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,12 +21,12 @@ jobs: vmImage: 'ubuntu-latest' strategy: matrix: - Python37: - python.version: '3.7' Python38: python.version: '3.8' Python39: python.version: '3.9' + Python310: + python.version: '3.10' steps: - bash: echo "##vso[task.prependpath]$CONDA/bin" @@ -62,8 +62,8 @@ jobs: conda activate build conda create --yes --quiet --name docs -c ${CONDA_PREFIX}/conda-bld/ \ python=$PYTHON_VERSION mpas-analysis sphinx mock sphinx_rtd_theme \ - tabulate m2r - condition: eq(variables['python.version'], '3.8') + tabulate m2r2 "mistune<2" + condition: eq(variables['python.version'], '3.9') displayName: Create Anaconda docs environment - bash: | @@ -128,7 +128,7 @@ jobs: fi popd || exit 1 fi - condition: eq(variables['python.version'], '3.8') + condition: eq(variables['python.version'], '3.9') displayName: build and deploy docs - job: @@ -137,8 +137,8 @@ jobs: vmImage: 'ubuntu-latest' strategy: matrix: - Python38: - python.version: '3.8' + Python39: + python.version: '3.9' steps: - bash: echo "##vso[task.prependpath]$CONDA/bin" @@ -178,12 +178,12 @@ jobs: vmImage: 'macOS-latest' strategy: matrix: - Python37: - python.version: '3.7' Python38: python.version: '3.8' Python39: python.version: '3.9' + 
Python310: + python.version: '3.10' steps: - bash: echo "##vso[task.prependpath]$CONDA/bin" diff --git a/ci/python3.7.yaml b/ci/python3.10.yaml similarity index 85% rename from ci/python3.7.yaml rename to ci/python3.10.yaml index 89022546e..366288703 100644 --- a/ci/python3.7.yaml +++ b/ci/python3.10.yaml @@ -5,4 +5,4 @@ pin_run_as_build: min_pin: x.x max_pin: x.x python: -- 3.7.* *_cpython +- 3.10.* *_cpython diff --git a/ci/recipe/meta.yaml b/ci/recipe/meta.yaml index caa524c8b..9dedce6ab 100644 --- a/ci/recipe/meta.yaml +++ b/ci/recipe/meta.yaml @@ -1,5 +1,5 @@ {% set name = "MPAS-Analysis" %} -{% set version = "1.5.0" %} +{% set version = "1.6.0" %} package: name: {{ name|lower }} @@ -18,10 +18,10 @@ build: requirements: host: - - python >=3.7,<3.10 + - python >=3.7 - pip run: - - python >=3.7,<3.10 + - python >=3.7 - bottleneck - cartopy >=0.18.0 - cartopy_offlinedata diff --git a/configs/acme1/config.20180129.DECKv1b_piControl.ne30_oEC.edison b/configs/acme1/config.20180129.DECKv1b_piControl.ne30_oEC.edison deleted file mode 100644 index 0407a4132..000000000 --- a/configs/acme1/config.20180129.DECKv1b_piControl.ne30_oEC.edison +++ /dev/null @@ -1,152 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = 20180129.DECKv1b_piControl.ne30_oEC.edison - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. 
The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 4 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /space2/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /space2/asaydavis1/example_e3sm_output/20180129.DECKv1b_piControl.ne30_oEC.edison - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oEC60to30v3 - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. 
-oceanNamelistFileName = run/mpas-o_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpas-cice_in -seaIceStreamsFileName = run/streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# provide an absolute path to put HTML in an alternative location (e.g. a web -# portal) -# htmlSubdirectory = /global/project/projectdirs/acme/www/USERNAME/RUNNAME -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all_publicObs', 'no_landIceCavities', 'no_eke', 'no_BGC', - 'no_icebergs', 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 1 -# the last year over which to average climatalogies -endYear = 10 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. 
-# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/alcf/config.20180410.A_WCYCL1950_HR.ne120_oRRS18v3_ICG.theta b/configs/alcf/20180410.A_WCYCL1950_HR.ne120_oRRS18v3_ICG.theta.cfg similarity index 92% rename from configs/alcf/config.20180410.A_WCYCL1950_HR.ne120_oRRS18v3_ICG.theta rename to configs/alcf/20180410.A_WCYCL1950_HR.ne120_oRRS18v3_ICG.theta.cfg index 0993dc741..fa9e657a2 100644 --- a/configs/alcf/config.20180410.A_WCYCL1950_HR.ne120_oRRS18v3_ICG.theta +++ b/configs/alcf/20180410.A_WCYCL1950_HR.ne120_oRRS18v3_ICG.theta.cfg @@ -30,16 +30,10 @@ parallelTaskCount = 4 # handle 12 simultaneous processes, one for each monthly climatology. ncclimoParallelMode = serial -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /lus/theta-fs0/projects/ClimateEnergy_3/diagnostics +# the number of total threads to use when ncclimo runs in "bck" or "mpi" mode. +# Reduce this number if ncclimo is crashing (maybe because it is out of memory). +# The number of threads must be a factor of 12 (1, 2, 3, 4, 6 or 12). 
+ncclimoThreads = 4 [input] ## options related to reading in the results to be analyzed diff --git a/configs/alcf/20190405.GMPAS-DIB-IAF-ISMF.T62_oRRS30to10v3wLI.theta.yrs41-50.conf b/configs/alcf/20190405.GMPAS-DIB-IAF-ISMF.T62_oRRS30to10v3wLI.theta.yrs41-50.conf index bd972d72f..9abe29b01 100644 --- a/configs/alcf/20190405.GMPAS-DIB-IAF-ISMF.T62_oRRS30to10v3wLI.theta.yrs41-50.conf +++ b/configs/alcf/20190405.GMPAS-DIB-IAF-ISMF.T62_oRRS30to10v3wLI.theta.yrs41-50.conf @@ -23,13 +23,18 @@ mainRunName = 20190405.GMPAS-DIB-IAF-ISMF.T62_oRRS30to10v3wLI.theta ## options related to executing parallel tasks # the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 12 +parallelTaskCount = 4 # the parallelism mode in ncclimo ("serial" or "bck") # Set this to "bck" (background parallelism) if running on a machine that can # handle 12 simultaneous processes, one for each monthly climatology. ncclimoParallelMode = bck +# the number of total threads to use when ncclimo runs in "bck" or "mpi" mode. +# Reduce this number if ncclimo is crashing (maybe because it is out of memory). +# The number of threads must be a factor of 12 (1, 2, 3, 4, 6 or 12). +ncclimoThreads = 4 + [diagnostics] ## config options related to observations, mapping files and region files used ## by MPAS-Analysis in diagnostics computations. diff --git a/configs/alcf/config.20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta b/configs/alcf/config.20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta deleted file mode 100644 index c78239092..000000000 --- a/configs/alcf/config.20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta +++ /dev/null @@ -1,137 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. 
-mainRunName = 20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 4 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. 
-baseDirectory = /lus/theta-fs0/projects/ClimateEnergy_3/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lus/theta-fs0/projects/OceanClimate_2/mpeterse/20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta/run - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oEC60to30v3wLI - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = mpas-o_in -oceanStreamsFileName = streams.ocean -seaIceNamelistFileName = mpas-cice_in -seaIceStreamsFileName = streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -generate = ['all', 'no_eke', 'no_BGC', 'no_icebergs', 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 5 -# the last year over which to average climatalogies -endYear = 10 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? 
You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/alcf/job_script.cooley.18to6.bash b/configs/alcf/job_script.cooley.18to6.bash deleted file mode 100755 index bb276799e..000000000 --- a/configs/alcf/job_script.cooley.18to6.bash +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -#COBALT -t 3:00:00 -#COBALT -n 1 -#COBALT -A OceanClimate_2 -# This software is open source software available under the BSD-3 license. -# -# Copyright (c) 2020 Triad National Security, LLC. All rights reserved. -# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights -# reserved. -# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved. -# -# Additional copyright and license information can be found in the LICENSE file -# distributed with this code, or at -# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE - -source /lus/theta-fs0/projects/ccsm/acme/tools/e3sm-unified/load_latest_e3sm_unified_cooley.sh - -# MPAS/ACME job to be analyzed, including paths to simulation data and -# observations. Change this name and path as needed -run_config_file="config.20180410.A_WCYCL1950_HR.ne120_oRRS18v3_ICG.theta" - -# NOTE: the following section will OVERWRITE values specified within the config file named above - -# one parallel task per node by default -parallel_task_count=8 - -# ncclimo can run with 1 (serial) or 12 (bck) threads -ncclimo_mode=serial - -if [ ! -f $run_config_file ]; then - echo "File $run_config_file not found!" - exit 1 -fi - -# This is a config file generated just for this job with the output directory, -# command prefix and parallel task count from above. 
-job_config_file=config.output.$COBALT_JOBID - -# write out the config file specific to this job -cat < $job_config_file -[execute] -# options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = $parallel_task_count - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = $ncclimo_mode - -EOF - -mpas_analysis $run_config_file $job_config_file - diff --git a/configs/alcf/job_script.cooley.30to10.bash b/configs/alcf/job_script.cooley.30to10.bash deleted file mode 100755 index ebc766559..000000000 --- a/configs/alcf/job_script.cooley.30to10.bash +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -#COBALT -t 0:30:00 -#COBALT -n 1 -#COBALT -A OceanClimate_2 -# This software is open source software available under the BSD-3 license. -# -# Copyright (c) 2020 Triad National Security, LLC. All rights reserved. -# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights -# reserved. -# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved. -# -# Additional copyright and license information can be found in the LICENSE file -# distributed with this code, or at -# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE - -source /lus/theta-fs0/projects/ccsm/acme/tools/e3sm-unified/load_latest_e3sm_unified_cooley.sh - -# MPAS/ACME job to be analyzed, including paths to simulation data and -# observations. Change this name and path as needed -run_config_file="config.20190301.GMPAS-DIB-IAF-ISMF.T62_oRRS30to10v3wLI.theta" - -# NOTE: the following section will OVERWRITE values specified within the config file named above - -# one parallel task per node by default -parallel_task_count=12 - -# ncclimo can run with 1 (serial) or 12 (bck) threads -ncclimo_mode=bck - -if [ ! 
-f $run_config_file ]; then - echo "File $run_config_file not found!" - exit 1 -fi - -# This is a config file generated just for this job with the output directory, -# command prefix and parallel task count from above. -job_config_file=config.output.$COBALT_JOBID - -# write out the config file specific to this job -cat < $job_config_file -[execute] -# options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = $parallel_task_count - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = $ncclimo_mode - -EOF - -mpas_analysis $run_config_file $job_config_file - diff --git a/configs/alcf/job_script.cooley.bash b/configs/alcf/job_script.cooley.bash new file mode 100755 index 000000000..6640ff52f --- /dev/null +++ b/configs/alcf/job_script.cooley.bash @@ -0,0 +1,17 @@ +#!/bin/bash +#COBALT -t 3:00:00 +#COBALT -n 1 +#COBALT -A ClimateEnergy_4 + +source /lus/theta-fs0/projects/ccsm/acme/tools/e3sm-unified/load_latest_e3sm_unified_cooley.sh +# alternatively, you can load your own development environment +# source ~/miniconda3/etc/profile.d/conda.sh +# conda activate mpas_dev +# export E3SMU_MACHINE=cooley + +export HDF5_USE_FILE_LOCKING=FALSE + +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis 20180410.A_WCYCL1950_HR.ne120_oRRS18v3_ICG.theta.cfg + diff --git a/configs/compy/config.20190711.testLuke.A_WCYCL1850S.ne30_oECv3.compy b/configs/compy/20190711.testLuke.A_WCYCL1850S.ne30_oECv3.compy.cfg similarity index 84% rename from configs/compy/config.20190711.testLuke.A_WCYCL1850S.ne30_oECv3.compy rename to configs/compy/20190711.testLuke.A_WCYCL1850S.ne30_oECv3.compy.cfg index 9e5781921..ecd0ac25b 100644 --- 
a/configs/compy/config.20190711.testLuke.A_WCYCL1850S.ne30_oECv3.compy +++ b/configs/compy/20190711.testLuke.A_WCYCL1850S.ne30_oECv3.compy.cfg @@ -30,30 +30,15 @@ parallelTaskCount = 6 # handle 12 simultaneous processes, one for each monthly climatology. ncclimoParallelMode = bck +# the number of total threads to use when ncclimo runs in "bck" or "mpi" mode. +# Reduce this number if ncclimo is crashing (maybe because it is out of memory). +# The number of threads must be a factor of 12 (1, 2, 3, 4, 6 or 12). +ncclimoThreads = 4 + # the number of MPI tasks to use in creating mapping files (1 means tasks run in # serial, the default) mapMpiTasks = 6 -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun --mpi=pmi2 - -# "None" if ncremap should perform remapping without a command, or "srun" -# possibly with some flags if it should be run with that command -ncremapParallelExec = srun --mpi=pmi2 - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /compyfs/diagnostics - [input] ## options related to reading in the results to be analyzed @@ -64,12 +49,19 @@ baseDirectory = /compyfs/wolf966/e3sm_scratch/20190711.testLuke.A_WCYCL1850S.ne3 # ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) mpasMeshName = oEC60to30v3 +# subdirectory containing restart files +runSubdirectory = . +# subdirectory for ocean history files +oceanHistorySubdirectory = . 
+# subdirectory for sea ice history files +seaIceHistorySubdirectory = . + # names of namelist and streams files, either a path relative to baseDirectory # or an absolute path. -#oceanNamelistFileName = mpas-o_in -#oceanStreamsFileName = streams.ocean -#seaIceNamelistFileName = mpas-cice_in -#seaIceStreamsFileName = streams.cice +oceanNamelistFileName = mpaso_in +oceanStreamsFileName = streams.ocean +seaIceNamelistFileName = mpassi_in +seaIceStreamsFileName = streams.seaice [output] ## options related to writing out plots, intermediate cached data sets, logs, @@ -142,7 +134,7 @@ endYear = 40 # a "main vs. control" analysis run, the range of years must be valid and # cannot include "end" because the original data may not be available. startYear = 1 -endYear = end +endYear = 40 [streamfunctionMOC] ## options related to plotting the streamfunction of the meridional overturning diff --git a/configs/compy/job_script.compy.bash b/configs/compy/job_script.compy.bash index 582dddff0..2fab9fa57 100644 --- a/configs/compy/job_script.compy.bash +++ b/configs/compy/job_script.compy.bash @@ -6,10 +6,17 @@ #SBATCH --output=mpas_analysis.o%j #SBATCH --error=mpas_analysis.e%j -cd $SLURM_SUBMIT_DIR export OMP_NUM_THREADS=1 source /share/apps/E3SM/conda_envs/load_latest_e3sm_unified_compy.sh +# alternatively, you can load your own development environment +# source ~/miniconda3/etc/profile.d/conda.sh +# conda activate mpas_dev +# export E3SMU_MACHINE=compy -srun --mpi=pmi2 -N 1 -n 1 mpas_analysis config.run_name_here +export HDF5_USE_FILE_LOCKING=FALSE + +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis run_name_here.cfg diff --git a/configs/compy/test_suite/clean_suite.bash b/configs/compy/test_suite/clean_suite.bash deleted file mode 100755 index 9437f53c8..000000000 --- a/configs/compy/test_suite/clean_suite.bash +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -set -e - -machine=compy - -branch=$(git 
symbolic-ref --short HEAD) - -rm -rf ${machine}_test_suite -rm -rf /compyfs/asay932/analysis_testing/${machine}/${branch} -rm -rf /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/${machine}/${branch} diff --git a/configs/compy/test_suite/ctrl.cfg b/configs/compy/test_suite/ctrl.cfg deleted file mode 100644 index 148de7b15..000000000 --- a/configs/compy/test_suite/ctrl.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = ctrl - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /compyfs/asay932/analysis_testing/test_output/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) 
-mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /compyfs/asay932/analysis_testing/baseline - -htmlSubdirectory = /compyfs/www/asay932/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 5 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 5 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. 
-# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/compy/test_suite/job_script.bash b/configs/compy/test_suite/job_script.bash deleted file mode 100644 index 04b0042e2..000000000 --- a/configs/compy/test_suite/job_script.bash +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 -#SBATCH --time=2:00:00 -#SBATCH --account=e3sm -#SBATCH --job-name=mpas_analysis -#SBATCH --output=mpas_analysis.o%j -#SBATCH --error=mpas_analysis.e%j - -set -e - -cd $SLURM_SUBMIT_DIR -export OMP_NUM_THREADS=1 - -source ${HOME}/miniconda3/etc/profile.d/conda.sh -conda activate test_env -export HDF5_USE_FILE_LOCKING=FALSE -export E3SMU_MACHINE=compy - -echo env: test_env -echo configs: --polar_regions ../main.cfg - -srun -N 1 -n 1 mpas_analysis --list -srun -N 1 -n 1 mpas_analysis --plot_colormaps -srun -N 1 -n 1 mpas_analysis --setup_only --polar_regions ../main.cfg -srun -N 1 -n 1 mpas_analysis --purge --polar_regions ../main.cfg --verbose -srun -N 1 -n 1 mpas_analysis --html_only --polar_regions ../main.cfg - -chmod -R ugo+rX /compyfs/www/asay932/analysis_testing/ diff --git a/configs/compy/test_suite/job_script_no_polar_regions.bash b/configs/compy/test_suite/job_script_no_polar_regions.bash deleted file mode 100644 index f2f1c9a7f..000000000 --- a/configs/compy/test_suite/job_script_no_polar_regions.bash +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 -#SBATCH --time=2:00:00 -#SBATCH --account=e3sm -#SBATCH --job-name=mpas_analysis -#SBATCH --output=mpas_analysis.o%j -#SBATCH --error=mpas_analysis.e%j - -set -e - -cd $SLURM_SUBMIT_DIR -export OMP_NUM_THREADS=1 - -source ${HOME}/miniconda3/etc/profile.d/conda.sh -conda activate test_env -export HDF5_USE_FILE_LOCKING=FALSE -export E3SMU_MACHINE=compy - -echo env: test_env -echo configs: ../no_polar_regions.cfg - -srun -N 1 -n 1 mpas_analysis ../no_polar_regions.cfg 
--verbose - -chmod -R ugo+rX /compyfs/www/asay932/analysis_testing/ diff --git a/configs/compy/test_suite/main.cfg b/configs/compy/test_suite/main.cfg deleted file mode 100644 index 8111beabc..000000000 --- a/configs/compy/test_suite/main.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /compyfs/asay932/analysis_testing/test_output/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. 
-oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /compyfs/asay932/analysis_testing/baseline - -htmlSubdirectory = /compyfs/www/asay932/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. 
-# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/compy/test_suite/main_vs_ctrl.cfg b/configs/compy/test_suite/main_vs_ctrl.cfg deleted file mode 100644 index f62aa39d9..000000000 --- a/configs/compy/test_suite/main_vs_ctrl.cfg +++ /dev/null @@ -1,128 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -controlRunConfigFile = ../ctrl.cfg - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -mainRunConfigFile = ../main_py3.9.cfg - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /compyfs/asay932/analysis_testing/test_output/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) 
-mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /compyfs/asay932/analysis_testing/baseline - -htmlSubdirectory = /compyfs/www/asay932/analysis_testing/baseline - - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 5 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 5 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. 
-# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/compy/test_suite/mesh_rename.cfg b/configs/compy/test_suite/mesh_rename.cfg deleted file mode 100644 index 2cf651fca..000000000 --- a/configs/compy/test_suite/mesh_rename.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /compyfs/asay932/analysis_testing/test_output/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) 
-mpasMeshName = new_oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /compyfs/asay932/analysis_testing/baseline - -htmlSubdirectory = /compyfs/www/asay932/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. 
-# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/compy/test_suite/no_ncclimo.cfg b/configs/compy/test_suite/no_ncclimo.cfg deleted file mode 100644 index 794abf5e8..000000000 --- a/configs/compy/test_suite/no_ncclimo.cfg +++ /dev/null @@ -1,131 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /compyfs/asay932/analysis_testing/test_output/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) 
-mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /compyfs/asay932/analysis_testing/baseline - -htmlSubdirectory = /compyfs/www/asay932/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -useNcclimo = False -daskThreads = 12 -subprocessCount = 12 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? 
You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/compy/test_suite/test_suite.bash b/configs/compy/test_suite/test_suite.bash deleted file mode 100755 index 5d13babd5..000000000 --- a/configs/compy/test_suite/test_suite.bash +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env bash - -set -e - -machine=compy - -main_py=3.9 -alt_py=3.8 - -export HDF5_USE_FILE_LOCKING=FALSE - -source ${HOME}/miniconda3/etc/profile.d/conda.sh -conda activate base - -branch=$(git symbolic-ref --short HEAD) - -conda update -y conda conda-build mamba boa -rm -rf ${HOME}/miniconda3/conda-bld -conda mambabuild ci/recipe - -# create the test conda envs -for py in ${main_py} ${alt_py} -do - env=test_mpas_analysis_py${py} - conda remove -y --all -n ${env} - mamba create -y -n ${env} --use-local python=${py} mpas-analysis sphinx \ - mock sphinx_rtd_theme "tabulate>=0.8.2" m2r pytest - conda activate ${env} - pytest - conda deactivate -done - -# create another env for testing xarray master branch -py=${main_py} -env=test_mpas_analysis_xarray_master -mamba create --yes --quiet --name ${env} --use-local python=${py} \ - mpas-analysis pytest -conda activate ${env} -pip install git+https://github.com/pydata/xarray.git -pytest -conda deactivate - -# move to a subdirectory so we use the conda package, not the local package -rm -rf ${machine}_test_suite -mkdir ${machine}_test_suite - -cd ${machine}_test_suite - -template_path=../configs/${machine}/test_suite -job_template_path=${template_path} - -for py in ${main_py} ${alt_py} -do - env=test_mpas_analysis_py${py} - run=main_py${py} - config=${run}.cfg - mkdir ${run} - 
job=${run}/job_script.bash - sed "s/baseline/${machine}\/${branch}\/py${py}/g" ${template_path}/main.cfg > ${config} - sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} -done - - -py=${main_py} -env=test_mpas_analysis_py${py} - -run=wc_defaults -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/no_polar_regions.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script_no_polar_regions.bash > ${job} - -run=no_ncclimo -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -run=ctrl -config=${run}.cfg -sed "s/baseline/${machine}\/${branch}\/py${py}/g" ${template_path}/${config} > ${config} - -run=main_vs_ctrl -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -run=no_polar_regions -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/main.cfg > ${config} -sed -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script_no_polar_regions.bash > ${job} - -run=mesh_rename -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -env=test_mpas_analysis_xarray_master -run=xarray_master -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" 
${template_path}/main.cfg > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - - -# submit the jobs -cd main_py${main_py} -RES=$(sbatch job_script.bash) -cd .. - -cd main_vs_ctrl -sbatch --dependency=afterok:${RES##* } job_script.bash -cd .. - -for run in main_py${alt_py} wc_defaults no_ncclimo no_polar_regions \ - mesh_rename xarray_master -do - cd ${run} - sbatch job_script.bash - cd .. -done - -cd .. - diff --git a/configs/compy/test_suite/wc_defaults.cfg b/configs/compy/test_suite/wc_defaults.cfg deleted file mode 100644 index 1d7c68e5c..000000000 --- a/configs/compy/test_suite/wc_defaults.cfg +++ /dev/null @@ -1,126 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = wc_defaults - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /compyfs/asay932/analysis_testing/test_output/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) 
-mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /compyfs/asay932/analysis_testing/baseline - -htmlSubdirectory = /compyfs/www/asay932/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all', 'no_landIceCavities', 'no_BGC', 'no_icebergs', 'no_min', 'no_max', 'no_sose', 'no_climatologyMapSchmidtko', 'no_climatologyMapAntarcticMelt', 'no_regionalTSDiagrams', 'no_timeSeriesAntarcticMelt', 'no_timeSeriesOceanRegions', 'no_climatologyMapSose', 'no_woceTransects', 'no_soseTransects', 'no_geojsonTransects', 'no_oceanRegionalProfiles'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? 
You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/config.20190225.GMPAS-DIB-IAF-ISMF.T62_oEC60to30v3wLI.cori-knl b/configs/cori/20190225.GMPAS-DIB-IAF-ISMF.T62_oEC60to30v3wLI.cori-knl.cfg similarity index 88% rename from configs/cori/config.20190225.GMPAS-DIB-IAF-ISMF.T62_oEC60to30v3wLI.cori-knl rename to configs/cori/20190225.GMPAS-DIB-IAF-ISMF.T62_oEC60to30v3wLI.cori-knl.cfg index d31271c94..89dccd8fb 100644 --- a/configs/cori/config.20190225.GMPAS-DIB-IAF-ISMF.T62_oEC60to30v3wLI.cori-knl +++ b/configs/cori/20190225.GMPAS-DIB-IAF-ISMF.T62_oEC60to30v3wLI.cori-knl.cfg @@ -23,28 +23,21 @@ mainRunName = GMPAS-DIB-IAF-ISMF.oEC60to30v3wLI ## options related to executing parallel tasks # the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 6 +parallelTaskCount = 12 # the parallelism mode in ncclimo ("serial" or "bck") # Set this to "bck" (background parallelism) if running on a machine that can # handle 12 simultaneous processes, one for each monthly climatology. ncclimoParallelMode = bck -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun +# the number of total threads to use when ncclimo runs in "bck" or "mpi" mode. +# Reduce this number if ncclimo is crashing (maybe because it is out of memory). +# The number of threads must be a factor of 12 (1, 2, 3, 4, 6 or 12). 
+ncclimoThreads = 12 -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /global/cfs/cdirs/e3sm/diagnostics +# the number of MPI tasks to use in creating mapping files (1 means tasks run in +# serial, the default) +mapMpiTasks = 12 [input] ## options related to reading in the results to be analyzed @@ -144,4 +137,4 @@ endYear = 30 # a "main vs. control" analysis run, the range of years must be valid and # cannot include "end" because the original data may not be available. startYear = 1 -endYear = end +endYear = 30 diff --git a/configs/cori/config.20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl b/configs/cori/config.20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl deleted file mode 100644 index b46647959..000000000 --- a/configs/cori/config.20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl +++ /dev/null @@ -1,136 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = 20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. 
-# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 1 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -# the number of total threads to use when ncclimo runs in "bck" or "mpi" mode. -# Reduce this number if ncclimo is crashing (maybe because it is out of memory). -# The number of threads must be a factor of 12 (1, 2, 3, 4, 6 or 12). -ncclimoThreads = 3 - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. 
-baseDirectory = /global/cfs/cdirs/e3sm/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cscratch1/sd/mpeterse/acme_scratch/20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl/run - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oRRS30to10v3wLI - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = mpas-o_in -oceanStreamsFileName = streams.ocean -seaIceNamelistFileName = mpas-cice_in -seaIceStreamsFileName = streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# provide an absolute path to put HTML in an alternative location (e.g. a web -# portal) -# htmlSubdirectory = /global/cfs/cdirs/e3sm/www/USERNAME/RUNNAME -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -generate = ['all', 'no_eke', 'no_BGC', 'no_icebergs', 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 5 -# the last year over which to average climatalogies -endYear = 10 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = 10 diff --git a/configs/cori/config.20180129.DECKv1b_piControl.ne30_oEC.edison b/configs/cori/config.20180129.DECKv1b_piControl.ne30_oEC.edison deleted file mode 100644 index a2e670d55..000000000 --- a/configs/cori/config.20180129.DECKv1b_piControl.ne30_oEC.edison +++ /dev/null @@ -1,157 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = 20180129.DECKv1b_piControl.ne30_oEC.edison - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 4 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. 
-ncclimoParallelMode = bck - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /global/cfs/cdirs/e3sm/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cscratch1/sd/golaz/ACME_simulations/20180129.DECKv1b_piControl.ne30_oEC.edison - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oEC60to30v3 - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpas-o_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpas-cice_in -seaIceStreamsFileName = run/streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# provide an absolute path to put HTML in an alternative location (e.g. 
a web -# portal) -# htmlSubdirectory = /global/cfs/cdirs/e3sm/www/USERNAME/RUNNAME -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_landIceCavities', 'no_eke', 'no_BGC', 'no_icebergs', - 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 5 -# the last year over which to average climatalogies -endYear = 10 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. 
In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/config.20180209.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl.afterSalinityFix b/configs/cori/config.20180209.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl.afterSalinityFix deleted file mode 100644 index ff8fd84b1..000000000 --- a/configs/cori/config.20180209.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl.afterSalinityFix +++ /dev/null @@ -1,144 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. 
-mainRunName = 20180129.DECKv1b_piControl.ne30_oEC.edison - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 4 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. 
-baseDirectory = /global/cfs/cdirs/e3sm/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cscratch1/sd/mpeterse/acme_scratch/20180209.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl.afterSalinityFix - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = run -# subdirectory for sea ice history files -seaIceHistorySubdirectory = run - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oRRS30to10v3wLI - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpas-o_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpas-cice_in -seaIceStreamsFileName = run/streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# provide an absolute path to put HTML in an alternative location (e.g. a web -# portal) -# htmlSubdirectory = /global/cfs/cdirs/e3sm/www/USERNAME/RUNNAME -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. 
-# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_eke', 'no_BGC', 'no_icebergs', 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 5 -# the last year over which to average climatalogies -endYear = 10 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. 
Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 diff --git a/configs/cori/config.20180511.GMPAS-IAF.T62_oEC60to30v3wLI.edison b/configs/cori/config.20180511.GMPAS-IAF.T62_oEC60to30v3wLI.edison deleted file mode 100644 index 17c3852e1..000000000 --- a/configs/cori/config.20180511.GMPAS-IAF.T62_oEC60to30v3wLI.edison +++ /dev/null @@ -1,146 +0,0 @@ -[runs] -## options related to the run to be analyzed and reference runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = 20180511.GMPAS-IAF.T62_oEC60to30v3wLI.edison - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 6 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /global/cfs/cdirs/e3sm/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cscratch1/sd/dcomeau/acme_scratch/edison/20180511.GMPAS-IAF.T62_oEC60to30v3wLI.edison - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = run -# subdirectory for sea ice history files -seaIceHistorySubdirectory = run - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oEC60to30v3wLI - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. 
-oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /path/to/output/dir - -# provide an absolute path to put HTML in an alternative location (e.g. a web -# portal) -# htmlSubdirectory = /global/cfs/cdirs/e3sm/www/USERNAME/RUNNAME -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# ./run_mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# ./run_mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# ./run_mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all', 'no_eke', 'no_BGC', 'no_icebergs', 'no_index', 'no_min', - 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 21 -# the last year over which to average climatalogies -endYear = 30 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 30 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 30 - diff --git a/configs/cori/config.20180514.G.oQU240wLI.edison b/configs/cori/config.20180514.G.oQU240wLI.edison deleted file mode 100644 index 4901be080..000000000 --- a/configs/cori/config.20180514.G.oQU240wLI.edison +++ /dev/null @@ -1,156 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. 
-mainRunName = 20180514.G.oQU240wLI.edison - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 8 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. 
-baseDirectory = /global/cfs/cdirs/e3sm/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cscratch1/sd/xylar/acme_scratch/edison/20180514.G.oQU240wLI.edison - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU240wLI - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = run -# subdirectory for sea ice history files -seaIceHistorySubdirectory = run - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# provide an absolute path to put HTML in an alternative location (e.g. a web -# portal) -# htmlSubdirectory = /global/cfs/cdirs/e3sm/www/USERNAME/RUNNAME -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. 
-# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_eke', 'no_BGC', 'no_icebergs', 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 4 -# the last year over which to average climatalogies -endYear = 8 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. 
Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/config.B_low_res_ice_shelves_1696_JWolfe_layout_Edison b/configs/cori/config.B_low_res_ice_shelves_1696_JWolfe_layout_Edison deleted file mode 100644 index e3e8c6ea4..000000000 --- a/configs/cori/config.B_low_res_ice_shelves_1696_JWolfe_layout_Edison +++ /dev/null @@ -1,147 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = B_low_res_ice_shelves_1696_JWolfe_layout_Edison - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. 
-# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 1 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = serial - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /global/cfs/cdirs/e3sm/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cscratch1/sd/fyke/ACME_simulations/B_low_res_ice_shelves_1696_JWolfe_layout_Edison/run - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) 
-mpasMeshName = oEC60to30v3wLI - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = mpas-o_in -oceanStreamsFileName = streams.ocean -seaIceNamelistFileName = mpas-cice_in -seaIceStreamsFileName = streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# provide an absolute path to put HTML in an alternative location (e.g. a web -# portal) -# htmlSubdirectory = /global/cfs/cdirs/e3sm/www/USERNAME/RUNNAME -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -generate = ['all', 'no_eke', 'no_BGC', 'no_icebergs', 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 91 -# the last year over which to average climatalogies -endYear = 100 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? 
You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/job_script.cori-haswell.bash b/configs/cori/job_script.cori-haswell.bash index 45dac226c..852e6fae6 100644 --- a/configs/cori/job_script.cori-haswell.bash +++ b/configs/cori/job_script.cori-haswell.bash @@ -1,15 +1,4 @@ #!/bin/bash -l -# This software is open source software available under the BSD-3 license. -# -# Copyright (c) 2020 Triad National Security, LLC. All rights reserved. -# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights -# reserved. -# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved. -# -# Additional copyright and license information can be found in the LICENSE file -# distributed with this code, or at -# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE - # comment out if using debug queue #SBATCH --partition=regular # comment in to get premium queue @@ -26,22 +15,17 @@ #SBATCH --error=mpas_analysis.e%j #SBATCH -L cscratch1,SCRATCH,project -cd $SLURM_SUBMIT_DIR # optional, since this is the default behavior - export OMP_NUM_THREADS=1 source /global/common/software/e3sm/anaconda_envs/load_latest_e3sm_unified_cori-haswell.sh +# alternatively, you can load your own development environment +# source ~/miniconda3/etc/profile.d/conda.sh +# conda activate mpas_dev +# export E3SMU_MACHINE=cori-haswell -# MPAS/ACME job to be analyzed, including paths to simulation data and -# observations. Change this name and path as needed -run_config_file="config.run_name_here" - -if [ ! -f $run_config_file ]; then - echo "File $run_config_file not found!" 
- exit 1 -fi +export HDF5_USE_FILE_LOCKING=FALSE -# For an E3SM cryosphere run, include configs/polarRegions.conf, or exclude -# this extra config file for default parameters -srun -N 1 -n 1 mpas_analysis configs/polarRegions.conf $run_config_file +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis --polar_regions run_name_here.cfg diff --git a/configs/cori/job_script.cori-knl.bash b/configs/cori/job_script.cori-knl.bash index 850a53596..bd7c37008 100644 --- a/configs/cori/job_script.cori-knl.bash +++ b/configs/cori/job_script.cori-knl.bash @@ -1,15 +1,4 @@ #!/bin/bash -l -# This software is open source software available under the BSD-3 license. -# -# Copyright (c) 2020 Triad National Security, LLC. All rights reserved. -# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights -# reserved. -# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved. -# -# Additional copyright and license information can be found in the LICENSE file -# distributed with this code, or at -# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE - # comment out if using debug queue #SBATCH --partition=regular # comment in to get premium queue @@ -26,22 +15,17 @@ #SBATCH --error=mpas_analysis.e%j #SBATCH -L cscratch1,SCRATCH,project -cd $SLURM_SUBMIT_DIR # optional, since this is the default behavior - export OMP_NUM_THREADS=1 source /global/common/software/e3sm/anaconda_envs/load_latest_e3sm_unified_cori-knl.sh +# alternatively, you can load your own development environment +# source ~/miniconda3/etc/profile.d/conda.sh +# conda activate mpas_dev +# export E3SMU_MACHINE=cori-knl -# MPAS/ACME job to be analyzed, including paths to simulation data and -# observations. Change this name and path as needed -run_config_file="config.run_name_here" - -if [ ! -f $run_config_file ]; then - echo "File $run_config_file not found!" 
- exit 1 -fi +export HDF5_USE_FILE_LOCKING=FALSE -# For an E3SM cryosphere run, include configs/polarRegions.conf, or exclude -# this extra config file for default parameters -srun -N 1 -n 1 mpas_analysis configs/polarRegions.conf $run_config_file +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis --polar_regions run_name_here.cfg diff --git a/configs/cori/test_suite/clean_suite.bash b/configs/cori/test_suite/clean_suite.bash deleted file mode 100755 index 2184bf2c7..000000000 --- a/configs/cori/test_suite/clean_suite.bash +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -set -e - -machine=cori - -branch=$(git symbolic-ref --short HEAD) - -rm -rf ${machine}_test_suite -rm -rf /global/cscratch1/sd/xylar/analysis_testing/${machine}/${branch} -rm -rf /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/${machine}/${branch} diff --git a/configs/cori/test_suite/ctrl.cfg b/configs/cori/test_suite/ctrl.cfg deleted file mode 100644 index 36112a407..000000000 --- a/configs/cori/test_suite/ctrl.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = ctrl - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cfs/cdirs/e3sm/xylar/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /global/cscratch1/sd/xylar/analysis_testing/baseline - -htmlSubdirectory = /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 5 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = 5 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/test_suite/job_script.bash b/configs/cori/test_suite/job_script.bash deleted file mode 100644 index c5c73badf..000000000 --- a/configs/cori/test_suite/job_script.bash +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -l -#SBATCH --partition=regular -#SBATCH -C haswell -#SBATCH --nodes=1 -#SBATCH --time=2:00:00 -#SBATCH --account=e3sm -#SBATCH --job-name=mpas_analysis -#SBATCH --output=mpas_analysis.o%j -#SBATCH --error=mpas_analysis.e%j -#SBATCH -L cscratch1,SCRATCH,project - -set -e - -cd $SLURM_SUBMIT_DIR -export OMP_NUM_THREADS=1 - -source ${HOME}/miniconda3/etc/profile.d/conda.sh -conda activate test_env -export HDF5_USE_FILE_LOCKING=FALSE -export E3SMU_MACHINE=cori-haswell - -echo env: test_env -echo configs: --polar_regions ../main.cfg - -srun -N 1 -n 1 mpas_analysis --list -srun -N 1 -n 1 mpas_analysis --plot_colormaps -srun -N 1 -n 1 mpas_analysis --setup_only --polar_regions ../main.cfg -srun -N 1 -n 1 mpas_analysis --purge --polar_regions ../main.cfg --verbose -srun -N 1 -n 1 mpas_analysis --html_only --polar_regions ../main.cfg - -chmod -R ugo+rX /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/ diff --git a/configs/cori/test_suite/job_script_no_polar_regions.bash b/configs/cori/test_suite/job_script_no_polar_regions.bash deleted file mode 100644 index bc426b830..000000000 --- 
a/configs/cori/test_suite/job_script_no_polar_regions.bash +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -l -#SBATCH --partition=regular -#SBATCH -C haswell -#SBATCH --nodes=1 -#SBATCH --time=2:00:00 -#SBATCH --account=e3sm -#SBATCH --job-name=mpas_analysis -#SBATCH --output=mpas_analysis.o%j -#SBATCH --error=mpas_analysis.e%j -#SBATCH -L cscratch1,SCRATCH,project - -set -e - -cd $SLURM_SUBMIT_DIR -export OMP_NUM_THREADS=1 - -source ${HOME}/miniconda3/etc/profile.d/conda.sh -conda activate test_env -export HDF5_USE_FILE_LOCKING=FALSE -export E3SMU_MACHINE=cori-haswell - -echo env: test_env -echo configs: ../no_polar_regions.cfg - -srun -N 1 -n 1 mpas_analysis ../no_polar_regions.cfg --verbose - -chmod -R ugo+rX /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/ - diff --git a/configs/cori/test_suite/main.cfg b/configs/cori/test_suite/main.cfg deleted file mode 100644 index 827cf4d1e..000000000 --- a/configs/cori/test_suite/main.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cfs/cdirs/e3sm/xylar/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /global/cscratch1/sd/xylar/analysis_testing/baseline - -htmlSubdirectory = /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/test_suite/main_vs_ctrl.cfg b/configs/cori/test_suite/main_vs_ctrl.cfg deleted file mode 100644 index c77b86043..000000000 --- a/configs/cori/test_suite/main_vs_ctrl.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -controlRunConfigFile = ../ctrl.cfg - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -mainRunConfigFile = ../main_py3.9.cfg - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cfs/cdirs/e3sm/xylar/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. 
EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /global/cscratch1/sd/xylar/analysis_testing/baseline - -htmlSubdirectory = /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 5 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = 5 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/test_suite/mesh_rename.cfg b/configs/cori/test_suite/mesh_rename.cfg deleted file mode 100644 index fe0df1235..000000000 --- a/configs/cori/test_suite/mesh_rename.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cfs/cdirs/e3sm/xylar/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = new_oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /global/cscratch1/sd/xylar/analysis_testing/baseline - -htmlSubdirectory = /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/test_suite/no_ncclimo.cfg b/configs/cori/test_suite/no_ncclimo.cfg deleted file mode 100644 index 4f82592f4..000000000 --- a/configs/cori/test_suite/no_ncclimo.cfg +++ /dev/null @@ -1,131 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cfs/cdirs/e3sm/xylar/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /global/cscratch1/sd/xylar/analysis_testing/baseline - -htmlSubdirectory = /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -useNcclimo = False -daskThreads = 12 -subprocessCount = 12 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/cori/test_suite/test_suite.bash b/configs/cori/test_suite/test_suite.bash deleted file mode 100755 index 5d1e52d5d..000000000 --- a/configs/cori/test_suite/test_suite.bash +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env bash - -set -e - -machine=cori - -main_py=3.9 -alt_py=3.8 - -export HDF5_USE_FILE_LOCKING=FALSE - -source ${HOME}/miniconda3/etc/profile.d/conda.sh -conda activate base - -branch=$(git symbolic-ref --short HEAD) - -conda update -y conda conda-build mamba boa -rm -rf ${HOME}/miniconda3/conda-bld -conda mambabuild ci/recipe - -# create the test conda envs -for py in ${main_py} ${alt_py} -do - env=test_mpas_analysis_py${py} - conda remove -y --all -n ${env} - mamba create -y -n ${env} --use-local python=${py} mpas-analysis sphinx \ - mock sphinx_rtd_theme "tabulate>=0.8.2" m2r pytest - conda activate ${env} - pytest - conda deactivate -done - -# create another env for testing xarray master branch -py=${main_py} -env=test_mpas_analysis_xarray_master -mamba create --yes --quiet --name ${env} --use-local python=${py} \ - mpas-analysis pytest -conda activate ${env} -pip install git+https://github.com/pydata/xarray.git -pytest -conda deactivate - -# test building the docs -py=${main_py} -conda activate test_mpas_analysis_py${py} -cd docs -make clean -make html -rm -rf 
/global/cfs/cdirs/e3sm/www/xylar/analysis_testing/${machine}/${branch}/docs -mkdir -p /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/${machine}/${branch}/ -cp -r _build/html /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/${machine}/${branch}/docs -cd .. -conda deactivate - -# move to a subdirectory so we use the conda package, not the local package -rm -rf ${machine}_test_suite -mkdir ${machine}_test_suite - -cd ${machine}_test_suite - -template_path=../configs/${machine}/test_suite -job_template_path=${template_path} - -for py in ${main_py} ${alt_py} -do - env=test_mpas_analysis_py${py} - run=main_py${py} - config=${run}.cfg - mkdir ${run} - job=${run}/job_script.bash - sed "s/baseline/${machine}\/${branch}\/py${py}/g" ${template_path}/main.cfg > ${config} - sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} -done - - -py=${main_py} -env=test_mpas_analysis_py${py} - -run=wc_defaults -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/no_polar_regions.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script_no_polar_regions.bash > ${job} - -run=no_ncclimo -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -run=ctrl -config=${run}.cfg -sed "s/baseline/${machine}\/${branch}\/py${py}/g" ${template_path}/${config} > ${config} - -run=main_vs_ctrl -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -run=no_polar_regions -config=${run}.cfg -mkdir ${run} 
-job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/main.cfg > ${config} -sed -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script_no_polar_regions.bash > ${job} - -run=mesh_rename -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -env=test_mpas_analysis_xarray_master -run=xarray_master -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/main.cfg > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - - -# submit the jobs -cd main_py${main_py} -RES=$(sbatch job_script.bash) -cd .. - -cd main_vs_ctrl -sbatch --dependency=afterok:${RES##* } job_script.bash -cd .. - -for run in main_py${alt_py} wc_defaults no_ncclimo no_polar_regions \ - mesh_rename xarray_master -do - cd ${run} - sbatch job_script.bash - cd .. -done - -cd .. - diff --git a/configs/cori/test_suite/wc_defaults.cfg b/configs/cori/test_suite/wc_defaults.cfg deleted file mode 100644 index 68c8bbd59..000000000 --- a/configs/cori/test_suite/wc_defaults.cfg +++ /dev/null @@ -1,126 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = wc_defaults - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. 
-# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /global/cfs/cdirs/e3sm/xylar/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /global/cscratch1/sd/xylar/analysis_testing/baseline - -htmlSubdirectory = /global/cfs/cdirs/e3sm/www/xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. 
-# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_landIceCavities', 'no_BGC', 'no_icebergs', 'no_min', 'no_max', 'no_sose', 'no_climatologyMapSchmidtko', 'no_climatologyMapAntarcticMelt', 'no_regionalTSDiagrams', 'no_timeSeriesAntarcticMelt', 'no_timeSeriesOceanRegions', 'no_climatologyMapSose', 'no_woceTransects', 'no_soseTransects', 'no_geojsonTransects', 'no_oceanRegionalProfiles'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. 
control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/job_script.default.bash b/configs/job_script.default.bash index 4c2dc219f..c46190784 100755 --- a/configs/job_script.default.bash +++ b/configs/job_script.default.bash @@ -1,52 +1,19 @@ #!/bin/bash -# This software is open source software available under the BSD-3 license. -# -# Copyright (c) 2020 Triad National Security, LLC. All rights reserved. -# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights -# reserved. -# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved. 
-# -# Additional copyright and license information can be found in the LICENSE file -# distributed with this code, or at -# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE +#SBATCH --nodes=1 +#SBATCH --time=2:00:00 +#SBATCH --job-name=mpas_analysis +#SBATCH --output=mpas_analysis.o%j +#SBATCH --error=mpas_analysis.e%j -export HDF5_USE_FILE_LOCKING=FALSE - -# MPAS/ACME job to be analyzed, including paths to simulation data and -# observations. Change this name and path as needed -run_config_file="config.run_name_here" -# the number of parallel tasks (anything between 1 and the total number -# of tasks to run) -parallel_task_count=8 -# ncclimo can run with 1 (serial) or 12 (bck) threads -ncclimo_mode=bck - -if [ ! -f $run_config_file ]; then - echo "File $run_config_file not found!" - exit 1 -fi - -# This is a config file generated just for this job with the output directory, -# command prefix and parallel task count from above. -job_config_file=config.output.$RANDOM - -# write out the config file specific to this job -cat < $job_config_file -[execute] -## options related to executing parallel tasks +export OMP_NUM_THREADS=1 -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = $parallel_task_count +source ~/miniconda3/etc/profile.d/conda.sh +conda activate mpas_dev +# if you are on an E3SM supported machine, you can specify it: +# export E3SMU_MACHINE=chrysalis -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = $ncclimo_mode - -EOF - -mpas_analysis $run_config_file $job_config_file - -# commend this out if you want to keep the config file, e.g. 
for debugging -rm $job_config_file +export HDF5_USE_FILE_LOCKING=FALSE +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis --polar_regions run_name_here.cfg diff --git a/configs/lanl/config.20170207.MPAS-SeaIce.QU60km_polar.wolf b/configs/lanl/20170207.MPAS-SeaIce.QU60km_polar.wolf.cfg similarity index 85% rename from configs/lanl/config.20170207.MPAS-SeaIce.QU60km_polar.wolf rename to configs/lanl/20170207.MPAS-SeaIce.QU60km_polar.wolf.cfg index 978e5ae2a..75831ca2c 100644 --- a/configs/lanl/config.20170207.MPAS-SeaIce.QU60km_polar.wolf +++ b/configs/lanl/20170207.MPAS-SeaIce.QU60km_polar.wolf.cfg @@ -30,21 +30,14 @@ parallelTaskCount = 6 # handle 12 simultaneous processes, one for each monthly climatology. ncclimoParallelMode = bck -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /turquoise/usr/projects/climate/SHARED_CLIMATE/diagnostic +# the number of total threads to use when ncclimo runs in "bck" or "mpi" mode. +# Reduce this number if ncclimo is crashing (maybe because it is out of memory). +# The number of threads must be a factor of 12 (1, 2, 3, 4, 6 or 12). 
+ncclimoThreads = 6 + +# the number of MPI tasks to use in creating mapping files (1 means tasks run in +# serial, the default) +mapMpiTasks = 6 [input] ## options related to reading in the results to be analyzed @@ -124,5 +117,5 @@ endYear = 1961 # of bounds values will lead to an error. In a "control" config file used in # a "main vs. control" analysis run, the range of years must be valid and # cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end +startYear = 1960 +endYear = 1961 diff --git a/configs/lanl/config.BGCphaeo4.testrun b/configs/lanl/config.BGCphaeo4.testrun deleted file mode 100644 index 6822eae3b..000000000 --- a/configs/lanl/config.BGCphaeo4.testrun +++ /dev/null @@ -1,151 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = BGCacme.phaeo4 - -# preprocessedReferenceRunName is the name of a control run that has been -# preprocessed to compare against (or None to turn off comparison). Reference -# runs of this type would have preprocessed results because they were not -# performed with MPAS components (so they cannot be easily ingested by -# MPAS-Analysis) -preprocessedReferenceRunName = B1850C5_ne30_v0.4 - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. 
-# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -parallelTaskCount = 6 -ncclimoParallelMode = bck - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /turquoise/usr/projects/climate/SHARED_CLIMATE/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -# baseDirectory = /lustre/scratch3/turquoise/lvanroekel/ACME/cases/MatchBoth_orig/run -baseDirectory = /lustre/scratch3/turquoise/shanlinw/ACME/cases/BGCacme.phaeo4/run - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oEC60to30v3 - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = mpas-o_in -oceanStreamsFileName = streams.ocean -seaIceNamelistFileName = mpas-cice_in -seaIceStreamsFileName = streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# a list of analyses to generate. 
Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['only_BGC'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 1 -# the last year over which to average climatalogies -endYear = 100 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[oceanPreprocessedReference] -## options related to preprocessed ocean control run with which the results -## will be compared (e.g. a POP, CESM or ACME v0 run) - -# directory where ocean reference simulation results are stored -baseDirectory = /usr/projects/climate/SHARED_CLIMATE/ACMEv0_lowres/B1850C5_ne30_v0.4/ocn/postprocessing - -[seaIcePreprocessedReference] -## options related to preprocessed sea ice control run with which the results -## will be compared (e.g. a CICE, CESM or ACME v0 run) - -# directory where ocean reference simulation results are stored -baseDirectory = /usr/projects/climate/SHARED_CLIMATE/ACMEv0_lowres/B1850C5_ne30_v0.4/ice/postprocessing - -[climatologyMapBGC] -# Variables to plot: -variables = ['PO4', 'NO3', 'SiO3', 'CO2_gas_flux', 'pH_3D', 'DIC', 'ALK', 'O2', 'Chl'] diff --git a/configs/lanl/config.CONUS.100km.NAEC60to30cr8.20181218 b/configs/lanl/config.CONUS.100km.NAEC60to30cr8.20181218 deleted file mode 100644 index a6c816bcc..000000000 --- a/configs/lanl/config.CONUS.100km.NAEC60to30cr8.20181218 +++ /dev/null @@ -1,136 +0,0 @@ -[runs] -## options related to the run to be analyzed and reference runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = CONUS.100km.NAEC60to30cr8.20181218 - -# config file for a control run to which this run will be compared. 
The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -parallelTaskCount = 6 -ncclimoParallelMode = serial - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lustre/scratch4/turquoise/.mdt2/kehoch/e3sm/mesh_project/transition/CONUS.100km.NAEC60to30cr8.20181218/ocean/global_ocean/NAEC60to30cr8/spin_up/test_final_settings - -#runSubdirectory = /lustre/scratch4/turquoise/.mdt2/kehoch/e3sm/mesh_project/transition/CONUS.100km.NAEC60to30cr8.20181218/ocean/global_ocean/NAEC60to30cr8/spin_up/spin_up1/restarts -#oceanHistorySubdirectory = analysis_members -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = NAEC60to30cr8 - -oceanNamelistFileName = namelist.ocean - -oceanStreamsFileName = streams.ocean.analysis - -runSubdirectory = . -oceanHistorySubdirectory = . 
-[diagnostics] -#baseDirectory = /lustre/scratch4/turquoise/.mdt2/kehoch/e3sm/mesh_project/transition/CONUS.100km.NAEC60to30cr8.20181218/ocean/global_ocean/NAEC60to30cr8/spin_up/test_final_settings -baseDirectory = /usr/projects/climate/SHARED_CLIMATE/diagnostics -# Directory for mapping files (if they have been generated already). If mapping -# files needed by the analysis are not found here, they will be generated and -# placed in the output mappingSubdirectory -#mappingSubdirectory = /lustre/scratch4/turquoise/xylar/analysis/CONUS.100km.NAEC60to30cr8.20181218/mapping - -regionMaskSubdirectory = /lustre/scratch4/turquoise/.mdt2/kehoch/e3sm/mesh_project/transition/CONUS.100km.NAEC60to30cr8.20181218/ocean/global_ocean/NAEC60to30cr8/spin_up/test_final_settings - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /path/to/analysis/output - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['only_ocean', 'no_landIceCavities', 'no_mld', 'no_BGC', - 'no_meridionalHeatTransport', 'no_timeSeriesTemperatureAnomaly', - 'no_timeSeriesSalinityAnomaly', 'no_timeSeriesOHCAnomaly', - 'no_timeSeriesSST', 'no_indexNino34', 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 1 -# the last year over which to average climatalogies -endYear = 10 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = end - -[climatologyMapBGC] -preindustrial = True - -[climatologyMapSose] -fieldList = ['temperature', 'salinity', 'potentialDensity', - 'zonalVelocity', 'meridionalVelocity', 'velocityMagnitude'] - diff --git a/configs/lanl/config.GMPAS-OECO-ODMS-IAF.oRRS30to10v3 b/configs/lanl/config.GMPAS-OECO-ODMS-IAF.oRRS30to10v3 deleted file mode 100644 index 5940fe8b4..000000000 --- a/configs/lanl/config.GMPAS-OECO-ODMS-IAF.oRRS30to10v3 +++ /dev/null @@ -1,149 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = GMPAS-OECO-ODMS-IAF.oRRS30to10v3 - -# preprocessedReferenceRunName is the name of a control run that has been -# preprocessed to compare against (or None to turn off comparison). Reference -# runs of this type would have preprocessed results because they were not -# performed with MPAS components (so they cannot be easily ingested by -# MPAS-Analysis) -preprocessedReferenceRunName = B1850C5_ne30_v0.4 - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[execute] -parallelTaskCount = 6 -ncclimoParallelMode = bck - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /turquoise/usr/projects/climate/SHARED_CLIMATE/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lustre/scratch3/turquoise/rileybrady/ACME/cases/GMPAS-OECO-ODMS-IAF_oRRS30to10v3_grizzly03/run - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oRRS30to10v3 - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = mpaso_in -oceanStreamsFileName = streams.ocean -seaIceNamelistFileName = mpascice_in -seaIceStreamsFileName = streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /path/to/mpas/analysis/output - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. 
-# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['only_BGC'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 1 -# the last year over which to average climatalogies -endYear = 100 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. 
If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[oceanPreprocessedReference] -## options related to preprocessed ocean control run with which the results -## will be compared (e.g. a POP, CESM or ACME v0 run) - -# directory where ocean reference simulation results are stored -baseDirectory = /usr/projects/climate/SHARED_CLIMATE/ACMEv0_lowres/B1850C5_ne30_v0.4/ocn/postprocessing - -[seaIcePreprocessedReference] -## options related to preprocessed sea ice control run with which the results -## will be compared (e.g. a CICE, CESM or ACME v0 run) - -# directory where ocean reference simulation results are stored -baseDirectory = /usr/projects/climate/SHARED_CLIMATE/ACMEv0_lowres/B1850C5_ne30_v0.4/ice/postprocessing - -[climatologyMapBGC] -preindustrial = True diff --git a/configs/lanl/config.MatchBoth_orig b/configs/lanl/config.MatchBoth_orig deleted file mode 100644 index 67a7f34cd..000000000 --- a/configs/lanl/config.MatchBoth_orig +++ /dev/null @@ -1,133 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = MatchBoth_orig - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. 
The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 6 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /turquoise/usr/projects/climate/SHARED_CLIMATE/diagnostics - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lustre/scratch3/turquoise/lvanroekel/ACME/cases/MatchBoth_orig/run - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oEC60to30v3 - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. 
-oceanNamelistFileName = mpas-o_in -oceanStreamsFileName = streams.ocean -seaIceNamelistFileName = mpas-cice_in -seaIceStreamsFileName = streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_landIceCavities', 'no_eke', 'no_BGC', 'no_icebergs', - 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 9 -# the last year over which to average climatalogies -endYear = 10 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. 
Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end diff --git a/configs/lanl/job_script.lanl.bash b/configs/lanl/job_script.lanl.bash index 62e65fc12..d284399e3 100644 --- a/configs/lanl/job_script.lanl.bash +++ b/configs/lanl/job_script.lanl.bash @@ -1,63 +1,25 @@ #!/bin/bash -# This software is open source software available under the BSD-3 license. -# -# Copyright (c) 2020 Triad National Security, LLC. All rights reserved. -# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights -# reserved. -# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved. 
-# -# Additional copyright and license information can be found in the LICENSE file -# distributed with this code, or at -# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE - # change number of nodes to change the number of parallel tasks # (anything between 1 and the total number of tasks to run) #SBATCH --nodes=1 -#SBATCH --time=1:00:00 -#SBATCH --account=climateacme +#SBATCH --time=2:00:00 +#SBATCH --account=e3sm #SBATCH --job-name=mpas_analysis #SBATCH --output=mpas_analysis.o%j #SBATCH --error=mpas_analysis.e%j #SBATCH --qos=interactive -cd $SLURM_SUBMIT_DIR # optional, since this is the default behavior - export OMP_NUM_THREADS=1 source /usr/projects/climate/SHARED_CLIMATE/anaconda_envs/load_latest_e3sm_unified_grizzly.sh +# alternatively, you can load your own development environment +# source ~/miniconda3/etc/profile.d/conda.sh +# conda activate mpas_dev +# export E3SMU_MACHINE=grizzly -# MPAS/ACME job to be analyzed, including paths to simulation data and -# observations. Change this name and path as needed -run_config_file="config.run_name_here" -# one parallel task per node by default -parallel_task_count=12 -# ncclimo can run with 1 (serial) or 12 (bck) threads -ncclimo_mode=bck - -if [ ! -f $run_config_file ]; then - echo "File $run_config_file not found!" - exit 1 -fi - - -# This is a config file generated just for this job with the output directory, -# command prefix and parallel task count from above. -job_config_file=config.output.$SLURM_JOB_ID - -# write out the config file specific to this job -cat < $job_config_file -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = $parallel_task_count - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. 
-ncclimoParallelMode = $ncclimo_mode - -EOF +export HDF5_USE_FILE_LOCKING=FALSE -mpas_analysis $run_config_file $job_config_file +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis run_name_here.cfg diff --git a/configs/lcrc/20201025.GMPAS-IAF.T62_oQU240wLI.anvil.cfg b/configs/lcrc/20201025.GMPAS-IAF.T62_oQU240wLI.anvil.cfg index 50e00cec2..c94f005f3 100644 --- a/configs/lcrc/20201025.GMPAS-IAF.T62_oQU240wLI.anvil.cfg +++ b/configs/lcrc/20201025.GMPAS-IAF.T62_oQU240wLI.anvil.cfg @@ -23,32 +23,21 @@ mainRunName = GMPAS-IAF.T62_oQU240wLI ## options related to executing parallel tasks # the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 6 +parallelTaskCount = 12 # the parallelism mode in ncclimo ("serial" or "bck") # Set this to "bck" (background parallelism) if running on a machine that can # handle 12 simultaneous processes, one for each monthly climatology. ncclimoParallelMode = bck -# "None" if ESMF should perform mapping file generation in serial without a -# command, or one of "srun" or "mpirun" if it should be run in parallel (or ins -# serial but with a command) -mapParallelExec = srun +# the number of total threads to use when ncclimo runs in "bck" or "mpi" mode. +# Reduce this number if ncclimo is crashing (maybe because it is out of memory). +# The number of threads must be a factor of 12 (1, 2, 3, 4, 6 or 12). +ncclimoThreads = 12 -# "None" if ncremap should perform remapping without a command, or "srun" -# possibly with some flags if it should be run with that command -ncremapParallelExec = srun - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). 
For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /lcrc/group/e3sm/diagnostics +# the number of MPI tasks to use in creating mapping files (1 means tasks run in +# serial, the default) +mapMpiTasks = 12 [input] ## options related to reading in the results to be analyzed @@ -132,7 +121,7 @@ endYear = 8 # a "main vs. control" analysis run, the range of years must be valid and # cannot include "end" because the original data may not be available. startYear = 1 -endYear = end +endYear = 8 [index] ## options related to producing nino index. @@ -144,7 +133,7 @@ endYear = end # a "main vs. control" analysis run, the range of years must be valid and # cannot include "end" because the original data may not be available. startYear = 1 -endYear = end +endYear = 8 [streamfunctionMOC] ## options related to plotting the streamfunction of the meridional overturning diff --git a/configs/lcrc/job_script.anvil.bash b/configs/lcrc/job_script.anvil.bash index 0698f9bbc..eedaf5a0b 100644 --- a/configs/lcrc/job_script.anvil.bash +++ b/configs/lcrc/job_script.anvil.bash @@ -7,10 +7,17 @@ #SBATCH --output=mpas_analysis.o%j #SBATCH --error=mpas_analysis.e%j -cd $SLURM_SUBMIT_DIR export OMP_NUM_THREADS=1 source /lcrc/soft/climate/e3sm-unified/load_latest_e3sm_unified_anvil.sh +# alternatively, you can load your own development environment +# source ~/miniconda3/etc/profile.d/conda.sh +# conda activate mpas_dev +# export E3SMU_MACHINE=anvil -srun -N 1 -n 1 mpas_analysis configs/polarRegions.conf 20201025.GMPAS-IAF.T62_oQU240wLI.anvil.cfg +export HDF5_USE_FILE_LOCKING=FALSE + +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis --polar_regions 20201025.GMPAS-IAF.T62_oQU240wLI.anvil.cfg diff --git a/configs/lcrc/job_script.chrysalis.bash b/configs/lcrc/job_script.chrysalis.bash index 
b33294837..2c98dc53a 100644 --- a/configs/lcrc/job_script.chrysalis.bash +++ b/configs/lcrc/job_script.chrysalis.bash @@ -5,10 +5,17 @@ #SBATCH --output=mpas_analysis.o%j #SBATCH --error=mpas_analysis.e%j -cd $SLURM_SUBMIT_DIR export OMP_NUM_THREADS=1 source /lcrc/soft/climate/e3sm-unified/load_latest_e3sm_unified_chrysalis.sh +# alternatively, you can load your own development environment +# source ~/miniconda3/etc/profile.d/conda.sh +# conda activate mpas_dev +# export E3SMU_MACHINE=chrysalis -srun -N 1 -n 1 mpas_analysis configs/polarRegions.conf 20201025.GMPAS-IAF.T62_oQU240wLI.anvil.cfg +export HDF5_USE_FILE_LOCKING=FALSE + +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis --polar_regions 20201025.GMPAS-IAF.T62_oQU240wLI.anvil.cfg diff --git a/configs/lcrc/test_suite/QU480.cfg b/configs/lcrc/test_suite/QU480.cfg deleted file mode 100644 index 07c5f3643..000000000 --- a/configs/lcrc/test_suite/QU480.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = QU480 - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lcrc/group/e3sm/ac.xylar/acme_scratch/anvil/20200305.A_WCYCL1850.ne4_oQU480.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU480 - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /lcrc/group/e3sm/ac.xylar/analysis_testing/baseline - -htmlSubdirectory = /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke', - 'no_landIceCavities'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 3 -# the last year over which to average climatalogies -endYear = 5 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/lcrc/test_suite/anvil/job_script.bash b/configs/lcrc/test_suite/anvil/job_script.bash deleted file mode 100644 index f2da9209e..000000000 --- a/configs/lcrc/test_suite/anvil/job_script.bash +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 -#SBATCH --time=2:00:00 -#SBATCH -A condo -#SBATCH -p acme-small -#SBATCH --job-name=mpas_analysis -#SBATCH --output=mpas_analysis.o%j -#SBATCH --error=mpas_analysis.e%j - -set -e - -cd $SLURM_SUBMIT_DIR -export OMP_NUM_THREADS=1 - -source /home/ac.xylar/anvil/miniconda3/etc/profile.d/conda.sh -conda activate test_env -export HDF5_USE_FILE_LOCKING=FALSE -export E3SMU_MACHINE=anvil - -echo env: test_env -echo configs: --polar_regions ../main.cfg - -srun -N 1 -n 1 mpas_analysis --list -srun -N 1 -n 1 mpas_analysis --plot_colormaps -srun -N 1 -n 1 mpas_analysis --setup_only --polar_regions ../main.cfg -srun -N 1 -n 1 mpas_analysis --purge --polar_regions ../main.cfg --verbose -srun -N 1 -n 1 mpas_analysis --html_only --polar_regions ../main.cfg - diff --git a/configs/lcrc/test_suite/anvil/job_script_no_polar_regions.bash b/configs/lcrc/test_suite/anvil/job_script_no_polar_regions.bash deleted file mode 100644 index e581bbc2b..000000000 --- a/configs/lcrc/test_suite/anvil/job_script_no_polar_regions.bash +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 -#SBATCH 
--time=2:00:00 -#SBATCH -A condo -#SBATCH -p acme-small -#SBATCH --job-name=mpas_analysis -#SBATCH --output=mpas_analysis.o%j -#SBATCH --error=mpas_analysis.e%j - -set -e - -cd $SLURM_SUBMIT_DIR -export OMP_NUM_THREADS=1 - -source /home/ac.xylar/anvil/miniconda3/etc/profile.d/conda.sh -conda activate test_env -export HDF5_USE_FILE_LOCKING=FALSE -export E3SMU_MACHINE=anvil - -echo env: test_env -echo configs: ../no_polar_regions.cfg - -srun -N 1 -n 1 mpas_analysis ../no_polar_regions.cfg --verbose - diff --git a/configs/lcrc/test_suite/chrysalis/job_script.bash b/configs/lcrc/test_suite/chrysalis/job_script.bash deleted file mode 100644 index 672418efc..000000000 --- a/configs/lcrc/test_suite/chrysalis/job_script.bash +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 -#SBATCH --time=2:00:00 -#SBATCH --job-name=mpas_analysis -#SBATCH --output=mpas_analysis.o%j -#SBATCH --error=mpas_analysis.e%j - -set -e - -cd $SLURM_SUBMIT_DIR -export OMP_NUM_THREADS=1 - -source /home/ac.xylar/chrysalis/miniconda3/etc/profile.d/conda.sh -conda activate test_env -export HDF5_USE_FILE_LOCKING=FALSE -export E3SMU_MACHINE=chrysalis - -echo env: test_env -echo configs: --polar_regions ../main.cfg - -srun -N 1 -n 1 mpas_analysis --list -srun -N 1 -n 1 mpas_analysis --plot_colormaps -srun -N 1 -n 1 mpas_analysis --setup_only --polar_regions ../main.cfg -srun -N 1 -n 1 mpas_analysis --purge --polar_regions ../main.cfg --verbose -srun -N 1 -n 1 mpas_analysis --html_only --polar_regions ../main.cfg - diff --git a/configs/lcrc/test_suite/chrysalis/job_script_no_polar_regions.bash b/configs/lcrc/test_suite/chrysalis/job_script_no_polar_regions.bash deleted file mode 100644 index fc08877b6..000000000 --- a/configs/lcrc/test_suite/chrysalis/job_script_no_polar_regions.bash +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 -#SBATCH --time=2:00:00 -#SBATCH --job-name=mpas_analysis -#SBATCH --output=mpas_analysis.o%j -#SBATCH --error=mpas_analysis.e%j - -set -e - -cd 
$SLURM_SUBMIT_DIR -export OMP_NUM_THREADS=1 - -source /home/ac.xylar/chrysalis/miniconda3/etc/profile.d/conda.sh -conda activate test_env -export HDF5_USE_FILE_LOCKING=FALSE -export E3SMU_MACHINE=chrysalis - -echo env: test_env -echo configs: ../no_polar_regions.cfg - -srun -N 1 -n 1 mpas_analysis ../no_polar_regions.cfg --verbose - diff --git a/configs/lcrc/test_suite/clean_suite.bash b/configs/lcrc/test_suite/clean_suite.bash deleted file mode 100755 index fc6e83737..000000000 --- a/configs/lcrc/test_suite/clean_suite.bash +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -set -e - -machine=$1 - -branch=$(git symbolic-ref --short HEAD) - -rm -rf ${machine}_test_suite -rm -rf /lcrc/group/e3sm/ac.xylar/analysis_testing/${machine}/${branch} -rm -rf /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/${machine}/${branch} diff --git a/configs/lcrc/test_suite/ctrl.cfg b/configs/lcrc/test_suite/ctrl.cfg deleted file mode 100644 index 355bb5ddd..000000000 --- a/configs/lcrc/test_suite/ctrl.cfg +++ /dev/null @@ -1,126 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = ctrl - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lcrc/group/e3sm/ac.xylar/acme_scratch/anvil/20201025.GMPAS-IAF.T62_oQU240wLI.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU240wLI - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /lcrc/group/e3sm/ac.xylar/analysis_testing/baseline - -htmlSubdirectory = /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 4 -# the last year over which to average climatalogies -endYear = 8 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 8 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = 8 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/lcrc/test_suite/main.cfg b/configs/lcrc/test_suite/main.cfg deleted file mode 100644 index 262185caf..000000000 --- a/configs/lcrc/test_suite/main.cfg +++ /dev/null @@ -1,126 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lcrc/group/e3sm/ac.xylar/acme_scratch/anvil/20201025.GMPAS-IAF.T62_oQU240wLI.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU240wLI - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /lcrc/group/e3sm/ac.xylar/analysis_testing/baseline - -htmlSubdirectory = /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 4 -# the last year over which to average climatalogies -endYear = 8 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 8 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = 8 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/lcrc/test_suite/main_vs_ctrl.cfg b/configs/lcrc/test_suite/main_vs_ctrl.cfg deleted file mode 100644 index 32282161e..000000000 --- a/configs/lcrc/test_suite/main_vs_ctrl.cfg +++ /dev/null @@ -1,126 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -controlRunConfigFile = ../ctrl.cfg - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-mainRunConfigFile = ../main_py3.9.cfg - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lcrc/group/e3sm/ac.xylar/acme_scratch/anvil/20201025.GMPAS-IAF.T62_oQU240wLI.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU240wLI - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /lcrc/group/e3sm/ac.xylar/analysis_testing/baseline - -htmlSubdirectory = /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 4 -# the last year over which to average climatalogies -endYear = 8 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 8 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = 8 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/lcrc/test_suite/mesh_rename.cfg b/configs/lcrc/test_suite/mesh_rename.cfg deleted file mode 100644 index b888cebeb..000000000 --- a/configs/lcrc/test_suite/mesh_rename.cfg +++ /dev/null @@ -1,126 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lcrc/group/e3sm/ac.xylar/acme_scratch/anvil/20201025.GMPAS-IAF.T62_oQU240wLI.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = new_oQU240wLI - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /lcrc/group/e3sm/ac.xylar/analysis_testing/baseline - -htmlSubdirectory = /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 4 -# the last year over which to average climatalogies -endYear = 8 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 8 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = 8 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/lcrc/test_suite/no_ncclimo.cfg b/configs/lcrc/test_suite/no_ncclimo.cfg deleted file mode 100644 index e89302c43..000000000 --- a/configs/lcrc/test_suite/no_ncclimo.cfg +++ /dev/null @@ -1,130 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = main - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lcrc/group/e3sm/ac.xylar/acme_scratch/anvil/20201025.GMPAS-IAF.T62_oQU240wLI.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU240wLI - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /lcrc/group/e3sm/ac.xylar/analysis_testing/baseline - -htmlSubdirectory = /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/baseline - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_BGC', 'no_icebergs', 'no_index', 'no_eke'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 4 -# the last year over which to average climatalogies -endYear = 8 - -useNcclimo = False -daskThreads = 12 -subprocessCount = 12 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/lcrc/test_suite/test_suite.bash b/configs/lcrc/test_suite/test_suite.bash deleted file mode 100755 index 1ebad2e95..000000000 --- a/configs/lcrc/test_suite/test_suite.bash +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env bash - -set -e - -machine=$1 - -main_py=3.9 -alt_py=3.8 - -export HDF5_USE_FILE_LOCKING=FALSE - -source ${HOME}/${machine}/miniconda3/etc/profile.d/conda.sh -conda activate base - -branch=$(git symbolic-ref --short HEAD) - -conda update -y conda conda-build mamba boa -rm -rf ${HOME}/${machine}/miniconda3/conda-bld -conda mambabuild ci/recipe - -# create the test conda envs -for py in ${main_py} ${alt_py} -do - env=test_mpas_analysis_py${py} - conda remove -y --all -n ${env} - mamba create -y -n ${env} --use-local python=${py} mpas-analysis sphinx \ - mock sphinx_rtd_theme "tabulate>=0.8.2" m2r pytest - conda activate ${env} - pytest - conda deactivate -done - -# create another env for testing xarray master branch -py=${main_py} -env=test_mpas_analysis_xarray_master -mamba create --yes --quiet --name ${env} --use-local python=${py} \ - mpas-analysis pytest -conda activate ${env} -pip install git+https://github.com/pydata/xarray.git -pytest -conda deactivate - -# test building the docs -py=${main_py} -conda activate test_mpas_analysis_py${py} -cd docs -make clean -make html -rm -rf 
/lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/${machine}/${branch}/docs -mkdir -p /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/${machine}/${branch}/ -cp -r _build/html /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/${machine}/${branch}/docs -cd .. -conda deactivate - -# move to a subdirectory so we use the conda package, not the local package -rm -rf ${machine}_test_suite -mkdir ${machine}_test_suite - -cd ${machine}_test_suite - -template_path=../configs/lcrc/test_suite -job_template_path=${template_path}/${machine} - -for py in ${main_py} ${alt_py} -do - env=test_mpas_analysis_py${py} - run=main_py${py} - config=${run}.cfg - mkdir ${run} - job=${run}/job_script.bash - sed "s/baseline/${machine}\/${branch}\/py${py}/g" ${template_path}/main.cfg > ${config} - sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} -done - - -py=${main_py} -env=test_mpas_analysis_py${py} - -run=wc_defaults -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/no_polar_regions.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script_no_polar_regions.bash > ${job} - -run=no_ncclimo -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -run=ctrl -config=${run}.cfg -sed "s/baseline/${machine}\/${branch}\/py${py}/g" ${template_path}/${config} > ${config} - -run=main_vs_ctrl -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - 
-run=no_polar_regions -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/main.cfg > ${config} -sed -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script_no_polar_regions.bash > ${job} - -run=QU480 -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -run=mesh_rename -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/${config} > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - -env=test_mpas_analysis_xarray_master -run=xarray_master -config=${run}.cfg -mkdir ${run} -job=${run}/job_script.bash -sed "s/baseline/${machine}\/${branch}\/${run}/g" ${template_path}/main.cfg > ${config} -sed -e "s/main.cfg/${config}/g" -e "s/test_env/${env}/g" \ - ${job_template_path}/job_script.bash > ${job} - - -# submit the jobs -cd main_py${main_py} -RES=$(sbatch job_script.bash) -cd .. - -cd main_vs_ctrl -sbatch --dependency=afterok:${RES##* } job_script.bash -cd .. - -for run in main_py${alt_py} wc_defaults no_ncclimo no_polar_regions QU480 \ - mesh_rename xarray_master -do - cd ${run} - sbatch job_script.bash - cd .. -done - -cd .. - diff --git a/configs/lcrc/test_suite/wc_defaults.cfg b/configs/lcrc/test_suite/wc_defaults.cfg deleted file mode 100644 index 86862bbb6..000000000 --- a/configs/lcrc/test_suite/wc_defaults.cfg +++ /dev/null @@ -1,127 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = wc_defaults - -# config file for a control run to which this run will be compared. 
The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lcrc/group/e3sm/ac.xylar/acme_scratch/anvil/20201025.GMPAS-IAF.T62_oQU240wLI.anvil - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oQU240wLI - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = run/mpaso_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpassi_in -seaIceStreamsFileName = run/streams.seaice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /lcrc/group/e3sm/ac.xylar/analysis_testing/baseline - -htmlSubdirectory = /lcrc/group/e3sm/public_html/diagnostic_output/ac.xylar/analysis_testing/baseline - -# a list of analyses to generate. 
Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_landIceCavities', 'no_BGC', 'no_icebergs', 'no_min', 'no_max', 'no_sose', 'no_climatologyMapSchmidtko', 'no_climatologyMapAntarcticMelt', 'no_regionalTSDiagrams', 'no_timeSeriesAntarcticMelt', 'no_timeSeriesOceanRegions', 'no_climatologyMapSose', 'no_woceTransects', 'no_soseTransects', 'no_geojsonTransects', 'no_oceanRegionalProfiles'] - - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 4 -# the last year over which to average climatalogies -endYear = 8 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. 
If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 8 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 3 -endYear = 8 - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. -# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. 
-usePostprocessingScript = True diff --git a/configs/olcf/config.20170313.beta1.A_WCYCL1850S.ne30_oECv3_ICG.edison b/configs/olcf/20170313.beta1.A_WCYCL1850S.ne30_oECv3_ICG.edison.cfg similarity index 87% rename from configs/olcf/config.20170313.beta1.A_WCYCL1850S.ne30_oECv3_ICG.edison rename to configs/olcf/20170313.beta1.A_WCYCL1850S.ne30_oECv3_ICG.edison.cfg index 3206f2983..5cc16f0f1 100644 --- a/configs/olcf/config.20170313.beta1.A_WCYCL1850S.ne30_oECv3_ICG.edison +++ b/configs/olcf/20170313.beta1.A_WCYCL1850S.ne30_oECv3_ICG.edison.cfg @@ -23,23 +23,24 @@ mainRunName = 20170313.beta1.A_WCYCL1850S.ne30_oECv3_ICG.edison ## options related to executing parallel tasks # the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 6 +parallelTaskCount = 12 -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. +# the parallelism mode in ncclimo ("serial", "bck" or "mpi") +# Set this to "bck" (background parallelism) in most cases. The default number +# of threads (see below) is 12, one for each monthly climatology. Set to "mpi" +# to run one MPI task on each node and however many threads per node to reach +# 12 total threads. ncclimoParallelMode = bck -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. +# the number of total threads to use when ncclimo runs in "bck" or "mpi" mode. +# Reduce this number if ncclimo is crashing (maybe because it is out of memory). +# The number of threads must be a factor of 12 (1, 2, 3, 4, 6 or 12). +ncclimoThreads = 12 + +# the number of MPI tasks to use in creating mapping files (1 means tasks run in +# serial, the default) +mapMpiTasks = 12 -# The base path to the diagnostics directory. 
Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /gpfs/alpine/proj-shared/cli115/diagnostics/ [input] ## options related to reading in the results to be analyzed @@ -129,7 +130,7 @@ endYear = 20 # a "main vs. control" analysis run, the range of years must be valid and # cannot include "end" because the original data may not be available. startYear = 1 -endYear = end +endYear = 20 [streamfunctionMOC] ## options related to plotting the streamfunction of the meridional overturning diff --git a/configs/olcf/config.20170915.beta2.A_WCYCL1850S.ne30_oECv3_ICG.edison b/configs/olcf/config.20170915.beta2.A_WCYCL1850S.ne30_oECv3_ICG.edison deleted file mode 100644 index 57f48f067..000000000 --- a/configs/olcf/config.20170915.beta2.A_WCYCL1850S.ne30_oECv3_ICG.edison +++ /dev/null @@ -1,155 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = 20170915.beta2.A_WCYCL1850S.ne30_oECv3_ICG.edison - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. 
-# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 6 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /gpfs/alpine/proj-shared/cli115/diagnostics/ - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lustre/atlas1/cli115/proj-shared/ACME_simulations/20170915.beta2.A_WCYCL1850S.ne30_oECv3_ICG.edison - -# Note: an absolute path can be supplied for any of these subdirectories. -# A relative path is assumed to be relative to baseDirectory. -# By default, results are assumed to be directly in baseDirectory, -# i.e. /./ - -# subdirectory containing restart files -runSubdirectory = run -# subdirectory for ocean history files -oceanHistorySubdirectory = archive/ocn/hist -# subdirectory for sea ice history files -seaIceHistorySubdirectory = archive/ice/hist - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. 
-oceanNamelistFileName = run/mpas-o_in -oceanStreamsFileName = run/streams.ocean -seaIceNamelistFileName = run/mpas-cice_in -seaIceStreamsFileName = run/streams.cice - -# name of the ocean and sea-ice mesh (e.g. EC30to60E2r2, WC14to60E2r3, -# ECwISC30to60E2r1, SOwISC12to60E2r4, oQU240, etc.) -mpasMeshName = oEC60to30v3 - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -baseDirectory = /dir/to/analysis/output - -# provide an absolute path to put HTML in an alternative location (e.g. a web -# portal) -# htmlSubdirectory = /ccs/proj/cli115/www/USERNAME/RUNNAME -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. 
-generate = ['all', 'no_landIceCavities', 'no_eke', 'no_BGC', 'no_icebergs', - 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 6 -# the last year over which to average climatalogies -endYear = 10 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = 10 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 1 -endYear = end - -[streamfunctionMOC] -## options related to plotting the streamfunction of the meridional overturning -## circulation (MOC) - -# Use postprocessing script to compute the MOC? You want this to be True -# for low-resolution simulations that use GM to parameterize eddies, because -# the online MOC analysis member currently does not include the bolus velocity -# in its calculation, whereas the postprocessing script does. 
-# NOTE: this is a temporary option that will be removed once the online -# MOC takes into account the bolus velocity when GM is on. -usePostprocessingScript = True diff --git a/configs/olcf/config.GMPAS-IAF_oRRS18to6v3.titan b/configs/olcf/config.GMPAS-IAF_oRRS18to6v3.titan deleted file mode 100644 index b9e7e48b6..000000000 --- a/configs/olcf/config.GMPAS-IAF_oRRS18to6v3.titan +++ /dev/null @@ -1,131 +0,0 @@ -[runs] -## options related to the run to be analyzed and control runs to be -## compared against - -# mainRunName is a name that identifies the simulation being analyzed. -mainRunName = GMPAS-IAF_oRRS18to6v3 - -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. -# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 6 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. 
Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /gpfs/alpine/proj-shared/cli115/diagnostics/ - -[input] -## options related to reading in the results to be analyzed - -# directory containing model results -baseDirectory = /lustre/atlas1/cli115/proj-shared/vanroek/run - -mpasMeshName = oRRS18to6v3 - -# names of namelist and streams files, either a path relative to baseDirectory -# or an absolute path. -oceanNamelistFileName = mpas-o_in -oceanStreamsFileName = streams.ocean -seaIceNamelistFileName = mpas-cice_in -seaIceStreamsFileName = streams.cice - -[output] -## options related to writing out plots, intermediate cached data sets, logs, -## etc. - -# directory where analysis should be written -# NOTE: This directory path must be specific to each test case. -baseDirectory = /dir/to/analysis/output - -# provide an absolute path to put HTML in an alternative location (e.g. a web -# portal) -htmlSubdirectory = html - -# a list of analyses to generate. Valid names can be seen by running: -# ./run_mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. -# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. 
Do -# ./run_mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# ./run_mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. -generate = ['all', 'no_landIceCavities', 'no_eke', 'no_BGC', 'no_icebergs', - 'no_min', 'no_max'] - -[climatology] -## options related to producing climatologies, typically to compare against -## observations and previous runs - -# the first year over which to average climatalogies -startYear = 11 -# the last year over which to average climatalogies -endYear = 11 - -[timeSeries] -## options related to producing time series plots, often to compare against -## observations and previous runs - -# start and end years for timeseries analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. -startYear = 11 -endYear = 11 - -[index] -## options related to producing nino index. - -# start and end years for El Nino 3.4 analysis. Use endYear = end to indicate -# that the full range of the data should be used. If errorOnMissing = False, -# the start and end year will be clipped to the valid range. Otherwise, out -# of bounds values will lead to an error. In a "control" config file used in -# a "main vs. control" analysis run, the range of years must be valid and -# cannot include "end" because the original data may not be available. 
-startYear = 1 -endYear = end diff --git a/configs/olcf/job_script.olcf.bash b/configs/olcf/job_script.olcf.bash index b34c611d7..1cbedd407 100644 --- a/configs/olcf/job_script.olcf.bash +++ b/configs/olcf/job_script.olcf.bash @@ -1,65 +1,21 @@ #!/bin/bash -# This software is open source software available under the BSD-3 license. -# -# Copyright (c) 2020 Triad National Security, LLC. All rights reserved. -# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights -# reserved. -# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved. -# -# Additional copyright and license information can be found in the LICENSE file -# distributed with this code, or at -# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE - -# comment out if using debug queue -#PBS -q batch -# comment in to get the debug queue (only available on Titan) -##PBS -q debug -# change number of nodes to change the number of parallel tasks -# (anything between 1 and the total number of tasks to run) -#PBS -l nodes=1 -#PBS -l walltime=1:00:00 -#PBS -A cli115 -#PBS -N mpas_analysis -#PBS -o mpas_analysis.o$PBS_JOBID -#PBS -e mpas_analysis.e$PBS_JOBID - -cd $PBS_O_WORKDIR - -source /ccs/proj/cli900/sw/andes/e3sm-unified/load_latest_e3sm_unified_andes.csh - -# MPAS/ACME job to be analyzed, including paths to simulation data and -# observations. Change this name and path as needed -run_config_file="config.run_name_here" -# command to run a serial job on a single node on olcf machines. -command="aprun -b -N 1 -n 1 mpas_analysis" -# one parallel task per node by default -parallel_task_count=12 -# ncclimo can run with 1 (serial) or 12 (bck) threads -ncclimo_mode=bck - -if [ ! -f $run_config_file ]; then - echo "File $run_config_file not found!" - exit 1 -fi - -# This is a config file generated just for this job with the output directory, -# command prefix and parallel task count from above. 
-job_config_file=config.output.$PBS_JOBID - -# write out the config file specific to this job -cat < $job_config_file -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = $parallel_task_count - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = $ncclimo_mode - -EOF - -$command $run_config_file $job_config_file +#SBATCH --nodes=1 +#SBATCH --time=2:00:00 +#SBATCH -A cli115 +#SBATCH -p batch +#SBATCH --job-name=mpas_analysis +#SBATCH --output=mpas_analysis.o%j +#SBATCH --error=mpas_analysis.e%j + +source /gpfs/alpine/proj-shared/cli115/e3sm-unified/load_latest_e3sm_unified_andes.csh +# alternatively, you can load your own development environment +# source ~/miniconda3/etc/profile.d/conda.sh +# conda activate mpas_dev +# export E3SMU_MACHINE=anvil + +export HDF5_USE_FILE_LOCKING=FALSE + +# For an E3SM cryosphere run, include --polar_regions, or exclude +# this extra flag for default parameters +mpas_analysis run_name_here.cfg diff --git a/configs/plots/config.drake_contours b/configs/plots/config.drake_contours index 771ef8ab7..eab510887 100644 --- a/configs/plots/config.drake_contours +++ b/configs/plots/config.drake_contours @@ -5,42 +5,6 @@ # mainRunName is a name that identifies the simulation being analyzed. mainRunName = 20180514.G.oQU240wLI.edison -# config file for a control run to which this run will be compared. The -# analysis should have already been run to completion once with this config -# file, so that the relevant MPAS climatologies already exist and have been -# remapped to the comparison grid. Leave this option commented out if no -# control run is desired. 
-# controlRunConfigFile = /path/to/config/file - -# config file for a main run on which the analysis was already run to -# completion. The relevant MPAS climatologies already exist and have been -# remapped to the comparison grid and time series have been extracted. -# Leave this option commented out if the analysis for the main run should be -# performed. -# mainRunConfigFile = /path/to/config/file - -[execute] -## options related to executing parallel tasks - -# the number of parallel tasks (1 means tasks run in serial, the default) -parallelTaskCount = 8 - -# the parallelism mode in ncclimo ("serial" or "bck") -# Set this to "bck" (background parallelism) if running on a machine that can -# handle 12 simultaneous processes, one for each monthly climatology. -ncclimoParallelMode = bck - -[diagnostics] -## config options related to observations, mapping files and region files used -## by MPAS-Analysis in diagnostics computations. - -# The base path to the diagnostics directory. Typically, this will be a shared -# directory on each E3SM supported machine (see the example config files for -# its location). For other machines, this would be the directory pointed to -# when running "download_analysis_data.py" to get the public observations, -# mapping files and region files. -baseDirectory = /global/cfs/cdirs/e3sm/diagnostics - [input] ## options related to reading in the results to be analyzed @@ -77,31 +41,7 @@ baseDirectory = /dir/to/analysis/output # htmlSubdirectory = /global/cfs/cdirs/e3sm/www/USERNAME/RUNNAME htmlSubdirectory = html -# a list of analyses to generate. Valid names can be seen by running: -# mpas_analysis --list -# This command also lists tags for each analysis. -# Shortcuts exist to generate (or not generate) several types of analysis. 
-# These include: -# 'all' -- all analyses will be run -# 'all_' -- all analysis with a particular tag will be run -# 'all_' -- all analyses from a given component (either 'ocean' -# or 'seaIce') will be run -# 'only_', 'only_' -- all analysis from this component or -# with this tag will be run, and all -# analysis for other components or -# without the tag will be skipped -# 'no_' -- skip the given task -# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis -# tasks from the given compoonent or with -# the given tag. Do -# mpas_analysis --list -# to list all task names and their tags -# an equivalent syntax can be used on the command line to override this -# option: -# mpas_analysis config.analysis --generate \ -# all,no_ocean,all_timeSeries -# All tasks with tag "landIceCavities" are disabled because this run did not -# include land-ice cavities. +# a list of analyses to generate. generate = ['all_woce'] [climatology] diff --git a/dev-spec.txt b/dev-spec.txt index 78f9fbb5a..ab788cd35 100644 --- a/dev-spec.txt +++ b/dev-spec.txt @@ -2,7 +2,7 @@ # $ conda create --name --file # Base -python>=3.7,<3.10 +python>=3.7 bottleneck cartopy >=0.18.0 cartopy_offlinedata @@ -37,7 +37,8 @@ pytest # Documentation mock -m2r +m2r2 +mistune<2 sphinx sphinx_rtd_theme tabulate diff --git a/docs/api.rst b/docs/api.rst index 430183b79..7c0e18d60 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -268,6 +268,7 @@ Plotting plot_polar_comparison plot_global_comparison plot_1D + plot_vertical_section_comparison plot_vertical_section colormap.setup_colormap ticks.plot_xtick_format diff --git a/docs/conf.py b/docs/conf.py index 62681f76e..4e9fdd37a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,7 +13,7 @@ # serve to show the default. 
import os -import m2r +import m2r2 from glob import glob import mpas_analysis on_rtd = os.environ.get('READTHEDOCS', None) == 'True' @@ -211,7 +211,7 @@ build_quick_start() for mdFileName in glob('design_docs/*.md'): - output = m2r.parse_from_file(mdFileName) + output = m2r2.parse_from_file(mdFileName) rstFileName = os.path.splitext(mdFileName)[0]+'.rst' outFile = open(rstFileName, 'w') outFile.write(output) diff --git a/docs/config/output.rst b/docs/config/output.rst index 6096a4000..a80b010ba 100644 --- a/docs/config/output.rst +++ b/docs/config/output.rst @@ -131,6 +131,12 @@ specify:: generate = ['all_publicObs', 'no_seaIce'] +If an appropriate reference year isn't available for computing anomalies, +include 'no_anomaly' in the generate list to skip all tasks that require the +reference year for computing anomalies:: + + generate = ['all_publicObs', 'no_anomaly'] + To specify that you wanted to plot climatologies from the ocean component, you could use:: diff --git a/docs/config/transects.rst b/docs/config/transects.rst index 4408c149c..d466bfc71 100644 --- a/docs/config/transects.rst +++ b/docs/config/transects.rst @@ -8,39 +8,49 @@ the comparison grid for each transect:: # The approximate horizontal resolution (in km) of each transect. Latitude/ # longitude between observation points will be subsampled at this interval. - # Use 'obs' to indicate no subsampling. - horizontalResolution = obs + # Use 'obs' to indicate no subsampling. Use 'mpas' to indicate plotting of + # model data on the native grid, in which case comparison with observations + # will take place on the observation grid. + #horizontalResolution = mpas + #horizontalResolution = obs + horizontalResolution = 5 # The name of the vertical comparison grid. 
Valid values are 'mpas' for the # MPAS vertical grid, 'obs' to use the locations of observations or - # any other name if the vertical grid is defined by 'verticalComparisonGrid' - # verticalComparisonGridName = obs - verticalComparisonGridName = uniform_0_to_4000m_at_10m + # any other name if the vertical grid is defined by 'verticalComparisonGrid'. + # If horizontalResolution is 'mpas', model data (both main and control) will be + # plotted on the MPAS vertical grid, regardless of the comparison grid. #verticalComparisonGridName = mpas + #verticalComparisonGridName = obs + verticalComparisonGridName = uniform_0_to_4000m_at_10m # The vertical comparison grid if 'verticalComparisonGridName' is not 'mpas' or # 'obs'. This should be numpy array of (typically negative) elevations (in m). verticalComparisonGrid = numpy.linspace(0, -4000, 401) -The ``horizontalResolution`` of all transects can be either ``obs`` or a number -of kilometers. If ``obs``, model data are sampled at latitute and longitude -points corresponding to WOCE stations. It a number of kilometers is given, -linear interpolation between observation points is performed with approximately -the requested resolution. The distance between stations is always divided into -an integer number of segments of equal length so the resolution may be slightly -above or below ``horizontalResolution``. + # A range for the y axis (if any) + verticalBounds = [] +The ``horizontalResolution`` of all transects can be ``obs``, ``mpas`` or a +number of kilometers. If ``obs``, model data are sampled at latitude and +longitude points corresponding to the observations. If the horizontal grid +is ``mpas``, then the native MPAS-Ocean mesh is used for both the horizontal and +vertical grids. If a number of kilometers is given, linear interpolation +between observation points is performed with approximately the requested +resolution. 
The distance between observation points is always divided into an +integer number of segments of equal length so the resolution may be slightly +above or below ``horizontalResolution``. The vertical grid is determined by two parameters, ``verticalComparisonGridName`` and ``verticalComparisonGrid``. If -``verticalComparisonGridName = mpas``, the MPAS-Ocean vertical coordinate will -be interpolated horitontally from grid cell centers to the latitude and -longitude of each point along the transect, and the observations will be -interpolated vertically to the resulting grid. If -``verticalComparisonGridName = obs``, the vertical grid of the observations -is used instead. If ``verticalComparisonGridName`` is anything else, it is -taken to be the name of a user-defined vertical grid (best to make it -descriptive and unique, e.g. ``uniform_0_to_4000m_at_10m``) and +``verticalComparisonGridName = mpas``, but ``horizontalResolution`` is not +``mpas``, the MPAS-Ocean vertical coordinate will be interpolated horizontally +from grid cell centers to the latitude and longitude of each point along the +transect, and the observations will be interpolated vertically to the resulting +grid. If ``verticalComparisonGridName = obs``, the vertical grid of the +observations is used instead. If ``verticalComparisonGridName`` is anything +else, it is taken to be the name of a user-defined vertical grid (best to make +it descriptive and unique, e.g. ``uniform_0_to_4000m_at_10m``) and ``verticalComparisonGrid`` should be assigned a valid array of positive-up depth values (in the form of a python list or numpy array), e.g.:: @@ -48,6 +58,10 @@ depth values (in the form of a python list or numpy array), e.g.:: produces points between 0 and -4000 m sampled every 10 m. +``verticalBounds`` is a list of minimum and maximum limits for the vertical axis +of the transect. 
The default is an empty list, which means ``matplotlib`` +selects the axis limits to encompass the full range of the vertical grid. + .. note:: Some types of transects (e.g. those produce with geojson files) do not have diff --git a/docs/environment.yml b/docs/environment.yml deleted file mode 100644 index 22b089c04..000000000 --- a/docs/environment.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: mpas-analysis-docs -channels: - - conda-forge -dependencies: - - python=3.7 - - pip - - geometric_features - - pyremap - - matplotlib-base - - pytest - - numpy - - scipy - - netcdf4 - - xarray - - dask - - lxml - - pyproj - - pillow - - cmocean - - shapely - - cartopy >= 0.18.0 - - sphinx - - mock - - sphinx_rtd_theme - - numpydoc - - tabulate >= 0.8.2 - - m2r - - gsw diff --git a/docs/versions.rst b/docs/versions.rst index 953f28ade..a7a43eab3 100644 --- a/docs/versions.rst +++ b/docs/versions.rst @@ -13,6 +13,7 @@ Documentation On GitHub `v1.3.0`_ `1.3.0`_ `v1.4.0`_ `1.4.0`_ `v1.5.0`_ `1.5.0`_ +`v1.6.0`_ `1.6.0`_ ================ =============== .. _`stable`: ../stable/index.html @@ -24,6 +25,7 @@ Documentation On GitHub .. _`v1.3.0`: ../1.3.0/index.html .. _`v1.4.0`: ../1.4.0/index.html .. _`v1.5.0`: ../1.5.0/index.html +.. _`v1.6.0`: ../1.6.0/index.html .. _`master`: https://github.com/MPAS-Dev/MPAS-Analysis/tree/master .. _`develop`: https://github.com/MPAS-Dev/MPAS-Analysis/tree/develop .. _`1.2.6`: https://github.com/MPAS-Dev/MPAS-Analysis/tree/1.2.6 @@ -33,3 +35,4 @@ Documentation On GitHub .. _`1.3.0`: https://github.com/MPAS-Dev/MPAS-Analysis/tree/1.3.0 .. _`1.4.0`: https://github.com/MPAS-Dev/MPAS-Analysis/tree/1.4.0 .. _`1.5.0`: https://github.com/MPAS-Dev/MPAS-Analysis/tree/1.5.0 +.. 
_`1.6.0`: https://github.com/MPAS-Dev/MPAS-Analysis/tree/1.6.0 diff --git a/example.cfg b/example.cfg index f71cdb7e1..6f358fdb7 100644 --- a/example.cfg +++ b/example.cfg @@ -165,12 +165,21 @@ htmlSubdirectory = html # option: # mpas_analysis config.analysis --generate \ # only_ocean,no_timeSeries,timeSeriesSST +# +# Note: if an appropriate reference year isn't available for computing +# anomalies, include 'no_anomaly' in the generate list generate = ['all_publicObs'] + [climatology] ## options related to producing climatologies, typically to compare against ## observations and previous runs +# the year from which to compute anomalies if not the start year of the +# simulation. This might be useful if a long spin-up cycle is performed and +# only the anomaly over a later span of years is of interest. +# anomalyRefYear = 249 + # the first year over which to average climatalogies startYear = 11 # the last year over which to average climatalogies @@ -181,6 +190,11 @@ endYear = 20 ## options related to producing time series plots, often to compare against ## observations and previous runs +# the year from which to compute anomalies if not the start year of the +# simulation. This might be useful if a long spin-up cycle is performed and +# only the anomaly over a later span of years is of interest. +# anomalyRefYear = 249 + # start and end years for timeseries analysis. Use endYear = end to indicate # that the full range of the data should be used. If errorOnMissing = False, # the start and end year will be clipped to the valid range. 
Otherwise, out diff --git a/example_e3sm.cfg b/example_e3sm.cfg index c528da2d5..8ef4de423 100644 --- a/example_e3sm.cfg +++ b/example_e3sm.cfg @@ -117,12 +117,20 @@ htmlSubdirectory = html # option: # mpas_analysis config.analysis --generate \ # only_ocean,no_timeSeries,timeSeriesSST +# +# Note: if an appropriate reference year isn't available for computing +# anomalies, include 'no_anomaly' in the generate list generate = ['all_publicObs'] [climatology] ## options related to producing climatologies, typically to compare against ## observations and previous runs +# the year from which to compute anomalies if not the start year of the +# simulation. This might be useful if a long spin-up cycle is performed and +# only the anomaly over a later span of years is of interest. +# anomalyRefYear = 249 + # the first year over which to average climatalogies startYear = 11 # the last year over which to average climatalogies @@ -133,6 +141,11 @@ endYear = 20 ## options related to producing time series plots, often to compare against ## observations and previous runs +# the year from which to compute anomalies if not the start year of the +# simulation. This might be useful if a long spin-up cycle is performed and +# only the anomaly over a later span of years is of interest. +# anomalyRefYear = 249 + # start and end years for timeseries analysis. Use endYear = end to indicate # that the full range of the data should be used. If errorOnMissing = False, # the start and end year will be clipped to the valid range. 
Otherwise, out diff --git a/mpas_analysis/__init__.py b/mpas_analysis/__init__.py index 992f047e4..1a006aab5 100644 --- a/mpas_analysis/__init__.py +++ b/mpas_analysis/__init__.py @@ -3,5 +3,5 @@ import matplotlib as mpl mpl.use('Agg') -__version_info__ = (1, 5, 0) +__version_info__ = (1, 6, 0) __version__ = '.'.join(str(vi) for vi in __version_info__) diff --git a/mpas_analysis/default.cfg b/mpas_analysis/default.cfg index 38a2e72e7..6abd4e481 100644 --- a/mpas_analysis/default.cfg +++ b/mpas_analysis/default.cfg @@ -2332,7 +2332,10 @@ seasons = ['ANN'] # The approximate horizontal resolution (in km) of each transect. Latitude/ # longitude between observation points will be subsampled at this interval. -# Use 'obs' to indicate no subsampling. +# Use 'obs' to indicate no subsampling. Use 'mpas' to indicate plotting of +# model data on the native grid, in which case comparison with observations +# will take place on the observation grid. +#horizontalResolution = mpas #horizontalResolution = obs horizontalResolution = 5 @@ -2345,15 +2348,22 @@ horizontalBounds = {'WOCE_A21': [], # The name of the vertical comparison grid. Valid values are 'mpas' for the # MPAS vertical grid, 'obs' to use the locations of observations or -# any other name if the vertical grid is defined by 'verticalComparisonGrid' +# any other name if the vertical grid is defined by 'verticalComparisonGrid'. +# If horizontalResolution is 'mpas', model data (both main and control) will be +# plotted on the MPAS vertical grid, regardless of the comparison grid. #verticalComparisonGridName = mpas #verticalComparisonGridName = obs verticalComparisonGridName = uniform_0_to_4000m_at_10m # The vertical comparison grid if 'verticalComparisonGridName' is not 'mpas' or # 'obs'. This should be numpy array of (typically negative) elevations (in m). 
+# The first and last entries are used as axis bounds for 'mpas' and 'obs' +# vertical comparison grids verticalComparisonGrid = numpy.linspace(0, -4000, 401) +# A range for the y axis (if any) +verticalBounds = [] + # The minimum weight of a destination cell after remapping. Any cell with # weights lower than this threshold will therefore be masked out. renormalizationThreshold = 0.01 @@ -2471,7 +2481,6 @@ normArgsDifference = {'vmin': -0.3, 'vmax': 0.3} contourLevelsDifference = [] - [geojsonTransects] ## options related to plotting model transects at points determined by a ## geojson file. To generate your own geojson file, go to: @@ -2524,7 +2533,9 @@ seasons = ['ANN'] # The approximate horizontal resolution (in km) of each transect. Latitude/ # longitude between observation points will be subsampled at this interval. -# Use 'obs' to indicate no subsampling. +# Use 'obs' to indicate no subsampling. Use 'mpas' to indicate plotting of +# model data on the native grid. +#horizontalResolution = mpas #horizontalResolution = obs horizontalResolution = 5 @@ -2536,8 +2547,13 @@ verticalComparisonGridName = uniform_0_to_4000m_at_10m # The vertical comparison grid if 'verticalComparisonGridName' is not 'mpas'. # This should be numpy array of (typically negative) elevations (in m). +# The first and last entries are used as axis bounds for 'mpas' vertical +# comparison grids verticalComparisonGrid = numpy.linspace(0, -4000, 401) +# A range for the y axis (if any) +verticalBounds = [] + # The minimum weight of a destination cell after remapping. Any cell with # weights lower than this threshold will therefore be masked out. renormalizationThreshold = 0.01 @@ -2713,7 +2729,10 @@ seasons = ['ANN', 'JFM', 'JAS'] # The approximate horizontal resolution (in km) of each transect. Latitude/ # longitude between observation points will be subsampled at this interval. -# Use 'obs' to indicate no subsampling. +# Use 'obs' to indicate no subsampling. 
Use 'mpas' to indicate plotting of +# model data on the native grid, in which case comparison with observations +# will take place on the observation grid. +#horizontalResolution = mpas #horizontalResolution = obs horizontalResolution = 5 @@ -2726,8 +2745,13 @@ verticalComparisonGridName = uniform_10_to_1500m_at_10m # The vertical comparison grid if 'verticalComparisonGridName' is not 'mpas' or # 'obs'. This should be numpy array of (typically negative) elevations (in m). +# The first and last entries are used as axis bounds for 'mpas' and 'obs' +# vertical comparison grids verticalComparisonGrid = numpy.linspace(-10, -1500, 150) +# A range for the y axis (if any) +verticalBounds = [] + # The minimum weight of a destination cell after remapping. Any cell with # weights lower than this threshold will therefore be masked out. renormalizationThreshold = 0.01 diff --git a/mpas_analysis/docs/parse_quick_start.py b/mpas_analysis/docs/parse_quick_start.py index 03617b4e1..4168ad296 100644 --- a/mpas_analysis/docs/parse_quick_start.py +++ b/mpas_analysis/docs/parse_quick_start.py @@ -4,7 +4,7 @@ in the documentation """ -from m2r import convert +from m2r2 import convert def build_quick_start(): diff --git a/mpas_analysis/ocean/climatology_map_ohc_anomaly.py b/mpas_analysis/ocean/climatology_map_ohc_anomaly.py index 598508c67..f1d86cd1c 100644 --- a/mpas_analysis/ocean/climatology_map_ohc_anomaly.py +++ b/mpas_analysis/ocean/climatology_map_ohc_anomaly.py @@ -71,7 +71,8 @@ def __init__(self, config, mpasClimatologyTask, refYearClimatolgyTask, super(ClimatologyMapOHCAnomaly, self).__init__( config=config, taskName='climatologyMapOHCAnomaly', componentName='ocean', - tags=['climatology', 'horizontalMap', fieldName, 'publicObs']) + tags=['climatology', 'horizontalMap', fieldName, 'publicObs', + 'anomaly']) sectionName = self.taskName @@ -349,7 +350,7 @@ def _compute_ohc(self, climatology): # {{{ nVertLevels = dsRestart.sizes['nVertLevels'] - zMid = 
compute_zmid(dsRestart.bottomDepth, dsRestart.maxLevelCell, + zMid = compute_zmid(dsRestart.bottomDepth, dsRestart.maxLevelCell-1, dsRestart.layerThickness) vertIndex = xarray.DataArray.from_dict( diff --git a/mpas_analysis/ocean/compute_transects_subtask.py b/mpas_analysis/ocean/compute_transects_subtask.py index fd299856e..5b0c6fbab 100644 --- a/mpas_analysis/ocean/compute_transects_subtask.py +++ b/mpas_analysis/ocean/compute_transects_subtask.py @@ -18,7 +18,17 @@ from pyremap import PointCollectionDescriptor -from mpas_analysis.shared.climatology import RemapMpasClimatologySubtask +from mpas_tools.viz import mesh_to_triangles +from mpas_tools.transects import subdivide_great_circle, \ + cartesian_to_great_circle_distance +from mpas_tools.viz.transects import find_transect_cells_and_weights, \ + make_triangle_tree +from mpas_tools.ocean.transects import find_transect_levels_and_weights, \ + interp_mpas_to_transect_triangle_nodes, \ + interp_transect_grid_to_transect_triangle_nodes + +from mpas_analysis.shared.climatology import RemapMpasClimatologySubtask, \ + get_climatology_op_directory from mpas_analysis.shared.io.utility import build_config_full_path, \ make_directories @@ -80,7 +90,7 @@ def __init__(self, mpasClimatologyTask, parentTask, climatologyName, subtaskName='remapTransects'): # {{{ - ''' + """ Construct the analysis task and adds it as a subtask of the ``parentTask``. 
@@ -131,7 +141,7 @@ def __init__(self, mpasClimatologyTask, parentTask, climatologyName, subtaskName : str, optional The name of the subtask - ''' + """ # Authors # ------- # Xylar Asay-Davis @@ -147,11 +157,21 @@ def __init__(self, mpasClimatologyTask, parentTask, climatologyName, self.transectCollectionName = transectCollectionName self.verticalComparisonGridName = verticalComparisonGridName self.verticalComparisonGrid = verticalComparisonGrid + self.transectNumber = None + self.x = None + self.collectionDescriptor = None + self.maxLevelCell = None + self.zMid = None + self.remap = self.obsDatasets.horizontalResolution != 'mpas' + if self.obsDatasets.horizontalResolution == 'mpas' and \ + self.verticalComparisonGridName != 'mpas': + raise ValueError('If the horizontal comparison grid is "mpas", the ' + 'vertical grid must also be "mpas".') # }}} def setup_and_check(self): # {{{ - ''' + """ Creates a PointCollectionDescriptor describing all the points in the transects to remap to. Keeps track of which transects index each point belongs to. @@ -162,90 +182,93 @@ def setup_and_check(self): # {{{ If a restart file is not available from which to read mesh information or if no history files are available from which to compute the climatology in the desired time range. 
- ''' + """ # Authors # ------- # Xylar Asay-Davis - transectNumber = [] - lats = [] - lons = [] - x = [] - obsDatasets = self.obsDatasets.get_observations() - datasets = list(obsDatasets.values()) - for transectIndex, ds in enumerate(datasets): - localLats = list(ds.lat.values) - localLons = list(ds.lon.values) - localX = list(ds.x.values) - localIndices = [transectIndex for lat in localLats] - lats.extend(localLats) - lons.extend(localLons) - x.extend(localX) - transectNumber.extend(localIndices) - - self.transectNumber = xr.DataArray.from_dict( - {'dims': ('nPoints'), - 'data': transectNumber}) - - self.x = xr.DataArray.from_dict( - {'dims': ('nPoints'), - 'data': x}) - - self.collectionDescriptor = PointCollectionDescriptor( - lats, lons, collectionName=self.transectCollectionName, - units='degrees', outDimension='nPoints') - - self.add_comparison_grid_descriptor(self.transectCollectionName, - self.collectionDescriptor) - - # then, call setup_and_check from the base class + if self.remap: + transectNumber = [] + lats = [] + lons = [] + x = [] + obsDatasets = self.obsDatasets.get_observations() + datasets = list(obsDatasets.values()) + for transectIndex, ds in enumerate(datasets): + localLats = list(ds.lat.values) + localLons = list(ds.lon.values) + localX = list(ds.x.values) + localIndices = [transectIndex for _ in localLats] + lats.extend(localLats) + lons.extend(localLons) + x.extend(localX) + transectNumber.extend(localIndices) + + self.transectNumber = xr.DataArray.from_dict( + {'dims': ('nPoints',), + 'data': transectNumber}) + + self.x = xr.DataArray.from_dict( + {'dims': ('nPoints',), + 'data': x}) + + self.collectionDescriptor = PointCollectionDescriptor( + lats, lons, collectionName=self.transectCollectionName, + units='degrees', outDimension='nPoints') + + self.add_comparison_grid_descriptor(self.transectCollectionName, + self.collectionDescriptor) + + for transectName in obsDatasets: + obsDatasets[transectName].close() + + # then, call 
setup_and_check from the parent class # (RemapMpasClimatologySubtask) - super(ComputeTransectsSubtask, self).setup_and_check() - - for transectName in obsDatasets: - obsDatasets[transectName].close() + super().setup_and_check() def run_task(self): # {{{ - ''' + """ Compute climatologies of melt rates from E3SM/MPAS output This function has been overridden to compute ``zMid`` based on data from a restart file for later use in vertically interpolating to reference depths. - ''' + """ # Authors # ------- # Xylar Asay-Davis - # first, compute zMid and cell mask from the restart file - with xr.open_dataset(self.restartFileName) as ds: - ds = ds[['maxLevelCell', 'bottomDepth', 'layerThickness']] - ds = ds.isel(Time=0) + # first, get maxLevelCell and zMid, needed for masking - self.maxLevelCell = ds.maxLevelCell - 1 + dsMesh = xr.open_dataset(self.restartFileName) + dsMesh = dsMesh.isel(Time=0) - zMid = compute_zmid(ds.bottomDepth, ds.maxLevelCell, - ds.layerThickness) + self.maxLevelCell = dsMesh.maxLevelCell - 1 + + if self.remap: + zMid = compute_zmid(dsMesh.bottomDepth, dsMesh.maxLevelCell-1, + dsMesh.layerThickness) self.zMid = \ xr.DataArray.from_dict({'dims': ('nCells', 'nVertLevels'), 'data': zMid}) - ds.close() # then, call run from the base class (RemapMpasClimatologySubtask), - # which will perform the horizontal remapping + # which will perform masking and possibly horizontal remapping super(ComputeTransectsSubtask, self).run_task() obsDatasets = self.obsDatasets.get_observations() - self.logger.info('Interpolating each transect vertically...') - # finally, vertically interpolate and write out each transect - for season in self.seasons: + if self.remap: + + self.logger.info('Interpolating each transect vertically...') + # vertically interpolate and write out each transect + for season in self.seasons: - remappedFileName = self.get_remapped_file_name( - season, comparisonGridName=self.transectCollectionName) + remappedFileName = self.get_remapped_file_name( + 
season, comparisonGridName=self.transectCollectionName) - with xr.open_dataset(remappedFileName) as ds: + ds = xr.open_dataset(remappedFileName) transectNames = list(obsDatasets.keys()) for transectIndex, transectName in enumerate(transectNames): self.logger.info(' {}'.format(transectName)) @@ -258,14 +281,19 @@ def run_task(self): # {{{ outFileName, outObsFileName) ds.close() - for transectName in obsDatasets: - obsDatasets[transectName].close() + for transectName in obsDatasets: + obsDatasets[transectName].close() + + else: + self._compute_mpas_transects(dsMesh) + + dsMesh.close() # }}} def customize_masked_climatology(self, climatology, season): # {{{ - ''' - Add zMid to the climatologys + """ + Add zMid to the climatologies Parameters ---------- @@ -279,7 +307,7 @@ def customize_masked_climatology(self, climatology, season): # {{{ ------- climatology : ``xarray.Dataset`` object the modified climatology data set - ''' + """ # Authors # ------- # Xylar Asay-Davis @@ -288,13 +316,14 @@ def customize_masked_climatology(self, climatology, season): # {{{ {'dims': ('nVertLevels',), 'data': numpy.arange(climatology.sizes['nVertLevels'])}) - cellMask = zIndex < self.maxLevelCell + cellMask = zIndex <= self.maxLevelCell for variableName in self.variableList: climatology[variableName] = \ climatology[variableName].where(cellMask) - climatology['zMid'] = self.zMid + if self.remap: + climatology['zMid'] = self.zMid climatology = climatology.transpose('nVertLevels', 'nCells') @@ -302,7 +331,7 @@ def customize_masked_climatology(self, climatology, season): # {{{ def customize_remapped_climatology(self, climatology, comparisonGridNames, season): # {{{ - ''' + """ Add the transect index to the data set Parameters @@ -321,7 +350,7 @@ def customize_remapped_climatology(self, climatology, comparisonGridNames, climatology : ``xarray.Dataset``` The same data set with any custom fields added or modifications made - ''' + """ # Authors # ------- # Xylar Asay-Davis @@ -342,7 +371,7 @@ 
def customize_remapped_climatology(self, climatology, comparisonGridNames, def _vertical_interp(self, ds, transectIndex, dsObs, outFileName, outObsFileName): - ''' + """ Vertically interpolate a transect and write it to a unique file Parameters @@ -363,7 +392,7 @@ def _vertical_interp(self, ds, transectIndex, dsObs, outFileName, outObsFileName : str The name of the file to which the resulting obs data set should be written if it is interpolated - ''' + """ # Authors # ------- # Xylar Asay-Davis @@ -403,6 +432,149 @@ def _vertical_interp(self, ds, transectIndex, dsObs, outFileName, ds = ds.drop_vars(['validMask', 'transectNumber']) write_netcdf(ds, outFileName) # }}} + def get_mpas_transect_file_name(self, transectName): # {{{ + """Get the file name for a masked MPAS transect info""" + # Authors + # ------- + # Xylar Asay-Davis + + config = self.config + mpasMeshName = config.get('input', 'mpasMeshName') + + climatologyOpDirectory = get_climatology_op_directory(config, 'avg') + + comparisonFullMeshName = transectName.replace(' ', '_') + + stageDirectory = '{}/remapped'.format(climatologyOpDirectory) + + directory = '{}/{}_{}_to_{}'.format( + stageDirectory, self.climatologyName, mpasMeshName, + comparisonFullMeshName) + + make_directories(directory) + + fileName = '{}/mpas_transect_info.nc'.format(directory) + + return fileName # }}} + + def _compute_mpas_transects(self, dsMesh): # {{{ + + # see if all transects have already been computed + allExist = True + transectNames = list(self.obsDatasets.obsFileNames.keys()) + for transectName in transectNames: + transectInfoFileName = self.get_mpas_transect_file_name( + transectName) + if not os.path.exists(transectInfoFileName): + allExist = False + break + obsFileName = self.obsDatasets.get_out_file_name( + transectName, self.verticalComparisonGridName) + if not os.path.exists(obsFileName): + allExist = False + break + + if allExist: + return + + dsTris = mesh_to_triangles(dsMesh) + + triangleTree = 
make_triangle_tree(dsTris) + + for transectName in transectNames: + obsFileName = self.obsDatasets.get_out_file_name( + transectName, self.verticalComparisonGridName) + transectInfoFileName = self.get_mpas_transect_file_name( + transectName) + if not os.path.exists(obsFileName) or \ + not os.path.exists(transectInfoFileName): + dsTransect = self.obsDatasets.build_observational_dataset( + self.obsDatasets.obsFileNames[transectName], transectName) + + dsTransect.load() + # make sure lat and lon are coordinates + for coord in ['lon', 'lat']: + dsTransect.coords[coord] = dsTransect[coord] + + if 'z' in dsTransect: + transectZ = dsTransect.z + else: + transectZ = None + + dsMpasTransect = find_transect_cells_and_weights( + dsTransect.lon, dsTransect.lat, dsTris, dsMesh, + triangleTree, degrees=True) + + dsMpasTransect = find_transect_levels_and_weights( + dsMpasTransect, dsMesh.layerThickness, + dsMesh.bottomDepth, dsMesh.maxLevelCell - 1, + transectZ) + + if 'landIceFraction' in dsMesh: + interpCellIndices = dsMpasTransect.interpHorizCellIndices + interpCellWeights = dsMpasTransect.interpHorizCellWeights + landIceFraction = dsMesh.landIceFraction.isel( + nCells=interpCellIndices) + landIceFraction = (landIceFraction * interpCellWeights).sum( + dim='nHorizWeights') + dsMpasTransect['landIceFraction'] = landIceFraction + + # use to_netcdf rather than write_netcdf because integer indices + # are getting converted to floats when xarray reads them back + # because of _FillValue + dsMpasTransect.to_netcdf(transectInfoFileName) + + dsTransectOnMpas = xr.Dataset(dsMpasTransect) + dsTransectOnMpas['x'] = dsMpasTransect.dNode.isel( + nSegments=dsMpasTransect.segmentIndices, + nHorizBounds=dsMpasTransect.nodeHorizBoundsIndices) + + dsTransectOnMpas['z'] = dsMpasTransect.zTransectNode + + for var in dsTransect.data_vars: + dims = dsTransect[var].dims + if 'nPoints' in dims and 'nz' in dims: + da = dsTransect[var] + da = self._interp_obs_to_mpas(da, dsMpasTransect) + 
dsTransectOnMpas[var] = da + + dsTransectOnMpas.to_netcdf(obsFileName) + + for transectName in transectNames: + transectInfoFileName = self.get_mpas_transect_file_name( + transectName) + dsMpasTransect = xr.open_dataset(transectInfoFileName) + + for season in self.seasons: + maskedFileName = self.get_masked_file_name(season) + with xr.open_dataset(maskedFileName) as dsMask: + dsOnMpas = xr.Dataset(dsMpasTransect) + for var in dsMask.data_vars: + dims = dsMask[var].dims + if 'nCells' in dims and 'nVertLevels' in dims: + dsOnMpas[var] = \ + interp_mpas_to_transect_triangle_nodes( + dsMpasTransect, dsMask[var]) + + outFileName = self.get_remapped_file_name( + season, comparisonGridName=transectName) + dsOnMpas.to_netcdf(outFileName) + + # }}} + + def _interp_obs_to_mpas(self, da, dsMpasTransect, threshold=0.1): # {{{ + """ + Interpolate observations to the native MPAS transect with masking + """ + daMask = da.notnull() + da = da.where(daMask, 0.) + da = interp_transect_grid_to_transect_triangle_nodes( + dsMpasTransect, da) + daMask = interp_transect_grid_to_transect_triangle_nodes( + dsMpasTransect, daMask) + da = (da / daMask).where(daMask > threshold) + return da # }}} + # }}} @@ -421,8 +593,8 @@ class TransectsObservations(object): # {{{ observations for a transect horizontalResolution : str - 'obs' for the obs as they are or a size in km if subdivision is - desired. + 'obs' for the obs as they are, 'mpas' for the native MPAS mesh, or a + size in km if subdivision of the observational transect is desired. transectCollectionName : str A name that describes the collection of transects (e.g. the name @@ -438,7 +610,7 @@ class TransectsObservations(object): # {{{ def __init__(self, config, obsFileNames, horizontalResolution, transectCollectionName): # {{{ - ''' + """ Construct the object, setting the observations dictionary to None. 
Parameters @@ -451,14 +623,14 @@ def __init__(self, config, obsFileNames, horizontalResolution, observations for a transect horizontalResolution : str - 'obs' for the obs as they are or a size in km if subdivision is - desired. + 'obs' for the obs as they are, 'mpas' for the native MPAS mesh, or a + size in km if subdivision of the observational transect is desired. transectCollectionName : str A name that describes the collection of transects (e.g. the name of the collection of observations) used to name the destination "mesh" for regridding - ''' + """ # Authors # ------- # Xylar Asay-Davis @@ -466,25 +638,30 @@ def __init__(self, config, obsFileNames, horizontalResolution, self.obsDatasets = None self.config = config self.obsFileNames = obsFileNames - if horizontalResolution != 'obs': + if horizontalResolution not in ['obs', 'mpas']: horizontalResolution = float(horizontalResolution) self.horizontalResolution = horizontalResolution self.transectCollectionName = transectCollectionName def get_observations(self): # {{{ - ''' + """ Read in and set up the observations. 
Returns ------- obsDatasets : OrderedDict The observational dataset - ''' + """ # Authors # ------- # Xylar Asay-Davis + if self.horizontalResolution == 'mpas': + # by default, we don't do anything for transects on the native grid + # but subclasses might need to do something + return None + obsDatasets = OrderedDict() for name in self.obsFileNames: outFileName = self.get_out_file_name(name) @@ -500,17 +677,14 @@ def get_observations(self): for coord in ['lon', 'lat']: dsObs.coords[coord] = dsObs[coord] - if self.horizontalResolution == 'obs': - dsObs = self._add_distance(dsObs) - else: - dsObs = self._subdivide_observations(dsObs) + dsObs = self._add_distance(dsObs) write_netcdf(dsObs, outFileName) obsDatasets[name] = dsObs return obsDatasets # }}} def build_observational_dataset(self, fileName, transectName): # {{{ - ''' + """ read in the data sets for observations, and possibly rename some variables and dimensions @@ -526,7 +700,7 @@ def build_observational_dataset(self, fileName, transectName): # {{{ ------- dsObs : ``xarray.Dataset`` The observational dataset - ''' + """ # Authors # ------- # Xylar Asay-Davis @@ -542,7 +716,7 @@ def build_observational_dataset(self, fileName, transectName): # {{{ def get_out_file_name(self, transectName, verticalComparisonGridName='obs'): # {{{ - ''' + """ Given config options, the name of a field and a string identifying the months in a seasonal climatology, returns the full path for MPAS climatology files before and after remapping. @@ -564,7 +738,7 @@ def get_out_file_name(self, transectName, ------- fileName : str The path to the climatology file for the specified season. 
- ''' + """ # Authors # ------- # Xylar Asay-Davis @@ -578,91 +752,54 @@ def get_out_file_name(self, transectName, make_directories(remappedDirectory) + transectSuffix = transectName.replace(' ', '_') + if verticalComparisonGridName == 'obs': fileName = '{}/{}_{}.nc'.format( - remappedDirectory, self.transectCollectionName, transectName) + remappedDirectory, self.transectCollectionName, transectSuffix) else: fileName = '{}/{}_{}_{}.nc'.format( - remappedDirectory, self.transectCollectionName, transectName, + remappedDirectory, self.transectCollectionName, transectSuffix, verticalComparisonGridName) return fileName # }}} def _add_distance(self, dsObs): # {{{ - ''' - Subdivide each segment of the transect so the horizontal resolution - approximately matches the requested resolution - ''' - - lat = dsObs.lat.values - lon = dsObs.lon.values - - # compute the great circle distance between these points - dxIn = self._haversine(lon[0:-1], lat[0:-1], lon[1:], lat[1:]) - - xIn = numpy.zeros(lat.shape) - xIn[1:] = numpy.cumsum(dxIn) - - dsObs['x'] = (('nPoints',), xIn) - return dsObs # }}} - - def _subdivide_observations(self, dsObs): # {{{ - ''' - Subdivide each segment of the transect so the horizontal resolution - approximately matches the requested resolution - ''' - - lat = dsObs.lat.values - lon = dsObs.lon.values + """ + Add a distance coordinate for the transect. 
If a horizontal resolution + for subdivision is provided, subdivide each segment of the transect so + the horizontal resolution is at least as high as the requested + resolution + """ - # compute the great circle distance between these points - dxIn = self._haversine(lon[0:-1], lat[0:-1], lon[1:], lat[1:]) + lat = numpy.deg2rad(dsObs.lat.values) + lon = numpy.deg2rad(dsObs.lon.values) - nSegments = numpy.maximum( - (dxIn / self.horizontalResolution + 0.5).astype(int), 1) + earth_radius = 6.371e6 # Radius of earth in meters - xIn = numpy.zeros(lat.shape) - xIn[1:] = numpy.cumsum(dxIn) + x = earth_radius * numpy.cos(lat) * numpy.cos(lon) + y = earth_radius * numpy.cos(lat) * numpy.sin(lon) + z = earth_radius * numpy.sin(lat) - outIndex = [] - for index in range(len(xIn) - 1): - n = nSegments[index] - outIndex.extend(index + numpy.arange(0, n) / n) - outIndex.append(len(xIn) - 1) + if self.horizontalResolution == 'obs': + dIn = cartesian_to_great_circle_distance(x, y, z, earth_radius) + dsObs['x'] = (('nPoints',), dIn) + elif self.horizontalResolution != 'mpas': + # subdivide + xOut, yOut, zOut, dIn, dOut = subdivide_great_circle( + x, y, z, 1e3*self.horizontalResolution, earth_radius) - xOut = numpy.interp(outIndex, numpy.arange(len(xIn)), xIn) + dsObs['xIn'] = (('nPoints',), dIn) + dsObs['xOut'] = (('nPointsOut',), dOut) - dsObs['xIn'] = (('nPoints',), xIn) - dsObs['xOut'] = (('nPointsOut',), xOut) + # interpolate fields without and with vertical dimension + dsObs = interp_1d(dsObs, inInterpDim='nPoints', + inInterpCoord='xIn', outInterpDim='nPointsOut', + outInterpCoord='xOut') + dsObs = dsObs.drop_vars(['xIn']) + dsObs = dsObs.rename({'nPointsOut': 'nPoints', 'xOut': 'x'}) - # interpolate fields without and with vertical dimension - dsObs = interp_1d(dsObs, inInterpDim='nPoints', - inInterpCoord='xIn', outInterpDim='nPointsOut', - outInterpCoord='xOut') - dsObs = dsObs.drop_vars(['xIn']) - dsObs = dsObs.rename({'nPointsOut': 'nPoints', 'xOut': 'x'}) return dsObs # 
}}} - def _haversine(self, lon1, lat1, lon2, lat2): # {{{ - """ - Calculate the great circle distance in km between two points on the - earth (specified in decimal degrees). Based on - https://stackoverflow.com/a/4913653 - """ - # convert decimal degrees to radians - lon1 = numpy.deg2rad(lon1) - lat1 = numpy.deg2rad(lat1) - lon2 = numpy.deg2rad(lon2) - lat2 = numpy.deg2rad(lat2) - - # haversine formula - dlon = lon2 - lon1 - dlat = lat2 - lat1 - a = numpy.sin(dlat / 2.)**2 + numpy.cos(lat1) * numpy.cos(lat2) * \ - numpy.sin(dlon / 2.)**2 - c = 2 * numpy.arcsin(numpy.sqrt(a)) - r = 6371 # Radius of earth in kilometers. Use 3956 for miles - return c * r # }}} - # }}} # vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python diff --git a/mpas_analysis/ocean/geojson_transects.py b/mpas_analysis/ocean/geojson_transects.py index a10778f88..4ccbbeda9 100644 --- a/mpas_analysis/ocean/geojson_transects.py +++ b/mpas_analysis/ocean/geojson_transects.py @@ -81,6 +81,8 @@ def __init__(self, config, mpasClimatologyTask, controlConfig=None): verticalComparisonGrid = config.getExpression( sectionName, 'verticalComparisonGrid', usenumpyfunc=True) + verticalBounds = config.getExpression(sectionName, 'verticalBounds') + fields = config.getExpression(sectionName, 'fields') obsFileNames = OrderedDict() @@ -163,7 +165,8 @@ def __init__(self, config, mpasClimatologyTask, controlConfig=None): groupLink='geojson', galleryName=field['titleName'], configSectionName='geojson{}Transects'.format( - fieldPrefixUpper)) + fieldPrefixUpper), + verticalBounds=verticalBounds) self.add_subtask(subtask) # }}} diff --git a/mpas_analysis/ocean/meridional_heat_transport.py b/mpas_analysis/ocean/meridional_heat_transport.py index 958227543..9e5e6fec3 100644 --- a/mpas_analysis/ocean/meridional_heat_transport.py +++ b/mpas_analysis/ocean/meridional_heat_transport.py @@ -181,9 +181,9 @@ def run_task(self): # {{{ if os.path.exists(outFileName): self.logger.info(' Reading results from previous analysis 
run...') annualClimatology = xr.open_dataset(outFileName) - refZMid = annualClimatology.refZMid.values + refZMid = annualClimatology.refZMid binBoundaryMerHeatTrans = \ - annualClimatology.binBoundaryMerHeatTrans.values + annualClimatology.binBoundaryMerHeatTrans else: # Read in depth and MHT latitude points @@ -195,15 +195,18 @@ def run_task(self): # {{{ 'one for MHT calcuation') with xr.open_dataset(restartFileName) as dsRestart: - refBottomDepth = dsRestart.refBottomDepth.values + refBottomDepth = dsRestart.refBottomDepth - nVertLevels = len(refBottomDepth) + nVertLevels = refBottomDepth.sizes['nVertLevels'] refLayerThickness = np.zeros(nVertLevels) refLayerThickness[0] = refBottomDepth[0] refLayerThickness[1:nVertLevels] = \ refBottomDepth[1:nVertLevels] - \ refBottomDepth[0:nVertLevels - 1] + refLayerThickness = xr.DataArray(dims='nVertLevels', + data=refLayerThickness) + refZMid = -refBottomDepth + 0.5 * refLayerThickness binBoundaryMerHeatTrans = None @@ -220,7 +223,7 @@ def run_task(self): # {{{ with xr.open_dataset(inputFile) as ds: if 'binBoundaryMerHeatTrans' in ds.data_vars: binBoundaryMerHeatTrans = \ - ds.binBoundaryMerHeatTrans.values + ds.binBoundaryMerHeatTrans break if binBoundaryMerHeatTrans is None: @@ -251,12 +254,22 @@ def run_task(self): # {{{ annualClimatology = xr.open_dataset(climatologyFileName) annualClimatology = annualClimatology[variableList] + annualClimatology = annualClimatology.rename( + {'timeMonthly_avg_meridionalHeatTransportLat': + 'meridionalHeatTransportLat', + 'timeMonthly_avg_meridionalHeatTransportLatZ': + 'meridionalHeatTransportLatZ'}) if 'Time' in annualClimatology.dims: annualClimatology = annualClimatology.isel(Time=0) - annualClimatology.coords['refZMid'] = (('nVertLevels',), refZMid) + annualClimatology.coords['refZMid'] = refZMid annualClimatology.coords['binBoundaryMerHeatTrans'] = \ - (('nMerHeatTransBinsP1',), binBoundaryMerHeatTrans) + binBoundaryMerHeatTrans + + if config.getboolean(self.sectionName, 
'plotVerticalSection'): + # normalize 2D MHT by layer thickness + annualClimatology['meridionalHeatTransportLatZ'] /= \ + refLayerThickness write_netcdf(annualClimatology, outFileName) @@ -265,9 +278,10 @@ def run_task(self): # {{{ self.logger.info(' Plot global MHT...') # Plot 1D MHT (zonally averaged, depth integrated) x = binBoundaryMerHeatTrans - y = annualClimatology.timeMonthly_avg_meridionalHeatTransportLat - xLabel = 'latitude [deg]' - yLabel = 'meridional heat transport [PW]' + y = annualClimatology.meridionalHeatTransportLat + xLabel = 'latitude (deg)' + yLabel = 'meridional heat transport (PW)' + title = 'Global MHT (ANN, years {:04d}-{:04d})\n {}'.format( self.startYear, self.endYear, mainRunName) filePrefix = self.filePrefixes['mht'] @@ -314,8 +328,7 @@ def run_task(self): # {{{ lineWidths.append(1.2) legendText.append(controlRunName) xArrays.append(dsControl.binBoundaryMerHeatTrans) - fieldArrays.append( - dsControl.timeMonthly_avg_meridionalHeatTransportLat) + fieldArrays.append(dsControl.meridionalHeatTransportLat) errArrays.append(None) if len(legendText) == 1: @@ -333,26 +346,20 @@ def run_task(self): # {{{ if config.getboolean(self.sectionName, 'plotVerticalSection'): # Plot 2D MHT (zonally integrated) - # normalize 2D MHT by layer thickness - MHTLatZVar = \ - annualClimatology.timeMonthly_avg_meridionalHeatTransportLatZ - MHTLatZ = MHTLatZVar.values.T[:, :] - for k in range(nVertLevels): - MHTLatZ[k, :] = MHTLatZ[k, :] / refLayerThickness[k] - x = binBoundaryMerHeatTrans y = refZMid - z = MHTLatZ - xLabel = 'latitude [deg]' - yLabel = 'depth [m]' + z = annualClimatology.meridionalHeatTransportLatZ + xLabel = 'latitude (deg)' + yLabel = 'depth (m)' title = 'Global MHT (ANN, years {:04d}-{:04d})\n {}'.format( self.startYear, self.endYear, mainRunName) filePrefix = self.filePrefixes['mhtZ'] outFileName = '{}/{}.png'.format(self.plotsDirectory, filePrefix) - colorbarLabel = '[PW/m]' - plot_vertical_section(config, x, y, z, self.sectionName, - 
suffix='', colorbarLabel=colorbarLabel, - title=title, xlabel=xLabel, ylabel=yLabel, + colorbarLabel = '(PW/m)' + plot_vertical_section(config, z, self.sectionName, xCoords=x, + zCoord=y, suffix='', + colorbarLabel=colorbarLabel, + title=title, xlabels=xLabel, ylabel=yLabel, xLim=xLimGlobal, yLim=depthLimGlobal, invertYAxis=False, movingAveragePoints=movingAveragePoints, diff --git a/mpas_analysis/ocean/plot_hovmoller_subtask.py b/mpas_analysis/ocean/plot_hovmoller_subtask.py index a59456c0a..3b7a038b5 100644 --- a/mpas_analysis/ocean/plot_hovmoller_subtask.py +++ b/mpas_analysis/ocean/plot_hovmoller_subtask.py @@ -288,9 +288,15 @@ def run_task(self): # {{{ z = np.zeros(depths.shape) z[0] = -0.5 * depths[0] z[1:] = -0.5 * (depths[0:-1] + depths[1:]) + z = xr.DataArray(dims='nVertLevels', data=z) - Time = ds.Time.values - field = ds[self.mpasFieldName].values.transpose() + Time = ds.Time + field = ds[self.mpasFieldName] + + # drop any NaN values, because this causes issues with rolling averages + mask = field.notnull().all(dim='Time') + field = field.where(mask, drop=True) + z = z.where(mask, drop=True) xLabel = 'Time (years)' yLabel = 'Depth (m)' @@ -342,7 +348,12 @@ def run_task(self): # {{{ regionDim = 'nOceanRegionsTmp' dsRef = dsRef.isel(**{regionDim: regionIndex}) - refField = dsRef[self.mpasFieldName].values.transpose() + refField = dsRef[self.mpasFieldName] + # drop any NaN values, because this causes issues with rolling + # averages + refMask = refField.notnull().all(dim='Time') + assert(np.all(refMask.values == mask.values)) + refField = refField.where(mask, drop=True) assert(field.shape == refField.shape) diff = field - refField refTitle = self.controlConfig.get('runs', 'mainRunName') @@ -364,10 +375,10 @@ def run_task(self): # {{{ defaultFontSize = None fig, _, suptitle = plot_vertical_section_comparison( - config, Time, z, field, refField, diff, self.sectionName, - colorbarLabel=self.unitsLabel, title=title, modelTitle=mainRunName, - refTitle=refTitle, 
diffTitle=diffTitle, xlabel=xLabel, - ylabel=yLabel, lineWidth=1, xArrayIsTime=True, + config, field, refField, diff, self.sectionName, xCoords=Time, + zCoord=z, colorbarLabel=self.unitsLabel, title=title, + modelTitle=mainRunName, refTitle=refTitle, diffTitle=diffTitle, + xlabels=xLabel, ylabel=yLabel, lineWidth=1, xCoordIsTime=True, movingAveragePoints=movingAverageMonths, calendar=self.calendar, firstYearXTicks=firstYearXTicks, yearStrideXTicks=yearStrideXTicks, yLim=yLim, invertYAxis=False, titleFontSize=titleFontSize, diff --git a/mpas_analysis/ocean/plot_transect_subtask.py b/mpas_analysis/ocean/plot_transect_subtask.py index c77a3c838..61c6f8cfa 100644 --- a/mpas_analysis/ocean/plot_transect_subtask.py +++ b/mpas_analysis/ocean/plot_transect_subtask.py @@ -24,6 +24,8 @@ from geometric_features import FeatureCollection +from mpas_tools.ocean.transects import get_outline_segments + from mpas_analysis.shared.plot import plot_vertical_section_comparison, \ savefig, add_inset @@ -36,8 +38,6 @@ from mpas_analysis.shared.constants import constants -from mpas_analysis.ocean.utility import nans_to_numpy_mask - class PlotTransectSubtask(AnalysisTask): # {{{ """ @@ -52,8 +52,8 @@ class PlotTransectSubtask(AnalysisTask): # {{{ transectName : str The name of the transect to plot - remapMpasClimatologySubtask : ``RemapMpasClimatologySubtask`` - The subtask for remapping the MPAS climatology that this subtask + computeTransectsSubtask : ``ComputeTransectsSubtask`` + The subtask for computing the MPAS climatology that this subtask will plot plotObs : bool, optional @@ -76,12 +76,6 @@ class PlotTransectSubtask(AnalysisTask): # {{{ mpasFieldName : str The name of the variable in the MPAS timeSeriesStatsMonthly output - obsFieldName : str - The name of the variable to use from the observations file - - observationTitleLabel : str - the title of the observations subplot - diffTitleLabel : str, optional the title of the difference subplot @@ -113,7 +107,7 @@ class 
PlotTransectSubtask(AnalysisTask): # {{{ # Xylar Asay-Davis, Greg Streletz def __init__(self, parentTask, season, transectName, fieldName, - remapMpasClimatologySubtask, plotObs=True, + computeTransectsSubtask, plotObs=True, controlConfig=None, horizontalBounds=None): # {{{ ''' @@ -135,8 +129,8 @@ def __init__(self, parentTask, season, transectName, fieldName, fieldName : str The name of the field to plot (for use in the subtask name only) - remapMpasClimatologySubtask : ``RemapMpasClimatologySubtask`` - The subtask for remapping the MPAS climatology that this subtask + computeTransectsSubtask : ``ComputeTransectsSubtask`` + The subtask for computing the MPAS climatology that this subtask will plot plotObs : bool, optional @@ -156,7 +150,7 @@ def __init__(self, parentTask, season, transectName, fieldName, self.season = season self.transectName = transectName - self.remapMpasClimatologySubtask = remapMpasClimatologySubtask + self.computeTransectsSubtask = computeTransectsSubtask self.plotObs = plotObs self.controlConfig = controlConfig if horizontalBounds is not None and len(horizontalBounds) == 2: @@ -178,13 +172,13 @@ def __init__(self, parentTask, season, transectName, fieldName, # this task should not run until the remapping subtasks are done, since # it relies on data from those subtasks - self.run_after(remapMpasClimatologySubtask) + self.run_after(computeTransectsSubtask) # }}} def set_plot_info(self, outFileLabel, fieldNameInTitle, mpasFieldName, refFieldName, refTitleLabel, unitsLabel, imageCaption, galleryGroup, groupSubtitle, groupLink, - galleryName, configSectionName, + galleryName, configSectionName, verticalBounds, diffTitleLabel='Model - Observations'): # {{{ ''' @@ -231,8 +225,13 @@ def set_plot_info(self, outFileLabel, fieldNameInTitle, mpasFieldName, configSectionName : str the name of the section where the color map and range is defined + verticalBounds : list + the min and max for the vertical axis, or an emtpy list if the + range automatically 
determined by matplotlib should be used + diffTitleLabel : str, optional the title of the difference subplot + ''' # Authors # ------- @@ -257,6 +256,10 @@ def set_plot_info(self, outFileLabel, fieldNameInTitle, mpasFieldName, self.thumbnailDescription = self.season self.configSectionName = configSectionName + if len(verticalBounds) == 0: + self.verticalBounds = None + else: + self.verticalBounds = verticalBounds # }}} def setup_and_check(self): # {{{ @@ -315,7 +318,7 @@ def run_task(self): # {{{ # first read the model climatology remappedFileName = \ - self.remapMpasClimatologySubtask.get_remapped_file_name( + self.computeTransectsSubtask.get_remapped_file_name( season=season, comparisonGridName=transectName) remappedModelClimatology = xr.open_dataset(remappedFileName) @@ -323,14 +326,14 @@ def run_task(self): # {{{ # now the observations or control run if self.plotObs: verticalComparisonGridName = \ - self.remapMpasClimatologySubtask.verticalComparisonGridName + self.computeTransectsSubtask.verticalComparisonGridName remappedFileName = \ - self.remapMpasClimatologySubtask.obsDatasets.get_out_file_name( + self.computeTransectsSubtask.obsDatasets.get_out_file_name( transectName, verticalComparisonGridName) remappedRefClimatology = xr.open_dataset(remappedFileName) - # if Time is an axis, take the appropriate avarage to get the + # if Time is an axis, take the appropriate average to get the # climatology if 'Time' in remappedRefClimatology.dims: monthValues = constants.monthDictionary[season] @@ -338,7 +341,7 @@ def run_task(self): # {{{ remappedRefClimatology, monthValues, maskVaries=True) elif self.controlConfig is not None: - climatologyName = self.remapMpasClimatologySubtask.climatologyName + climatologyName = self.computeTransectsSubtask.climatologyName remappedFileName = \ get_remapped_mpas_climatology_file_name( self.controlConfig, season=season, @@ -377,22 +380,40 @@ def _plot_transect(self, remappedModelClimatology, remappedRefClimatology): mainRunName = 
config.get('runs', 'mainRunName') - # broadcast x and z to have the same dimensions - x, z = xr.broadcast(remappedModelClimatology.x, - remappedModelClimatology.z) + remap = self.computeTransectsSubtask.remap + + if remap: + x = 1e-3*remappedModelClimatology.x + z = remappedModelClimatology.z + + # set lat and lon in case we want to plot versus these quantities + lat = remappedModelClimatology.lat + lon = remappedModelClimatology.lon + + if len(lat.dims) > 1: + lat = lat[:, 0] + + if len(lon.dims) > 1: + lon = lon[:, 0] + + # z is masked out with NaNs in some locations (where there is land) + # but this makes pcolormesh unhappy so we'll zero out those + # locations + z = z.where(z.notnull(), 0.) + + else: + x = 1e-3*remappedModelClimatology.dNode + z = None + lon = remappedModelClimatology.lonNode + lat = remappedModelClimatology.latNode - # set lat and lon in case we want to plot versus these quantities - lat = remappedModelClimatology.lat - lon = remappedModelClimatology.lon + remappedModelClimatology['dNode'] = x - # convert x, z, lat, and lon to numpy arrays; make a copy because - # they are sometimes read-only (not sure why) - x = x.values.copy().transpose() - z = z.values.copy().transpose() - lat = lat.values.copy().transpose() - lon = lon.values.copy().transpose() - self.lat = lat - self.lon = lon + # flatten the x, lon and lat arrays because this is what + # vertical_section is expecting + x = xr.DataArray(data=x.values.ravel(), dims=('nx',)) + lon = xr.DataArray(data=lon.values.ravel(), dims=('nx',)) + lat = xr.DataArray(data=lat.values.ravel(), dims=('nx',)) # This will do strange things at the antemeridian but there's little # we can do about that. 
@@ -400,13 +421,14 @@ def _plot_transect(self, remappedModelClimatology, remappedRefClimatology): if self.horizontalBounds is not None: mask = numpy.logical_and( - remappedModelClimatology.x.values >= self.horizontalBounds[0], - remappedModelClimatology.x.values <= self.horizontalBounds[1]) - inset_lon = lon_pm180[mask] - inset_lat = lat[mask] + x.values >= self.horizontalBounds[0], + x.values <= self.horizontalBounds[1]) + inset_lon = lon_pm180.values[mask] + inset_lat = lat.values[mask] else: - inset_lon = lon_pm180 - inset_lat = lat + inset_lon = lon_pm180.values + inset_lat = lat.values + fc = FeatureCollection() fc.add_feature( {"type": "Feature", @@ -419,25 +441,19 @@ def _plot_transect(self, remappedModelClimatology, remappedRefClimatology): "type": "LineString", "coordinates": list(map(list, zip(inset_lon, inset_lat)))}}) - # z is masked out with NaNs in some locations (where there is land) but - # this makes pcolormesh unhappy so we'll zero out those locations - z[numpy.isnan(z)] = 0. 
+ modelOutput = remappedModelClimatology[self.mpasFieldName] - modelOutput = nans_to_numpy_mask( - remappedModelClimatology[self.mpasFieldName].values) - modelOutput = modelOutput.transpose() + if remap: + triangulation_args = None + else: + triangulation_args = self._get_ds_triangulation( + remappedModelClimatology) if remappedRefClimatology is None: refOutput = None bias = None else: refOutput = remappedRefClimatology[self.refFieldName] - dims = refOutput.dims - refOutput = nans_to_numpy_mask(refOutput.values) - if dims[1] != 'nPoints': - assert(dims[0] == 'nPoints') - refOutput = refOutput.transpose() - bias = modelOutput - refOutput filePrefix = self.filePrefix @@ -446,8 +462,9 @@ def _plot_transect(self, remappedModelClimatology, remappedRefClimatology): self.fieldNameInTitle, season, self.startYear, self.endYear) - xLabel = 'Distance [km]' - yLabel = 'Depth [m]' + xs = [x] + xLabels = ['Distance (km)'] + yLabel = 'Depth (m)' # define the axis labels and the data to use for the upper # x axis or axes, if such additional axes have been requested @@ -458,45 +475,50 @@ def _plot_transect(self, remappedModelClimatology, remappedRefClimatology): 'transects', 'upperXAxisTickLabelPrecision') - self._set_third_x_axis_to_none() - - if upperXAxes == 'neither': - self._set_second_x_axis_to_none() - elif upperXAxes == 'lat': - self._set_second_x_axis_to_latitude() - elif upperXAxes == 'lon': - self._set_second_x_axis_to_longitude() - elif upperXAxes == 'both': - self._set_second_x_axis_to_longitude() - self._set_third_x_axis_to_latitude() - elif upperXAxes == 'greatestExtent': - if self._greatest_extent(lat, lon): - self._set_second_x_axis_to_latitude() + add_lat = False + add_lon = False + + if upperXAxes in ['lat', 'both']: + add_lat = True + if upperXAxes in ['lon', 'both']: + add_lon = True + if upperXAxes == 'greatestExtent': + if self._lat_greater_extent(lat, lon): + add_lat = True else: - self._set_second_x_axis_to_longitude() + add_lon = True elif upperXAxes == 
'strictlyMonotonic': - if self._strictly_monotonic(lat, lon): - self._set_second_x_axis_to_latitude() + if self._strictly_monotonic(lat): + add_lat = True + elif self._strictly_monotonic(lon): + add_lon = True else: - self._set_second_x_axis_to_longitude() + raise ValueError('Neither lat nor lon is strictly monotonic.') elif upperXAxes == 'mostMonotonic': - if self._most_monotonic(lat, lon): - self._set_second_x_axis_to_latitude() + if self._lat_most_monotonic(lat, lon): + add_lat = True else: - self._set_second_x_axis_to_longitude() + add_lon = True elif upperXAxes == 'mostStepsInSameDirection': - if self._most_steps_in_same_direction(lat, lon): - self._set_second_x_axis_to_latitude() + if self._lat_most_steps_in_same_direction(lat, lon): + add_lat = True else: - self._set_second_x_axis_to_longitude() + add_lon = True elif upperXAxes == 'fewestDirectionChanges': - if self._fewest_direction_changes(lat, lon): - self._set_second_x_axis_to_latitude() + if self._lat_fewest_direction_changes(lat, lon): + add_lat = True else: - self._set_second_x_axis_to_longitude() + add_lon = True else: raise ValueError('invalid option for upperXAxes') + if add_lat: + xs.append(lat) + xLabels.append('Latitude') + if add_lon: + xs.append(lon) + xLabels.append('Longitude') + # get the parameters determining what type of plot to use, # what line styles and line colors to use, and whether and how # to label contours @@ -542,28 +564,28 @@ def _plot_transect(self, remappedModelClimatology, remappedRefClimatology): fig, axes, suptitle = plot_vertical_section_comparison( config, - x, - z, modelOutput, refOutput, bias, configSectionName, + xCoords=xs, + zCoord=z, + triangulation_args=triangulation_args, colorbarLabel=self.unitsLabel, - xlabel=xLabel, + xlabels=xLabels, ylabel=yLabel, title=title, modelTitle='{}'.format(mainRunName), refTitle=self.refTitleLabel, diffTitle=self.diffTitleLabel, - secondXAxisData=self.secondXAxisData, - secondXAxisLabel=self.secondXAxisLabel, - 
thirdXAxisData=self.thirdXAxisData, - thirdXAxisLabel=self.thirdXAxisLabel, numUpperTicks=numUpperTicks, upperXAxisTickLabelPrecision=upperXAxisTickLabelPrecision, invertYAxis=False, - backgroundColor='#918167', + backgroundColor='#d9bf96', + invalidColor='#d9bf96', + outlineValid=False, xLim=self.horizontalBounds, + yLim=self.verticalBounds, compareAsContours=compareAsContours, lineStyle=contourLineStyle, lineColor=contourLineColor, @@ -579,6 +601,21 @@ def _plot_transect(self, remappedModelClimatology, remappedRefClimatology): pos = suptitle.get_position() suptitle.set_position((pos[0] - 0.05, pos[1])) + if not remap: + # add open ocean or ice shelves + d = remappedModelClimatology.dNode.values.ravel() + ssh = remappedModelClimatology.ssh.values.ravel() + if 'landIceFraction' in remappedModelClimatology: + # plot ice in light blue + color = '#e1eaf7' + else: + # plot open ocean in white + color = 'white' + mask = ssh < 0. + for ax in axes: + ax.fill_between(d, ssh, numpy.zeros(ssh.shape), where=mask, + interpolate=True, color=color) + # make a red start axis and green end axis to correspond to the dots # in the inset for ax in axes: @@ -607,31 +644,7 @@ def _plot_transect(self, remappedModelClimatology, remappedRefClimatology): # }}} - def _set_second_x_axis_to_latitude(self): - self.secondXAxisData = self.lat - self.secondXAxisLabel = 'Latitude' - - def _set_second_x_axis_to_longitude(self): - self.secondXAxisData = self.lon - self.secondXAxisLabel = 'Longitude' - - def _set_second_x_axis_to_none(self): - self.secondXAxisData = None - self.secondXAxisLabel = None - - def _set_third_x_axis_to_latitude(self): - self.thirdXAxisData = self.lat - self.thirdXAxisLabel = 'Latitude' - - def _set_third_x_axis_to_longitude(self): - self.thirdXAxisData = self.lon - self.thirdXAxisLabel = 'Longitude' - - def _set_third_x_axis_to_none(self): - self.thirdXAxisData = None - self.thirdXAxisLabel = None - - def _greatest_extent(self, lat, lon): + def _lat_greater_extent(self, 
lat, lon): # {{{ """ Returns True if lat has a greater extent (in degrees) than lon (or the @@ -639,7 +652,7 @@ def _greatest_extent(self, lat, lon): Authors ------- - Greg Streletz + Greg Streletz, Xylar Asay-Davis """ lat_extent = numpy.max(lat) - numpy.min(lat) @@ -709,41 +722,31 @@ def _greatest_extent(self, lat, lon): return False # }}} - def _strictly_monotonic(self, lat, lon): + def _strictly_monotonic(self, coord): # {{{ - """ - Returns True if lat is strictly monotonic; returns false if lon is - strictly monotonic and lat is not strictly monotonic; throws an error - if neither are strictly monotonic. + """Whether the coordinate is strictly monotonic""" + # Authors + # ------- + # Greg Streletz, Xylar Asay-Davis - Authors - ------- - Greg Streletz - """ - lat_diff = numpy.diff(lat) - if numpy.all(lat_diff > 0) or numpy.all(lat_diff < 0): - return True - lon_diff = numpy.diff(lon) - lon_diff = numpy.where(lon_diff > 180, lon_diff - 360, lon_diff) - lon_diff = numpy.where(lon_diff < -180, lon_diff + 360, lon_diff) - if numpy.all(lon_diff > 0) or numpy.all(lon_diff < 0): - return False - else: - raise ValueError('neither input array is strictly monotonic') + coord_diff = numpy.diff(coord.values) + coord_diff = numpy.where(coord_diff > 180, coord_diff - 360, coord_diff) + coord_diff = numpy.where(coord_diff < -180, coord_diff + 360, + coord_diff) + return numpy.all(coord_diff > 0) or numpy.all(coord_diff < 0) # }}} - def _most_monotonic(self, lat, lon): + def _lat_most_monotonic(self, lat, lon): # {{{ """ Returns True if lat is "more monotonic" than lon in terms of the difference between the total number of degrees traversed and the net number of degrees traversed (or if both are equally as monotonic in this sense), and False otherwise. 
- - Authors - ------- - Greg Streletz """ + # Authors + # ------- + # Greg Streletz, Xylar Asay-Davis lat_diff = numpy.diff(lat) lat_score = numpy.sum(numpy.fabs(lat_diff)) - abs(numpy.sum(lat_diff)) @@ -758,17 +761,17 @@ def _most_monotonic(self, lat, lon): return False # }}} - def _most_steps_in_same_direction(self, lat, lon): + def _lat_most_steps_in_same_direction(self, lat, lon): # {{{ """ Returns True if lat is has more steps in the same direction (either steps that increase the latitude or steps that decrease the latitude) than lon (or the same number as lon), and False otherwise. - - Authors - ------- - Greg Streletz """ + # Authors + # ------- + # Greg Streletz, Xylar Asay-Davis + lat_changes = numpy.diff(lat) lat_changes = lat_changes[lat_changes != 0.0] # ignore flat regions lat_changedirs = lat_changes / numpy.fabs(lat_changes) @@ -793,7 +796,7 @@ def _most_steps_in_same_direction(self, lat, lon): return False # }}} - def _fewest_direction_changes(self, lat, lon): + def _lat_fewest_direction_changes(self, lat, lon): # {{{ """ Returns True if lat is has fewer changes in direction (from increasing @@ -802,7 +805,7 @@ def _fewest_direction_changes(self, lat, lon): Authors ------- - Greg Streletz + Greg Streletz, Xylar Asay-Davis """ lat_changes = numpy.diff(lat) lat_changes = lat_changes[lat_changes != 0.0] # ignore flat regions @@ -828,6 +831,24 @@ def _fewest_direction_changes(self, lat, lon): return False # }}} + def _get_ds_triangulation(self, dsTransectTriangles): + """get matplotlib Triangulation from triangulation dataset""" + + nTransectTriangles = dsTransectTriangles.sizes['nTransectTriangles'] + dNode = dsTransectTriangles.dNode.isel( + nSegments=dsTransectTriangles.segmentIndices, + nHorizBounds=dsTransectTriangles.nodeHorizBoundsIndices) + x = dNode.values.ravel() + + zTransectNode = dsTransectTriangles.zTransectNode + y = zTransectNode.values.ravel() + + tris = numpy.arange(3 * nTransectTriangles).reshape( + (nTransectTriangles, 3)) + 
triangulation_args = dict(x=x, y=y, triangles=tris) + + return triangulation_args + # }}} diff --git a/mpas_analysis/ocean/regional_ts_diagrams.py b/mpas_analysis/ocean/regional_ts_diagrams.py index 90fd890c7..2469499bd 100644 --- a/mpas_analysis/ocean/regional_ts_diagrams.py +++ b/mpas_analysis/ocean/regional_ts_diagrams.py @@ -660,7 +660,7 @@ def _write_mpas_t_s(self, config): # {{{ ds = ds[variableList] ds['zMid'] = compute_zmid(dsRestart.bottomDepth, - dsRestart.maxLevelCell, + dsRestart.maxLevelCell-1, dsRestart.layerThickness) ds['volume'] = (dsRestart.areaCell * diff --git a/mpas_analysis/ocean/remap_depth_slices_subtask.py b/mpas_analysis/ocean/remap_depth_slices_subtask.py index 51603b757..f1cd082c7 100644 --- a/mpas_analysis/ocean/remap_depth_slices_subtask.py +++ b/mpas_analysis/ocean/remap_depth_slices_subtask.py @@ -117,7 +117,7 @@ def run_task(self): # {{{ depthNames = [str(depth) for depth in self.depths] - zMid = compute_zmid(ds.bottomDepth, ds.maxLevelCell, + zMid = compute_zmid(ds.bottomDepth, ds.maxLevelCell-1, ds.layerThickness) nVertLevels = zMid.shape[1] diff --git a/mpas_analysis/ocean/sose_transects.py b/mpas_analysis/ocean/sose_transects.py index 46d62e5a3..31e7caf90 100644 --- a/mpas_analysis/ocean/sose_transects.py +++ b/mpas_analysis/ocean/sose_transects.py @@ -86,6 +86,8 @@ def __init__(self, config, mpasClimatologyTask, controlConfig=None): verticalComparisonGrid = config.getExpression( sectionName, 'verticalComparisonGrid', usenumpyfunc=True) + verticalBounds = config.getExpression(sectionName, 'verticalBounds') + longitudes = sorted(config.getExpression(sectionName, 'longitudes', usenumpyfunc=True)) @@ -134,7 +136,7 @@ def __init__(self, config, mpasClimatologyTask, controlConfig=None): if field['mpas'] != 'velMag'] transectCollectionName = 'SOSE_transects' - if horizontalResolution != 'obs': + if horizontalResolution not in ['obs', 'mpas']: transectCollectionName = '{}_{}km'.format(transectCollectionName, horizontalResolution) @@ 
-203,7 +205,8 @@ def __init__(self, config, mpasClimatologyTask, controlConfig=None): groupLink='sose_transects', galleryName=field['titleName'], configSectionName='sose{}Transects'.format( - fieldPrefixUpper)) + fieldPrefixUpper), + verticalBounds=verticalBounds) self.add_subtask(subtask) # }}} @@ -378,6 +381,11 @@ def combine_observations(self): # {{{ dsObs.velMag.attrs['units'] = 'm s$^{-1}$' dsObs.velMag.attrs['description'] = description + # make a copy of the top set of data at z=0 + dsObs = xr.concat((dsObs.isel(z=0), dsObs), dim='z') + z = dsObs.z.values + z[0] = 0. + dsObs['z'] = ('z', z) write_netcdf(dsObs, combinedFileName) print(' Done.') @@ -413,7 +421,7 @@ def build_observational_dataset(self, fileName, transectName): # {{{ dsObs = dsObs.sel(method=str('nearest'), lon=lon) lon = dsObs.lon.values - # do some dropping and renaming so we end up wiht the right coordinates + # do some dropping and renaming so we end up with the right coordinates # and dimensions dsObs = dsObs.rename({'lat': 'nPoints', 'z': 'nz'}) dsObs['lat'] = dsObs.nPoints diff --git a/mpas_analysis/ocean/streamfunction_moc.py b/mpas_analysis/ocean/streamfunction_moc.py index 7617df95c..c192e484e 100644 --- a/mpas_analysis/ocean/streamfunction_moc.py +++ b/mpas_analysis/ocean/streamfunction_moc.py @@ -755,38 +755,28 @@ def run_task(self): # {{{ maxLat = config.getExpression('streamfunctionMOC{}'.format(region), 'latBinMax') indLat = np.logical_and(x >= minLat, x <= maxLat) - x = x[indLat] - regionMOC = regionMOC[:, indLat] + x = x.where(indLat, drop=True) + regionMOC = regionMOC.where(indLat, drop=True) if self.controlConfig is None: refRegionMOC = None diff = None else: # the coords of the ref MOC won't necessarily match this MOC # so we need to interpolate - nz, nx = regionMOC.shape - refNz, refNx = refMOC[region].shape - temp = np.zeros((refNz, nx)) - for zIndex in range(refNz): - temp[zIndex, :] = np.interp( - x, refLat[region], refMOC[region][zIndex, :], - left=np.nan, right=np.nan) 
- refRegionMOC = np.zeros((nz, nx)) - for xIndex in range(nx): - refRegionMOC[:, xIndex] = np.interp( - depth, refDepth, temp[:, xIndex], - left=np.nan, right=np.nan) + refRegionMOC = _interp_moc(x, z, regionMOC, refLat[region], + refDepth, refMOC[region]) diff = regionMOC - refRegionMOC plot_vertical_section_comparison( - config, x, z, regionMOC, refRegionMOC, diff, + config, regionMOC, refRegionMOC, diff, xCoords=x, zCoord=z, colorMapSectionName='streamfunctionMOC{}'.format(region), colorbarLabel=colorbarLabel, title=title, modelTitle=mainRunName, refTitle=refTitle, diffTitle=diffTitle, - xlabel=xLabel, + xlabels=xLabel, ylabel=yLabel, movingAveragePoints=movingAveragePointsClimatological, maxTitleLength=70) @@ -820,15 +810,13 @@ def _load_moc(self, config): # {{{ endYear) # Read from file - ncFile = netCDF4.Dataset(inputFileName, mode='r') - depth = ncFile.variables['depth'][:] + ds = xr.open_dataset(inputFileName) + depth = ds['depth'] lat = {} moc = {} for region in self.regionNames: - lat[region] = ncFile.variables['lat{}'.format(region)][:] - moc[region] = \ - ncFile.variables['moc{}'.format(region)][:, :] - ncFile.close() + lat[region] = ds['lat{}'.format(region)] + moc[region] = ds['moc{}'.format(region)] return depth, lat, moc # }}} # }}} @@ -1606,4 +1594,29 @@ def _compute_moc(latBins, nz, latCell, regionCellMask, transportZ, mocTop = mocTop.T return mocTop # }}} + +def _interp_moc(x, z, regionMOC, refX, refZ, refMOC): + x = x.values + z = z.values + dims = regionMOC.dims + regionMOC = regionMOC.values + refX = refX.values + refZ = refZ.values + refMOC = refMOC.values + + nz, nx = regionMOC.shape + refNz, refNx = refMOC.shape + temp = np.zeros((refNz, nx)) + for zIndex in range(refNz): + temp[zIndex, :] = np.interp( + x, refX, refMOC[zIndex, :], + left=np.nan, right=np.nan) + refRegionMOC = np.zeros((nz, nx)) + for xIndex in range(nx): + refRegionMOC[:, xIndex] = np.interp( + z, refZ, temp[:, xIndex], + left=np.nan, right=np.nan) + + return 
xr.DataArray(dims=dims, data=refRegionMOC) + # vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python diff --git a/mpas_analysis/ocean/time_series_ocean_regions.py b/mpas_analysis/ocean/time_series_ocean_regions.py index 5b732245e..bef0375b2 100644 --- a/mpas_analysis/ocean/time_series_ocean_regions.py +++ b/mpas_analysis/ocean/time_series_ocean_regions.py @@ -313,7 +313,7 @@ def run_task(self): # {{{ config_zmax = None dsRestart = xarray.open_dataset(restartFileName).isel(Time=0) - zMid = compute_zmid(dsRestart.bottomDepth, dsRestart.maxLevelCell, + zMid = compute_zmid(dsRestart.bottomDepth, dsRestart.maxLevelCell-1, dsRestart.layerThickness) areaCell = dsRestart.areaCell if 'landIceMask' in dsRestart: diff --git a/mpas_analysis/ocean/time_series_ohc_anomaly.py b/mpas_analysis/ocean/time_series_ohc_anomaly.py index 42a2c9128..cb38cd2fb 100644 --- a/mpas_analysis/ocean/time_series_ohc_anomaly.py +++ b/mpas_analysis/ocean/time_series_ohc_anomaly.py @@ -63,7 +63,7 @@ def __init__(self, config, mpasTimeSeriesTask, controlConfig=None): config=config, taskName='timeSeriesOHCAnomaly', componentName='ocean', - tags=['timeSeries', 'ohc', 'publicObs']) + tags=['timeSeries', 'ohc', 'publicObs', 'anomaly']) sectionName = 'timeSeriesOHCAnomaly' regionNames = config.getExpression(sectionName, 'regions') diff --git a/mpas_analysis/ocean/time_series_salinity_anomaly.py b/mpas_analysis/ocean/time_series_salinity_anomaly.py index 3c474ee9b..36fdb75ee 100644 --- a/mpas_analysis/ocean/time_series_salinity_anomaly.py +++ b/mpas_analysis/ocean/time_series_salinity_anomaly.py @@ -50,7 +50,7 @@ def __init__(self, config, mpasTimeSeriesTask): # {{{ config=config, taskName='timeSeriesSalinityAnomaly', componentName='ocean', - tags=['timeSeries', 'salinity', 'publicObs']) + tags=['timeSeries', 'salinity', 'publicObs', 'anomaly']) sectionName = 'hovmollerSalinityAnomaly' regionNames = config.getExpression(sectionName, 'regions') diff --git a/mpas_analysis/ocean/time_series_ssh_anomaly.py 
b/mpas_analysis/ocean/time_series_ssh_anomaly.py index 094ccdf00..b76a30957 100644 --- a/mpas_analysis/ocean/time_series_ssh_anomaly.py +++ b/mpas_analysis/ocean/time_series_ssh_anomaly.py @@ -71,7 +71,7 @@ def __init__(self, config, mpasTimeSeriesTask, controlConfig): config=config, taskName='timeSeriesSSHAnomaly', componentName='ocean', - tags=['timeSeries', 'ssh', 'publicObs']) + tags=['timeSeries', 'ssh', 'publicObs', 'anomaly']) self.controlConfig = controlConfig self.filePrefix = None diff --git a/mpas_analysis/ocean/time_series_temperature_anomaly.py b/mpas_analysis/ocean/time_series_temperature_anomaly.py index 8438dbd77..6d7febc42 100644 --- a/mpas_analysis/ocean/time_series_temperature_anomaly.py +++ b/mpas_analysis/ocean/time_series_temperature_anomaly.py @@ -50,7 +50,7 @@ def __init__(self, config, mpasTimeSeriesTask): # {{{ config=config, taskName='timeSeriesTemperatureAnomaly', componentName='ocean', - tags=['timeSeries', 'temperature', 'publicObs']) + tags=['timeSeries', 'temperature', 'publicObs', 'anomaly']) sectionName = 'hovmollerTemperatureAnomaly' regionNames = config.getExpression(sectionName, 'regions') diff --git a/mpas_analysis/ocean/utility.py b/mpas_analysis/ocean/utility.py index 468d3bdd8..9d59b3643 100644 --- a/mpas_analysis/ocean/utility.py +++ b/mpas_analysis/ocean/utility.py @@ -33,7 +33,7 @@ def compute_zmid(bottomDepth, maxLevelCell, layerThickness): # {{{ the depth of the ocean bottom (positive) maxLevelCell : ``xarray.DataArray`` - the 1-based vertical index of the bottom of the ocean + the 0-based vertical index of the bottom of the ocean layerThickness : ``xarray.DataArray`` the thickness of MPAS-Ocean layers (possibly as a function of time) @@ -54,7 +54,7 @@ def compute_zmid(bottomDepth, maxLevelCell, layerThickness): # {{{ xarray.DataArray.from_dict({'dims': ('nVertLevels',), 'data': numpy.arange(nVertLevels)}) - layerThickness = layerThickness.where(vertIndex < maxLevelCell) + layerThickness = layerThickness.where(vertIndex 
<= maxLevelCell) thicknessSum = layerThickness.sum(dim='nVertLevels') thicknessCumSum = layerThickness.cumsum(dim='nVertLevels') diff --git a/mpas_analysis/ocean/woce_transects.py b/mpas_analysis/ocean/woce_transects.py index c9434786b..f19c7d4b3 100644 --- a/mpas_analysis/ocean/woce_transects.py +++ b/mpas_analysis/ocean/woce_transects.py @@ -77,6 +77,8 @@ def __init__(self, config, mpasClimatologyTask, controlConfig=None): verticalComparisonGrid = config.getExpression( sectionName, 'verticalComparisonGrid', usenumpyfunc=True) + verticalBounds = config.getExpression(sectionName, 'verticalBounds') + horizontalBounds = config.getExpression( sectionName, 'horizontalBounds') @@ -115,7 +117,7 @@ def __init__(self, config, mpasClimatologyTask, controlConfig=None): 'units': r'kg m$^{-3}$'}} transectCollectionName = 'WOCE_transects' - if horizontalResolution != 'obs': + if horizontalResolution not in ['obs', 'mpas']: transectCollectionName = '{}_{}km'.format(transectCollectionName, horizontalResolution) @@ -187,7 +189,8 @@ def __init__(self, config, mpasClimatologyTask, controlConfig=None): groupLink='woce', galleryName=titleName, configSectionName='woce{}Transects'.format( - fieldNameUpper)) + fieldNameUpper), + verticalBounds=verticalBounds) self.add_subtask(subtask) # }}} diff --git a/mpas_analysis/polar_regions.cfg b/mpas_analysis/polar_regions.cfg index 54a1fdd06..c8aa4abd3 100644 --- a/mpas_analysis/polar_regions.cfg +++ b/mpas_analysis/polar_regions.cfg @@ -133,6 +133,22 @@ regionNames = ["Atlantic_Basin", "Pacific_Basin", "Indian_Basin", longitudes = [318., 325., 0., 75., 117., 145., 160., 184., 187., 198., 253., 280., 288.] +# The approximate horizontal resolution (in km) of each transect. Latitude/ +# longitude between observation points will be subsampled at this interval. +# Use 'obs' to indicate no subsampling. 
Use 'mpas' to indicate plotting of +# model data on the native grid, in which case comparison with observations +# will take place on the observation grid. +horizontalResolution = mpas + +# The name of the vertical comparison grid. Valid values are 'mpas' for the +# MPAS vertical grid, 'obs' to use the locations of observations or +# any other name if the vertical grid is defined by 'verticalComparisonGrid' +verticalComparisonGridName = mpas + +# A range for the y axis (if any) +verticalBounds = [-1500., 0.] + + [soseTemperatureTransects] # options related to plotting SOSE transects of potential temperature @@ -178,6 +194,18 @@ colorbarLevelsDifference = [-0.5, -0.2, -0.1, -0.05, -0.02, 0, 0.02, 0.05, ## options related to plotting model vs. World Ocean Circulation Experiment ## (WOCE) transects. +# The approximate horizontal resolution (in km) of each transect. Latitude/ +# longitude between observation points will be subsampled at this interval. +# Use 'obs' to indicate no subsampling. Use 'mpas' to indicate plotting of +# model data on the native grid, in which case comparison with observations +# will take place on the observation grid. +horizontalResolution = mpas + +# The name of the vertical comparison grid. Valid values are 'mpas' for the +# MPAS vertical grid, 'obs' to use the locations of observations or +# any other name if the vertical grid is defined by 'verticalComparisonGrid' +verticalComparisonGridName = mpas + # Horizontal bounds of the plot (in km), or an empty list for automatic bounds # The bounds are a 2-element list of the minimum and maximum distance along the # transect @@ -185,6 +213,9 @@ horizontalBounds = {'WOCE_A21': [630., 830.], 'WOCE_A23': [0., 200.], 'WOCE_A12': [4620., 4820.]} +# A range for the y axis (if any) +verticalBounds = [-4000., 0.] 
+ [woceTemperatureTransects] ## options related to plotting WOCE transects of potential temperature diff --git a/mpas_analysis/shared/climatology/__init__.py b/mpas_analysis/shared/climatology/__init__.py index 175cee269..0f9c73d17 100644 --- a/mpas_analysis/shared/climatology/__init__.py +++ b/mpas_analysis/shared/climatology/__init__.py @@ -4,7 +4,8 @@ get_unmasked_mpas_climatology_directory, \ get_unmasked_mpas_climatology_file_name, \ get_masked_mpas_climatology_file_name, \ - get_remapped_mpas_climatology_file_name + get_remapped_mpas_climatology_file_name, \ + get_climatology_op_directory from mpas_analysis.shared.climatology.mpas_climatology_task import \ MpasClimatologyTask diff --git a/mpas_analysis/shared/climatology/climatology.py b/mpas_analysis/shared/climatology/climatology.py index fab6fd57a..d3d25aa24 100644 --- a/mpas_analysis/shared/climatology/climatology.py +++ b/mpas_analysis/shared/climatology/climatology.py @@ -585,7 +585,7 @@ def get_remapped_mpas_climatology_file_name(config, season, componentName, comparisonGridName) comparisonFullMeshName = comparisonDescriptor.meshName else: - comparisonFullMeshName = comparisonGridName + comparisonFullMeshName = comparisonGridName.replace(' ', '_') stageDirectory = '{}/remapped'.format(climatologyOpDirectory) diff --git a/mpas_analysis/shared/climatology/mpas_climatology_task.py b/mpas_analysis/shared/climatology/mpas_climatology_task.py index fbe1dcc6f..4529c2e95 100644 --- a/mpas_analysis/shared/climatology/mpas_climatology_task.py +++ b/mpas_analysis/shared/climatology/mpas_climatology_task.py @@ -19,6 +19,7 @@ import dask import multiprocessing from multiprocessing.pool import ThreadPool +import glob from mpas_analysis.shared.analysis_task import AnalysisTask @@ -501,20 +502,24 @@ def _compute_climatologies_with_ncclimo(self, inDirectory, outDirectory, if len(seasons) == 0: seasons = ['none'] + workDir = os.getcwd() + os.chdir(inDirectory) + + inFiles = sorted(glob.glob(f'{self.ncclimoModel}*')) + 
args = ['ncclimo', '--no_stdin', '-4', '--clm_md=mth', '-a', 'sdd', - '-m', self.ncclimoModel, + '-P', self.ncclimoModel, '-p', parallelMode, '-j', '{}'.format(self.subprocessCount), '-v', ','.join(variableList), '--seasons={}'.format(','.join(seasons)), '-s', '{:04d}'.format(self.startYear), '-e', '{:04d}'.format(self.endYear), - '-i', inDirectory, - '-o', outDirectory] + '-o', outDirectory] + inFiles if remapper is not None: args.extend(['-r', remapper.mappingFileName]) @@ -547,6 +552,8 @@ def _compute_climatologies_with_ncclimo(self, inDirectory, outDirectory, raise subprocess.CalledProcessError(process.returncode, ' '.join(args)) + os.chdir(workDir) + # }}} # }}} diff --git a/mpas_analysis/shared/climatology/ref_year_mpas_climatology_task.py b/mpas_analysis/shared/climatology/ref_year_mpas_climatology_task.py index cc9d7aa3b..709bc96d5 100644 --- a/mpas_analysis/shared/climatology/ref_year_mpas_climatology_task.py +++ b/mpas_analysis/shared/climatology/ref_year_mpas_climatology_task.py @@ -75,6 +75,8 @@ def __init__(self, config, componentName, taskName=None): # {{{ config=new_config, componentName=componentName, taskName=taskName) + + self.tags.append('anomaly') # }}} def get_start_and_end(self): # {{{ diff --git a/mpas_analysis/shared/climatology/remap_mpas_climatology_subtask.py b/mpas_analysis/shared/climatology/remap_mpas_climatology_subtask.py index 8f4653206..d2236ea37 100644 --- a/mpas_analysis/shared/climatology/remap_mpas_climatology_subtask.py +++ b/mpas_analysis/shared/climatology/remap_mpas_climatology_subtask.py @@ -537,7 +537,7 @@ def _mask_climatologies(self, season, dsMask): # {{{ # add valid mask as a variable, useful for remapping later climatology['validMask'] = \ - xr.DataArray(numpy.ones(climatology.dims['nCells']), + xr.DataArray(numpy.ones(climatology.sizes['nCells']), dims=['nCells']) # mask the data set for variableName in self.variableList: diff --git a/mpas_analysis/shared/html/pages.py b/mpas_analysis/shared/html/pages.py index 
5f9607fa4..caf46b641 100644 --- a/mpas_analysis/shared/html/pages.py +++ b/mpas_analysis/shared/html/pages.py @@ -87,7 +87,17 @@ def generate_html(config, analyses, controlConfig, customConfigFiles): page.generate() - print("Done.") + url = None + if config.has_section('web_portal'): + section = config['web_portal'] + base_path = section.get('base_path') + base_url = section.get('base_url') + html_dir = config.get('output', 'htmlSubdirectory') + if html_dir.startswith(base_path): + url = base_url + html_dir[len(base_path):] + print(f'Web page: {url}') + if url is None: + print("Done.") # }}} diff --git a/mpas_analysis/shared/plot/inset.py b/mpas_analysis/shared/plot/inset.py index 929176264..aed9cef60 100644 --- a/mpas_analysis/shared/plot/inset.py +++ b/mpas_analysis/shared/plot/inset.py @@ -8,9 +8,9 @@ # Additional copyright and license information can be found in the LICENSE file # distributed with this code, or at # https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE -''' +""" Functions for plotting inset maps in plots (e.g. for transects) -''' +""" # Authors # ------- # Xylar Asay-Davis @@ -31,7 +31,7 @@ def add_inset(fig, fc, latlonbuffer=45., polarbuffer=5., width=1.0, height=1.0, lowerleft=None, xbuffer=None, ybuffer=None, maxlength=1.): - ''' + """ Plots an inset map showing the location of a transect or polygon. Shapes are plotted on a polar grid if they are entirely poleward of +/-50 deg. latitude and with a lat/lon grid if not. 
@@ -71,7 +71,7 @@ def add_inset(fig, fc, latlonbuffer=45., polarbuffer=5., width=1.0, ------- inset : ``matplotlib.axes.Axes`` The new inset axis - ''' + """ # Authors # ------- # Xylar Asay-Davis @@ -170,13 +170,14 @@ def add_inset(fig, fc, latlonbuffer=45., polarbuffer=5., width=1.0, def _set_circular_boundary(ax): - '''Set the boundary of the given axis to be circular (for a polar plot)''' + """Set the boundary of the given axis to be circular (for a polar plot)""" # Compute a circle in axes coordinates, which we can use as a boundary # for the map. We can pan/zoom as much as we like - the boundary will be # permanently circular. theta = numpy.linspace(0, 2*numpy.pi, 100) - center, radius = [0.5, 0.5], 0.5 + center = numpy.array([0.5, 0.5]) + radius = 0.5 verts = numpy.vstack([numpy.sin(theta), numpy.cos(theta)]).T circle = matplotlib.path.Path(verts * radius + center) @@ -184,12 +185,12 @@ def _set_circular_boundary(ax): def _get_bounds(fc): - '''Compute the lon/lat bounding box for all transects and regions''' + """Compute the lon/lat bounding box for all transects and regions""" bounds = shapely.geometry.GeometryCollection() for feature in fc.features: shape = shapely.geometry.shape(feature['geometry']) shape_bounds = shapely.geometry.box(*shape.bounds) - bounds = shapely.geometry.box(*(bounds.union(shape_bounds).bounds)) + bounds = shapely.geometry.box(*bounds.union(shape_bounds).bounds) return bounds.bounds diff --git a/mpas_analysis/shared/plot/vertical_section.py b/mpas_analysis/shared/plot/vertical_section.py index d6831d36f..6597c420b 100644 --- a/mpas_analysis/shared/plot/vertical_section.py +++ b/mpas_analysis/shared/plot/vertical_section.py @@ -21,8 +21,8 @@ import matplotlib import matplotlib.pyplot as plt +from matplotlib.tri import Triangulation import xarray as xr -import pandas as pd import numpy as np from mpas_analysis.shared.timekeeping.utility import date_to_days @@ -34,14 +34,21 @@ def plot_vertical_section_comparison( config, - xArray, - 
depthArray, modelArray, refArray, diffArray, colorMapSectionName, + xCoords=None, + zCoord=None, + triangulation_args=None, + xOutlineModel=None, + zOutlineModel=None, + xOutlineRef=None, + zOutlineRef=None, + xOutlineDiff=None, + zOutlineDiff=None, colorbarLabel=None, - xlabel=None, + xlabels=None, ylabel=None, title=None, modelTitle='Model', @@ -57,16 +64,14 @@ def plot_vertical_section_comparison( lineStyle='solid', lineColor='black', backgroundColor='grey', + invalidColor='white', + outlineValid=True, xLim=None, yLim=None, - secondXAxisData=None, - secondXAxisLabel=None, - thirdXAxisData=None, - thirdXAxisLabel=None, numUpperTicks=None, upperXAxisTickLabelPrecision=None, invertYAxis=True, - xArrayIsTime=False, + xCoordIsTime=False, movingAveragePoints=None, firstYearXTicks=None, yearStrideXTicks=None, @@ -96,19 +101,45 @@ def plot_vertical_section_comparison( the configuration, containing a [plot] section with options that control plotting - xArray : float array - x array (latitude, longitude, spherical distance, or distance along - a transect; or, time for Hovmoller plots) - - depthArray : float array - depth array [m] - - modelArray, refArray : float arrays + modelArray, refArray : xarray.DataArray model and observational or control run data sets diffArray : float array difference between modelArray and refArray + xCoords : xarray.DataArray or list of xarray.DataArray, optional + The x coordinate(s) for the model, ref and diff arrays. Optional second + and third entries will be used for a second and third x axis above the + plot. The typical use for the second and third axis is for transects, + for which the primary x axis represents distance along a transect, and + the second and third x axes are used to display the corresponding + latitudes and longitudes. 
+ + zCoord : xarray.DataArray, optional + The z coordinates for the model, ref and diff arrays + + triangulation_args : dict, optional + A dict of arguments to create a matplotlib.tri.Triangulation of the + transect that does not rely on it being on a logically rectangular grid. + The arguments rather than the triangulation itself are passed because + multiple triangulations with different masks are needed internally and + there is not an obvious mechanism for copying an existing triangulation. + If this option is provided, ``xCoords`` is only used for tick marks if + more than one x axis is requested, and ``zCoord`` will be ignored. + + xOutlineModel, zOutlineModel : numpy.ndarray, optional + pairs of points defining line segments that are used to outline the + valid region of the mesh for the model panel if ``outlineValid = True`` + and ``triangulation_args`` is not ``None`` + + xOutlineRef, zOutlineRef : numpy.ndarray, optional + Same as ``xOutlineModel`` and ``zOutlineModel`` but for the reference + panel + + xOutlineDiff, zOutlineDiff : numpy.ndarray, optional + Same as ``xOutlineModel`` and ``zOutlineModel`` but for the difference + panel + colorMapSectionName : str section name in ``config`` where color map info can be found. @@ -121,8 +152,11 @@ def plot_vertical_section_comparison( parenthetically appended to the legend entries of the contour comparison plot. - xlabel, ylabel : str, optional - label of x- and y-axis + xlabels : str or list of str, optional + labels of x-axes. Labels correspond to entries in ``xCoords``. + + ylabel : str, optional + label of y-axis title : str, optional the subtitle of the plot @@ -173,37 +207,23 @@ def plot_vertical_section_comparison( contourComparisonLineColor argument). backgroundColor : str, optional - the background color for the plot (NaNs and masked areas will be + the background color for the plot outside the limits of ``xCoord`` and + ``zCoord``. 
+ + invalidColor : str, optional + the color for invalid values (NaNs and masked areas will be + shown in this color) + + outlineValid : bool, optional + whether to outline the boundary between the valid and invalid regions + with a black contour + xLim : float array, optional x range of plot yLim : float array, optional y range of plot - secondXAxisData : the data to use to display a second x axis (which will be - placed above the plot). This array must have the same number of values - as xArray, and it is assumed that the values in this array define - locations along the x axis that are the same as those defined by the - corresponding values in xArray, but in some different unit system. - - secondXAxisLabel : the label for the second x axis, if requested - - thirdXAxisData : the data to use to display a third x axis (which will be - placed above the plot and above the second x axis, which must be - specified if a third x axis is to be specified). This array must have - the same number of values as xArray, and it is assumed that the values - in this array define locations along the x axis that are the same as - those defined by the corresponding values in xArray, but in some - different unit system (which is presumably also different from the unit - system used for the values in the secondXAxisData array). The typical - use for this third axis is for transects, for which the primary x axis - represents distance along a transect, and the second and third x axes - are used to display the corresponding latitudes and longitudes.
- - thirdXAxisLabel : the label for the third x axis, if requested - numUpperTicks : the approximate number of ticks to use on the upper x axis or axes (these are the second and third x axes, which are placed above the plot if they have been requested by specifying the secondXAxisData @@ -217,13 +237,13 @@ def plot_vertical_section_comparison( invertYAxis : logical, optional if True, invert Y axis - xArrayIsTime : logical, optional + xCoordIsTime : logical, optional if True, format the x axis for time (this applies only to the primary x axis, not to the optional second or third x axes) movingAveragePoints : int, optional the number of points over which to perform a moving average - NOTE: this option is mostly intended for use when xArrayIsTime is True, + NOTE: this option is mostly intended for use when xCoordIsTime is True, although it will work with other data as well. Also, the moving average calculation is based on number of points, not actual x axis values, so for best results, the values in the xArray should be equally @@ -240,11 +260,11 @@ def plot_vertical_section_comparison( maxXTicks : int, optional the maximum number of tick marks that will be allowed along the primary x axis. This may need to be adjusted depending on the figure size and - aspect ratio. NOTE: maxXTicks is only used if xArrayIsTime is True + aspect ratio. 
NOTE: maxXTicks is only used if xCoordIsTime is True calendar : str, optional the calendar to use for formatting the time axis - NOTE: calendar is only used if xArrayIsTime is True + NOTE: calendar is only used if xCoordIsTime is True compareAsContours : bool, optional if compareAsContours is True, instead of creating a three panel plot @@ -298,6 +318,11 @@ def plot_vertical_section_comparison( if defaultFontSize is None: defaultFontSize = config.getint('plot', 'defaultFontSize') matplotlib.rc('font', size=defaultFontSize) + if not isinstance(xCoords, list): + xCoords = [xCoords] + + if not isinstance(xlabels, list): + xlabels = [xlabels] if refArray is None or compareAsContours: singlePanel = True @@ -312,13 +337,13 @@ def plot_vertical_section_comparison( # depending on how many x axes are to be displayed on the plots if singlePanel: if compareAsContours and refArray is not None: - if thirdXAxisData is not None: + if len(xCoords) == 3: figsize = (8, 8) else: figsize = (8, 7) else: figsize = (8, 5) - elif thirdXAxisData is not None: + elif len(xCoords) == 3: figsize = (8, 17) else: figsize = (8, 13) @@ -339,12 +364,12 @@ def plot_vertical_section_comparison( if plotTitleFontSize is None: plotTitleFontSize = config.get('plot', 'threePanelPlotTitleFontSize') - if thirdXAxisData is not None: + if len(xCoords) == 3: if singlePanel: titleY = 1.64 else: titleY = 1.34 - elif secondXAxisData is not None: + elif len(xCoords) >= 2: titleY = 1.20 else: titleY = 1.06 @@ -357,12 +382,12 @@ def plot_vertical_section_comparison( if not compareAsContours or refArray is None: title = modelTitle - contourComparisonFieldArray = None + contourComparisonField = None comparisonFieldName = None originalFieldName = None else: title = None - contourComparisonFieldArray = refArray + contourComparisonField = refArray comparisonFieldName = refTitle originalFieldName = modelTitle @@ -370,14 +395,17 @@ def plot_vertical_section_comparison( _, ax = plot_vertical_section( config, - xArray, - 
depthArray, modelArray, colorMapSectionName, + xCoords=xCoords, + zCoord=zCoord, + triangulation_args=triangulation_args, + xOutline=xOutlineModel, + zOutline=zOutlineModel, suffix=resultSuffix, colorbarLabel=colorbarLabel, title=title, - xlabel=xlabel, + xlabels=xlabels, ylabel=ylabel, figsize=None, titleFontSize=plotTitleFontSize, @@ -389,21 +417,19 @@ def plot_vertical_section_comparison( lineWidth=lineWidth, lineStyle=lineStyle, lineColor=lineColor, - secondXAxisData=secondXAxisData, - secondXAxisLabel=secondXAxisLabel, - thirdXAxisData=thirdXAxisData, - thirdXAxisLabel=thirdXAxisLabel, numUpperTicks=numUpperTicks, upperXAxisTickLabelPrecision=upperXAxisTickLabelPrecision, invertYAxis=invertYAxis, - xArrayIsTime=xArrayIsTime, + xCoordIsTime=xCoordIsTime, movingAveragePoints=movingAveragePoints, firstYearXTicks=firstYearXTicks, yearStrideXTicks=yearStrideXTicks, maxXTicks=maxXTicks, calendar=calendar, backgroundColor=backgroundColor, + invalidColor=invalidColor, + outlineValid=outlineValid, plotAsContours=compareAsContours, - contourComparisonFieldArray=contourComparisonFieldArray, + contourComparisonField=contourComparisonField, comparisonFieldName=comparisonFieldName, originalFieldName=originalFieldName, comparisonContourLineStyle=comparisonContourLineStyle, @@ -418,14 +444,17 @@ def plot_vertical_section_comparison( plt.subplot(3, 1, 2) _, ax = plot_vertical_section( config, - xArray, - depthArray, refArray, colorMapSectionName, + xCoords=xCoords, + zCoord=zCoord, + triangulation_args=triangulation_args, + xOutline=xOutlineRef, + zOutline=zOutlineRef, suffix=resultSuffix, colorbarLabel=colorbarLabel, title=refTitle, - xlabel=xlabel, + xlabels=xlabels, ylabel=ylabel, figsize=None, titleFontSize=plotTitleFontSize, @@ -437,20 +466,18 @@ def plot_vertical_section_comparison( lineWidth=lineWidth, lineStyle=lineStyle, lineColor=lineColor, - secondXAxisData=secondXAxisData, - secondXAxisLabel=secondXAxisLabel, - thirdXAxisData=thirdXAxisData, - 
thirdXAxisLabel=thirdXAxisLabel, upperXAxisTickLabelPrecision=upperXAxisTickLabelPrecision, numUpperTicks=numUpperTicks, invertYAxis=invertYAxis, - xArrayIsTime=xArrayIsTime, + xCoordIsTime=xCoordIsTime, movingAveragePoints=movingAveragePoints, firstYearXTicks=firstYearXTicks, yearStrideXTicks=yearStrideXTicks, maxXTicks=maxXTicks, calendar=calendar, backgroundColor=backgroundColor, + invalidColor=invalidColor, + outlineValid=outlineValid, labelContours=labelContours, contourLabelPrecision=contourLabelPrecision, maxTitleLength=maxTitleLength) @@ -460,14 +487,17 @@ def plot_vertical_section_comparison( plt.subplot(3, 1, 3) _, ax = plot_vertical_section( config, - xArray, - depthArray, diffArray, colorMapSectionName, + xCoords=xCoords, + zCoord=zCoord, + triangulation_args=triangulation_args, + xOutline=xOutlineDiff, + zOutline=zOutlineDiff, suffix=diffSuffix, colorbarLabel=colorbarLabel, title=diffTitle, - xlabel=xlabel, + xlabels=xlabels, ylabel=ylabel, figsize=None, titleFontSize=plotTitleFontSize, @@ -479,20 +509,18 @@ def plot_vertical_section_comparison( lineWidth=lineWidth, lineStyle=lineStyle, lineColor=lineColor, - secondXAxisData=secondXAxisData, - secondXAxisLabel=secondXAxisLabel, - thirdXAxisData=thirdXAxisData, - thirdXAxisLabel=thirdXAxisLabel, upperXAxisTickLabelPrecision=upperXAxisTickLabelPrecision, numUpperTicks=numUpperTicks, invertYAxis=invertYAxis, - xArrayIsTime=xArrayIsTime, + xCoordIsTime=xCoordIsTime, movingAveragePoints=movingAveragePoints, firstYearXTicks=firstYearXTicks, yearStrideXTicks=yearStrideXTicks, maxXTicks=maxXTicks, calendar=calendar, backgroundColor=backgroundColor, + invalidColor=invalidColor, + outlineValid=outlineValid, labelContours=labelContours, contourLabelPrecision=contourLabelPrecision, maxTitleLength=maxTitleLength) @@ -500,7 +528,7 @@ def plot_vertical_section_comparison( axes.append(ax) if singlePanel: - if thirdXAxisData is not None and refArray is None: + if len(xCoords) == 3 and refArray is None: 
plt.tight_layout(pad=0.0, h_pad=2.0, rect=[0.0, 0.0, 1.0, 0.98]) else: plt.tight_layout(pad=0.0, h_pad=2.0, rect=[0.0, 0.0, 1.0, 0.95]) @@ -512,14 +540,17 @@ def plot_vertical_section_comparison( def plot_vertical_section( config, - xArray, - depthArray, - fieldArray, + field, colorMapSectionName, + xCoords=None, + zCoord=None, + triangulation_args=None, + xOutline=None, + zOutline=None, suffix='', colorbarLabel=None, title=None, - xlabel=None, + xlabels=None, ylabel=None, figsize=(10, 4), dpi=None, @@ -533,21 +564,19 @@ def plot_vertical_section( lineStyle='solid', lineColor='black', backgroundColor='grey', - secondXAxisData=None, - secondXAxisLabel=None, - thirdXAxisData=None, - thirdXAxisLabel=None, + invalidColor='white', + outlineValid=True, numUpperTicks=None, upperXAxisTickLabelPrecision=None, invertYAxis=True, - xArrayIsTime=False, + xCoordIsTime=False, movingAveragePoints=None, firstYearXTicks=None, yearStrideXTicks=None, maxXTicks=20, calendar='gregorian', plotAsContours=False, - contourComparisonFieldArray=None, + contourComparisonField=None, comparisonFieldName=None, originalFieldName=None, comparisonContourLineStyle=None, @@ -559,12 +588,12 @@ def plot_vertical_section( Plots a data set as a x distance (latitude, longitude, or spherical distance) vs depth map (vertical section). - Or, if xArrayIsTime is True, plots data set on a vertical + Or, if xCoordIsTime is True, plots data set on a vertical Hovmoller plot (depth vs. time). - Typically, the fieldArray data are plotted using a heatmap, but if - contourComparisonFieldArray is not None, then contours of both - fieldArray and contourComparisonFieldArray are plotted instead. + Typically, the ``field`` data are plotted using a heatmap, but if + ``contourComparisonField`` is not None, then contours of both + ``field`` and ``contourComparisonField`` are plotted instead. 
Parameters ---------- @@ -572,19 +601,45 @@ the configuration, containing a [plot] section with options that control plotting - xArray : float array - x array (latitude, longitude, or spherical distance; or, time for a - Hovmoller plot) - - depthArray : float array - depth array [m] - - fieldArray : float array - field array to plot + field : xarray.DataArray + field array to plot. For contour plots, ``xCoords`` and ``zCoord`` + should broadcast to the same shape as ``field``. For heatmap plots, + ``xCoords`` and ``zCoord`` are the corners of the plot. If they + broadcast to the same shape as ``field``, ``field`` will be bilinearly + interpolated to center values for each plot cell. If the coordinates + have one extra element in each direction than ``field``, ``field`` is + assumed to contain cell values and no interpolation is performed. colorMapSectionName : str section name in ``config`` where color map info can be found. + xCoords : xarray.DataArray or list of xarray.DataArray, optional + The x coordinate(s) for the ``field``. Optional second + and third entries will be used for a second and third x axis above the + plot. The typical use for the second and third axis is for transects, + for which the primary x axis represents distance along a transect, and + the second and third x axes are used to display the corresponding + latitudes and longitudes. + + zCoord : xarray.DataArray, optional + The z coordinates for the ``field`` + + triangulation_args : dict, optional + A dict of arguments to create a matplotlib.tri.Triangulation of the + transect that does not rely on it being on a logically rectangular grid. + The arguments rather than the triangulation itself are passed because + multiple triangulations with different masks are needed internally and + there is not an obvious mechanism for copying an existing triangulation.
+ If this option is provided, ``xCoords`` is only used for tick marks if + more than one x axis is requested, and ``zCoord`` will be ignored. + + xOutline, zOutline : numpy.ndarray, optional + pairs of points defining line segments that are used to outline the + valid region of the mesh if ``outlineValid = True`` and + ``triangulation_args`` is not ``None`` + + + suffix : str, optional the suffix used for colorbar config options @@ -592,16 +647,19 @@ def plot_vertical_section( the label for the colorbar. If plotAsContours and labelContours are both True, colorbarLabel is used as follows (typically in order to indicate the units that are associated with the contour labels): - if contourComparisonFieldArray is None, the colorbarLabel string is + if ``contourComparisonField`` is None, the ``colorbarLabel`` string is parenthetically appended to the plot title; if - contourComparisonFieldArray is not None, it is parenthetically appended + ``contourComparisonField`` is not None, it is parenthetically appended to the legend entries of the contour comparison plot. title : str, optional title of plot - xlabel, ylabel : str, optional - label of x- and y-axis + xlabels : str or list of str, optional + labels of x-axes. Labels correspond to entries in ``xCoords``. + + ylabel : str, optional + label of y-axis figsize : tuple of float, optional size of the figure in inches, or None if the current figure should @@ -635,45 +693,34 @@ def plot_vertical_section( lineStyle : str, optional the line style of contour lines (if specified); this applies to the style of contour lines of fieldArray (the style of the contour lines - of contourComparisonFieldArray is set using + of contourComparisonField is set using contourComparisonLineStyle). 
lineColor : str, optional the color of contour lines (if specified); this applies to the contour lines of fieldArray (the color of the contour lines of - contourComparisonFieldArray is set using contourComparisonLineColor + contourComparisonField is set using comparisonContourLineColor backgroundColor : str, optional - the background color for the plot (NaNs will be shown in this color) - - secondXAxisData : the data to use to display a second x axis (which will be - placed above the plot). This array must have the same number of values - as xArray, and it is assumed that the values in this array define - locations along the x axis that are the same as those defined by the - corresponding values in xArray, but in some different unit system. - - secondXAxisLabel : the label for the second x axis, if requested - - thirdXAxisData : the data to use to display a third x axis (which will be - placed above the plot and above the second x axis, which must be - specified if a third x axis is to be specified). This array must have - the same number of values as xArray, and it is assumed that the values - in this array define locations along the x axis that are the same as - those defined by the corresponding values in xArray, but in some - different unit system (which is presumably also different from the unit - system used for the values in the secondXAxisData array). The typical - use for this third axis is for transects, for which the primary x axis - represents distance along a transect, and the second and third x axes - are used to display the corresponding latitudes and longitudes. - - thirdXAxisLabel : the label for the third x axis, if requested + the background color for the plot outside the limits of ``xCoord`` and + ``zCoord``. 
- numUpperTicks : the approximate number of ticks to use on the upper x axis + invalidColor : str, optional + the color for invalid values (NaNs and masked areas will be + shown in this color) + + outlineValid : bool, optional + whether to outline the boundary between the valid and invalid regions + with a black contour + + numUpperTicks : int, optional + the approximate number of ticks to use on the upper x axis or axes (these are the second and third x axes, which are placed above the plot if they have been requested by specifying the secondXAxisData or thirdXAxisData arrays above) - upperXAxisTickLabelPrecision : the number of decimal places (to the right + upperXAxisTickLabelPrecision : int, optional + the number of decimal places (to the right of the decimal point) to use for values at upper axis ticks. This value can be adjusted (in concert with numUpperTicks) to avoid problems with overlapping numbers along the upper axis. @@ -681,17 +728,17 @@ invertYAxis : logical, optional if True, invert Y axis - xArrayIsTime : logical, optional + xCoordIsTime : logical, optional if True, format the x axis for time (this applies only to the primary x axis, not to the optional second or third x axes) movingAveragePoints : int, optional the number of points over which to perform a moving average - NOTE: this option is mostly intended for use when xArrayIsTime is True, - although it will work with other data as well. Also, the moving + NOTE: this option is mostly intended for use when ``xCoordIsTime`` is + True, although it will work with other data as well. Also, the moving average calculation is based on number of points, not actual x axis - values, so for best results, the values in the xArray should be equally - spaced. + values, so for best results, the values in the first entry in + ``xCoords`` should be equally spaced. firstYearXTicks : int, optional The year of the first tick on the x axis. 
By default, the first time @@ -704,38 +751,36 @@ def plot_vertical_section( maxXTicks : int, optional the maximum number of tick marks that will be allowed along the primary x axis. This may need to be adjusted depending on the figure size and - aspect ratio. NOTE: maxXTicks is only used if xArrayIsTime is True + aspect ratio. NOTE: maxXTicks is only used if xCoordIsTime is True calendar : str, optional the calendar to use for formatting the time axis - NOTE: calendar is only used if xArrayIsTime is True + NOTE: calendar is only used if xCoordIsTime is True plotAsContours : bool, optional - if plotAsContours is True, instead of plotting fieldArray as a - heatmap, the function will plot only the contours of fieldArray. In - addition, if contourComparisonFieldArray is not None, the contours + if plotAsContours is True, instead of plotting ``field`` as a + heatmap, the function will plot only the contours of ``field``. In + addition, if contourComparisonField is not None, the contours of this field will be plotted on the same plot. The selection of contour levels is still determined as for the contours on the heatmap - plots, via the 'contours' entry in colorMapSectionName. - - contourComparisonFieldArray : float array, optional - a comparison field array (typically observational data or results from - another simulation run), assumed to be of the same shape as fieldArray, - and related to xArray and depthArray in the same way fieldArray is. - If contourComparisonFieldArray is None, then fieldArray will be plotted - as a heatmap. However, if countourComparisonFieldArray is not None, - then contours of both fieldArray and contourComparisonFieldArray will - be plotted in order to enable a comparison of the two fields on the - same plot. If plotAsContours is False, this parameter is ignored. + plots, via the 'contours' entry in ``colorMapSectionName``. 
+ + contourComparisonField : float array, optional + a comparison ``field`` array (typically observational data or results + from another simulation run), assumed to be of the same shape as + ``field``. If ``plotAsContours`` is ``True`` and + ``contourComparisonField`` is not ``None``, then contours of both + ``field`` and ``contourComparisonField`` will be plotted in order to + enable a comparison of the two fields on the same plot. comparisonFieldName : str, optional - the name for the comparison field. If contourComparisonFieldArray is - None, this parameter is ignored. + the name for the comparison field. If contourComparisonField is + None, this parameter is ignored. originalFieldName : str, optional - the name for the fieldArray field (for the purposes of labeling the - contours on a contour comparison plot). If contourComparisonFieldArray - is None, this parameter is ignored. + the name for the ``field`` field (for the purposes of labeling the + contours on a contour comparison plot). If contourComparisonField + is None, this parameter is ignored. 
comparisonContourLineStyle : str, optional the line style of contour lines of the comparisonFieldName field on @@ -771,143 +816,61 @@ def plot_vertical_section( if defaultFontSize is None: defaultFontSize = config.getint('plot', 'defaultFontSize') matplotlib.rc('font', size=defaultFontSize) + if xCoords is not None: + if not isinstance(xCoords, list): + xCoords = [xCoords] - # compute moving averages with respect to the x dimension - if movingAveragePoints is not None and movingAveragePoints != 1: - N = movingAveragePoints - movingAverageDepthSlices = [] - for nVertLevel in range(len(depthArray)): - depthSlice = fieldArray[[nVertLevel]][0] - # in case it's not an xarray already - depthSlice = xr.DataArray(depthSlice) - mean = pd.Series.rolling(depthSlice.to_series(), N, - center=True).mean() - mean = xr.DataArray.from_series(mean) - mean = mean[int(N / 2.0):-int(round(N / 2.0) - 1)] - movingAverageDepthSlices.append(mean) - xArray = xArray[int(N / 2.0):-int(round(N / 2.0) - 1)] - fieldArray = xr.DataArray(movingAverageDepthSlices) - - dimX = xArray.shape - dimZ = depthArray.shape - dimF = fieldArray.shape - if contourComparisonFieldArray is not None: - dimC = contourComparisonFieldArray.shape - - if len(dimX) != 1 and len(dimX) != 2: - raise ValueError('xArray must have either one or two dimensions ' - '(has %d)' % dimX) - - if len(dimZ) != 1 and len(dimZ) != 2: - raise ValueError('depthArray must have either one or two dimensions ' - '(has %d)' % dimZ) - - if len(dimF) != 2: - raise ValueError('fieldArray must have two dimensions (has %d)' % dimF) - - if contourComparisonFieldArray is not None: - if len(dimC) != 2: - raise ValueError('contourComparisonFieldArray must have two ' - 'dimensions (has %d)' % dimC) - elif (fieldArray.shape[0] != contourComparisonFieldArray.shape[0]) or \ - (fieldArray.shape[1] != contourComparisonFieldArray.shape[1]): - raise ValueError('size mismatch between fieldArray (%d x %d) and ' - 'contourComparisonFieldArray (%d x %d)' % - 
(fieldArray.shape[0], fieldArray.shape[1], - contourComparisonFieldArray.shape[0], - contourComparisonFieldArray.shape[1])) - - # verify that the dimensions of fieldArray are consistent with those of - # xArray and depthArray - if len(dimX) == 1 and len(dimZ) == 1: - num_x = dimX[0] - num_z = dimZ[0] - if num_x != fieldArray.shape[1] or num_z != fieldArray.shape[0]: - raise ValueError('size mismatch between xArray (%d), ' - 'depthArray (%d), and fieldArray (%d x %d)' % - (num_x, num_z, fieldArray.shape[0], - fieldArray.shape[1])) - elif len(dimX) == 1: - num_x = dimX[0] - num_x_Z = dimZ[1] - num_z = dimZ[0] - if num_x != fieldArray.shape[1] or num_z != fieldArray.shape[0] or \ - num_x != num_x_Z: - raise ValueError('size mismatch between xArray (%d), ' - 'depthArray (%d x %d), and fieldArray (%d x %d)' % - (num_x, num_z, num_x_Z, - fieldArray.shape[0], - fieldArray.shape[1])) - elif len(dimZ) == 1: - num_x = dimX[1] - num_z_X = dimX[0] - num_z = dimZ[0] - if num_x != fieldArray.shape[1] or num_z != fieldArray.shape[0] or \ - num_z != num_z_X: - raise ValueError('size mismatch between xArray (%d x %d), ' - 'depthArray (%d), and fieldArray (%d x %d)' % - (num_z_X, num_x, num_z, - fieldArray.shape[0], - fieldArray.shape[1])) - else: - num_x = dimX[1] - num_z_X = dimX[0] - num_x_Z = dimZ[1] - num_z = dimZ[0] - if num_x != fieldArray.shape[1] or num_z != fieldArray.shape[0] \ - or num_x != num_x_Z or num_z != num_z_X: - raise ValueError('size mismatch between xArray (%d x %d), ' - 'depthArray (%d x %d), and fieldArray (%d x %d)' % - (num_z_X, num_x, num_z, num_x_Z, - fieldArray.shape[0], - fieldArray.shape[1])) - - # Verify that the upper x-axis parameters are consistent with each other - # and with xArray - if secondXAxisData is None and thirdXAxisData is not None: - raise ValueError('secondXAxisData cannot be None if thirdXAxisData ' - 'is not None') - if secondXAxisData is not None: - arrayShape = secondXAxisData.shape - if len(arrayShape) == 1 and arrayShape[0] != 
num_x: - raise ValueError('secondXAxisData has %d x values, ' - 'but should have num_x = %d x values' % - (arrayShape[0], num_x)) - elif len(arrayShape) == 2 and arrayShape[1] != num_x: - raise ValueError('secondXAxisData has %d x values, ' - 'but should have num_x = %d x values' % - (arrayShape[1], num_x)) - elif len(arrayShape) > 2: - raise ValueError('secondXAxisData must be a 1D or 2D array, ' - 'but is of dimension %d' % - (len(arrayShape))) - if thirdXAxisData is not None: - arrayShape = thirdXAxisData.shape - if len(arrayShape) == 1 and arrayShape[0] != num_x: - raise ValueError('thirdXAxisData has %d x values, ' - 'but should have num_x = %d x values' % - (arrayShape[0], num_x)) - elif len(arrayShape) == 2 and arrayShape[1] != num_x: - raise ValueError('thirdXAxisData has %d x values, ' - 'but should have num_x = %d x values' % - (arrayShape[1], num_x)) - elif len(arrayShape) > 2: - raise ValueError('thirdXAxisData must be a 1D or 2D array, ' - 'but is of dimension %d' % - (len(arrayShape))) - - # define x and y as the appropriate 2D arrays for plotting - if len(dimX) == 1 and len(dimZ) == 1: - x, y = np.meshgrid(xArray, depthArray) - elif len(dimX) == 1: - x, y = np.meshgrid(xArray, np.zeros(num_z)) - y = depthArray - elif len(dimZ) == 1: - x, y = np.meshgrid(np.zeros(num_x), depthArray) - x = xArray + if not isinstance(xlabels, list): + xlabels = [xlabels] + + if len(xCoords) != len(xlabels): + raise ValueError('Expected the same number of xCoords and xlabels') + + if triangulation_args is None: + + x, y = xr.broadcast(xCoords[0], zCoord) + dims_in_field = all([dim in field.dims for dim in x.dims]) + + if dims_in_field: + x = x.transpose(*field.dims) + y = y.transpose(*field.dims) + else: + xsize = list(x.sizes.values()) + fieldsize = list(field.sizes.values()) + if xsize[0] == fieldsize[0] + 1 and xsize[1] == fieldsize[1] + 1: + pass + elif xsize[0] == fieldsize[1] + 1 and xsize[1] == fieldsize[0] + 1: + x = x.transpose(x.dims[1], x.dims[0]) + y = 
y.transpose(y.dims[1], y.dims[0]) + else: + raise ValueError('Sizes of coords {}x{} and field {}x{} not ' + 'compatible.'.format(xsize[0], xsize[1], + fieldsize[0], + fieldsize[1])) + + # compute moving averages with respect to the x dimension + if movingAveragePoints is not None and movingAveragePoints != 1: + + dim = field.dims[0] + field = field.rolling(dim={dim: movingAveragePoints}, + center=True).mean().dropna(dim) + x = x.rolling(dim={dim: movingAveragePoints}, + center=True).mean().dropna(dim) + y = y.rolling(dim={dim: movingAveragePoints}, + center=True).mean().dropna(dim) + + mask = field.notnull() + maskedTriangulation, unmaskedTriangulation = _get_triangulation( + x, y, mask) else: - x = xArray - y = depthArray + mask = field.notnull() + triMask = np.logical_not(mask.values) + # if any node of a triangle is masked, the triangle is masked + triMask = np.amax(triMask, axis=1) + unmaskedTriangulation = Triangulation(**triangulation_args) + mask_args = dict(triangulation_args) + mask_args['mask'] = triMask + maskedTriangulation = Triangulation(**mask_args) # set up figure if dpi is None: @@ -917,77 +880,90 @@ def plot_vertical_section( else: fig = plt.gcf() - colormapDict = setup_colormap(config, colorMapSectionName, suffix=suffix) + colormapDict = setup_colormap(config, colorMapSectionName, + suffix=suffix) + + # fill the unmasked region with the invalid color so it will show through + # any masked regions + zeroArray = xr.zeros_like(field) + plt.tricontourf(unmaskedTriangulation, zeroArray.values.ravel(), + colors=invalidColor) - if not plotAsContours: # display a heatmap of fieldArray + if not plotAsContours: + # display a heatmap of fieldArray + fieldMasked = field.where(mask, 0.0).values.ravel() if colormapDict['levels'] is None: - # interpFieldArray contains the values at centers of grid cells, - # for pcolormesh plots (using bilinear interpolation) - interpFieldArray = \ - 0.5 * (0.5 * (fieldArray[1:, 1:] + fieldArray[0:-1, 1:]) + - 0.5 * 
(fieldArray[1:, 0:-1] + fieldArray[0:-1, 0:-1])) - - plotHandle = plt.pcolormesh(x, y, interpFieldArray, - cmap=colormapDict['colormap'], - norm=colormapDict['norm'], - rasterized=True) + + plotHandle = plt.tripcolor(maskedTriangulation, fieldMasked, + cmap=colormapDict['colormap'], + norm=colormapDict['norm'], + rasterized=True, shading='gouraud') else: - plotHandle = plt.contourf(x, y, fieldArray, - cmap=colormapDict['colormap'], - norm=colormapDict['norm'], - levels=colormapDict['levels'], - extend='both') + plotHandle = plt.tricontourf(maskedTriangulation, fieldMasked, + cmap=colormapDict['colormap'], + norm=colormapDict['norm'], + levels=colormapDict['levels'], + extend='both') cbar = plt.colorbar(plotHandle, orientation='vertical', spacing='uniform', aspect=9, - ticks=colormapDict['ticks'], - boundaries=colormapDict['ticks']) + ticks=colormapDict['ticks']) if colorbarLabel is not None: cbar.set_label(colorbarLabel) - else: # display a white heatmap to get a white background for non-land - zeroArray = np.ma.where(fieldArray != np.nan, 0.0, fieldArray) - plt.contourf(x, y, zeroArray, colors='white') + else: + # display a white heatmap to get a white background for non-land + zeroArray = xr.zeros_like(field) + plt.tricontourf(maskedTriangulation, zeroArray.values.ravel(), + colors='white') - # set the color for NaN or masked regions, and draw a black - # outline around them; technically, the contour level used should - # be 1.0, but the contours don't show up when using 1.0, so 0.999 - # is used instead ax = plt.gca() ax.set_facecolor(backgroundColor) - landArray = np.ma.where(fieldArray != np.nan, 1.0, fieldArray) - landArray = np.ma.masked_where(landArray == np.nan, landArray, copy=True) - landArray = landArray.filled(0.0) - plt.contour(x, y, landArray, levels=[0.999], colors='black', linewidths=1) + if outlineValid: + if xOutline is not None and zOutline is not None: + # also outline the domain if provided + plt.plot(xOutline, zOutline, color='black', 
linewidth=1) + else: + # do a contour to outline the boundary between valid and invalid + # values + landMask = np.isnan(field.values).ravel() + plt.tricontour(unmaskedTriangulation, landMask, levels=[0.0001], + colors='black', linewidths=1) + # plot contours, if they were requested contourLevels = colormapDict['contours'] + fmt_string = None + cs1 = None + cs2 = None + if contourLevels is not None: if len(contourLevels) == 0: # automatic calculation of contour levels contourLevels = None - cs1 = plt.contour(x, y, fieldArray, - levels=contourLevels, - colors=lineColor, - linestyles=lineStyle, - linewidths=lineWidth) + cs1 = plt.tricontour(maskedTriangulation, field.values.ravel(), + levels=contourLevels, + colors=lineColor, + linestyles=lineStyle, + linewidths=lineWidth) if labelContours: fmt_string = "%%1.%df" % int(contourLabelPrecision) plt.clabel(cs1, fmt=fmt_string) - if plotAsContours and contourComparisonFieldArray is not None: - cs2 = plt.contour(x, y, contourComparisonFieldArray, - levels=contourLevels, - colors=comparisonContourLineColor, - linestyles=comparisonContourLineStyle, - linewidths=lineWidth) + if plotAsContours and contourComparisonField is not None: + cs2 = plt.tricontour(maskedTriangulation, + contourComparisonField.values.ravel(), + levels=contourLevels, + colors=comparisonContourLineColor, + linestyles=comparisonContourLineStyle, + linewidths=lineWidth) if labelContours: plt.clabel(cs2, fmt=fmt_string) - if plotAsContours and contourComparisonFieldArray is not None: + if plotAsContours and contourComparisonField is not None: h1, _ = cs1.legend_elements() h2, _ = cs2.legend_elements() if labelContours: @@ -999,7 +975,7 @@ def plot_vertical_section( if title is not None: if plotAsContours and labelContours \ - and contourComparisonFieldArray is None: + and contourComparisonField is None: title = limit_title(title, maxTitleLength-(3+len(colorbarLabel))) title = title + " (" + colorbarLabel + ")" else: @@ -1014,13 +990,12 @@ def 
plot_vertical_section( else: plt.title(title, **title_font) - if (xlabel is not None) or (ylabel is not None): - if axisFontSize is None: - axisFontSize = config.get('plot', 'axisFontSize') - axis_font = {'size': axisFontSize} + if axisFontSize is None: + axisFontSize = config.get('plot', 'axisFontSize') + axis_font = {'size': axisFontSize} - if xlabel is not None: - plt.xlabel(xlabel, **axis_font) + if xlabels is not None: + plt.xlabel(xlabels[0], **axis_font) if ylabel is not None: plt.ylabel(ylabel, **axis_font) @@ -1032,44 +1007,89 @@ def plot_vertical_section( if yLim: ax.set_ylim(yLim) - if xArrayIsTime: + if xCoords is not None and xCoordIsTime: if firstYearXTicks is None: - minDays = [xArray[0]] + minDays = xCoords[0][0].values else: minDays = date_to_days(year=firstYearXTicks, calendar=calendar) - maxDays = [xArray[-1]] + maxDays = xCoords[0][-1].values plot_xtick_format(calendar, minDays, maxDays, maxXTicks, yearStride=yearStrideXTicks) # add a second x-axis scale, if it was requested - if secondXAxisData is not None: + if xCoords is not None and len(xCoords) >= 2: ax2 = ax.twiny() ax2.set_facecolor(backgroundColor) - ax2.set_xlabel(secondXAxisLabel, **axis_font) + if xlabels[1] is not None: + ax2.set_xlabel(xlabels[1], **axis_font) xlimits = ax.get_xlim() ax2.set_xlim(xlimits) - xticks = np.linspace(xlimits[0], xlimits[1], numUpperTicks) - tickValues = np.interp(xticks, x.flatten()[:num_x], secondXAxisData) - ax2.set_xticks(xticks) - formatString = "{{0:.{:d}f}}{}".format( - upperXAxisTickLabelPrecision, r'$\degree$') - ax2.set_xticklabels([formatString.format(member) - for member in tickValues]) + formatString = None + xticks = None + if numUpperTicks is not None: + xticks = np.linspace(xlimits[0], xlimits[1], numUpperTicks) + tickValues = np.interp(xticks, xCoords[0].values, xCoords[1].values) + ax2.set_xticks(xticks) + formatString = "{{0:.{:d}f}}{}".format( + upperXAxisTickLabelPrecision, r'$\degree$') + 
ax2.set_xticklabels([formatString.format(member) + for member in tickValues]) # add a third x-axis scale, if it was requested - if thirdXAxisData is not None: + if len(xCoords) == 3: ax3 = ax.twiny() ax3.set_facecolor(backgroundColor) - ax3.set_xlabel(thirdXAxisLabel, **axis_font) + ax3.set_xlabel(xlabels[2], **axis_font) ax3.set_xlim(xlimits) ax3.set_xticks(xticks) - tickValues = np.interp(xticks, x.flatten()[:num_x], thirdXAxisData) - ax3.set_xticklabels([formatString.format(member) - for member in tickValues]) - ax3.spines['top'].set_position(('outward', 36)) + if numUpperTicks is not None: + tickValues = np.interp(xticks, xCoords[0].values, + xCoords[2].values) + ax3.set_xticklabels([formatString.format(member) + for member in tickValues]) + ax3.spines['top'].set_position(('outward', 36)) return fig, ax # }}} +def _get_triangulation(x, y, mask): + """divide each quad in the x/y mesh into 2 triangles""" + + nx = x.sizes[x.dims[0]] - 1 + ny = x.sizes[x.dims[1]] - 1 + nTriangles = 2*nx*ny + + mask = mask.values + mask = np.logical_and(np.logical_and(mask[0:-1, 0:-1], mask[1:, 0:-1]), + np.logical_and(mask[0:-1, 1:], mask[1:, 1:])) + triMask = np.zeros((nx, ny, 2), bool) + triMask[:, :, 0] = np.logical_not(mask) + triMask[:, :, 1] = triMask[:, :, 0] + + triMask = triMask.ravel() + + xIndices, yIndices = np.meshgrid(np.arange(nx), np.arange(ny), + indexing='ij') + + tris = np.zeros((nx, ny, 2, 3), int) + # upper triangles: + tris[:, :, 0, 0] = (ny+1)*xIndices + yIndices + tris[:, :, 0, 1] = (ny+1)*(xIndices + 1) + yIndices + tris[:, :, 0, 2] = (ny+1)*xIndices + yIndices + 1 + # lower triangle + tris[:, :, 1, 0] = (ny+1)*xIndices + yIndices + 1 + tris[:, :, 1, 1] = (ny+1)*(xIndices + 1) + yIndices + tris[:, :, 1, 2] = (ny+1)*(xIndices + 1) + yIndices + 1 + + tris = tris.reshape((nTriangles, 3)) + + x = x.values.ravel() + y = y.values.ravel() + + maskedTriangulation = Triangulation(x=x, y=y, triangles=tris, mask=triMask) + unmaskedTriangulation = Triangulation(x=x, 
y=y, triangles=tris) + + return maskedTriangulation, unmaskedTriangulation + # vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python diff --git a/suite/job_script.bash b/suite/job_script.bash index 2d451860a..1cc41693d 100644 --- a/suite/job_script.bash +++ b/suite/job_script.bash @@ -22,4 +22,5 @@ echo configs: {{ flags }} {{ config }} {{ parallel_exec }} mpas_analysis --purge {{ flags }} {{ config }} --verbose {{ parallel_exec }} mpas_analysis --html_only {{ flags }} {{ config }} +chmod ugo+rx {{ html_base }}/{{ out_common_dir }} chmod -R ugo+rX {{ html_base }}/{{ out_subdir }} diff --git a/suite/run_suite.bash b/suite/run_suite.bash index ebdec7aa2..67836c504 100755 --- a/suite/run_suite.bash +++ b/suite/run_suite.bash @@ -20,8 +20,8 @@ for py in ${main_py} ${alt_py} do env=test_mpas_analysis_py${py} mamba create -y -n ${env} --use-local python=${py} mpas-analysis sphinx \ - mock sphinx_rtd_theme "tabulate>=0.8.2" m2r pytest "mache>=1.1.2" \ - jinja2 + mock sphinx_rtd_theme "tabulate>=0.8.2" m2r2 "mistune<2" pytest \ + "mache>=1.1.2" jinja2 conda activate ${env} pytest conda deactivate diff --git a/suite/setup.py b/suite/setup.py index 0f80f9d0d..903d2f63c 100755 --- a/suite/setup.py +++ b/suite/setup.py @@ -130,6 +130,7 @@ def main(): out_subdir = os.path.join(machine, args.branch, f'main_py{args.python}') else: out_subdir = os.path.join(machine, args.branch, args.run) + out_common_dir = os.path.join(machine, args.branch) if machine == 'cori-haswell': execute_options = \ @@ -185,7 +186,7 @@ def main(): sbatch=sbatch, conda_base=conda_base, conda_env=conda_env, machine=machine, flags=flags, config=config_from_job, parallel_exec=parallel_exec, html_base=html_base, - out_subdir=out_subdir) + out_subdir=out_subdir, out_common_dir=out_common_dir) with open(job, 'w') as job_file: job_file.write(job_text)