diff --git a/scm/doc/TechGuide/acknow.tex b/scm/doc/TechGuide/acknow.tex index 1947092cf..e443162f3 100644 --- a/scm/doc/TechGuide/acknow.tex +++ b/scm/doc/TechGuide/acknow.tex @@ -8,11 +8,11 @@ \textcolor{darkgray}{\LARGE Acknowledgement} \vspace*{1cm}\par -If significant help was provided via the helpdesk or support forum for work resulting in a publication, please acknowledge the Developmental Testbed Center team.\\ +If significant help was provided via the helpdesk, email, or support forum for work resulting in a publication, please acknowledge the Developmental Testbed Center team.\\ \vspace*{1cm}\par For referencing this document please use:\\ \vspace*{1cm}\par -Firl, G., L. Carson, L. Bernardet, D. Heinzeller, and M. Harrold, 2020. Common Community Physics Package Single Column Model v4.1.0 User and Technical Guide. 38pp. Available at https://dtcenter.org/GMTB/v4.1.0/scm-ccpp-guide-v4.1.0.pdf +Firl, G., L. Carson, L. Bernardet, D. Heinzeller, and M. Harrold, 2021. Common Community Physics Package Single Column Model v5.0.0 User and Technical Guide. 39pp. Available at https://dtcenter.org/GMTB/v5.0.0/scm-ccpp-guide-v5.0.0.pdf \end{flushleft} \end{titlepage} diff --git a/scm/doc/TechGuide/chap_cases.tex b/scm/doc/TechGuide/chap_cases.tex index 8655ee127..cd72f3e5b 100644 --- a/scm/doc/TechGuide/chap_cases.tex +++ b/scm/doc/TechGuide/chap_cases.tex @@ -206,7 +206,7 @@ \section{Using other LASSO cases} \section{Using UFS Initial Conditions} \label{sec:UFS ICs} -A script exists in \execout{scm/etc/scripts/UFS\_IC\_generator.py} to read in UFS Atmosphere cold start initial conditions and generate a case input data file that the SCM can use. Since the Noah LSM is the operational LSM, it is assumed that initial variables for it exist in the UFS Atmosphere initial condition files. Although NoahMP is not a member of any officially supported suite as of this release, if NoahMP is to be used, its initial conditions are generated from the Noah initial conditions using the same algorithm used in the UFS Atmosphere. Note that the script requires a few python packages that may not be found by default in all python installations: \exec{argparse}, \exec{fnmatch}, \exec{logging}, \exec{NetCDF4}, \exec{numpy}, \exec{shapely}, \exec{f90nml}, and \exec{re}. +A script exists in \execout{scm/etc/scripts/UFS\_IC\_generator.py} to read in UFS Atmosphere cold start initial conditions and generate a case input data file that the SCM can use. Note that the script requires a few python packages that may not be found by default in all python installations: \exec{argparse}, \exec{fnmatch}, \exec{logging}, \exec{NetCDF4}, \exec{numpy}, \exec{shapely}, \exec{f90nml}, and \exec{re}. NOTE: If using NOAA's Hera HPC, the \execout{shapely} python package does not seem to be installed with the version of Anaconda used by the rest of this software package by default so it is installed when users execute \execout{scm/etc/Hera\_setup\_intel.[csh/sh]}. 
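On other systems, a quick way to confirm that these packages are available in the active Python environment before running the script is to try importing them all; note that the NetCDF4 package is imported as \execout{netCDF4}, and that \execout{python} may need to be replaced with \execout{python3} depending on your installation:
\begin{lstlisting}[language=bash]
python -c "import argparse, fnmatch, logging, re, numpy, shapely, f90nml, netCDF4"
\end{lstlisting}
If this command completes without an \execout{ImportError}, the script's dependencies are in place.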
@@ -221,7 +221,7 @@ \section{Using UFS Initial Conditions} \begin{lstlisting}[language=bash] ./UFS_IC_generator.py [-h] (-l LOCATION LOCATION | -ij INDEX INDEX) -d DATE -i IN_DIR -g GRID_DIR [-t {1,2,3,4,5,6}] -[-a AREA] [-mp] -n CASE_NAME [-oc] +[-a AREA] -n CASE_NAME [-oc] \end{lstlisting} Mandatory arguments: @@ -240,7 +240,6 @@ \section{Using UFS Initial Conditions} Optional arguments: \begin{enumerate} \item \exec{-{}-tile (-t)}: if one already knows the correct tile for the given longitude and latitude OR one is specifying the UFS grid index (\exec{-{}-index} argument) -\item \exec{-{}-noahmp (-mp)}: flag to generate cold-start initial conditions for NoahMP LSM from Noah LSM initial conditions \item \exec{-{}-area (-a)}: area of grid cell in $m^2$ (if known or different than the value calculated from the supergrid file) \item \exec{-{}-old\_chgres (-oc)}: flag if UFS initial conditions were generated using older version of chgres (global\_chgres); might be the case for pre-2018 data \end{enumerate} @@ -249,9 +248,6 @@ \section{Using UFS Initial Conditions} \begin{lstlisting}[language=bash] ./UFS_IC_generator.py -l 261.51 38.2 -d 201610030000 -i ../../data/raw_case_input/FV3_C96_example_ICs -g ../../data/raw_case_input/FV3_C96_example_ICs -n fv3_model_point_noah -oc \end{lstlisting} -\begin{lstlisting}[language=bash] -./UFS_IC_generator.py -l 261.51 38.2 -d 201610030000 -i ../../data/raw_case_input/FV3_C96_example_ICs -g ../../data/raw_case_input/FV3_C96_example_ICs -n fv3_model_point_noahmp -mp -oc -\end{lstlisting} Note that the \exec{-{}-in\_dir (-i)} and \exec{-{}-grid\_dir (-g)} arguments are the same in this case (since the supergrid files were copied to the same directory as the initial conditions files for point of example), but they will not in general be the same. Also note that the default behavior of the script is to expect that the NetCDF initial condition files were generated from \execout{chgres\_cube} and not the older \execout{global\_chgres}. If they were generated from the older version (which is likely for pre-2018 data), they will have a slightly different format requiring the \exec{-{}-old\_chgres (-oc)} option to be set in order for the files to be read properly by the script. If you try without the \exec{-{}-old\_chgres (-oc)} flag and receive a ``IndexError: t not found'' error, try the script again with the flag. @@ -261,6 +257,3 @@ \section{Using UFS Initial Conditions} \begin{lstlisting}[language=bash] ./run_scm.py -c fv3_model_point_noah -s SCM_GFS_v15p2 \end{lstlisting} -\begin{lstlisting}[language=bash] -./run_scm.py -c fv3_model_point_noahmp -s SCM_GFS_v15p2_noahmp -n input_GFS_v15p2_noahmp.nml -\end{lstlisting} diff --git a/scm/doc/TechGuide/chap_ccpp.tex b/scm/doc/TechGuide/chap_ccpp.tex index 643c847a7..c3aef9f2a 100644 --- a/scm/doc/TechGuide/chap_ccpp.tex +++ b/scm/doc/TechGuide/chap_ccpp.tex @@ -1,7 +1,7 @@ \chapter{CCPP Interface} \label{chapter: ccpp_interface} -Chapter 6 of the CCPP v4 Technical Documentation (\url{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}) provides a wealth of information on the overall process of connecting a host model to the CCPP framework for calling physics. This chapter describes the particular implementation within this SCM, including how to set up, initialize, call, and change a physics suite using the CCPP framework. 
+Chapter 6 of the CCPP v5 Technical Documentation (\url{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}) provides a wealth of information on the overall process of connecting a host model to the CCPP framework for calling physics. This chapter describes the particular implementation within this SCM, including how to set up, initialize, call, and change a physics suite using the CCPP framework. \section{Setting up a suite} @@ -9,11 +9,11 @@ \section{Setting up a suite} \subsection{Preparing data from the SCM} -As described in sections 6.1 and 6.2 of the \href{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}{CCPP Technical Documentation} a host model must allocate memory and provide metadata for variables that are passed into and out of the schemes within the physics suite. As of this release, in practice this means that a host model must do this for all variables needed by all physics schemes that are expected to be used with the host model. For this SCM, all variables needed by the physics schemes are allocated and documented in the file \execout{ccpp-scm/scm/src/scm\_type\_defs.f90} and are contained within the \execout{physics} derived data type. This derived data type initializes its component variables in a \execout{create} type-bound procedure. As mentioned in section 6.2 of the \href{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}{CCPP Technical Documentation}, a table containing all required metadata was constructed for describing all variables in the \execout{physics} derived data type. The standard names of all variables in this table must match with a corresponding variable within one or more of the physics schemes. A list of all standard names used can be found in \execout{ccpp/framework/doc/DevelopersGuide/CCPP\_VARIABLES\_SCM.pdf}. The \execout{local\_name} for each variable corresponds to how a variable is referenced from the point in the code where \execout{ccpp\_field\_add()} statements are made. For this SCM, then, all \execout{local\_name}s begin with the \execout{physics} derived data type. Nested within most of the \execout{local\_name}s is also the name of a derived data type used within the UFS Atmosphere cap (re-used here for expediency). Since the \execout{ccpp\_field\_add()} statements are made within a loop over all columns within \execout{scm.F90}, most \execout{local\_name}s are also referenced with \execout{i} as an array index. +As described in sections 6.1 and 6.2 of the \href{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}{CCPP Technical Documentation} a host model must allocate memory and provide metadata for variables that are passed into and out of the schemes within the physics suite. As of this release, in practice this means that a host model must do this for all variables needed by all physics schemes that are expected to be used with the host model. For this SCM, all variables needed by the physics schemes are allocated and documented in the file \execout{ccpp-scm/scm/src/scm\_type\_defs.f90} and are contained within the \execout{physics} derived data type. This derived data type initializes its component variables in a \execout{create} type-bound procedure. As mentioned in section 6.2 of the \href{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}{CCPP Technical Documentation}, a table containing all required metadata was constructed for describing all variables in the \execout{physics} derived data type. The standard names of all variables in this table must match with a corresponding variable within one or more of the physics schemes. 
A list of all standard names used can be found in \execout{ccpp/framework/doc/DevelopersGuide/CCPP\_VARIABLES\_SCM.pdf}. The \execout{local\_name} for each variable corresponds to how a variable is referenced from the point in the code where \execout{ccpp\_field\_add()} statements are made. For this SCM, then, all \execout{local\_name}s begin with the \execout{physics} derived data type. Nested within most of the \execout{local\_name}s is also the name of a derived data type used within the UFS Atmosphere cap (re-used here for expediency). \subsection{Editing and running \exec{ccpp\_prebuild.py}} -General instructions for configuring and running the \execout{ccpp\_prebuild.py} script can be found in chapter 8 of the \href{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}{CCPP Technical Documentation}. The script expects to be run with a host-model-dependent configuration file, passed as argument \execout{--config=path\_to\_config\_file}. Within this configuration file are variables that hold paths to the variable definition files (where metadata tables can be found on the host model side), the scheme files (a list of paths to all source files containing scheme entry points), the auto-generated physics schemes makefile snippet, the auto-generated physics scheme caps makefile snippet, the file where \execout{ccpp\_modules.inc} and \execout{ccpp\_fields.inc} are included, and the directory where the auto-generated physics caps should be written out to. Other variables less likely to be modified by a user are included in this configuration file as well, such as code sections to be included in the auto-generated scheme caps. As mentioned in section \ref{section: compiling}, this script must be run to reconcile data provided by the SCM with data required by the physics schemes before compilation by following step 1 in that section. +General instructions for configuring and running the \execout{ccpp\_prebuild.py} script can be found in chapter 8 of the \href{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}{CCPP Technical Documentation}. The script expects to be run with a host-model-dependent configuration file, passed as argument \execout{--config=path\_to\_config\_file}. Within this configuration file are variables that hold paths to the variable definition files (where metadata tables can be found on the host model side), the scheme files (a list of paths to all source files containing scheme entry points), the auto-generated physics schemes makefile snippet, the auto-generated physics scheme caps makefile snippet, the file where \execout{ccpp\_modules.inc} and \execout{ccpp\_fields.inc} are included, and the directory where the auto-generated physics caps should be written out to. Other variables less likely to be modified by a user are included in this configuration file as well, such as code sections to be included in the auto-generated scheme caps. As mentioned in section \ref{section: compiling}, this script must be run to reconcile data provided by the SCM with data required by the physics schemes before compilation by following step 1 in that section. \subsection{Preparing a suite definition file} The suite definition file is a text file read by the model at compile time. 
It is used to specify the physical parameterization suite, and includes information about the number of parameterization groupings, which parameterizations that are part of each of the groups, the order in which the parameterizations should be run, and whether subcycling will be used to run any of the parameterizations with shorter timesteps. @@ -25,13 +25,13 @@ \subsection{Preparing a suite definition file} For this release, supported suite definition files used with this SCM are found in \execout{ccpp-scm/ccpp/suites}. For all of these suites, the physics schemes have been organized into 3 groupings following how the physics are called in the UFS Atmosphere model, although no code is executed in the SCM time loop between execution of the grouped schemes. Several ``interstitial'' schemes are included in the suite definition file to execute code that previously was part of a hard-coded physics driver. Some of these schemes may eventually be rolled into the schemes themselves, improving portability. \section{Initializing/running a suite} -The process for initializing and running a suite in this SCM is described in sections \ref{section: physics init} and \ref{section: time integration}, respectively. A more general description of the process for performing suite initialization and running can also be found in sections 6.4 and 6.5 of the \href{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}{CCPP Technical Documentation}. +The process for initializing and running a suite in this SCM is described in sections \ref{section: physics init} and \ref{section: time integration}, respectively. A more general description of the process for performing suite initialization and running can also be found in sections 6.4 and 6.5 of the \href{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}{CCPP Technical Documentation}. \section{Changing a suite} \subsection{Replacing a scheme with another} -When the CCPP has reached a state of maturity, the process for modifying the contents of an existing physics suite will be a very straightforward process, consisting of merely changing the name of the scheme in the suite definition file. As of this release, which consists of one scheme of each ``type'' in the pool of CCPP-compliant physics schemes with many short interstitial schemes, the process requires some consideration. Of course, prior to being able to swap a scheme within a suite, one must first add a CCPP-compliant scheme to the pool of available schemes in the CCPP physics repository. This process is described in chapter 2 of the \href{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}{CCPP Technical Documentation}. +When the CCPP has reached a state of maturity, the process for modifying the contents of an existing physics suite will be a very straightforward process, consisting of merely changing the name of the scheme in the suite definition file. As of this release, which consists of one scheme of each ``type'' in the pool of CCPP-compliant physics schemes with many short interstitial schemes, the process requires some consideration. Of course, prior to being able to swap a scheme within a suite, one must first add a CCPP-compliant scheme to the pool of available schemes in the CCPP physics repository. This process is described in chapter 2 of the \href{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}{CCPP Technical Documentation}. 
Once a CCPP-compliant scheme has been added to the CCPP physics repository, the process for modifying an existing suite should take the following steps into account: @@ -39,7 +39,7 @@ \subsection{Replacing a scheme with another} \item Examine and compare the arguments of the scheme being replaced and the replacement scheme. \begin{itemize} \item Are there any new variables that the replacement scheme needs from the host application? If so, these new variables must be added to the host model cap. For the SCM, this involves adding a component variable to the \execout{physics} derived data type and a corresponding entry in the metadata table. The new variables must also be allocated and initialized in the \execout{physics\%create} type-bound procedure. -\item Do any of the new variables need to be calculated in an interstitial scheme? If so, one must be written and made CCPP-compliant itself. The \href{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}{CCPP Technical Documentation} will help in this endeavor, and the process outlined in its chapter 2 should be followed. +\item Do any of the new variables need to be calculated in an interstitial scheme? If so, one must be written and made CCPP-compliant itself. The \href{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}{CCPP Technical Documentation} will help in this endeavor, and the process outlined in its chapter 2 should be followed. \item Do other schemes in the suite rely on output variables from the scheme being replaced that are no longer being supplied by the replacement scheme? Do these output variables need to be derived/calculated in an interstitial scheme? If so, see the previous bullet about adding one. \end{itemize} \item Examine existing interstitial schemes related to the scheme being replaced. @@ -47,7 +47,7 @@ \subsection{Replacing a scheme with another} \item There may be scheme-specific interstitial schemes (needed for one specific scheme) and/or type-generic interstitial schemes (those that are called for all schemes of a given type, i.e. all PBL schemes). Does one need to write analogous scheme-specific interstitial schemes for the replacement? \item Are the type-generic interstitial schemes relevant or do they need to be modified? \end{itemize} -\item Depending on the answers to the above considerations, edit the suite definition file as necessary. Typically, this would involve finding the \execout{} elements associated with the scheme to be replaced and its associated interstitial \execout{} elements and simply replacing the scheme names to reflect their replacements. See chapter 4 of the \href{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}{CCPP Technical Documentation} for further details. +\item Depending on the answers to the above considerations, edit the suite definition file as necessary. Typically, this would involve finding the \execout{} elements associated with the scheme to be replaced and its associated interstitial \execout{} elements and simply replacing the scheme names to reflect their replacements. See chapter 4 of the \href{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}{CCPP Technical Documentation} for further details. 
\end{itemize} \subsection{Modifying ``groups'' of parameterizations} diff --git a/scm/doc/TechGuide/chap_intro.tex b/scm/doc/TechGuide/chap_intro.tex index c32d7d4b4..59c5b0426 100644 --- a/scm/doc/TechGuide/chap_intro.tex +++ b/scm/doc/TechGuide/chap_intro.tex @@ -1,7 +1,7 @@ \chapter{Introduction} \label{chapter: introduction} -A single column model (SCM) can be a valuable tool for diagnosing the performance of a physics suite, from validating that schemes have been integrated into a suite correctly to deep dives into how physical processes are being represented by the approximating code. This SCM has the advantage of working with the Common Community Physics Package (CCPP), a library of physical parameterizations for atmospheric numerical models and the associated framework for connecting potentially any atmospheric model to physics suites constructed from its member parameterizations. In fact, this SCM serves as perhaps the simplest example for using the CCPP and its framework in an atmospheric model. This version contains all parameterizations of NOAA's evolved operational GFS v15.2 suite (implemented in 2019), plus additional developmental schemes. The schemes are grouped in four supported suites described in detail in the \href{https://dtcenter.org/GMTB/v4.0/sci\_doc/}{CCPP Scientific Documentation} (GFS\_v15p2, GFS\_v16beta, csawmg, and GSD\_v1). Two additional suites without the near sea surface temperature scheme are available to match the first Unified Forecast System (UFS) public release. +A single column model (SCM) can be a valuable tool for diagnosing the performance of a physics suite, from validating that schemes have been integrated into a suite correctly to deep dives into how physical processes are being represented by the approximating code. This SCM has the advantage of working with the Common Community Physics Package (CCPP), a library of physical parameterizations for atmospheric numerical models and the associated framework for connecting potentially any atmospheric model to physics suites constructed from its member parameterizations. In fact, this SCM serves as perhaps the simplest example for using the CCPP and its framework in an atmospheric model. This version contains all parameterizations of NOAA's evolved operational GFS v15.2 suite (implemented in 2019), plus additional developmental schemes. The schemes are grouped in five supported suites described in detail in the \href{https://dtcenter.ucar.edu/GMTB/v5.0.0/sci_doc/}{CCPP Scientific Documentation} (GFS\_v15p2, GFS\_v16beta, csawmg, GSD\_v1, and RRFS\_v1alpha). This document serves as both the User and Technical Guides for this model. It contains a Quick Start Guide with instructions for obtaining the code, compiling, and running a sample test case, an explanation for what is included in the repository, a brief description of the operation of the model, a description of how cases are set up and run, and finally, an explanation for how the model interfaces with physics through the CCPP infrastructure. @@ -9,7 +9,7 @@ \chapter{Introduction} \section{Version Notes} -The CCPP SCM v4.1.0 contains the following major and minor changes since v4.0. +The CCPP SCM v5.0.0 contains the following major and minor changes since v4.1. 
Major \begin{itemize} @@ -18,7 +18,12 @@ \section{Version Notes} Minor \begin{itemize} -\item Added Python 3 compatibility for run scripts and unsupported plotting scripts +\item Tracers are configured externally via a file, to match the ``field\_table'' functionality in FV3 +\item Add the RRFS\_v1alpha suite to match the UFS SRW App version 1 public release: \url{https://ufs-srweather-app.readthedocs.io/en/latest/} +\item Added ability to run with HWRF physics +\item Fixed bug related to prescribed surface flux cases (bug was present in v4.1.0) +\item Updated UFS initial conditions case generation script to better handle LSM-related variables +\item Update SCM surface initialization code to better match FV3 \end{itemize} \subsection{Limitations} @@ -26,6 +31,7 @@ \subsection{Limitations} This release bundle has some known limitations: \begin{itemize} +\item Using the RRFS\_v1alpha suite for cases where deep convection is expected to be active will likely produce strange/unreliable results, unless the forcing has been modified to account for the deep convection. This is because forcing for existing cases assumes a horizontal scale for which deep convection is subgrid-scale and is expected to be parameterized. The RRFS\_v1alpha suite is intended for use with regional UFS simulations with horizontal scale small enough not to need a deep convection parameterization active, and it does not contain a deep convective scheme. Nevertheless, the RRFS\_v1alpha suite is included with the SCM as-is for research purposes. \item The provided cases over land points cannot use an LSM at this time due to the lack of initialization data for the LSMs. Therefore, for the provided cases over land points (ARM\_SGP\_summer\_1997\_* and LASSO\_*, where sfc\_type = 1 is set in the case configuration file), prescribed surface fluxes must be used: \begin{itemize} \item surface sensible and latent heat fluxes must be provided in the case data file diff --git a/scm/doc/TechGuide/chap_quick.tex b/scm/doc/TechGuide/chap_quick.tex index 201d74232..5e58c5c21 100644 --- a/scm/doc/TechGuide/chap_quick.tex +++ b/scm/doc/TechGuide/chap_quick.tex @@ -14,7 +14,7 @@ \subsection{Release Code} Clone the source using \begin{lstlisting}[language=bash] -git clone --recursive -b v4.1.0 https://github.com/NCAR/ccpp-scm +git clone --recursive -b v5.0.0 https://github.com/NCAR/ccpp-scm \end{lstlisting} Recall that the \execout{recursive} option in this command clones the main ccpp-scm repository and all subrepositories (ccpp-physics and ccpp-framework). Using this option, there is no need to execute \exec{git submodule init} and \exec{git submodule update}. @@ -29,17 +29,21 @@ \subsection{Development Code} There you will find links to all of the documentation pertinent to developers. -For working with the development branches (stability not guaranteed), check out the \exec{master} branches of the repository (and submodules): +For working with the development branches (stability not guaranteed), check out the \exec{master} branch of the repository: \begin{lstlisting}[language=bash] git clone --recursive -b master https://github.com/NCAR/ccpp-scm \end{lstlisting} -You may want to double-check that the master branch of the SCM is pointing to the latest commits of the master branches of ccpp-physics and ccpp-framework. While we update the submodule pointers often, it is occasionally forgotten. 
To ensure that you have the latest development code for the submodules, execute the following: +Using the \execout{recursive} option guarantees that you are checking out the commits of ccpp-physics and ccpp-framework that were tested with the latest commit of the SCM master branch. You can always retrieve the commits of the submodules that were intended to be used with a given commit of the SCM by doing the following from the top level SCM directory: +\begin{lstlisting}[language=bash] +git submodule update --init --recursive +\end{lstlisting} +You can try to use the latest commits of the ccpp-physics and ccpp-framework submodules if you wish, but this combination may not have been tested. To do so: \begin{enumerate} \item Navigate to the ccpp-physics directory. \begin{lstlisting}[language=bash] cd ccpp-scm/ccpp/physics \end{lstlisting} -\item Check out the right branch (cloning recursively as instructed above creates a ``detached head'' state for the submodules by default). +\item Check out master. \begin{lstlisting}[language=bash] git checkout master \end{lstlisting} @@ -63,19 +67,19 @@ \subsection{Development Code} \section{System Requirements, Libraries, and Tools} \label{section: systemrequirements} -The source code for the SCM and CCPP component is in the form of programs written in FORTRAN, FORTRAN 90, and C. In addition, the I/O relies on the NetCDF libraries. Beyond the standard scripts, the build system relies on use of the Python scripting language, along with cmake, GNU make and date. +The source code for the SCM and CCPP components is in the form of programs written in FORTRAN, FORTRAN 90, and C. In addition, the I/O relies on the NetCDF libraries. Beyond the standard scripts, the build system relies on use of the Python scripting language, along with cmake, GNU make and date. The basic requirements for building and running the CCPP and SCM bundle are listed below. The versions listed reflect successful tests and there is no guarantee that the code will work with different versions. \begin{itemize} \item FORTRAN 90+ compiler \begin{itemize} \item ifort 18.0.5.274, 19.0.2 and 19.0.5 - \item gfortran 6.2, 8.3, and 9.2 + \item gfortran 6.2, 8.3, 9.1, 9.2, and 10.1 \end{itemize} \item C compiler \begin{itemize} \item icc 18.0.5.274, 19.0.2 and 19.0.5 - \item gcc 6.2, 8.3, and 9.2 + \item gcc 6.2, 8.3, 9.1, 9.2, and 10.1 \item Apple clang 11.0.0.11000033, LLVM clang 9.0.0 \end{itemize} \item cmake 2.8.12.1, 2.8.12.2, 3.6.2, 3.16.3, 3.16.4 @@ -86,7 +90,7 @@ \section{System Requirements, Libraries, and Tools} \item Python 2.7.5, 2.7.9, 2.7.13, 2.7.16, 3.6.1, 3.7.5, and 3.8.5 with f90nml module (and Shapely if using the \execout{UFS\_IC\_generator.py} script) \end{itemize} -Because these tools and libraries are typically the purview of system administrators to install and maintain, they are considered part of the basic system requirements. The Unified Forecast System (UFS) Medium-Range Weather Application release v1.1.0 of September/October, 2020, provides software packages and detailed instructions to install these prerequisites and the NCEPLIBS on supported platforms (see section~\ref{section: setup_supported_platforms}). +Because these tools and libraries are typically the purview of system administrators to install and maintain, they are considered part of the basic system requirements. 
The Unified Forecast System (UFS) Short-Range Weather Application release v1.0.0 of February 2021 provides software packages and detailed instructions to install these prerequisites and the NCEPLIBS on supported platforms (see section~\ref{section: setup_supported_platforms}). Further, there are several utility libraries as part of the NCEPLIBS package that must be installed with environment variables pointing to their locations prior to building the SCM. \begin{itemize} \item bacio - Binary I/O Library \item sp - Spectral Transformation Library \item w3nco - GRIB decoder and encoder library \end{itemize} -The following environment variables are used by the build system to properly link these libraries: \execout{BACIO\_LIB4}, \execout{SP\_LIBd}, and \execout{W3NCO\_LIBd}. Computational platforms in which the NCEPLIBS are prebuilt and installed in a central location are referred to as preconfigured platforms. Examples of preconfigured platforms are most NOAA high-performance computing machines (using the Intel compiler) and the NCAR Cheyenne system (using the Intel and GNU compilers). The machine setup scripts mentioned in section \ref{section: compiling} load these libraries (which are identical to those used by the UFS Medium Range Weather Application on those machines) and set these environment variables for the user automatically. For installing the libraries and its prerequisites on supported platforms, existing UFS packages can be used (see section~\ref{section: setup_supported_platforms}). +The following environment variables are used by the build system to properly link these libraries: \execout{BACIO\_LIB4}, \execout{SP\_LIBd}, and \execout{W3NCO\_LIBd}. Computational platforms in which the NCEPLIBS are prebuilt and installed in a central location are referred to as preconfigured platforms. Examples of preconfigured platforms are most NOAA high-performance computing machines (using the Intel compiler) and the NCAR Cheyenne system (using the Intel and GNU compilers). The machine setup scripts mentioned in section \ref{section: compiling} load these libraries (which are identical to those used by the UFS Short and Medium Range Weather Applications on those machines) and set these environment variables for the user automatically. For installing the libraries and their prerequisites on supported platforms, existing UFS packages can be used (see section~\ref{section: setup_supported_platforms}). \subsection{Compilers} The CCPP and SCM have been tested on a variety of computing platforms. Currently the CCPP system is actively supported on Linux and MacOS computing platforms using the Intel or GNU Fortran -compilers. Please use versions listed in the previous section as unforeseen -build issues may occur when using older compiler versions. Typically the best results come from using the +compilers. Windows users have a path to use this software through a Docker container that uses Linux internally (see section \ref{docker}). Please use compiler versions listed in the previous section as unforeseen +build issues may occur when using older versions. Typically the best results come from using the most recent version of a compiler. If you have problems with compilers, please check the ``Known Issues'' section of the release website (\url{https://dtcenter.org/community-code/common-community-physics-package-ccpp/download}). 
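As a quick sanity check of the build environment, the installed compiler and tool versions can be compared against the lists above, for example with the GNU toolchain (use the corresponding Intel commands, e.g. \execout{ifort} and \execout{icc}, if building with the Intel compilers):
\begin{lstlisting}[language=bash]
gfortran --version
gcc --version
cmake --version
python --version
\end{lstlisting}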
-\subsection{Installing Libraries on Supported Platforms}\label{section: setup_supported_platforms} -For users on supported platforms such as generic Linux or macOS systems, the UFS Medium-Range Weather Application v1.1.0 release provides software packages and detailed setup instructions at \url{https://github.com/NOAA-EMC/NCEPLIBS-external/tree/release/public-v1} and \url{https://github.com/NOAA-EMC/NCEPLIBS/tree/release/public-v1}. UFS users who already installed the \execout{NCEPLIBS} package only need to set the compiler environment variables as indicated in the relevant \execout{README\_*.txt} file in \url{https://github.com/NOAA-EMC/NCEPLIBS-external/tree/release/public-v1/doc} and source the shell script that is created by the \execout{NCEPLIBS} install process to set the required environment variables for compiling the SCM. +\subsection{Using Existing Libraries on Preconfigured Platforms}\label{section: use_preconfigured_platforms} +Platform-specific scripts are provided to load modules and set the user environment for preconfigured platforms. These scripts load compiler modules (Fortran 2008-compliant), the NetCDF module, Python environment, etc. and set compiler and NCEPLIBS environment variables. From the top-level code directory (\execout{ccpp-scm} by default), source the correct script for your platform and shell. For \textit{t/csh} shells, +\begin{lstlisting}[language=csh] +source scm/etc/Hera_setup_intel.csh +source scm/etc/Cheyenne_setup_gnu.csh +source scm/etc/Cheyenne_setup_intel.csh +\end{lstlisting} +For bourne/bash shells, +\begin{lstlisting}[language=bash] +. scm/etc/Hera_setup_intel.sh +. scm/etc/Cheyenne_setup_gnu.sh +. scm/etc/Cheyenne_setup_intel.sh +\end{lstlisting} + -The SCM uses only a small part of the UFS \execout{NCEPLIBS} package and has fewer prerequisites (i.e. no \execout{ESMF} or \execout{wgrib2} needed). Users who are not planning to use the UFS can follow the machine setup instructions in the relevant \execout{README*.txt} files in \url{https://github.com/NOAA-EMC/NCEPLIBS-external/tree/release/public-v1/doc} and, instead of installing \execout{NCEPLIBS-external} and \execout{NCEPLIBS}, install only NetCDF/NetCDF-Fortran manually or using the software package manager (\execout{apt}, \execout{yum}, \execout{brew}). +\subsection{Installing Libraries on Non-preconfigured Platforms}\label{section: setup_supported_platforms} +For users on supported platforms such as generic Linux or macOS systems that have not been preconfigured, the UFS Short-Range Weather Application v1.0.0 release provides software packages and detailed setup instructions at \url{https://github.com/NOAA-EMC/NCEPLIBS-external/tree/release/public-v2} and \url{https://github.com/NOAA-EMC/NCEPLIBS/tree/release/public-v2}. UFS users who already installed the \execout{NCEPLIBS} package only need to set the compiler environment variables as indicated in the relevant \execout{README\_*.txt} file in \url{https://github.com/NOAA-EMC/NCEPLIBS-external/tree/release/public-v2/doc} and source the shell script that is created by the \execout{NCEPLIBS} install process to set the required environment variables for compiling the SCM. + +The SCM uses only a small part of the UFS \execout{NCEPLIBS} package and has fewer prerequisites (i.e. no \execout{ESMF} or \execout{wgrib2} needed). 
Users who are not planning to use the UFS can follow the machine setup instructions in the relevant \execout{README*.txt} files in \url{https://github.com/NOAA-EMC/NCEPLIBS-external/tree/release/public-v2/doc} and, instead of installing \execout{NCEPLIBS-external} and \execout{NCEPLIBS}, install only NetCDF/NetCDF-Fortran manually or using the software package manager (\execout{apt}, \execout{yum}, \execout{brew}). \textbf{Note.} On macOS systems, it may be necessary to add the (future) location of the NetCDF libraries \execout{libnetcdf.dylib} and \execout{libnetcdff.dylib} to the \execout{rpath} linker flags before compiling the NetCDF/NetCDF-Fortran libraries. Execute the following command before running \execout{configure} and \execout{make} for \execout{netcdf-c} and \execout{netcdf-fortran}: \begin{lstlisting}[language=bash] @@ -124,18 +143,18 @@ \subsection{Installing Libraries on Supported Platforms}\label{section: setup_su Following successful execution of this script, the commands to set the proper environment variables mentioned above will be written to the terminal as output. One must execute the correct set for the active shell to finish the installation, e.g., for bash \begin{lstlisting} -export BACIO_LIB4=/path/to/nceplibs/lib/libbacio_v2.2.0_4.a -export SP_LIBd=/path/to/nceplibs/lib/libsp_v2.1.0_d.a -export W3NCO_LIBd=/path/to/nceplibs/lib/libw3nco_v2.1.0_d.a +export BACIO_LIB4=/path/to/nceplibs/lib/libbacio_4.a +export SP_LIBd=/path/to/nceplibs/lib/libsp_d.a +export W3NCO_LIBd=/path/to/nceplibs/lib/libw3nco_d.a \end{lstlisting} and for t/csh \begin{lstlisting} -setenv BACIO_LIB4 /path/to/nceplibs/lib/libbacio_v2.2.0_4.a -setenv SP_LIBd /path/to/nceplibs/lib/libsp_v2.1.0_d.a -setenv W3NCO_LIBd /path/to/nceplibs/lib/libw3nco_v2.1.0_d.a +setenv BACIO_LIB4 /path/to/nceplibs/lib/libbacio_4.a +setenv SP_LIBd /path/to/nceplibs/lib/libsp_d.a +setenv W3NCO_LIBd /path/to/nceplibs/lib/libw3nco_d.a \end{lstlisting} -The installation of NCEPLIBS requires \execout{cmake} v3.15+. There are many ways to obtain the required version, either by following instructions provided by \execout{cmake} (\url{https://cmake.org/install/}), or by following the instructions provided for the UFS Medium-Range Weather Application release (\url{https://github.com/NOAA-EMC/NCEPLIBS-external/tree/release/public-v1}). Prepend this installation directory of \execout{cmake} to your path environment variable to use it for building the NCEPLIBS. +The installation of NCEPLIBS requires \execout{cmake} v3.15+. There are many ways to obtain the required version, either by following instructions provided by \execout{cmake} (\url{https://cmake.org/install/}), or by following the instructions provided for the UFS Short-Range Weather Application release (\url{https://github.com/NOAA-EMC/NCEPLIBS-external/tree/release/public-v2}). Prepend this installation directory of \execout{cmake} to your path environment variable to use it for building the NCEPLIBS. The Python environment must provide the \execout{f90nml} module for the SCM scripts to function. Users can test if f90nml is installed using this command in the shell: \begin{lstlisting} @@ -170,28 +189,15 @@ \subsection{Installing Libraries on Supported Platforms}\label{section: setup_su \end{lstlisting} The directory \execout{/my/install/directory} must exist and its subdirectory \execout{/my/install/directory/lib/python[version]/site-packages} (or \execout{lib64} instead of \execout{lib}, depending on the system) must be in the \execout{PYTHONPATH} environment variable. 
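For example, for a bash shell and a hypothetical Python 3.8 installation under \execout{/my/install/directory} (adjust the Python version, and \execout{lib} versus \execout{lib64}, to match your system), the environment variable could be set as follows:
\begin{lstlisting}[language=bash]
export PYTHONPATH=/my/install/directory/lib/python3.8/site-packages:$PYTHONPATH
\end{lstlisting}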
-\subsection{Using Existing Libraries on Preconfigured Platforms}\label{section: use_preconfigured_platforms} -Platform-specific scripts are provided to load modules and set the user environment for preconfigured platforms. These scripts load compiler modules (Fortran 2003-compliant), the NetCDF module, Python environment, etc. and set compiler and NCEPLIBS environment variables. From the top-level code directory (\execout{ccpp-scm} by default), source the correct script for your platform and shell. For \textit{t/csh} shells, -\begin{lstlisting}[language=csh] -source scm/etc/Hera_setup_intel.csh -source scm/etc/Cheyenne_setup_gnu.csh -source scm/etc/Cheyenne_setup_intel.csh -\end{lstlisting} -For bourne/bash shells, -\begin{lstlisting}[language=bash] -. scm/etc/Hera_setup_intel.sh -. scm/etc/Cheyenne_setup_gnu.sh -. scm/etc/Cheyenne_setup_intel.sh -\end{lstlisting} - \section{Compiling SCM with CCPP} \label{section: compiling} -The first step in compiling the CCPP and SCM is to properly setup your user environment as described in sections~\ref{section: setup_supported_platforms} and~\ref{section: use_preconfigured_platforms}. The second step is to download the lookup tables (large binaries, $~$324\,MB) for the Thompson microphysics package and place them in the correct directory: -From the top-level code directory (\execout{ccpp-scm} by default), execute the following script: +The first step in compiling the CCPP and SCM is to properly setup your user environment as described in sections~\ref{section: use_preconfigured_platforms} and~\ref{section: setup_supported_platforms}. The second step is to download the lookup tables and other large datasets (large binaries, $~$324\,MB) needed by the physics schemes and place them in the correct directory: +From the top-level code directory (\execout{ccpp-scm} by default), execute the following scripts: \begin{lstlisting}[language=bash] ./contrib/get_thompson_tables.sh +./contrib/get_mg_inccn_data.sh \end{lstlisting} -If the download step fails, make sure that your system's firewall does not block access to GitHub. If it does, download the file \execout{thompson\_tables.tar} from the GitHub release website using your browser and manually extract its contents in the directory \execout{scm/data/physics\_input\_data/}. +If the download step fails, make sure that your system's firewall does not block access to GitHub. If it does, download the files \execout{thompson\_tables.tar} and \execout{MG\_INCCN\_data.tar} from the GitHub release website using your browser and manually extract its contents in the directory \execout{scm/data/physics\_input\_data/}. Following this step, the top level build system will use \execout{cmake} to query system parameters, execute the CCPP prebuild script to match the physics variables (between what the host model -- SCM -- can provide and what is needed by physics schemes in the CCPP), and build the physics caps needed to use them. Finally, \execout{make} is used to compile the components. \begin{enumerate} @@ -226,7 +232,7 @@ \section{Compiling SCM with CCPP} If necessary, the CCPP prebuild script can be executed manually from the top level directory (\execout{ccpp-scm}). The basic syntax is \begin{lstlisting}[language=bash] -./ccpp/framework/scripts/ccpp_prebuild.py --config=./ccpp/config/ccpp_prebuild_config.py --static --suites=SCM_GFS_v15p2,SCM_GFS_v16beta,SCM_GSD_v1[...] 
--builddir=./scm/bin [--debug] +./ccpp/framework/scripts/ccpp_prebuild.py --config=./ccpp/config/ccpp_prebuild_config.py --suites=SCM_GFS_v15p2,SCM_GFS_v16beta,SCM_GSD_v1[...] --builddir=./scm/bin [--debug] \end{lstlisting} where the argument supplied via the \execout{-{}-suites} variable is a comma-separated list of suite names that exist in the \execout{./ccpp/suites} directory. Note that suite names are the suite definition filenames minus the \exec{suite\_} prefix and \exec{.xml} suffix. @@ -251,27 +257,48 @@ \section{Run the SCM with a supplied case} There are several test cases provided with this version of the SCM. For all cases, the SCM will go through the time steps, applying forcing and calling the physics defined in the chosen suite definition file using physics configuration options from an associated namelist. The model is executed through one of two Python run scripts that are pre-staged into the \execout{bin} directory: \execout{run\_scm.py} or \execout{multi\_run\_scm.py}. The first sets up and runs one integration while the latter will set up and run several integrations serially. \subsection{Single Run Script Usage} \label{subsection: singlerunscript} -Running a case requires three pieces of information: the case to run (consisting of initial conditions, geolocation, forcing data, etc.), the physics suite to use (through a CCPP suite definition file), and a physics namelist (that specifies configurable physics options to use). As discussed in chapter \ref{chapter: cases}, cases are set up via their own namelists in \execout{../etc/case\_config}. A default physics suite is provided as a user-editable variable in the script and default namelists are associated with each physics suite (through \execout{../src/default\_namelists.py}), so, technically, one must only specify a case to run with the SCM. The single run script's interface is described below. +Running a case requires four pieces of information: the case to run (consisting of initial conditions, geolocation, forcing data, etc.), the physics suite to use (through a CCPP suite definition file), a physics namelist (that specifies configurable physics options to use), and a tracer configuration file. As discussed in chapter \ref{chapter: cases}, cases are set up via their own namelists in \execout{../etc/case\_config}. A default physics suite is provided as a user-editable variable in the script and default namelists and tracer configurations are associated with each physics suite (through \execout{../src/default\_namelists.py} and \execout{../src/default\_tracers.py}), so, technically, one must only specify a case to run with the SCM. The single run script's interface is described below. \begin{lstlisting}[language=bash] -./run_scm.py -c CASE_NAME [-s SUITE_NAME] [-n PHYSICS_NAMELIST_WITH_PATH] [-g] [-d] +./run_scm.py -c CASE_NAME [-s SUITE_NAME] [-n PHYSICS_NAMELIST.nml] [-t TRACER_CONFIGURATION.txt] [-g] [-d] \end{lstlisting} -When invoking the run script, the only required argument is the name of the case to run. The case name used must match one of the case configuration files located in \execout{../etc/case\_config} (\emph{without the .nml extension!}). If specifying a suite other than the default, the suite name used must match the value of the suite name in one of the suite definition files located in \execout{../../ccpp/suites} (Note: not the filename of the suite definition file). 
As part of the fourth CCPP release, the following suite names are valid: +When invoking the run script, the only required argument is the name of the case to run. The case name used must match one of the case configuration files located in \execout{../etc/case\_config} (\emph{without the .nml extension!}). If specifying a suite other than the default, the suite name used must match the value of the suite name in one of the suite definition files located in \execout{../../ccpp/suites} (Note: not the filename of the suite definition file). As part of the fifth CCPP release, the following suite names are valid: \begin{enumerate} \item SCM\_GFS\_v15p2 \item SCM\_GFS\_v16beta -\item SCM\_GFS\_v15p2\_no\_nsst -\item SCM\_GFS\_v16beta\_no\_nsst \item SCM\_csawmg \item SCM\_GSD\_v1 +\item SCM\_RRFS\_v1alpha \end{enumerate} Note that using the Thompson microphysics scheme (as in SCM\_GSD\_v1) requires the computation of look-up tables during its initialization phase. As of the release, this process has been prohibitively slow with this model, so it is HIGHLY suggested that these look-up tables are downloaded and staged to use this scheme (and the SCM\_GSD\_v1 suite) as described in section~\ref{section: compiling}. Also note that some cases require specified surface fluxes. Special suite definition files that correspond to the suites listed above have been created and use the \execout{*\_prescribed\_surface} decoration. It is not necessary to specify this filename decoration when specifying the suite name. If the \execout{spec\_sfc\_flux} variable in the configuration file of the case being run is set to \execout{.true.}, the run script will automatically use the special suite definition file that corresponds to the chosen suite from the list above. -If specifying a namelist other than the default, the value must be an entire filename that exists in \execout{../../ccpp/physics\_namelists}. Caution should be exercised when modifying physics namelists since some redundancy between flags to control some physics parameterizations and scheme entries in the CCPP suite definition files currently exists. Values of numerical parameters are typically OK to change without fear of inconsistencies. Lastly, the \execout{-g} flag can be used to run the executable through the \exec{gdb} debugger (assuming it is installed on the system), and the \execout{-d} flag is required when running this command in a Docker container in order to successfully mount a volume between the host machine and the Docker container instance and to share the output and plots with the host machine. +If specifying a namelist other than the default, the value must be an entire filename that exists in \execout{../../ccpp/physics\_namelists}. Caution should be exercised when modifying physics namelists since some redundancy between flags to control some physics parameterizations and scheme entries in the CCPP suite definition files currently exists. Values of numerical parameters are typically OK to change without fear of inconsistencies. If specifying a tracer configuration other than the default, the value must be an entire filename that exists in \execout{../../scm/etc/tracer\_config}. The tracers that are used should match what the physics suite expects, or a runtime error will result. Most of the tracers are dependent on the microphysics scheme used within the suite. The tracer names that are supported as of this release are given by the following list. 
Note that running without \execout{sphum}, \execout{o3mr}, and \execout{liq\_wat} may result in a runtime error in all supported suites. + +\begin{enumerate} +\item sphum +\item o3mr +\item liq\_wat +\item ice\_wat +\item rainwat +\item snowwat +\item graupel +\item cld\_amt +\item water\_nc +\item ice\_nc +\item rain\_nc +\item snow\_nc +\item graupel\_nc +\item sgs\_tke +\item liq\_aero +\item ice\_aero +\item q\_rimef +\end{enumerate} + +Lastly, the \execout{-g} flag can be used to run the executable through the \exec{gdb} debugger (assuming it is installed on the system), and the \execout{-d} flag is required when running this command in a Docker container in order to successfully mount a volume between the host machine and the Docker container instance and to share the output and plots with the host machine. A NetCDF output file is generated in the location specified in the case configuration file, if the \execout{output\_dir} variable exists in that file. Otherwise an output directory is constructed from the case, suite, and namelist used (if different from the default). All output directories are placed in the \execout{bin} directory. If using a Docker container, all output is copied to the \execout{/home} directory in container space for volume-mounting purposes. Any standard NetCDF file viewing or analysis tools may be used to @@ -285,7 +312,7 @@ \subsection{Multiple Run Script Usage}\label{subsection: multirunscript} ./multi_run_scm.py {[-c CASE_NAME] [-s SUITE_NAME] [-f PATH_TO_FILE]} [-v{v}] [-t] [-d] \end{lstlisting} -No arguments are required for this script. The \execout{-c or --case}, \execout{-s or --suite}, or \execout{-f or --file} options form a mutually-exclusive group, so exactly one of these is allowed at one time. If \execout{--c} is specified with a case name, the script will run a set of integrations for all supported suites (defined in \execout{../src/supported\_suites.py}) for that case. If \execout{-s} is specified with a suite name, the script will run a set of integrations for all supported cases (defined in \execout{../src/supported\_cases.py}) for that that suite. If \execout{-f} is specified with the path to a filename, it will read in lists of cases, suites, and namelists to use from that file. An example for this file's syntax can be found in \execout{../src/example\_multi\_run.py}. If multiple namelists are specified in the file, there either must be one suite specified \emph{or} the number of suites must match the number of namelists. If none of the \execout{-c or --case}, \execout{-s or --suite}, or \execout{-f or --file} options group is specified, the script will run through all permutations of supported cases and suites (as defined in the files previously mentioned). +No arguments are required for this script. The \execout{-c or --case}, \execout{-s or --suite}, or \execout{-f or --file} options form a mutually-exclusive group, so exactly one of these is allowed at one time. If \execout{-c} is specified with a case name, the script will run a set of integrations for all supported suites (defined in \execout{../src/supported\_suites.py}) for that case. If \execout{-s} is specified with a suite name, the script will run a set of integrations for all supported cases (defined in \execout{../src/supported\_cases.py}) for that suite. If \execout{-f} is specified with the path to a filename, it will read in lists of cases, suites, and namelists to use from that file. 
An example for this file's syntax can be found in \execout{../src/example\_multi\_run.py}. If multiple namelists are specified in the file, there either must be one suite specified \emph{or} the number of suites must match the number of namelists. If none of the \execout{-c or --case}, \execout{-s or --suite}, or \execout{-f or --file} options group is specified, the script will run through all permutations of supported cases and suites (as defined in the files previously mentioned). For this script, all runs are assumed to use default tracer configurations for all suites. In addition to the main options, some helper options can also be used with any of those above. The \execout{-v{v} or --verbose} option can be used to output more information from the script to the console and to a log file. If this option is not used, only completion progress messages are written out. If one \execout{-v} is used, the script will write out completion progress messages and all messages and output from the single run script. If two \execout{-vv} are used, the script will also write out all messages and single run script output to a log file (\execout{multi\_run\_scm.log}) in the \execout{bin} directory. The option, \execout{-t or --timer}, can be used to output the elapsed time for each integration executed by the script. Note that the execution time includes file operations performed by the single run script in addition to the execution of the underlying (Fortran) SCM executable. By default, this option will execute one integration of each subprocess. Since some variability is expected for each model run, if greater precision is required, the number of integrations for timing averaging can be set through the internal script variable \execout{timer\_iterations}. This option can be useful, for example, for getting a rough idea of relative computational expense of different physics suites. Finally, the \execout{-d} flag is required when running this command in a Docker container in order to successfully mount a volume between the host machine and the Docker container instance and to share the output and plots with the host machine. @@ -297,7 +324,7 @@ \subsection{Batch Run Script} \end{lstlisting} from the \execout{bin} directory. -Additional details regarding the SCM may be found in the remainder of this guide. More information on the CCPP can be found in the CCPP Technical Documentation available at \url{https://ccpp-techdoc.readthedocs.io/en/v4.1.0/}. +Additional details regarding the SCM may be found in the remainder of this guide. More information on the CCPP can be found in the CCPP Technical Documentation available at \url{https://ccpp-techdoc.readthedocs.io/en/v5.0.0/}. \section{Creating and Using a Docker Container with SCM and CCPP} \label{docker} @@ -317,19 +344,19 @@ \section{Creating and Using a Docker Container with SCM and CCPP} \subsection{Building the Docker image} -The Dockerfile builds CCPP SCM v4.1.0 from source using the GNU compiler. A number of required codes are built and installed via the DTC-supported common community container. For reference, the common community container repository can be accessed here: \url{https://github.com/NCAR/Common-Community-Container}. +The Dockerfile builds CCPP SCM v5.0.0 from source using the GNU compiler. A number of required codes are built and installed via the DTC-supported common community container. For reference, the common community container repository can be accessed here: \url{https://github.com/NCAR/Common-Community-Container}. 
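Before building the image, it may be helpful to confirm that Docker is installed and that the Docker daemon is running; any recent Docker version should work:
\begin{lstlisting}[language=bash]
docker --version
docker info
\end{lstlisting}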
The CCPP SCM has a number of system requirements and necessary libraries and tools. Below is a list, including versions, used to create the GNU-based Docker image:
\begin{itemize}
-\item gfortran - 8.3.1
-\item gcc - 8.3.1
+\item gfortran - 9.3
+\item gcc - 9.3
\item cmake - 3.16.5
\item NetCDF - 4.6.2
\item HDF5 - 1.10.4
\item ZLIB - 1.2.7
\item SZIP - 2.1.1
\item Python - 3
-\item NCEPLIBS subset: bacio v2.2.0\_4, sp v2.1.0\_d, w3nco v2.1.0\_d
+\item NCEPLIBS subset: bacio v2.4.1\_4, sp v2.3.3\_d, w3nco v2.4.1\_d
\end{itemize}
A Docker image containing the SCM, CCPP, and its software prerequisites can be generated from the code in the software repository obtained by following section \ref{obtaining_code} by executing the following steps:
@@ -342,7 +369,7 @@ \subsection{Building the Docker image}
\begin{lstlisting}[language=bash]
docker build -t ccpp-scm .
\end{lstlisting}
-Inspect the Dockerfile if you would like to see details for how the image is built. The image will contain SCM prerequisite software from DTC, the SCM and CCPP code, and a pre-compiled executable for the SCM with the 6 supported suites for the SCM. A successful build will show two images: dtcenter/common-community-container, and ccpp-scm. To list images, type:
+Inspect the Dockerfile if you would like to see details of how the image is built. The image will contain SCM prerequisite software from DTC, the SCM and CCPP code, and a pre-compiled SCM executable with the 5 supported suites. A successful build will show two images: dtcenter/common-community-container and ccpp-scm. To list images, type:
\begin{lstlisting}[language=bash]
docker images
\end{lstlisting}
@@ -352,7 +379,7 @@ \subsection{Using a prebuilt Docker image from Dockerhub}
A prebuilt Docker image for this release is available on Dockerhub if it is not desired to build from source. In order to use it, execute the following from the terminal where Docker is run:
\begin{lstlisting}[language=bash]
-docker pull dtcenter/ccpp-scm:v4.1.0
+docker pull dtcenter/ccpp-scm:v5.0.0
\end{lstlisting}
To verify that it exists afterward, run
\begin{lstlisting}[language=bash]
@@ -393,7 +420,7 @@ \subsection{Running the Docker image}
\item \execout{-v} specifies the volume mount from a host directory (outside the container) to a directory inside the container. Using volumes allows you to share data between the host machine and the container. For running the SCM, the output is mounted from \execout{/home} inside the container to \execout{OUT\_DIR} on the host machine. Upon exiting the container, data mounted to the host machine will still be accessible.
\item \execout{$--$name} names the container. If no name is provided, the daemon will autogenerate a random string name.
\end{itemize}
-NOTE: If you are using a prebuilt image from Dockerhub, substitute the name of the image that was pulled from Dockerhub in the commands above; i.e. instead of \execout{ccpp-scm} above, one would have \execout{dtcenter/ccpp-scm:v4.1.0}.
+NOTE: If you are using a prebuilt image from Dockerhub, substitute the name of the image that was pulled from Dockerhub in the commands above; i.e., instead of \execout{ccpp-scm} above, one would have \execout{dtcenter/ccpp-scm:v5.0.0}.
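To make the substitution concrete, here is a sketch of an interactive run that uses the prebuilt Dockerhub image in place of the locally built \execout{ccpp-scm} image (it assumes \execout{OUT\_DIR} has already been set to a host directory for the volume mount, as described above):
\begin{lstlisting}[language=bash]
# identical to the local-image invocation, with only the image name changed
docker run --rm -it -v ${OUT_DIR}:/home --name run-ccpp-scm dtcenter/ccpp-scm:v5.0.0 /bin/bash
\end{lstlisting}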
\item To use the SCM interactively, run non-default configurations, create plots, or even develop code, issue the following command:
\begin{lstlisting}[language=bash]
docker run --rm -it -v ${OUT_DIR}:/home --name run-ccpp-scm ccpp-scm /bin/bash
diff --git a/scm/doc/TechGuide/chap_repo.tex b/scm/doc/TechGuide/chap_repo.tex
index 862725a5f..d2a1e1a78 100644
--- a/scm/doc/TechGuide/chap_repo.tex
+++ b/scm/doc/TechGuide/chap_repo.tex
@@ -2,7 +2,7 @@ \chapter{Repository}
\label{chapter: repository}
\section{What is included in the repository?}
-The repository contains all code and data required to run the CCPP SCM (with the exception of large initialization tables for the Thompson microphysics scheme discussed in subsection \ref{subsection: singlerunscript}). It is functionally separated into 3 subdirectories representing the SCM model infrastructure (\execout{scm} directory), the CCPP infrastructure (\execout{ccpp/framework} directory), and the CCPP physics schemes (\execout{ccpp/physics} directory). The entire \execout{ccpp-scm} repository resides on Github's NCAR space, and the \execout{ccpp/framework} and \execout{ccpp/physics} directories are git submodules that point to repositories \execout{ccpp-framework} and \execout{ccpp-physics} on the same space. The structure of the entire repository is represented below. Note that the \execout{ccpp-physics} repository also contains files needed for using the CCPP with the UFS Atmosphere host model that uses the Finite-Volume Cubed-Sphere (FV3) dynamical core.
+The repository contains all code and data required to run the CCPP SCM (with the exception of large initialization tables for the Thompson and Morrison-Gettelman microphysics schemes discussed in subsection \ref{subsection: singlerunscript}). It is functionally separated into 3 subdirectories representing the SCM model infrastructure (\execout{scm} directory), the CCPP infrastructure (\execout{ccpp/framework} directory), and the CCPP physics schemes (\execout{ccpp/physics} directory). The entire \execout{ccpp-scm} repository resides on Github's NCAR space, and the \execout{ccpp/framework} and \execout{ccpp/physics} directories are git submodules that point to repositories \execout{ccpp-framework} and \execout{ccpp-physics} on the same space. The structure of the entire repository is represented below. Note that the \execout{ccpp-physics} repository also contains files needed for using the CCPP with the UFS Atmosphere host model that uses the Finite-Volume Cubed-Sphere (FV3) dynamical core.
{\small\justify
\dirtree{%
@@ -25,16 +25,17 @@ \section{What is included in the repository?}
 .4 CMakeLists.txt\DTcomment{cmake configuration file for ccpp-physics}.
 .4 CODEOWNERS\DTcomment{list of GitHub users with permission to merge}.
 .4 LICENSE.
- .4 pgifix.py.
 .4 physics/\DTcomment{contains all CCPP physics and interstitial schemes}.
 .5 docs/\DTcomment{contains CCPP physics doxygen documentation}.
 .4 README.md.
+ .4 tools/\DTcomment{tools for checking physics source code}.
 .3 physics\_namelists\DTcomment{contains physics namelist files associated with suites}.
 .3 suites/\DTcomment{contains suite definition files}.
 .2 CODEOWNERS\DTcomment{list of GitHub users with permission to merge}.
 .2 contrib/.
 .3 build\_nceplibs.sh\DTcomment{script for installing prerequisite NCEPLIBS locally}.
 .3 get\_thompson\_tables.sh\DTcomment{script for downloading/extracting the Thompson lookup tables}.
+ .3 get\_mg\_inccn\_data.sh\DTcomment{script for downloading/extracting the Morrison-Gettelman data}.
 .2 docker/.
 .3 Dockerfile\DTcomment{contains Docker instructions for building the CCPP SCM image}.
 .2 README.md.
@@ -62,6 +63,7 @@ \section{What is included in the repository?}
 .5 f90nml.0.19/\DTcomment{f90nml Python package}.
 .5 Shapely-1.7.0\DTcomment{Shapely Python package}.
 .5 plot\_configs/\DTcomment{plot configuration files}.
+ .4 tracer\_config\DTcomment{tracer configuration files}.
 .3 LICENSE.txt.
 .3 src/\DTcomment{source code for SCM infrastructure and Python run scripts}.
}}
diff --git a/scm/doc/TechGuide/main.pdf b/scm/doc/TechGuide/main.pdf
index 18b19e04c..26eadce99 100644
Binary files a/scm/doc/TechGuide/main.pdf and b/scm/doc/TechGuide/main.pdf differ
diff --git a/scm/doc/TechGuide/title.tex b/scm/doc/TechGuide/title.tex
index 720bb0530..2386e6074 100644
--- a/scm/doc/TechGuide/title.tex
+++ b/scm/doc/TechGuide/title.tex
@@ -8,10 +8,10 @@
\textcolor{darkgray}{\bigsf Common Community Physics Package\\[0.5ex] Single Column Model (SCM)}
\vspace*{1em}\par
-\textcolor{darkgray}{\bigst User and Technical Guide\\[0.5ex] v4.1.0}
+\textcolor{darkgray}{\bigst User and Technical Guide\\[0.5ex] v5.0.0}
\vspace*{1em}\par
-\large{September 2020}\\
+\large{March 2021}\\
Grant Firl, Laurie Carson, Michelle Harrold\\
\textit{\small{National Center for Atmospheric Research and Developmental Testbed Center}}\\[4em]