From 7888f1aecfb655e957dd6ce0532dfd95f330fc96 Mon Sep 17 00:00:00 2001 From: gspetro Date: Fri, 11 Feb 2022 13:49:49 -0500 Subject: [PATCH 01/30] add docs --- docs/INSTALL | 33 +++++ docs/Makefile | 20 ++++ docs/README | 29 +++++ docs/RUNTIME | 13 ++ docs/make.bat | 35 ++++++ docs/requirements.txt | 2 + docs/source/conf.py | 112 +++++++++++++++++ docs/source/hpc-components.rst | 91 ++++++++++++++ docs/source/hpc-install.rst | 213 +++++++++++++++++++++++++++++++++ docs/source/hpc-intro.rst | 29 +++++ docs/source/hpc-notes.rst | 120 +++++++++++++++++++ docs/source/hpc-parameters.rst | 99 +++++++++++++++ docs/source/hpc-prereqs.rst | 13 ++ docs/source/index.rst | 17 +++ 14 files changed, 826 insertions(+) create mode 100644 docs/INSTALL create mode 100644 docs/Makefile create mode 100644 docs/README create mode 100644 docs/RUNTIME create mode 100644 docs/make.bat create mode 100644 docs/requirements.txt create mode 100644 docs/source/conf.py create mode 100644 docs/source/hpc-components.rst create mode 100644 docs/source/hpc-install.rst create mode 100644 docs/source/hpc-intro.rst create mode 100644 docs/source/hpc-notes.rst create mode 100644 docs/source/hpc-parameters.rst create mode 100644 docs/source/hpc-prereqs.rst create mode 100644 docs/source/index.rst diff --git a/docs/INSTALL b/docs/INSTALL new file mode 100644 index 00000000..5923285b --- /dev/null +++ b/docs/INSTALL @@ -0,0 +1,33 @@ +# Simple setup instructions for the UFS SRW App +# For more details, see the "Getting Started" guide: +# https://github.com/ufs-community/ufs-srweather-app/wiki/Getting-Started + +# Getting the UFS SRW App code +# +# The SRW App can be downloaded directly from github, either by using `git clone` or by downloading +# from the web. + +git clone https://github.com/ufs-community/ufs-srweather-app.git + +cd ufs-srweather-app/ +./manage_externals/checkout_externals + +# Prior to building, you must set up the environment so cmake can find the appropriate compilers +# and libraries. For instructions specific to supported platforms, see the "build_[machine]_[compiler].env +# files in the "env" directory. These files give instructions assuming a bash or ksh login shell, for +# csh and tcsh users you will have to modify the commands for setting envronment variables. + +# Supported CMake flags: +# -DCMAKE_INSTALL_PREFIX Location where the bin/ include/ lib/ and share/ directories containing +# the various components of the SRW App will be created. Recommended value +# is "..", one directory up from the build directory +# -DCCPP_SUITES A comma-separated list of CCPP suites to build with the UFS weather +# model. See the User's Guide for a full list of available suites. The +# default is to build with the released supported suites: FV3_GFS_v15p2 and +# FV3_RRFS_v1beta + +mkdir build && cd build +cmake .. -DCMAKE_INSTALL_PREFIX=.. +make -j 8 + + diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..d0c3cbf1 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. 
$(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/README b/docs/README new file mode 100644 index 00000000..30617076 --- /dev/null +++ b/docs/README @@ -0,0 +1,29 @@ +Steps to build and use the Sphinx documentation tool: + +1) Get Sphinx and sphinxcontrib-bibtex installed on your desktop from + http://www.sphinx-doc.org/en/master/usage/installation.html + https://sphinxcontrib-bibtex.readthedocs.io/en/latest/quickstart.html#installation + +2) Create a Sphinx documentation root directory: + % mkdir docs + % cd docs + +3) Initialize your Sphinx project (set up an initial directory structure) using + % sphinx-quickstart + + See http://www.sphinx-doc.org/en/master/usage/quickstart.html or + https://sphinx-rtd-tutorial.readthedocs.io/en/latest/sphinx-quickstart.html + + for help. You can answer (ENTER) to most of the questions. + +To build html: + +From the directory above source and build, the sphinx project directory: + +make html + +Sphinx uses Latex to export the documentation as a PDF file. To build pdf: + +make latexpdf + +It will generate a PDF file in ./build/latex/.pdf diff --git a/docs/RUNTIME b/docs/RUNTIME new file mode 100644 index 00000000..a80a6522 --- /dev/null +++ b/docs/RUNTIME @@ -0,0 +1,13 @@ +# Users should load the appropriate python environment for the workflow. +# The workflow requires Python 3, with the packages 'PyYAML', 'Jinja2', and 'f90nml' available. + +# For users' convenience, the python environment for the workflow is put in 'ufs-srweather-app/env/wflow_[machine].env'. +# When generating a workflow experiment or running a workflow, users can use this file for a specific machine. + +# For example, on Hera: + +cd ufs-srweather-app/env +source wflow_hera.env + +cd ../regional_workflow/ush +./generate_FV3LAM_wflow.sh diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000..061f32f9 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..9c725846 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,2 @@ +sphinxcontrib-bibtex +sphinx_rtd_theme diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 00000000..7c0161f2 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,112 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. 
For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'HPC-Stack Users Guide' +copyright = '2022, EPIC AUS Team' +author = 'EPIC AUS Team' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = '' + +numfig = True + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.ifconfig', + 'sphinx.ext.viewcode', + 'sphinx.ext.githubpages', + 'sphinx.ext.napoleon', + 'sphinxcontrib.bibtex', + 'myst_parser' +] + +bibtex_bibfiles = ['references.bib'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' +html_theme_path = ["_themes", ] + + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} +html_theme_options = {"body_max_width": "none"} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +html_context = { + 'css_files': [ + '_static/theme_overrides.css', # override wide tables in RTD theme + ], + } + +def setup(app): + app.add_css_file('custom.css') # may also be an URL + diff --git a/docs/source/hpc-components.rst b/docs/source/hpc-components.rst new file mode 100644 index 00000000..f15286d0 --- /dev/null +++ b/docs/source/hpc-components.rst @@ -0,0 +1,91 @@ +.. This is a continuation of the hpc-intro.rst chapter + +.. 
_HPCComponents: + +HPC-Stack Components +===================== + +The HPC-Stack packages are built in :numref:`Step %s ` using the `build_stack.sh` script. The following software can optionally be built with the scripts under `libs`. + +* Compilers and MPI libraries + + - [GNU/GCC](https://gcc.gnu.org/) + - [Intel](https://intel.com) + - [OpenMPI](https://www.open-mpi.org/) + - [MPICH](https://www.mpich.org/) + - `hpc-` Meta-modules for all the above as well as Intel and IMPI + +* HPC Stack - Third Party Libraries + + - [CMake](https://cmake.org/) + - [Udunits](https://www.unidata.ucar.edu/software/udunits/) + - [PNG](http://www.libpng.org/pub/png/) + - [JPEG](https://jpeg.org/) + - [Jasper](https://github.com/jasper-software/jasper) + - [SZip](https://support.hdfgroup.org/doc_resource/SZIP/) + - [Zlib](http://www.zlib.net/) + - [HDF5](https://www.hdfgroup.org/solutions/hdf5/) + - [PNetCDF](https://parallel-netcdf.github.io/) + - [NetCDF](https://www.unidata.ucar.edu/software/netcdf/) + - [ParallelIO](https://github.com/NCAR/ParallelIO) + - [nccmp](https://gitlab.com/remikz/nccmp) + - [nco](http://nco.sourceforge.net/) + - [CDO](https://code.mpimet.mpg.de/projects/cdo) + - [FFTW](http://www.fftw.org/) + - [GPTL](https://jmrosinski.github.io/GPTL/) + - [Tau2]() + - [Boost](https://beta.boost.org/) + - [Eigen](http://eigen.tuxfamily.org/) + - [GSL-Lite](http://github.com/gsl-lite/gsl-lite) + - [JSON for C++](https://github.com/nlohmann/json/) + - [JSON Schema Validator for C++](https://github.com/pboettch/json-schema-validator) + - [pybind11](https://github.com/pybind/pybind11) + - [MADIS](https://madis-data.ncep.noaa.gov) + - [SQLite](https://www.sqlite.org) + - [PROJ](https://proj.org) + - [GEOS](https://www.osgeo.org/projects/geos) + +* UFS Dependencies + + - [ESMF](https://www.earthsystemcog.org/projects/esmf/) + - [FMS](https://github.com/noaa-gfdl/fms.git) + +* NCEP Libraries + + - [NCEPLIBS-bacio](https://github.com/noaa-emc/nceplibs-bacio.git) + - [NCEPLIBS-sigio](https://github.com/noaa-emc/nceplibs-sigio.git) + - [NCEPLIBS-sfcio](https://github.com/noaa-emc/nceplibs-sfcio.git) + - [NCEPLIBS-gfsio](https://github.com/noaa-emc/nceplibs-gfsio.git) + - [NCEPLIBS-w3nco](https://github.com/noaa-emc/nceplibs-w3nco.git) + - [NCEPLIBS-sp](https://github.com/noaa-emc/nceplibs-sp.git) + - [NCEPLIBS-ip](https://github.com/noaa-emc/nceplibs-ip.git) + - [NCEPLIBS-ip2](https://github.com/noaa-emc/nceplibs-ip2.git) + - [NCEPLIBS-g2](https://github.com/noaa-emc/nceplibs-g2.git) + - [NCEPLIBS-g2c](https://github.com/noaa-emc/nceplibs-g2c.git) + - [NCEPLIBS-g2tmpl](https://github.com/noaa-emc/nceplibs-g2tmpl.git) + - [NCEPLIBS-nemsio](https://github.com/noaa-emc/nceplibs-nemsio.git) + - [NCEPLIBS-nemsiogfs](https://github.com/noaa-emc/nceplibs-nemsiogfs.git) + - [NCEPLIBS-w3emc](https://github.com/noaa-emc/nceplibs-w3emc.git) + - [NCEPLIBS-landsfcutil](https://github.com/noaa-emc/nceplibs-landsfcutil.git) + - [NCEPLIBS-bufr](https://github.com/noaa-emc/nceplibs-bufr.git) + - [NCEPLIBS-wgrib2](https://github.com/noaa-emc/nceplibs-wgrib2.git) + - [NCEPLIBS-prod_util](https://github.com/noaa-emc/nceplibs-prod_util.git) + - [NCEPLIBS-grib_util](https://github.com/noaa-emc/nceplibs-grib_util.git) + - [NCEPLIBS-ncio](https://github.com/noaa-emc/nceplibs-ncio.git) + - [NCEPLIBS-wrf_io](https://github.com/noaa-emc/nceplibs-wrf_io.git) + - [EMC_crtm](https://github.com/noaa-emc/EMC_crtm.git) + - [EMC_post](https://github.com/noaa-emc/EMC_post.git) + +* JEDI Dependencies + + - 
[ecbuild](https://github.com/ecmwf/ecbuild.git) + - [eckit](https://github.com/ecmwf/eckit.git) + - [fckit](https://github.com/ecmwf/fckit.git) + - [atlas](https://github.com/ecmwf/atlas.git) + +* Python and Virtual Environments + + - [Miniconda3](https://docs.conda.io/en/latest/) + - [r2d2](https://github.com/jcsda-internal/r2d2.git) + + diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst new file mode 100644 index 00000000..9c7e7466 --- /dev/null +++ b/docs/source/hpc-install.rst @@ -0,0 +1,213 @@ +.. _InstallBuildHPCstack: + +================================ +Install and Build the HPC-Stack +================================ + +.. warning:: + The HPC-stack is already installed on `Level 1 systems `_ (e.g., Cheyenne, Hera, Orion). Installation is not necessary. + +HPC-Stack installation will vary from system to system because there are so many possible combinations of operating systems, compilers, MPI's, and package versions. Installation via an EPIC-provided container is recommended to reduce this variability. However, users may choose a non-container approach to installation if they prefer. + + +.. _SingularityInstall: + +Install and Build the HPC-Stack in a Singularity Container +=========================================================== + +The Earth Prediction Innovation Center (EPIC) provides several containers available for the installation of the stack and Unified Forecast System (UFS) applications: + +* docker://noaaepic/ubuntu20.04-gnu9.3 +* docker://noaaepic/ubuntu20.04-hpc-stack +* docker://noaaepic/ubuntu20.04-epic-srwapp +* docker://noaaepic/ubuntu20.04-epic-mrwapp + +Install Singularity +----------------------- + +To install the HPC-stack via Singularity container, first install the Singularity package according to the `Singularity Installation Guide `_. This will include the installation of dependencies and the installation of the Go programming +language. SingularityCE Version 3.7 or above is recommended. + +.. warning:: + Docker containers can only be run with root privileges, and users cannot have root privileges on HPC computers. Therefore, it is not possible to build the HPC-stack inside a Docker container. A Docker image may be pulled, but it must be run inside a container such as Singularity. + + +Build and Run the Container +---------------------------- + +1. Pull and build the container. + + .. code-block:: console + + singularity pull ubuntu20.04-epic.sif docker://noaaepic/ubuntu20.04-epic + singularity build --sandbox ubuntu20.04-epic ubuntu20.04-epic.sif + cd ubuntu20.04-epic + + Make a directory (e.g. ``contrib``) in the container if one does not exist: + + .. code-block:: console + + mkdir contrib + cd .. + +2. Start the container and run an interactive shell within it. This command also binds the local working +directory to the container so that data can be shared between them. + + .. code-block:: console + + singularity shell -e --writable --bind /contrib:/contrib ubuntu20.04-gnu9.3 + + +Build the HPC-Stack +-------------------- + +1. Clone the hpc-stack repository (from inside the singularity shell above). + + .. code-block:: console + + git clone -b feature/ubuntu20.04 https://github.com/jkbk2004/hpc-stack + cd hpc-stack + +2. Set up the build environment. Be sure to change the ``prefix`` argument in the code below to +your system's install location (likely within the hpc-stack directory). + + .. 
code-block:: console + + ./setup_modules.sh -p -c config/config_custom.sh + + where is the directory where the software packages will be installed with a default value $HOME/opt. + + Enter YES/YES/YES when the option is presented. Then modify ``build_stack.sh`` with the following commands: + + .. code-block:: console + + sed -i "10 a source /usr/share/lmod/6.6/init/bash" ./build_stack.sh + sed -i "10 a export PATH=/usr/local/sbin:/usr/local/bin:$PATH" ./build_stack.sh + sed -i "10 a export LD_LIBRARY_PATH=/usr/local/lib64:/usr/local/lib:$LD_LIBRARY_PATH" ./build_stack.sh + +3. Build the environment. This may take several hours to complete. + + .. code-block:: console + + ./build_stack.sh -p -c config/config_custom.sh -y stack/stack_custom.yaml -m + +4. Load the required modules. + + .. code-block:: console + + source /usr/share/lmod/lmod/init/bash + module use /modulefiles/stack + module load hpc hpc-gnu hpc-openmpi + module avail + +From here, the user can continue to install and run applications that depend on the HPC-Stack, such as the UFS Short Range Weather (SRW) Application. + + + +Non-Container HPC-Stack Installation and Build +================================================= + +Install Prerequisites +---------------------- + +To install the HPC-Stack locally, the following pre-requisites must be installed: + +* **Python 3:** Can be obtained either from the `main distributor `_ or from `Anaconda `_. +* **Compilers:** Distributions of Fortran, C, and C++ compilers that work for your system. +* **Message Passing Interface (MPI)** libraries for multi-processor and multi-core communications, configured to work with your corresponding Fortran, C/C++ compilers. +* **Programs and software packages:** `Lmod `_, `CMake `_, `make `_, `wget `_, `curl `_, `git `_ + +To determine whether these prerequisites are installed, query the environment variables (for ``Lmod``) or the location and version of the packages (for ``cmake``, ``make``, ``wget``, ``curl``, ``git``). A few examples: + + .. code-block:: console + + echo $LMOD_PKG + which cmake + cmake --version + +If compilers or MPI's need to be installed, consult the :doc:`HPC-Stack Prerequisites ` document for further guidance. + +.. _NonConConfigure: + +Configure the Build +--------------------- + +Choose the COMPILER, MPI, and PYTHON version, and specify any other aspects of the build that you would like. For Level 1 systems, a default configuration can be found in the applicable ``config/config_.sh`` file. For Level 2-4 systems, selections can be made by editing the config/config_custom.sh file to reflect the appropriate compiler, mpi, and python choices for your system. If Lmod is installed on your system, you can view options using the ``module avail`` command. + +Some of the parameter settings available are: + +* HPC_COMPILER: This defines the vendor and version of the compiler you wish to use for this build. The format is the same as what you would typically use in a module load command. For example, HPC_COMPILER=intel/2020. Use ``gcc -v`` to determine your compiler and version. +* HPC_MPI: This is the MPI library you wish to use. The format is the same as for HPC_COMPILER. For example: ``HPC_MPI=impi/2020``. +* HPC_PYTHON: This is the Python interpreter to use for the build. The format is the same as for HPC_COMPILER, for example: ``HPC_PYTHON=python/3.7.5``. Use ``python --version`` to determine the current version of Python. 
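+
+For illustration only, the selections above might be set in ``config/config_custom.sh`` as exported variables. This is a minimal sketch: the version strings are placeholders taken from the option lists in this guide, so substitute modules that actually exist on your system:
+
+.. code-block:: console
+
+   export HPC_COMPILER="gnu/9.2.0"
+   export HPC_MPI="openmpi/4.1.2"
+   export HPC_PYTHON="python/3.7.5"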
+ +Other variables include USE_SUDO, DOWNLOAD_ONLY, NOTE, PKGDIR, LOGDIR, OVERWRITE, NTHREADS, MAKE_CHECK, MAKE_VERBOSE, and VENVTYPE. For more information on their use, see :doc:`HPC-Stack Parameters `. + +.. note:: + + If you only want to install select components of the stack, you can edit the ``stack/stack_custom.yaml`` file to omit unwanted components. The ``stack/stack_custom.yaml`` file lists the software packages to be built along with their version, options, compiler flags, and any other package-specific options. A full listing of components is available in the :doc:`HPC-Stack Components ` section. + + +.. _NonConSetUp: + +Set Up Compiler, MPI, Python & Module System +----------------------------------------------------- + +.. note:: + This step is required if you are using ``Lmod`` modules for managing the software stack. Lmod is installed across all Level 1 and Level 2 systems and in the containers provided. If ``LMod`` is not desired or used, the user can skip ahead to :numref:`Step %s `. + +Run from the top directory: + +.. code-block:: console + + ./setup_modules.sh -p -c + +where: + +```` is the directory where the software packages will be installed with a default value $HOME/opt. The software installation trees will branch directly off of , while the module files will be located in the /modulefiles subdirectory. + +```` points to the configuration script that you wish to use, as described in :numref:`Step %s `. The default configuration file is ``config/config_custom.sh``. + +**Additional Options:** + +The compiler and mpi modules can be handled separately from the rest of the build in order to exploit site-specific installations that maximize performance. In this case, the compiler and mpi modules are preceded by an hpc- label. For example, to load the Intel compiler module and the Intel MPI (IMPI) software library, enter: + + .. code-block:: console + + module load hpc-intel/2020 + module load hpc-impi/2020 + +These hpc- modules are really meta-modules that load the compiler/mpi library and modify the MODULEPATH so that the user has access to the software packages that will be built in :numref:`Step %s `. On HPC systems, these meta-modules load the native modules provided by the system administrators. + +In short, you may prefer not to load the compiler or MPI modules directly. Instead, loading the hpc- meta-modules as demonstrated above will provide everything needed to load software libraries. + +It may be necessary to set certain source and path variables in the ``build_stack.sh`` script. For example: + + .. code-block:: console + + source /usr/share/lmod/6.6/init/bash + source /usr/share/lmod/lmod/init/bash + export PATH=/usr/local/sbin:/usr/local/bin:$PATH + export LD_LIBRARY_PATH=/usr/local/lib64:/usr/local/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH + + +.. _NonConHPCBuild: + +Build the HPC-stack +-------------------- + +Now all that remains is to build the stack: + + .. code-block:: console + + ./build_stack.sh -p -c -y -m + +Here the -m option is only required if LMod is used for managing the software stack. It should be omitted otherwise. and are the same as in :numref:`Step %s `, namely a reference to the installation prefix and a corresponding configuration file in the config directory. As in :numref:`Step %s `, if this argument is omitted, the default is to use ``$HOME/opt`` and ``config/config_custom.sh`` respectively. 
represents a user configurable yaml file containing a list of packages that need to be built in the stack along with their versions and package options. The default value of is ``stack/stack_custom.yaml``. + +.. warning:: + Steps :numref:`Step %s `, :numref:`Step %s `, and :numref:`Step %s ` need to be repeated for each compiler/MPI combination that you wish to install.** The new packages will be installed alongside any previously-existing packages that may already have been built from other compiler/MPI combinations. + +From here, the user can continue to install and run applications that depend on the HPC-Stack, such as the UFS Short Range Weather (SRW) Application. + + + diff --git a/docs/source/hpc-intro.rst b/docs/source/hpc-intro.rst new file mode 100644 index 00000000..c65d4ae7 --- /dev/null +++ b/docs/source/hpc-intro.rst @@ -0,0 +1,29 @@ +.. _Intro: + +====================== +Introduction +====================== + +**Definition:** HPC-stack is a repository that provides a unified, shell script-based build system for +building the software stack required for the `Unified Forecast System (UFS) `_ and applications. + +Background +------------------------ +The UFS Weather Model draws on over 50 code libraries to run its applications. These libraries range from libraries developed in-house at NOAA (e.g. NCEPLIBS, FMS, etc.) to libraries developed by NOAA's partners (e.g. PIO, ESMF etc) to truly third party libraries (e.g. NETCDF). Individual installation of these libraries is not practical, so the `HPC-Stack `_ was developed as a central installation system to ensure that the infrastructure environment across multiple platforms is as similar as possible. Installation of the HPC-Stack is required to run the SRW. + +Instructions +------------------------- +`Level 1 `_ platforms (e.g. Cheyenne, Hera) already have the HPC-Stack installed. Users on those platforms do *not* need to install the HPC-Stack before building UFS applications (e.g. SRW, MRW) or models. Users working on systems that fall under `Support Levels 2-4 `_ will need to install the HPC-Stack the first time they try to run UFS applications or models. + +Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :doc:`Installing the HPC-Stack `. + +.. note:: + `HPC-Stack `_ is part of the NCEPLIBS project and was originally written for the `Joint Effort for Data assimilation Integration (JEDI) `_ framework. + + + + + + + + diff --git a/docs/source/hpc-notes.rst b/docs/source/hpc-notes.rst new file mode 100644 index 00000000..0eb8903d --- /dev/null +++ b/docs/source/hpc-notes.rst @@ -0,0 +1,120 @@ +.. This is a continuation of the hpc-install.rst chapter + +.. _HPCNotes: + +HPC-Stack Additional Notes +=========================== + +Setting compiler flags and other options +----------------------------------------- + +Often it is necessary to specify compiler flags (e.g. ``gfortran-10 +-fallow-argument-mismatch``) to the packages via ``FFLAGS``. There are 2 +ways this can be achieved. + +1. For all packages: One can define variable e.g. ``STACK_FFLAGS=-fallow-argument-mismatch`` in the config file ``config_custom.sh``. This will append ``STACK_FFLAGS`` to ``FFLAGS`` in every build script under libs. + +2. 
Package specific flags: To compile only a specific package under ``libs`` with the above compiler flag, one can define the variable ``FFLAGS=-fallow-argument-mismatch`` in the ``<package>`` section of the
+YAML file ``stack_custom.yaml``. This will append ``STACK_<package>_FFLAGS`` to ``FFLAGS`` in the build script for that ``<package>`` only.
+
+Adding a New Library or Package
+--------------------------------
+
+If you want to add a new library to the stack, you need to follow these steps:
+
+1. Write a new build script in ``libs``, using existing scripts as a template.
+
+2. Define a new section in the ``yaml`` file for that library/package in the ``config`` directory.
+
+3. If the package is a Python virtual environment, add a ``requirements.txt`` or ``environment.yml`` file listing the Python packages required to install the package. These files should be named and placed in ``pyvenv/package_name.txt`` and ``pyvenv/package_name.yml``. ``VENVTYPE=pyvenv`` will use ``pyvenv/package_name.txt``, and ``VENVTYPE=condaenv`` will use ``pyvenv/package_name.yml``.
+
+4. Add a call to the new build script in ``build_stack.sh``.
+
+5. Create a new module template at the appropriate place in the modulefiles directory, using existing files as a template.
+
+6. Update ``README.md`` to include the name of the new library or package.
+
+Configuring for a new HPC
+---------------------------
+
+If you want to port this to a new HPC, you need to follow these steps:
+
+1. Write a new config file ``config/config_<hpc>.sh``, using existing configs as a template. Also create a new yaml file ``config/stack_<hpc>.yaml``, using existing yaml files as a template.
+
+2. Add/remove basic modules for that HPC.
+
+3. Choose the appropriate Compiler/MPI combination.
+
+4. If a template modulefile does not exist for that Compiler/MPI combination, create module templates at the appropriate place in the modulefiles directory, using existing files (e.g., ``hpc-ips`` or ``hpc-smpi``) as a template.
+
+5. If the HPC provides basic modules (e.g., Git, CMake), they can be loaded in ``config/config_<hpc>.sh``.
+
+Using the **DOWNLOAD_ONLY** Option
+----------------------------------------
+
+If an HPC (e.g., NOAA RDHPCS Hera) does not allow access to online software via ``wget`` or ``git clone``, you will have to download all the packages using the **DOWNLOAD_ONLY** option in ``config_custom.sh``. Execute ``build_stack.sh`` as you would on a machine that does allow access to online software, with ``DOWNLOAD_ONLY=YES``, and all the packages will be downloaded into the ``pkg`` directory. Transfer the contents of the ``pkg`` directory to the machine on which you wish to install the hpc-stack and execute ``build_stack.sh`` there. ``build_stack.sh`` will detect the already downloaded packages and use them rather than fetching them.
+
+Using the HPC-stack
+---------------------
+
+* If Lmod is used to manage the software stack, you need to activate the stack
+  before using it. This is done by loading the ``hpc``
+  module under ``$PREFIX/modulefiles/stack`` as follows:
+
+  .. code-block:: console
+
+    module use $PREFIX/modulefiles/stack
+    module load hpc/1.0.0
+
+This will put the ``hpc-<compiler>`` meta-modules in your ``MODULEPATH``,
+which can be loaded as:
+
+  .. code-block:: console
+
+    module load hpc-<compiler>/<version>
+
+* If the HPC-stack is not managed via modules, you need to add ``$PREFIX`` to the PATH as follows:
+
+  .. 
code-block:: console + + export PATH="$PREFIX/bin:$PATH" + export LD_LIBRARY_PATH="$PREFIX/lib:$LD_LIBRARY_PATH" + export CMAKE_PREFIX_PATH="$PREFIX" + +Known Workaround for Certain Installations of Lmod +---------------------------------------------------- + +* On some machine's (e.g., **WCOSS_DELL_P3**), LMod is built to disable loading of default modulefiles and requires the user to load the module with an explicit version of the module. e.g. ``module load netcdf/4.7.4`` instead of ``module load netcdf``. The latter looks for the ``default`` module which is either the latest version or a version that is marked as default. To circumvent this, it is necessary to place the following lines in ``modulefiles/stack/hpc/hpc.lua`` prior to executing ``setup_modules.sh`` or in ``$PREFIX/modulefiles/stack/hpc/1.0.0.lua`` after executing ``setup_modules.sh``. + + .. code-block:: console + + -- https://lmod.readthedocs.io/en/latest/090_configuring_lmod.html + setenv("LMOD_EXACT_MATCH", "no") + setenv("LMOD_EXTENDED_DEFAULT", "yes") + + +Known Issues +=============== + +* NetCDF-C++ does not build with LLVM Clang. It can be disabled by setting ``disable_cxx: YES`` in the stack file under the NetCDF section. + +* Json-schema-validator does not build with LLVM Clang. It can be disabled in the stack file in the json-schema-validator-section. + + +Disclaimer +============= + +The United States Department of Commerce (DOC) GitHub project code is +provided on an "as is" basis and the user assumes responsibility for +its use. DOC has relinquished control of the information and no longer +has responsibility to protect the integrity, confidentiality, or +availability of the information. Any claims against the Department of +Commerce stemming from the use of its GitHub project will be governed +by all applicable Federal law. Any reference to specific commercial +products, processes, or services by service mark, trademark, +manufacturer, or otherwise, does not constitute or imply their +endorsement, recommendation or favoring by the Department of +Commerce. The Department of Commerce seal and logo, or the seal and +logo of a DOC bureau, shall not be used in any manner to imply +endorsement of any commercial product or activity by DOC or the United +States Government. diff --git a/docs/source/hpc-parameters.rst b/docs/source/hpc-parameters.rst new file mode 100644 index 00000000..de03851f --- /dev/null +++ b/docs/source/hpc-parameters.rst @@ -0,0 +1,99 @@ +.. This is a continuation of the Installation.rst chapter + +.. _HPCParameters: + +Build Parameters +========================== + +Compiler & MPI +---------------- + +``HPC_COMPILER``: (Default: “”) + This defines the vendor and version of the compiler you wish to use for this build. The format is the same as what you would typically use in a module load command. For example, ``HPC_COMPILER=intel/2020``. Options include: + + * ``gnu/6.5.0`` + * ``gnu/9.2.0`` + * ``intel/18.0.5.274`` + * ``intel/19.0.5.281`` + * ``intel/2020`` + * ``intel/2020.2`` + * ``intel/2021.3.0`` + +``HPC_MPI``: (Default: “”) + The MPI library you wish to use for this build. The format is the same as for HPC_COMPILER; for example: ``HPC_MPI=impi/2020``. Current MPI types accepted are openmpi, mpich, impi, cray, and cray*.Options include: + + * ``impi/2020`` + * ``impi/2018.4.274`` + * ``impi/2019.0.5`` + * ``impi/2020`` + * ``impi/2020.2`` + * ``impi/2021.3.0`` + * ``mvapich2/2.3`` + * ``openmpi/4.1.2`` + +.. 
note::
+    For example, when using Intel-based compilers and Intel's implementation of the MPI interface (IMPI), the ``config/config_custom.sh`` file should contain the following specifications:
+
+    .. code-block:: console
+
+        export SERIAL_CC=icc
+        export SERIAL_FC=ifort
+        export SERIAL_CXX=icpc
+
+        export MPI_CC=mpiicc
+        export MPI_FC=mpiifort
+        export MPI_CXX=mpiicpc
+
+    This will set the serial and MPI C, Fortran, and C++ compilers.
+
+.. note::
+    To verify that your chosen MPI build (e.g., ``mpiicc``) is based on the corresponding serial compiler (e.g., ``icc``), use the ``-show`` option to query the MPI wrapper. For example,
+
+    .. code-block:: console
+
+        mpiicc -show
+
+    will display output similar to the following, where the exact include paths, library paths, and link flags depend on your installation:
+
+    .. code-block:: console
+
+        icc -I/path/to/impi/include -L/path/to/impi/lib ... -lmpifort -lmpi ...
+
+    The leading ``icc`` confirms that your ``mpiicc`` build is based on ``icc``. If you query ``mpicc -show`` on your system instead, you may find that it is based on ``gcc`` (or another compiler).
+
+Other Parameters
+--------------------
+
+``HPC_PYTHON``: (Default: “”)
+    The Python interpreter you wish to use for this build. The format is the same as for ``HPC_COMPILER``, for example: ``HPC_PYTHON=python/3.7.5``.
+
+``USE_SUDO``: (Default: “”)
+    If ``PREFIX`` is set to a value that requires root permission to write to, such as ``/opt/modules``, then this flag should be enabled. For example, ``USE_SUDO=Y``.
+
+``DOWNLOAD_ONLY``: (Default: “”)
+    The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the software (e.g., GitHub).
+
+.. note::
+
+    To enable a boolean flag, use a single letter: ``Y`` or ``T``. To disable, use ``N`` or ``F`` (case insensitive).
+
+``PKGDIR``: (Default: “”)
+    The directory where tarred or zipped software files will be downloaded and compiled. Unlike ``PREFIX``, this is a relative path, based on the root path of the repository. Individual software packages can be downloaded manually to this directory and untarred, but this is not required. Build scripts will look for the directory ``pkg/pkgName-pkgVersion``, e.g., ``pkg/hdf5-1_10_3``.
+
+``LOGDIR``: (Default: “”)
+    The directory where log files from the build will be written, relative to the root path of the repository.
+
+``OVERWRITE``: (Default: “”)
+    If set, this flag will cause the build script to remove the current installation, if any exists, and replace it with the new version of each software package in question. If this is not set, the build will bypass software packages that are already installed.
+
+``NTHREADS``: (Default: “”)
+    The number of threads to use for parallel builds.
+
+``MAKE_CHECK``: (Default: “”)
+    Run ``make check`` after the build.
+
+``MAKE_VERBOSE``: (Default: “”)
+    Print out extra information to the log files during the build.
+
+``VENVTYPE``: (Default: “”)
+    The type of Python environment to build. The value depends on whether pip or conda is used. Set ``VENVTYPE=pyvenv`` when using pip and ``VENVTYPE=condaenv`` when using Miniconda for creating virtual environments. Default is ``pyvenv``.
diff --git a/docs/source/hpc-prereqs.rst b/docs/source/hpc-prereqs.rst
new file mode 100644
index 00000000..4993c626
--- /dev/null
+++ b/docs/source/hpc-prereqs.rst
@@ -0,0 +1,13 @@
+.. This is a continuation of the hpc-intro.rst chapter
+
+.. _Prerequisites:
+
+Installation of the HPC-Stack Prerequisites
+=============================================
+
+More information coming soon! 
+ +.. + COMMENT: Linked from "If compilers or MPI's need to be installed, consult the `HPC-Stack Prerequisites ` document for further guidance. " + COMMENT: Add details about requirements (e.g., MPI, compilers, environment modules, etc.)??? + COMMENT: https://www.open-mpi.org/software/ompi/v4.1/ diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 00000000..61d6ee20 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,17 @@ +.. HPC-Stack Users Guide master file, created by sphinx-quickstart on Wed Feb 9 11:38:12 2022. + You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. + +HPC-Stack User's Guide +================================ + +.. toctree:: + :numbered: + :maxdepth: 3 + :caption: Contents: + + hpc-intro + hpc-install + hpc-prereqs + hpc-parameters + hpc-components + hpc-notes \ No newline at end of file From 055cab8ec5f7f87413e73185663215b0d151fe3e Mon Sep 17 00:00:00 2001 From: gspetro Date: Fri, 11 Feb 2022 15:19:57 -0500 Subject: [PATCH 02/30] reformat intro to make reusable --- docs/source/hpc-install.rst | 6 +++--- docs/source/hpc-intro-text.rst | 23 +++++++++++++++++++++++ docs/source/hpc-intro.rst | 17 +---------------- 3 files changed, 27 insertions(+), 19 deletions(-) create mode 100644 docs/source/hpc-intro-text.rst diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index 9c7e7466..ac9121c3 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -125,7 +125,7 @@ To determine whether these prerequisites are installed, query the environment va which cmake cmake --version -If compilers or MPI's need to be installed, consult the :doc:`HPC-Stack Prerequisites ` document for further guidance. +If compilers or MPI's need to be installed, consult the :ref:`HPC-Stack Prerequisites ` document for further guidance. .. _NonConConfigure: @@ -140,11 +140,11 @@ Some of the parameter settings available are: * HPC_MPI: This is the MPI library you wish to use. The format is the same as for HPC_COMPILER. For example: ``HPC_MPI=impi/2020``. * HPC_PYTHON: This is the Python interpreter to use for the build. The format is the same as for HPC_COMPILER, for example: ``HPC_PYTHON=python/3.7.5``. Use ``python --version`` to determine the current version of Python. -Other variables include USE_SUDO, DOWNLOAD_ONLY, NOTE, PKGDIR, LOGDIR, OVERWRITE, NTHREADS, MAKE_CHECK, MAKE_VERBOSE, and VENVTYPE. For more information on their use, see :doc:`HPC-Stack Parameters `. +Other variables include USE_SUDO, DOWNLOAD_ONLY, NOTE, PKGDIR, LOGDIR, OVERWRITE, NTHREADS, MAKE_CHECK, MAKE_VERBOSE, and VENVTYPE. For more information on their use, see :ref:`HPC-Stack Parameters `. .. note:: - If you only want to install select components of the stack, you can edit the ``stack/stack_custom.yaml`` file to omit unwanted components. The ``stack/stack_custom.yaml`` file lists the software packages to be built along with their version, options, compiler flags, and any other package-specific options. A full listing of components is available in the :doc:`HPC-Stack Components ` section. + If you only want to install select components of the stack, you can edit the ``stack/stack_custom.yaml`` file to omit unwanted components. The ``stack/stack_custom.yaml`` file lists the software packages to be built along with their version, options, compiler flags, and any other package-specific options. A full listing of components is available in the :ref:`HPC-Stack Components ` section. .. 
_NonConSetUp: diff --git a/docs/source/hpc-intro-text.rst b/docs/source/hpc-intro-text.rst new file mode 100644 index 00000000..cdaaf6fd --- /dev/null +++ b/docs/source/hpc-intro-text.rst @@ -0,0 +1,23 @@ +**Definition:** HPC-stack is a repository that provides a unified, shell script-based build system for +building the software stack required for the `Unified Forecast System (UFS) `_ and applications. + +Background +------------------------ +The UFS Weather Model draws on over 50 code libraries to run its applications. These libraries range from libraries developed in-house at NOAA (e.g. NCEPLIBS, FMS, etc.) to libraries developed by NOAA's partners (e.g. PIO, ESMF etc) to truly third party libraries (e.g. NETCDF). Individual installation of these libraries is not practical, so the `HPC-Stack `_ was developed as a central installation system to ensure that the infrastructure environment across multiple platforms is as similar as possible. Installation of the HPC-Stack is required to run the SRW. + +Instructions +------------------------- +`Level 1 `_ platforms (e.g. Cheyenne, Hera) already have the HPC-Stack installed. Users on those platforms do *not* need to install the HPC-Stack before building UFS applications (e.g. SRW, MRW) or models. Users working on systems that fall under `Support Levels 2-4 `_ will need to install the HPC-Stack the first time they try to run UFS applications or models. + +Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :doc:`Installing the HPC-Stack `. + +.. note:: + `HPC-Stack `_ is part of the NCEPLIBS project and was originally written for the `Joint Effort for Data assimilation Integration (JEDI) `_ framework. + + + + + + + + diff --git a/docs/source/hpc-intro.rst b/docs/source/hpc-intro.rst index c65d4ae7..bb27618b 100644 --- a/docs/source/hpc-intro.rst +++ b/docs/source/hpc-intro.rst @@ -4,22 +4,7 @@ Introduction ====================== -**Definition:** HPC-stack is a repository that provides a unified, shell script-based build system for -building the software stack required for the `Unified Forecast System (UFS) `_ and applications. - -Background ------------------------- -The UFS Weather Model draws on over 50 code libraries to run its applications. These libraries range from libraries developed in-house at NOAA (e.g. NCEPLIBS, FMS, etc.) to libraries developed by NOAA's partners (e.g. PIO, ESMF etc) to truly third party libraries (e.g. NETCDF). Individual installation of these libraries is not practical, so the `HPC-Stack `_ was developed as a central installation system to ensure that the infrastructure environment across multiple platforms is as similar as possible. Installation of the HPC-Stack is required to run the SRW. - -Instructions -------------------------- -`Level 1 `_ platforms (e.g. Cheyenne, Hera) already have the HPC-Stack installed. Users on those platforms do *not* need to install the HPC-Stack before building UFS applications (e.g. SRW, MRW) or models. Users working on systems that fall under `Support Levels 2-4 `_ will need to install the HPC-Stack the first time they try to run UFS applications or models. - -Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :doc:`Installing the HPC-Stack `. - -.. 
note:: - `HPC-Stack `_ is part of the NCEPLIBS project and was originally written for the `Joint Effort for Data assimilation Integration (JEDI) `_ framework. - +.. include :: hpc-intro-text.rst From b038f395699af83c533c61b24563e281ee082b28 Mon Sep 17 00:00:00 2001 From: gspetro Date: Fri, 11 Feb 2022 15:32:24 -0500 Subject: [PATCH 03/30] fixed ref link --- docs/source/hpc-intro-text.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/hpc-intro-text.rst b/docs/source/hpc-intro-text.rst index cdaaf6fd..1f9809a2 100644 --- a/docs/source/hpc-intro-text.rst +++ b/docs/source/hpc-intro-text.rst @@ -9,7 +9,7 @@ Instructions ------------------------- `Level 1 `_ platforms (e.g. Cheyenne, Hera) already have the HPC-Stack installed. Users on those platforms do *not* need to install the HPC-Stack before building UFS applications (e.g. SRW, MRW) or models. Users working on systems that fall under `Support Levels 2-4 `_ will need to install the HPC-Stack the first time they try to run UFS applications or models. -Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :doc:`Installing the HPC-Stack `. +Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :ref:`Installing the HPC-Stack `. .. note:: `HPC-Stack `_ is part of the NCEPLIBS project and was originally written for the `Joint Effort for Data assimilation Integration (JEDI) `_ framework. From 9002252d5a39f69ff084ad2e969862557281bef3 Mon Sep 17 00:00:00 2001 From: gspetro Date: Mon, 14 Feb 2022 23:59:03 -0500 Subject: [PATCH 04/30] deleted extraneous files --- docs/INSTALL | 33 --------------------------------- docs/RUNTIME | 13 ------------- docs/source/conf.py | 4 ++-- 3 files changed, 2 insertions(+), 48 deletions(-) delete mode 100644 docs/INSTALL delete mode 100644 docs/RUNTIME diff --git a/docs/INSTALL b/docs/INSTALL deleted file mode 100644 index 5923285b..00000000 --- a/docs/INSTALL +++ /dev/null @@ -1,33 +0,0 @@ -# Simple setup instructions for the UFS SRW App -# For more details, see the "Getting Started" guide: -# https://github.com/ufs-community/ufs-srweather-app/wiki/Getting-Started - -# Getting the UFS SRW App code -# -# The SRW App can be downloaded directly from github, either by using `git clone` or by downloading -# from the web. - -git clone https://github.com/ufs-community/ufs-srweather-app.git - -cd ufs-srweather-app/ -./manage_externals/checkout_externals - -# Prior to building, you must set up the environment so cmake can find the appropriate compilers -# and libraries. For instructions specific to supported platforms, see the "build_[machine]_[compiler].env -# files in the "env" directory. These files give instructions assuming a bash or ksh login shell, for -# csh and tcsh users you will have to modify the commands for setting envronment variables. - -# Supported CMake flags: -# -DCMAKE_INSTALL_PREFIX Location where the bin/ include/ lib/ and share/ directories containing -# the various components of the SRW App will be created. Recommended value -# is "..", one directory up from the build directory -# -DCCPP_SUITES A comma-separated list of CCPP suites to build with the UFS weather -# model. See the User's Guide for a full list of available suites. 
The -# default is to build with the released supported suites: FV3_GFS_v15p2 and -# FV3_RRFS_v1beta - -mkdir build && cd build -cmake .. -DCMAKE_INSTALL_PREFIX=.. -make -j 8 - - diff --git a/docs/RUNTIME b/docs/RUNTIME deleted file mode 100644 index a80a6522..00000000 --- a/docs/RUNTIME +++ /dev/null @@ -1,13 +0,0 @@ -# Users should load the appropriate python environment for the workflow. -# The workflow requires Python 3, with the packages 'PyYAML', 'Jinja2', and 'f90nml' available. - -# For users' convenience, the python environment for the workflow is put in 'ufs-srweather-app/env/wflow_[machine].env'. -# When generating a workflow experiment or running a workflow, users can use this file for a specific machine. - -# For example, on Hera: - -cd ufs-srweather-app/env -source wflow_hera.env - -cd ../regional_workflow/ush -./generate_FV3LAM_wflow.sh diff --git a/docs/source/conf.py b/docs/source/conf.py index 7c0161f2..f94d6599 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -18,8 +18,8 @@ # -- Project information ----------------------------------------------------- project = 'HPC-Stack Users Guide' -copyright = '2022, EPIC AUS Team' -author = 'EPIC AUS Team' +copyright = '2022, EMC/EPIC' +author = 'EMC/EPIC' # The short X.Y version version = '' From 8b63ba42338dd2ee6b005dd863a920b09674abb0 Mon Sep 17 00:00:00 2001 From: gspetro Date: Wed, 16 Feb 2022 13:33:52 -0500 Subject: [PATCH 05/30] updates to intro & pared down README.md --- README.md | 372 +-------------------------------- docs/source/hpc-intro-text.rst | 7 +- docs/source/hpc-notes.rst | 4 +- 3 files changed, 10 insertions(+), 373 deletions(-) diff --git a/README.md b/README.md index eb4867bf..f26aca6a 100644 --- a/README.md +++ b/README.md @@ -5,15 +5,14 @@ # hpc-stack This repository provides a unified, shell script based build system -for building software stack needed for the NOAA [Universal Forecast +for building the software stack needed for the NOAA [Universal Forecast System (UFS)](https://github.com/ufs-community/ufs-weather-model) and related products, and applications written for the [Joint Effort for Data assimilation Integration (JEDI)](https://jointcenterforsatellitedataassimilation-jedi-docs.readthedocs-hosted.com/en/latest/) framework. -This is part of the [NCEPLIBS](https://github.com/NOAA-EMC/NCEPLIBS) -project. +This is part of the [NCEPLIBS](https://github.com/NOAA-EMC/NCEPLIBS) project. ## Authors @@ -46,371 +45,12 @@ The prerequisites of building hpc-stack are: - wget and curl - git -Building the software stack is a **Three-Step process**, as described -in the following sections. +Building the software stack is a **Three-Step process**, as described in the documentation: -## Step 1: Configure Build +- Step 1: Configure Build +- Step 2: Set Up Compiler, MPI, Python, and Module System +- Step 3: Build Software Stack -The first step is to choose the **COMPILER**, **MPI**, and **PYTHON** and -specify any other aspects of the build that you would like. This is normally -done by editing the file `config/config_custom.sh`. Here we describe -some of the parameter settings available. - -- **HPC_COMPILER:** This defines the vendor and version of the - compiler you wish to use for this build. The format is the same - as what you would typically use in a module load command. For - example, `HPC_COMPILER=intel/2020`. - -- **HPC_MPI:** is the MPI library you wish to use for this build. The - format is the same as for `HPC_COMPILER`, for example: - `HPC_MPI=impi/2020`. 
- -- **HPC_PYTHON:** is the Python Interpretor you wish to use for this build. The - format is the same as for `HPC_COMPILER`, for example: - `HPC_PYTHON=python/3.7.5`. - -- **USE_SUDO:** If `PREFIX` is set to a value that requires root - permission to write to, such as `/opt/modules`, then this flag - should be enabled. For example, `USE_SUDO=Y` - -- **DOWNLOAD_ONLY:** The stack allows the option to download the - source code for all the software without performing the - installation. This is especially useful for installing the stack - on machines that do not allow internet connectivity to websites - hosting the softwares e.g. GitHub. - -- **NOTE: To enable a boolean flag use a single-digit `Y` or `T`. To - disable, use `N` or `F` (case insensitive)**_ - -- **PKGDIR:** is the directory where tarred or zipped software files - will be downloaded and compiled. Unlike `PREFIX`, this is a - relative path, based on the root path of the repository. - Individual software packages can be downloaded manually to this - directory and untarred, but this is not required. Build scripts - will look for directory `pkg/pkgName-pkgVersion` - e.g. `pkg/hdf5-1_10_3`. - -- **LOGDIR:** is the directory where log files from the build will be - written, relative to the root path of the repository. - -- **OVERWRITE:** If set, this flag will cause the build script to - remove the current installation, if any exists, and replace it - with the new version of each software package in question. If - this is not set, the build will bypass software packages that are - already installed. - -- **NTHREADS:** The number of threads to use for parallel builds - -- **MAKE_CHECK:** Run `make check` after build - -- **MAKE_VERBOSE:** Print out extra information to the log files during the build - -- **VENVTYPE:** Set the type of python environment to build. Value depends on whether using `pip` or `conda`. Set `VENVTYPE=pyvenv` when using `pip` and `VENVTYPE=condaenv` when using `Miniconda` for creating virtual environments. Default is `pyvenv` - -The next step is to choose what components of the stack you wish to -build. This is done by editing the file `stack/stack_custom.yaml` -which defines the software packages to be built along with their -version, options and compiler flags along with other package specific -options. - -The following software can optionally be built with the scripts under -`libs`. These packages are built in Step 3 using the `build_stack.sh` -script. 
- -* Compilers and MPI libraries - - [GNU/GCC](https://gcc.gnu.org/) - - [Intel](https://intel.com) - - [OpenMPI](https://www.open-mpi.org/) - - [MPICH](https://www.mpich.org/) - - `hpc-` Meta-modules for all the above as well as Intel and IMPI - -* HPC Stack - Third Party Libraries - - [CMake](https://cmake.org/) - - [Udunits](https://www.unidata.ucar.edu/software/udunits/) - - [PNG](http://www.libpng.org/pub/png/) - - [JPEG](https://jpeg.org/) - - [Jasper](https://github.com/jasper-software/jasper) - - [SZip](https://support.hdfgroup.org/doc_resource/SZIP/) - - [Zlib](http://www.zlib.net/) - - [HDF5](https://www.hdfgroup.org/solutions/hdf5/) - - [PNetCDF](https://parallel-netcdf.github.io/) - - [NetCDF](https://www.unidata.ucar.edu/software/netcdf/) - - [ParallelIO](https://github.com/NCAR/ParallelIO) - - [nccmp](https://gitlab.com/remikz/nccmp) - - [nco](http://nco.sourceforge.net/) - - [CDO](https://code.mpimet.mpg.de/projects/cdo) - - [FFTW](http://www.fftw.org/) - - [GPTL](https://jmrosinski.github.io/GPTL/) - - [Tau2]() - - [Boost](https://beta.boost.org/) - - [Eigen](http://eigen.tuxfamily.org/) - - [GSL-Lite](http://github.com/gsl-lite/gsl-lite) - - [JSON for C++](https://github.com/nlohmann/json/) - - [JSON Schema Validator for C++](https://github.com/pboettch/json-schema-validator) - - [pybind11](https://github.com/pybind/pybind11) - - [MADIS](https://madis-data.ncep.noaa.gov) - - [SQLite](https://www.sqlite.org) - - [PROJ](https://proj.org) - - [GEOS](https://www.osgeo.org/projects/geos) - -* UFS Dependencies - - [ESMF](https://www.earthsystemcog.org/projects/esmf/) - - [FMS](https://github.com/noaa-gfdl/fms.git) - -* NCEP Libraries - - [NCEPLIBS-bacio](https://github.com/noaa-emc/nceplibs-bacio.git) - - [NCEPLIBS-sigio](https://github.com/noaa-emc/nceplibs-sigio.git) - - [NCEPLIBS-sfcio](https://github.com/noaa-emc/nceplibs-sfcio.git) - - [NCEPLIBS-gfsio](https://github.com/noaa-emc/nceplibs-gfsio.git) - - [NCEPLIBS-w3nco](https://github.com/noaa-emc/nceplibs-w3nco.git) - - [NCEPLIBS-sp](https://github.com/noaa-emc/nceplibs-sp.git) - - [NCEPLIBS-ip](https://github.com/noaa-emc/nceplibs-ip.git) - - [NCEPLIBS-ip2](https://github.com/noaa-emc/nceplibs-ip2.git) - - [NCEPLIBS-g2](https://github.com/noaa-emc/nceplibs-g2.git) - - [NCEPLIBS-g2c](https://github.com/noaa-emc/nceplibs-g2c.git) - - [NCEPLIBS-g2tmpl](https://github.com/noaa-emc/nceplibs-g2tmpl.git) - - [NCEPLIBS-nemsio](https://github.com/noaa-emc/nceplibs-nemsio.git) - - [NCEPLIBS-nemsiogfs](https://github.com/noaa-emc/nceplibs-nemsiogfs.git) - - [NCEPLIBS-w3emc](https://github.com/noaa-emc/nceplibs-w3emc.git) - - [NCEPLIBS-landsfcutil](https://github.com/noaa-emc/nceplibs-landsfcutil.git) - - [NCEPLIBS-bufr](https://github.com/noaa-emc/nceplibs-bufr.git) - - [NCEPLIBS-wgrib2](https://github.com/noaa-emc/nceplibs-wgrib2.git) - - [NCEPLIBS-prod_util](https://github.com/noaa-emc/nceplibs-prod_util.git) - - [NCEPLIBS-grib_util](https://github.com/noaa-emc/nceplibs-grib_util.git) - - [NCEPLIBS-ncio](https://github.com/noaa-emc/nceplibs-ncio.git) - - [NCEPLIBS-wrf_io](https://github.com/noaa-emc/nceplibs-wrf_io.git) - - [EMC_crtm](https://github.com/noaa-emc/EMC_crtm.git) - - [EMC_post](https://github.com/noaa-emc/EMC_post.git) - -* JEDI Dependencies - - [ecbuild](https://github.com/ecmwf/ecbuild.git) - - [eckit](https://github.com/ecmwf/eckit.git) - - [fckit](https://github.com/ecmwf/fckit.git) - - [atlas](https://github.com/ecmwf/atlas.git) - -* Python and Virtual Environments - - 
[Miniconda3](https://docs.conda.io/en/latest/) - - [r2d2](https://github.com/jcsda-internal/r2d2.git) - -**IMPORTANT: Steps 1, 2, and 3 need to be repeated for each - compiler/MPI combination that you wish to install.** The new - packages will be installed alongside any previously-existing - packages that may already have been built from other compiler/MPI - combinations. - -## Step 2: Set Up Compiler, MPI, Python, and Module System - -This step is only required if using LMod modules for managing the -software stack. If LMod is not desired or used, the user can skip -ahead to Step 3. - -Run from the top directory: -``` -./setup_modules.sh -p -c -``` -where: - -- `` is the directory where the software packages will be - installed with a default value `$HOME/opt`. The software - installation trees (the top level of each being is the compiler, - e.g. `intel-2020`) will branch directly off of `` while the - module files will be located in the `/modulefiles` - subdirectory. - -- `` points to the configuration script that you wish - to use, as described in Step 1. For example, to use the - `config/config_custom.sh` you would enter this: - -``` -./setup_modules.sh -c config/config_custom.sh -``` - -If no arguments are specified, the default is -`config/config_custom.sh`. Note that you can skip this step as well -for container builds because we currenly include only one compiler/mpi -combination in each container. So, each package is only build once -and there is no need for modules. - -This script sets up the module directory tree in -`/modulefiles`. It also sets up the compiler and mpi modules. -The compiler and mpi modules are handled separately from the rest of -the build because, when possible, we wish to exploit site-specific -installations that maximize performance. - -**For this reason, the compiler and mpi modules are preceded by a - `hpc-` label**. For example, to load the Intel compiler module and - the Intel MPI (IMPI) software library, you would enter this: - -``` -module load hpc-intel/2020 -module load hpc-impi/2020 -``` - -These `hpc-` modules are really meta-modules that will both load the -compiler/mpi library and modify the `MODULEPATH` so the user has -access to the software packages that will be built in Step 4. On HPC -systems, these meta-modules will load the native modules provided by -the system administrators. For example, `module load hpc-impi/2020` -will first load the native `impi/2020` module and then modify the -`MODULEPATH` accordingly to allow users to access the custom libraries -built by this repository. - -So, in short, you should never load the compiler or MPI modules -directly. Instead, you should always load the `hpc-` meta-modules as -demonstrated above - they will provide everything you need to load and -then use these software libraries. - -If the compiler and/or MPI is natively available on the system and the -user wishes to make use of it e.g. `/usr/bin/gcc`, the -`setup_modules.sh` script prompts the user to answer questions -regarding their use. For e.g. in containers, one would like to use -the system provided GNU compilers, but build a MPI implementation. - -## Step 3: Build Software Stack - -Now all that remains is to build the stack: - -``` -./build_stack.sh -p -c -y -m -``` - -Here the `-m` option is only required if LMod is used for managing the -software stack. It should be omitted otherwise. `` and -`` are the same as in Step 2, namely a reference to the -installation prefix and a corresponding configuration file in the -`config` directory. 
As in Step 2, if this argument is omitted, the -default is to use `$HOME/opt` and `config/config_custom.sh` -respectively. `` represents a user configurable yaml file -containing a list of packages that need to be built in the stack along -with their versions and package options. The default value of `` -is `stack/stack_custom.yaml`. - -## Additional Notes: - -### Setting compiler flags and other options - -Often it is necessary to specify compiler flags (e.g. `gfortran-10 --fallow-argument-mismatch`) to the packages via `FFLAGS`. There are 2 -ways this can be achieved. - -1. For all packages: One can define variable -e.g. `STACK_FFLAGS=-fallow-argument-mismatch` in the config file -`config_custom.sh`. This will append `STACK_FFLAGS` to `FFLAGS` in -every build script under libs. - -2. Package specific flags: To compile only the specific package under -`libs` with the above compiler flag, one can define variable -`FFLAGS=-fallow-argument-mismatch` in the `` section of the -YAML file `stack_custom.yaml`. This will append -`STACK__FFLAGS` to `FFLAGS` in the build script for that -`` only. - -### Adding a New library/package - -If you want to add a new library to the stack you need to follow these -steps: - -1. write a new build script in libs, using exising scripts as a -template - -2. define a new section in the `yaml` file for that library/package in -config directory - -3. if the package is a python virtual environment, add a `requirements.txt` or `environment.yml` file containing the listing the python packages required to install the package. These files should be named and placed in `pyvenv/package_name.txt` and `pyvenv/package_name.yml`. `VENVTYPE=pyvenv` will use the `pyvenv/package_name.txt` and `VENVTYPE=condaenv` will use `pyvenv/package_name.yml` - -4. Add a call to the new build script in `build_stack.sh` - -5. Create a new module template at the appropriate place in the -modulefiles directory, using exising files as a template - -6. Update `README.md` to include the name of the new library or package - -### Configuring for a new HPC - -If you want to port this to a new HPC, you need to follow these steps: - -1. Write a new config file `config/config_.sh`, using existing -configs as a template. Also create a new yaml file -`config/stack_.yaml`, using existing yaml files as a template. - -2. Add/remove basic modules for that HPC - -3. Choose the appropriate Compiler/MPI combination. - -4. If a template modulefile does not exist for that Compiler/MPI -combinattion, create module templates at the appropriate place in the -modulefiles directory, using existing files as a -template. E.g. `hpc-ips` or `hpc-smpi`. - -5. If the HPC provides some basic modules for e.g. Git, CMake, -etc. they can be loaded in `config/config_.sh` - -### Using the **DOWNLOAD_ONLY** option - -If an HPC (e.g. NOAA RDHPCS Hera) does not allow access to online -software via `wget` or `git clone`, you will have to download all the -packages using the **DOWNLOAD_ONLY** option in the `config_custom.sh`. -Execute `build_stack.sh` as you would on a machine that does allow -access to online software with `DOWNLOAD_ONLY=YES` and all the -packages will be downloaded in the `pkg` directory. Transfer the -contents of the `pkg` directory to the machine you wish to install the -hpc-stack and execute `build_stack.sh`. `build_stack.sh` will detect -the already downloaded packages and use them rather than fetching -them. 
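As a concrete illustration of the configuration options described in this section, a `config/config_custom.sh` for a hypothetical Linux machine might contain settings along the following lines. This is only a sketch: every value shown is an illustrative assumption rather than a recommended default, and the `config/config_custom.sh` file shipped in the repository remains the authoritative template.

```
# Hypothetical example values for config/config_custom.sh
export HPC_COMPILER="gnu/9.3.0"    # compiler, in "module load" format
export HPC_MPI="openmpi/4.0.5"     # MPI library, in the same format
export HPC_PYTHON="python/3.7.5"   # Python interpreter used for the build
export USE_SUDO=N                  # Y only if PREFIX needs root permission to write
export DOWNLOAD_ONLY=NO            # YES to download source code without building
export PKGDIR=pkg                  # where software tarballs are downloaded and unpacked
export LOGDIR=log                  # where build log files are written
export OVERWRITE=N                 # Y to rebuild and replace existing installations
export NTHREADS=4                  # number of threads for parallel builds
export MAKE_CHECK=N                # run "make check" after each build
export MAKE_VERBOSE=N              # extra detail in the log files
export VENVTYPE=pyvenv             # pyvenv when using pip, condaenv when using Miniconda
```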
- -### Using the HPC-stack - -- If Lmod is used to manage the software stack, to use the HPC-stack, - you need to activate the stack. This is done by loading the `hpc` - module under `$PREFIX/modulefiles/stack` as follows: - -``` -module use $PREFIX/modulefiles/stack -module load hpc/1.0.0 -``` - -This will put the `hpc-` module in your `MODULEPATH`, -which can be loaded as: - -``` -module load hpc-/ -``` - -- If the HPC-stack is not managed via modules, you need to add - `$PREFIX` to the PATH as follows: - -``` -export PATH="$PREFIX/bin:$PATH" -export LD_LIBRARY_PATH="$PREFIX/lib:$LD_LIBRARY_PATH" -export CMAKE_PREFIX_PATH="$PREFIX" -``` - -### Known workaround for certain installations of Lmod. - -- On some machine's (e.g. **WCOSS_DELL_P3**), LMod is built to disable - loading of default modulefiles and requires the user to load the - module with an explicit version of the module. e.g. `module load - netcdf/4.7.4` instead of `module load netcdf`. The latter looks for - the `default` module which is either the latest version or a version - that is marked as default. To circumvent this, it is necessary to - place the following lines in `modulefiles/stack/hpc/hpc.lua` prior - to executing `setup_modules.sh` or in - `$PREFIX/modulefiles/stack/hpc/1.0.0.lua` after executing - `setup_modules.sh`. - -``` --- https://lmod.readthedocs.io/en/latest/090_configuring_lmod.html -setenv("LMOD_EXACT_MATCH", "no") -setenv("LMOD_EXTENDED_DEFAULT", "yes") -``` - -## Known Issues - -- NetCDF-C++ does not build with LLVM Clang. It can be disabled by setting -`disable_cxx: YES` in the stack file under the NetCDF section. - -- Json-schema-validator does not build with LLVM Clang. It can be disabled -in the stack file in the json-schema-validator-section. ## Disclaimer diff --git a/docs/source/hpc-intro-text.rst b/docs/source/hpc-intro-text.rst index 1f9809a2..41e4ada0 100644 --- a/docs/source/hpc-intro-text.rst +++ b/docs/source/hpc-intro-text.rst @@ -1,9 +1,8 @@ -**Definition:** HPC-stack is a repository that provides a unified, shell script-based build system for -building the software stack required for the `Unified Forecast System (UFS) `_ and applications. +**Definition:** The HPC-stack is a repository that provides a unified, shell script-based build system for building the software stack required for numerical weather prediction (NWP) tools such as the `Unified Forecast System (UFS) `_ and the Joint Effort for Data assimilation Integration (JEDI) framework. Background ------------------------ -The UFS Weather Model draws on over 50 code libraries to run its applications. These libraries range from libraries developed in-house at NOAA (e.g. NCEPLIBS, FMS, etc.) to libraries developed by NOAA's partners (e.g. PIO, ESMF etc) to truly third party libraries (e.g. NETCDF). Individual installation of these libraries is not practical, so the `HPC-Stack `_ was developed as a central installation system to ensure that the infrastructure environment across multiple platforms is as similar as possible. Installation of the HPC-Stack is required to run the SRW. +The `HPC-Stack `_ provides libraries and dependencies in a consistent manner for NWP applications. It is part of the `NCEPLIBS project `_ and is model/system agnostic. The HPC-Stack was originally written to facilitate installation of third-party libraries in a systematic manner on macOS and Linux systems (specifically RHEL). It was later transferred, expanded and further enhanced in the `Joint Effort for Data assimilation Integration (JEDI) `_ project. 
Instructions ------------------------- @@ -11,8 +10,6 @@ Instructions Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :ref:`Installing the HPC-Stack `. -.. note:: - `HPC-Stack `_ is part of the NCEPLIBS project and was originally written for the `Joint Effort for Data assimilation Integration (JEDI) `_ framework. diff --git a/docs/source/hpc-notes.rst b/docs/source/hpc-notes.rst index 0eb8903d..001b143b 100644 --- a/docs/source/hpc-notes.rst +++ b/docs/source/hpc-notes.rst @@ -32,7 +32,7 @@ If you want to add a new library to the stack you need to follow these steps: 5. Create a new module template at the appropriate place in the modulefiles directory, using exising files as a template -6. Update ``README.md`` to include the name of the new library or package +6. Update the `HPC Components ` file to include the name of the new library or package Configuring for a new HPC --------------------------- @@ -52,7 +52,7 @@ If you want to port this to a new HPC, you need to follow these steps: Using the **DOWNLOAD_ONLY** Option ---------------------------------------- -If an HPC (e.g. NOAA RDHPCS Hera) does not allow access to online software via ``wget`` or ``git clone``, you will have to download all the packages using the **DOWNLOAD_ONLY** option in the ``config_custom.sh``. Execute ``build_stack.sh`` as you would on a machine that does allow access to online software with ``DOWNLOAD_ONLY=YES`` and all the packages will be downloaded in the ``pkg`` directory. Transfer the contents of the ``pkg`` directory to the machine you wish to install the hpc-stack and execute ``build_stack.sh``. ``build_stack.sh`` will detect the already downloaded packages and use them rather than fetching them. +If an HPC (e.g., NOAA RDHPCS Hera) does not allow access to online software via ``wget`` or ``git clone``, you will have to download all the packages using the **DOWNLOAD_ONLY** option in the ``config_custom.sh``. Execute ``build_stack.sh`` as you would on a machine that does allow access to online software with ``DOWNLOAD_ONLY=YES`` and all the packages will be downloaded in the ``pkg`` directory. Transfer the contents of the ``pkg`` directory to the machine you wish to install the hpc-stack and execute ``build_stack.sh``. ``build_stack.sh`` will detect the already downloaded packages and use them rather than fetching them. 
Using the HPC-stack --------------------- From 248bf25f90fa6de23d252cec50812e1b4f2f86fc Mon Sep 17 00:00:00 2001 From: gspetro Date: Wed, 16 Feb 2022 19:03:36 -0500 Subject: [PATCH 06/30] edit conf.py --- docs/source/conf.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index f94d6599..2b99aef6 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -45,8 +45,7 @@ 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', 'sphinx.ext.napoleon', - 'sphinxcontrib.bibtex', - 'myst_parser' + 'sphinxcontrib.bibtex' ] bibtex_bibfiles = ['references.bib'] From a723bd238f49bf3ac13d9888bb5924967d6cd595 Mon Sep 17 00:00:00 2001 From: gspetro Date: Wed, 16 Feb 2022 19:14:37 -0500 Subject: [PATCH 07/30] add sphinx_rst_theme to extensions in conf.py --- docs/source/_templates/conf.py | 111 +++++++++++++++++++++++++++++++++ docs/source/conf.py | 1 + 2 files changed, 112 insertions(+) create mode 100644 docs/source/_templates/conf.py diff --git a/docs/source/_templates/conf.py b/docs/source/_templates/conf.py new file mode 100644 index 00000000..2b99aef6 --- /dev/null +++ b/docs/source/_templates/conf.py @@ -0,0 +1,111 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'HPC-Stack Users Guide' +copyright = '2022, EMC/EPIC' +author = 'EMC/EPIC' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = '' + +numfig = True + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.ifconfig', + 'sphinx.ext.viewcode', + 'sphinx.ext.githubpages', + 'sphinx.ext.napoleon', + 'sphinxcontrib.bibtex' +] + +bibtex_bibfiles = ['references.bib'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. 
+exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' +html_theme_path = ["_themes", ] + + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} +html_theme_options = {"body_max_width": "none"} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +html_context = { + 'css_files': [ + '_static/theme_overrides.css', # override wide tables in RTD theme + ], + } + +def setup(app): + app.add_css_file('custom.css') # may also be an URL + diff --git a/docs/source/conf.py b/docs/source/conf.py index 2b99aef6..83945500 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -45,6 +45,7 @@ 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', 'sphinx.ext.napoleon', + 'sphinx_rtd_theme', 'sphinxcontrib.bibtex' ] From da39f1ced23f30e39b0331e30834ee89d4806034 Mon Sep 17 00:00:00 2001 From: gspetro Date: Wed, 16 Feb 2022 19:29:02 -0500 Subject: [PATCH 08/30] more edits to conf.py --- docs/source/conf.py | 98 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/docs/source/conf.py b/docs/source/conf.py index 83945500..c634fedd 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -110,3 +110,101 @@ def setup(app): app.add_css_file('custom.css') # may also be an URL +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'UFS-SR-Weather-App' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_engine = 'pdflatex' +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + 'pointsize': '11pt', + + # Additional stuff for the LaTeX preamble. + 'preamble': r''' + \usepackage{charter} + \usepackage[defaultsans]{lato} + \usepackage{inconsolata} + ''', + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'UFS-SRWeatherApp.tex', 'UFS Short-Range Weather App Users Guide', + ' ', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [ + (master_doc, 'UFS-SRWeatherApp', 'UFS Short-Range Weather App Users Guide', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'UFS-SRWeatherApp', 'UFS Short-Range Weather App Users Guide', + author, 'UFS-SRWeatherApp', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + + +# -- Extension configuration ------------------------------------------------- + +# -- Options for intersphinx extension --------------------------------------- + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'https://docs.python.org/': None} + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True \ No newline at end of file From 3d99f1c7a44a51d88d03b3a097a1d4c4467f7ffe Mon Sep 17 00:00:00 2001 From: gspetro Date: Wed, 16 Feb 2022 19:47:47 -0500 Subject: [PATCH 09/30] edits to conf.py --- docs/source/conf.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index c634fedd..becc5ed5 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -14,6 +14,10 @@ import sys sys.path.insert(0, os.path.abspath('.')) +import sphinx_rtd_theme +from sphinx_rtd_theme import __version__ as theme_version +from sphinx_rtd_theme import __version_full__ as theme_version_full +from sphinx.locale import _ # -- Project information ----------------------------------------------------- @@ -202,7 +206,11 @@ def setup(app): # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'https://docs.python.org/': None} +#intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = { + 'rtd': ('https://docs.readthedocs.io/en/stable/', None), + 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), +} # -- Options for todo extension ---------------------------------------------- From 50da53bf402f3f4455658c851c2ce4c967c8d625 Mon Sep 17 00:00:00 2001 From: gspetro Date: Wed, 16 Feb 2022 20:09:10 -0500 Subject: [PATCH 10/30] edits for rtd theme issue --- docs/source/conf.py | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index becc5ed5..055bbcc9 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -14,11 +14,6 @@ import sys sys.path.insert(0, os.path.abspath('.')) -import sphinx_rtd_theme -from sphinx_rtd_theme import __version__ as theme_version -from sphinx_rtd_theme import __version_full__ as theme_version_full -from sphinx.locale import _ - # -- Project information ----------------------------------------------------- project = 'HPC-Stack Users Guide' @@ -50,7 +45,7 @@ 'sphinx.ext.githubpages', 'sphinx.ext.napoleon', 'sphinx_rtd_theme', - 'sphinxcontrib.bibtex' + 'sphinxcontrib.bibtex', ] bibtex_bibfiles = ['references.bib'] @@ -89,7 +84,8 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +#html_theme = 'sphinx_rtd_theme' +html_theme = 'classic' html_theme_path = ["_themes", ] @@ -128,7 +124,7 @@ def setup(app): # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'UFS-SR-Weather-App' +htmlhelp_basename = 'HPC-Stack' # -- Options for LaTeX output ------------------------------------------------ @@ -156,7 +152,7 @@ def setup(app): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'UFS-SRWeatherApp.tex', 'UFS Short-Range Weather App Users Guide', + (master_doc, 'HPC-Stack.tex', 'HPC-Stack Users Guide', ' ', 'manual'), ] @@ -166,7 +162,7 @@ def setup(app): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - (master_doc, 'UFS-SRWeatherApp', 'UFS Short-Range Weather App Users Guide', + (master_doc, 'HPC-Stack', 'HPC-Stack Users Guide', [author], 1) ] @@ -177,8 +173,8 @@ def setup(app): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'UFS-SRWeatherApp', 'UFS Short-Range Weather App Users Guide', - author, 'UFS-SRWeatherApp', 'One line description of project.', + (master_doc, 'HPC-Stack', 'HPC-Stack Users Guide', + author, 'HPC-Stack', 'One line description of project.', 'Miscellaneous'), ] @@ -206,11 +202,11 @@ def setup(app): # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. 
-#intersphinx_mapping = {'https://docs.python.org/': None} -intersphinx_mapping = { - 'rtd': ('https://docs.readthedocs.io/en/stable/', None), - 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), -} +intersphinx_mapping = {'https://docs.python.org/': None} +#intersphinx_mapping = { +# 'rtd': ('https://docs.readthedocs.io/en/stable/', None), +# 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), +#} # -- Options for todo extension ---------------------------------------------- From c16a69e1e455f8778c2445539b0c64da9ce16af9 Mon Sep 17 00:00:00 2001 From: gspetro Date: Wed, 16 Feb 2022 20:45:25 -0500 Subject: [PATCH 11/30] edits for rtd theme issue --- docs/source/conf.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 055bbcc9..c44a48d6 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -84,8 +84,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -#html_theme = 'sphinx_rtd_theme' -html_theme = 'classic' +html_theme = 'sphinx_rtd_theme' html_theme_path = ["_themes", ] @@ -202,7 +201,12 @@ def setup(app): # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = { + 'python': ('https://docs.python.org/3/', None), + 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), +} +intersphinx_disabled_domains = ['std'] +#intersphinx_mapping = {'https://docs.python.org/': None} #intersphinx_mapping = { # 'rtd': ('https://docs.readthedocs.io/en/stable/', None), # 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), From 559e3b02266236a238464298ab028537a1447829 Mon Sep 17 00:00:00 2001 From: gspetro Date: Thu, 17 Feb 2022 12:14:45 -0500 Subject: [PATCH 12/30] fixed ReadTheDocs css issue --- docs/source/conf.py | 20 +++++--------------- docs/source/index.rst | 3 +-- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index c44a48d6..219e335e 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -45,11 +45,8 @@ 'sphinx.ext.githubpages', 'sphinx.ext.napoleon', 'sphinx_rtd_theme', - 'sphinxcontrib.bibtex', ] -bibtex_bibfiles = ['references.bib'] - # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -100,11 +97,7 @@ # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] -html_context = { - 'css_files': [ - '_static/theme_overrides.css', # override wide tables in RTD theme - ], - } +html_context = {} def setup(app): app.add_css_file('custom.css') # may also be an URL @@ -123,7 +116,7 @@ def setup(app): # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'HPC-Stack' +htmlhelp_basename = 'HPC-Stack Help' # -- Options for LaTeX output ------------------------------------------------ @@ -201,16 +194,13 @@ def setup(app): # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - 'python': ('https://docs.python.org/3/', None), - 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), -} -intersphinx_disabled_domains = ['std'] -#intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = {'https://docs.python.org/': None} #intersphinx_mapping = { # 'rtd': ('https://docs.readthedocs.io/en/stable/', None), # 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), #} +#intersphinx_disabled_domains = ['std'] + # -- Options for todo extension ---------------------------------------------- diff --git a/docs/source/index.rst b/docs/source/index.rst index 61d6ee20..5da1d604 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,6 +1,5 @@ .. HPC-Stack Users Guide master file, created by sphinx-quickstart on Wed Feb 9 11:38:12 2022. - You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. - + HPC-Stack User's Guide ================================ From 485e7c62808566e35cfaf37ba850c08cab4c1683 Mon Sep 17 00:00:00 2001 From: gspetro Date: Thu, 17 Feb 2022 14:46:50 -0500 Subject: [PATCH 13/30] fix another ReadTheDocs css issue --- docs/source/conf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 219e335e..42d3700d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -95,7 +95,8 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +#html_static_path = ['_static'] +html_static_path = [] html_context = {} From 52d227f5718f46eee017f55b17a51f3fc1354cf3 Mon Sep 17 00:00:00 2001 From: gspetro Date: Thu, 17 Feb 2022 18:19:54 -0500 Subject: [PATCH 14/30] intro & install edits to remove SRW references --- docs/source/hpc-install.rst | 6 ++---- docs/source/hpc-intro-text.rst | 20 -------------------- docs/source/hpc-intro.rst | 9 ++++++++- 3 files changed, 10 insertions(+), 25 deletions(-) delete mode 100644 docs/source/hpc-intro-text.rst diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index ac9121c3..a7ee9cfa 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -4,7 +4,7 @@ Install and Build the HPC-Stack ================================ -.. warning:: +.. attention:: The HPC-stack is already installed on `Level 1 systems `_ (e.g., Cheyenne, Hera, Orion). Installation is not necessary. HPC-Stack installation will vary from system to system because there are so many possible combinations of operating systems, compilers, MPI's, and package versions. Installation via an EPIC-provided container is recommended to reduce this variability. However, users may choose a non-container approach to installation if they prefer. @@ -207,7 +207,5 @@ Here the -m option is only required if LMod is used for managing the software st .. warning:: Steps :numref:`Step %s `, :numref:`Step %s `, and :numref:`Step %s ` need to be repeated for each compiler/MPI combination that you wish to install.** The new packages will be installed alongside any previously-existing packages that may already have been built from other compiler/MPI combinations. -From here, the user can continue to install and run applications that depend on the HPC-Stack, such as the UFS Short Range Weather (SRW) Application. 
- - +From here, the user can continue to install and run applications that depend on the HPC-Stack. diff --git a/docs/source/hpc-intro-text.rst b/docs/source/hpc-intro-text.rst deleted file mode 100644 index 41e4ada0..00000000 --- a/docs/source/hpc-intro-text.rst +++ /dev/null @@ -1,20 +0,0 @@ -**Definition:** The HPC-stack is a repository that provides a unified, shell script-based build system for building the software stack required for numerical weather prediction (NWP) tools such as the `Unified Forecast System (UFS) `_ and the Joint Effort for Data assimilation Integration (JEDI) framework. - -Background ------------------------- -The `HPC-Stack `_ provides libraries and dependencies in a consistent manner for NWP applications. It is part of the `NCEPLIBS project `_ and is model/system agnostic. The HPC-Stack was originally written to facilitate installation of third-party libraries in a systematic manner on macOS and Linux systems (specifically RHEL). It was later transferred, expanded and further enhanced in the `Joint Effort for Data assimilation Integration (JEDI) `_ project. - -Instructions -------------------------- -`Level 1 `_ platforms (e.g. Cheyenne, Hera) already have the HPC-Stack installed. Users on those platforms do *not* need to install the HPC-Stack before building UFS applications (e.g. SRW, MRW) or models. Users working on systems that fall under `Support Levels 2-4 `_ will need to install the HPC-Stack the first time they try to run UFS applications or models. - -Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :ref:`Installing the HPC-Stack `. - - - - - - - - - diff --git a/docs/source/hpc-intro.rst b/docs/source/hpc-intro.rst index bb27618b..a39eb2fb 100644 --- a/docs/source/hpc-intro.rst +++ b/docs/source/hpc-intro.rst @@ -4,7 +4,14 @@ Introduction ====================== -.. include :: hpc-intro-text.rst +**Definition:** The HPC-stack is a repository that provides a unified, shell script-based build system to build the software stack required for numerical weather prediction (NWP) tools such as the `Unified Forecast System (UFS) `__ and the `Joint Effort for Data assimilation Integration (JEDI) `__ framework. + +Background +------------------------ +The `HPC-Stack `__ provides libraries and dependencies in a consistent manner for NWP applications. It is part of the `NCEPLIBS project `__ and is model/system agnostic. The HPC-Stack was originally written to facilitate installation of third-party libraries in a systematic manner on macOS and Linux systems (specifically RHEL). It was later transferred, expanded and further enhanced in the `Joint Effort for Data assimilation Integration (JEDI) `__ project. + + +.. include :: hpc-intro-instructions.rst From 174286c1fe8ee3c59f54539389ce9fbe2d1ef0d1 Mon Sep 17 00:00:00 2001 From: gspetro Date: Thu, 17 Feb 2022 18:35:31 -0500 Subject: [PATCH 15/30] hpc-stack-specific intro contained in one file --- docs/source/hpc-intro.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/source/hpc-intro.rst b/docs/source/hpc-intro.rst index a39eb2fb..e2ebbf67 100644 --- a/docs/source/hpc-intro.rst +++ b/docs/source/hpc-intro.rst @@ -10,8 +10,11 @@ Background ------------------------ The `HPC-Stack `__ provides libraries and dependencies in a consistent manner for NWP applications. It is part of the `NCEPLIBS project `__ and is model/system agnostic. 
The HPC-Stack was originally written to facilitate installation of third-party libraries in a systematic manner on macOS and Linux systems (specifically RHEL). It was later transferred, expanded and further enhanced in the `Joint Effort for Data assimilation Integration (JEDI) `__ project. +Instructions +------------------------- +`Level 1 `__ platforms (e.g. Cheyenne, Hera) already have the HPC-Stack installed. Users on those platforms do *not* need to install the HPC-Stack before building applications or models that require the HPC-Stack. Users working on systems that fall under `Support Levels 2-4 `_ will need to install the HPC-Stack the first time they try to run applications or models that depend on it. -.. include :: hpc-intro-instructions.rst +Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :ref:`Installing the HPC-Stack `. From e89dad9a01713b3609328e04f0d359e9f5d81af3 Mon Sep 17 00:00:00 2001 From: gspetro Date: Fri, 18 Feb 2022 13:07:51 -0500 Subject: [PATCH 16/30] fix numbering in hpc-install --- docs/source/hpc-install.rst | 41 +++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index a7ee9cfa..78c53a77 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -35,7 +35,7 @@ language. SingularityCE Version 3.7 or above is recommended. Build and Run the Container ---------------------------- -1. Pull and build the container. +#. Pull and build the container. .. code-block:: console @@ -43,15 +43,14 @@ Build and Run the Container singularity build --sandbox ubuntu20.04-epic ubuntu20.04-epic.sif cd ubuntu20.04-epic - Make a directory (e.g. ``contrib``) in the container if one does not exist: - - .. code-block:: console + Make a directory (e.g., ``contrib``) in the container if one does not exist: + + .. code-block:: console - mkdir contrib - cd .. + mkdir contrib + cd .. -2. Start the container and run an interactive shell within it. This command also binds the local working -directory to the container so that data can be shared between them. +#. Start the container and run an interactive shell within it. This command also binds the local working directory to the container so that data can be shared between them. .. code-block:: console @@ -61,38 +60,36 @@ directory to the container so that data can be shared between them. Build the HPC-Stack -------------------- -1. Clone the hpc-stack repository (from inside the singularity shell above). - +#. Clone the hpc-stack repository (from inside the singularity shell above). + .. code-block:: console git clone -b feature/ubuntu20.04 https://github.com/jkbk2004/hpc-stack cd hpc-stack -2. Set up the build environment. Be sure to change the ``prefix`` argument in the code below to -your system's install location (likely within the hpc-stack directory). - - .. code-block:: console +#. Set up the build environment. Be sure to change the ``prefix`` argument in the code below to your system's install location (likely within the hpc-stack directory). + .. code-block:: console + ./setup_modules.sh -p -c config/config_custom.sh where is the directory where the software packages will be installed with a default value $HOME/opt. - Enter YES/YES/YES when the option is presented. Then modify ``build_stack.sh`` with the following commands: .. 
code-block:: console - + sed -i "10 a source /usr/share/lmod/6.6/init/bash" ./build_stack.sh sed -i "10 a export PATH=/usr/local/sbin:/usr/local/bin:$PATH" ./build_stack.sh sed -i "10 a export LD_LIBRARY_PATH=/usr/local/lib64:/usr/local/lib:$LD_LIBRARY_PATH" ./build_stack.sh -3. Build the environment. This may take several hours to complete. - +#. Build the environment. This may take several hours to complete. + .. code-block:: console ./build_stack.sh -p -c config/config_custom.sh -y stack/stack_custom.yaml -m -4. Load the required modules. - +#. Load the required modules. + .. code-block:: console source /usr/share/lmod/lmod/init/bash @@ -157,9 +154,9 @@ Set Up Compiler, MPI, Python & Module System Run from the top directory: -.. code-block:: console + .. code-block:: console - ./setup_modules.sh -p -c + ./setup_modules.sh -p -c where: From 362435aa7e249661d762d41013c6b7c0ae81a84d Mon Sep 17 00:00:00 2001 From: gspetro Date: Fri, 18 Feb 2022 16:36:14 -0500 Subject: [PATCH 17/30] Natalie's edits and prereqs outline --- docs/source/hpc-install.rst | 46 ++++++++++++++++++++++++++++++------- docs/source/hpc-prereqs.rst | 27 ++++++++++++++++++---- 2 files changed, 60 insertions(+), 13 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index 78c53a77..9f3f0c61 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -25,11 +25,10 @@ The Earth Prediction Innovation Center (EPIC) provides several containers availa Install Singularity ----------------------- -To install the HPC-stack via Singularity container, first install the Singularity package according to the `Singularity Installation Guide `_. This will include the installation of dependencies and the installation of the Go programming -language. SingularityCE Version 3.7 or above is recommended. +To install the HPC-stack via Singularity container, first install the Singularity package according to the `Singularity Installation Guide `_. This will include the installation of dependencies and the installation of the Go programming language. SingularityCE Version 3.7 or above is recommended. .. warning:: - Docker containers can only be run with root privileges, and users cannot have root privileges on HPC computers. Therefore, it is not possible to build the HPC-stack inside a Docker container. A Docker image may be pulled, but it must be run inside a container such as Singularity. + Docker containers can only be run with root privileges, and users cannot have root privileges on HPC computers. Therefore, it is not possible to build the HPC-stack inside a Docker container on an HPC system. A Docker image may be pulled, but it must be run inside a container such as Singularity. Build and Run the Container @@ -100,6 +99,7 @@ Build the HPC-Stack From here, the user can continue to install and run applications that depend on the HPC-Stack, such as the UFS Short Range Weather (SRW) Application. +.. _NonContainerInstall: Non-Container HPC-Stack Installation and Build ================================================= @@ -112,7 +112,7 @@ To install the HPC-Stack locally, the following pre-requisites must be installed * **Python 3:** Can be obtained either from the `main distributor `_ or from `Anaconda `_. * **Compilers:** Distributions of Fortran, C, and C++ compilers that work for your system. * **Message Passing Interface (MPI)** libraries for multi-processor and multi-core communications, configured to work with your corresponding Fortran, C/C++ compilers. 
-* **Programs and software packages:** `Lmod `_, `CMake `_, `make `_, `wget `_, `curl `_, `git `_ +* **Programs and software packages:** `Lmod `_, `CMake `_, `make `_, `wget `_, `curl `_, `git `_, and the `TIFF library `_. To determine whether these prerequisites are installed, query the environment variables (for ``Lmod``) or the location and version of the packages (for ``cmake``, ``make``, ``wget``, ``curl``, ``git``). A few examples: @@ -122,6 +122,17 @@ To determine whether these prerequisites are installed, query the environment va which cmake cmake --version +Methods for determining whether ``libtiff`` is installed vary between the systems. Users can try the following approaches: + + .. code-block:: console + + whereis libtiff + locate libtiff + ldconfig -p | grep libtiff + ls /usr/lib64/libtiff* + ls /usr/lib/libtiff* + + If compilers or MPI's need to be installed, consult the :ref:`HPC-Stack Prerequisites ` document for further guidance. .. _NonConConfigure: @@ -133,7 +144,7 @@ Choose the COMPILER, MPI, and PYTHON version, and specify any other aspects of t Some of the parameter settings available are: -* HPC_COMPILER: This defines the vendor and version of the compiler you wish to use for this build. The format is the same as what you would typically use in a module load command. For example, HPC_COMPILER=intel/2020. Use ``gcc -v`` to determine your compiler and version. +* HPC_COMPILER: This defines the vendor and version of the compiler you wish to use for this build. The format is the same as what you would typically use in a module load command. For example, ``HPC_COMPILER=intel/2020``. Use ``gcc -v`` to determine your compiler and version. * HPC_MPI: This is the MPI library you wish to use. The format is the same as for HPC_COMPILER. For example: ``HPC_MPI=impi/2020``. * HPC_PYTHON: This is the Python interpreter to use for the build. The format is the same as for HPC_COMPILER, for example: ``HPC_PYTHON=python/3.7.5``. Use ``python --version`` to determine the current version of Python. @@ -152,7 +163,7 @@ Set Up Compiler, MPI, Python & Module System .. note:: This step is required if you are using ``Lmod`` modules for managing the software stack. Lmod is installed across all Level 1 and Level 2 systems and in the containers provided. If ``LMod`` is not desired or used, the user can skip ahead to :numref:`Step %s `. -Run from the top directory: +After preparing the system configuration in ``./config/config_.sh``, run the following command from the top directory: .. code-block:: console @@ -160,7 +171,11 @@ Run from the top directory: where: -```` is the directory where the software packages will be installed with a default value $HOME/opt. The software installation trees will branch directly off of , while the module files will be located in the /modulefiles subdirectory. +```` is the directory where the software packages will be installed during the hpc-stack build. The default value is $HOME/opt. The software installation trees will branch directly off of , while the module files will be located in the /modulefiles subdirectory. + +.. attention:: + + Note that ```` requires an absolute path; it will not work with a relative path. ```` points to the configuration script that you wish to use, as described in :numref:`Step %s `. The default configuration file is ``config/config_custom.sh``. 
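To make this step concrete, a minimal invocation on a generic Linux system might look like the following sketch. The prefix shown is purely illustrative; any absolute path with write permission and sufficient disk space will do, and ``config/config_custom.sh`` is simply the default configuration file named above.

.. code-block:: console

   cd hpc-stack
   ./setup_modules.sh -p $HOME/opt -c config/config_custom.sh

The script may prompt about whether to use compilers or MPI libraries that are already available natively on the system; answer according to your own setup.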
@@ -187,6 +202,21 @@ It may be necessary to set certain source and path variables in the ``build_stac export LD_LIBRARY_PATH=/usr/local/lib64:/usr/local/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH +It may also be necessary to initialize ``Lmod`` when using a user-specific ``Lmod`` installation: + + .. code-block:: console + + module purge + export BASH_ENV=$HOME//lmod/lmod/init/bash + source $BASH_ENV + export LMOD_SYSTEM_DEFAULT_MODULES=:: + module --initial_load --no_redirect restore + module use <$HOME>/ + +where: +* ```` is the top directory where Lmod is installed +* ``, ...,`` is column-separated list of modules to load by default +* <$HOME>/ is the directory where additional custom modules may be built with Lmod (e.g., $HOME/modulefiles). .. _NonConHPCBuild: @@ -199,7 +229,7 @@ Now all that remains is to build the stack: ./build_stack.sh -p -c -y -m -Here the -m option is only required if LMod is used for managing the software stack. It should be omitted otherwise. and are the same as in :numref:`Step %s `, namely a reference to the installation prefix and a corresponding configuration file in the config directory. As in :numref:`Step %s `, if this argument is omitted, the default is to use ``$HOME/opt`` and ``config/config_custom.sh`` respectively. represents a user configurable yaml file containing a list of packages that need to be built in the stack along with their versions and package options. The default value of is ``stack/stack_custom.yaml``. +Here the -m option is only required when you need to build your own modules *and* LMod is used for managing the software stack. It should be omitted otherwise. and are the same as in :numref:`Step %s `, namely a reference to the absolute-path installation prefix and a corresponding configuration file in the ``config`` directory. As in :numref:`Step %s `, if this argument is omitted, the default is to use ``$HOME/opt`` and ``config/config_custom.sh`` respectively. ```` represents a user configurable yaml file containing a list of packages that need to be built in the stack along with their versions and package options. The default value of ```` is ``stack/stack_custom.yaml``. .. warning:: Steps :numref:`Step %s `, :numref:`Step %s `, and :numref:`Step %s ` need to be repeated for each compiler/MPI combination that you wish to install.** The new packages will be installed alongside any previously-existing packages that may already have been built from other compiler/MPI combinations. diff --git a/docs/source/hpc-prereqs.rst b/docs/source/hpc-prereqs.rst index 4993c626..69cd7164 100644 --- a/docs/source/hpc-prereqs.rst +++ b/docs/source/hpc-prereqs.rst @@ -5,9 +5,26 @@ Installation of the HPC-Stack Prerequisites ============================================= -More information coming soon! +A wide variety of compiler and MPI options are available. Certain combinations may play well together, whereas others may not. 
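Before choosing a combination, it can help to check what the system already provides. The sketch below is only a starting point; command availability and module names vary from site to site.

.. code-block:: console

   # Check the compilers and MPI wrappers on the default path
   gcc --version
   gfortran --version
   mpif90 --version

   # On systems with an environment module system, list likely candidates
   module avail 2>&1 | grep -i -E "intel|gnu|gcc|mpi"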
+ +The following system, compiler, and MPI combinations have been tested successfully: + ++-----------------------------------+------------------------------------------+--------------------------------------------+ +| System | Compilers | MPI | ++===================================+===========================================+============================================+ +| SUSE Linux Enterprise Server 12.4 | Intel compilers 2020.0 (ifort, icc, icps) | Intel MPI wrappers (mpif90, mpicc, mpicxx) | ++-----------------------------------+-------------------------------------------+--------------------------------------------+ +| Linux CentOS 7 | Intel compilers 2020.0 (ifort, icc, icps) | Intel MPI (mpiifort, mpiicc, mpiicpc) | ++-----------------------------------+-------------------------------------------+--------------------------------------------+ + +Compilers and MPI libraries can be downloaded from the following websites: + +Compilers: + * `GNU/GCC `__ (version 9.x) + * `Intel `__ -.. - COMMENT: Linked from "If compilers or MPI's need to be installed, consult the `HPC-Stack Prerequisites ` document for further guidance. " - COMMENT: Add details about requirements (e.g., MPI, compilers, environment modules, etc.)??? - COMMENT: https://www.open-mpi.org/software/ompi/v4.1/ From d1b88151e367255748b150db9de0d77b90aec5ba Mon Sep 17 00:00:00 2001 From: gspetro Date: Fri, 18 Feb 2022 16:52:56 -0500 Subject: [PATCH 18/30] fix table & hyperlink issues --- docs/source/hpc-prereqs.rst | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/docs/source/hpc-prereqs.rst b/docs/source/hpc-prereqs.rst index 69cd7164..a8dd4c15 100644 --- a/docs/source/hpc-prereqs.rst +++ b/docs/source/hpc-prereqs.rst @@ -9,22 +9,26 @@ A wide variety of compiler and MPI options are available. Certain combinations m The following system, compiler, and MPI combinations have been tested successfully: -+-----------------------------------+------------------------------------------+--------------------------------------------+ -| System | Compilers | MPI | -+===================================+===========================================+============================================+ -| SUSE Linux Enterprise Server 12.4 | Intel compilers 2020.0 (ifort, icc, icps) | Intel MPI wrappers (mpif90, mpicc, mpicxx) | -+-----------------------------------+-------------------------------------------+--------------------------------------------+ -| Linux CentOS 7 | Intel compilers 2020.0 (ifort, icc, icps) | Intel MPI (mpiifort, mpiicc, mpiicpc) | -+-----------------------------------+-------------------------------------------+--------------------------------------------+ +.. 
table:: Sample System, Compiler, and MPI Options + + +------------------------+-------------------------+-----------------------------+ + | **System** | **Compilers** | **MPI** | + +========================+=========================+=============================+ + | SUSE Linux Enterprise | Intel compilers 2020.0 | Intel MPI wrappers | + | Server 12.4 | (ifort, icc, icps) | (mpif90, mpicc, mpicxx) | + +------------------------+-------------------------+-----------------------------+ + | Linux CentOS 7 | Intel compilers 2020.0 | Intel MPI | + | | (ifort, icc, icps) | (mpiifort, mpiicc, mpiicpc) | + +------------------------+-------------------------+-----------------------------+ Compilers and MPI libraries can be downloaded from the following websites: Compilers: * `GNU/GCC `__ (version 9.x) - * `Intel `__ MPI's - * `OpenMPI `__ + * `MPICH `__ * `IntelMPI (IMPI) `__ From bf2320bc5ceb6926bca9cebc0dcb94a8e8a60c01 Mon Sep 17 00:00:00 2001 From: gspetro Date: Tue, 22 Feb 2022 15:48:46 -0500 Subject: [PATCH 19/30] parameters update --- docs/source/hpc-parameters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/hpc-parameters.rst b/docs/source/hpc-parameters.rst index de03851f..03cbf977 100644 --- a/docs/source/hpc-parameters.rst +++ b/docs/source/hpc-parameters.rst @@ -20,7 +20,7 @@ Compiler & MPI * ``intel/2021.3.0`` ``HPC_MPI``: (Default: “”) - The MPI library you wish to use for this build. The format is the same as for HPC_COMPILER; for example: ``HPC_MPI=impi/2020``. Current MPI types accepted are openmpi, mpich, impi, cray, and cray*.Options include: + The MPI library you wish to use for this build. The format is the same as for HPC_COMPILER; for example: ``HPC_MPI=impi/2020``. Current MPI types accepted are openmpi, mpich, impi, cray, and cray*. Options include: * ``impi/2020`` * ``impi/2018.4.274`` From 5784e7b9a9db83bbc76f04da13ff7b7f4bbaa067 Mon Sep 17 00:00:00 2001 From: gspetro Date: Tue, 22 Feb 2022 16:09:23 -0500 Subject: [PATCH 20/30] update container install --- docs/source/hpc-install.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index 9f3f0c61..6dadffd0 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -38,9 +38,9 @@ Build and Run the Container .. 
code-block:: console - singularity pull ubuntu20.04-epic.sif docker://noaaepic/ubuntu20.04-epic - singularity build --sandbox ubuntu20.04-epic ubuntu20.04-epic.sif - cd ubuntu20.04-epic + singularity pull ubuntu20.04-gnu9.3.sif docker://noaaepic/ubuntu20.04-gnu9.3 + singularity build --sandbox ubuntu20.04-gnu9.3 ubuntu20.04-gnu9.3.sif + cd ubuntu20.04-gnu9.3 Make a directory (e.g., ``contrib``) in the container if one does not exist: From f8b32160eb1c1165a51b2df7479b2e9a158aadfc Mon Sep 17 00:00:00 2001 From: gspetro Date: Wed, 23 Feb 2022 15:28:50 -0500 Subject: [PATCH 21/30] update to hpc module location --- docs/source/hpc-install.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index 6dadffd0..a54d254a 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -15,7 +15,7 @@ HPC-Stack installation will vary from system to system because there are so many Install and Build the HPC-Stack in a Singularity Container =========================================================== -The Earth Prediction Innovation Center (EPIC) provides several containers available for the installation of the stack and Unified Forecast System (UFS) applications: +The Earth Prediction Innovation Center (EPIC) provides `several containers `__ available for the installation of the stack and Unified Forecast System (UFS) applications: * docker://noaaepic/ubuntu20.04-gnu9.3 * docker://noaaepic/ubuntu20.04-hpc-stack @@ -72,7 +72,7 @@ Build the HPC-Stack ./setup_modules.sh -p -c config/config_custom.sh - where is the directory where the software packages will be installed with a default value $HOME/opt. + where is the directory where the software packages will be installed with a default value $HOME/opt. For example, if the hpc-stack is installed in the user's directory: `/home/$USER/hpc-stack/hpc-modules` Enter YES/YES/YES when the option is presented. Then modify ``build_stack.sh`` with the following commands: .. code-block:: console @@ -87,12 +87,12 @@ Build the HPC-Stack ./build_stack.sh -p -c config/config_custom.sh -y stack/stack_custom.yaml -m -#. Load the required modules. +#. Load the required modules, making sure to change the `` to the location of the module files. .. code-block:: console source /usr/share/lmod/lmod/init/bash - module use /modulefiles/stack + module use /hpc-modules/modulefiles/stack module load hpc hpc-gnu hpc-openmpi module avail From e35bc5ba1d0d04ce132c42e119cf6df92d9194b0 Mon Sep 17 00:00:00 2001 From: gspetro Date: Mon, 28 Feb 2022 11:55:15 -0500 Subject: [PATCH 22/30] added MPI definition, odds & ends --- docs/source/hpc-install.rst | 14 +++++++++++--- docs/source/hpc-notes.rst | 2 +- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index a54d254a..0bad363d 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -9,13 +9,17 @@ Install and Build the HPC-Stack HPC-Stack installation will vary from system to system because there are so many possible combinations of operating systems, compilers, MPI's, and package versions. Installation via an EPIC-provided container is recommended to reduce this variability. However, users may choose a non-container approach to installation if they prefer. +..note:: + + MPI stands for Message Passing Interface. An MPI is a standard communication system used in parallel programming. 
It establishes portable and efficient syntax for the exchange of messages and data between multiple processors that are used by a single computer program. An MPI is required for high-performance computing (HPC). + .. _SingularityInstall: Install and Build the HPC-Stack in a Singularity Container =========================================================== -The Earth Prediction Innovation Center (EPIC) provides `several containers `__ available for the installation of the stack and Unified Forecast System (UFS) applications: +The Earth Prediction Innovation Center (EPIC) provides `several containers `__ available for the installation of the HPC-Stack and Unified Forecast System (UFS) applications: * docker://noaaepic/ubuntu20.04-gnu9.3 * docker://noaaepic/ubuntu20.04-hpc-stack @@ -28,7 +32,7 @@ Install Singularity To install the HPC-stack via Singularity container, first install the Singularity package according to the `Singularity Installation Guide `_. This will include the installation of dependencies and the installation of the Go programming language. SingularityCE Version 3.7 or above is recommended. .. warning:: - Docker containers can only be run with root privileges, and users cannot have root privileges on HPC computers. Therefore, it is not possible to build the HPC-stack inside a Docker container on an HPC system. A Docker image may be pulled, but it must be run inside a container such as Singularity. + Docker containers can only be run with root privileges, and users cannot have root privileges on HPC computers. Therefore, it is not possible to build the HPC-stack inside a Docker container on an HPC system. A Docker image may be pulled, but it must be run inside a container such as Singularity. Docker can, however, be used when building the HPC-Stack on a local system. Build and Run the Container @@ -92,10 +96,14 @@ Build the HPC-Stack .. code-block:: console source /usr/share/lmod/lmod/init/bash - module use /hpc-modules/modulefiles/stack + module use /modulefiles/stack module load hpc hpc-gnu hpc-openmpi module avail +.. Hint:: + + If the modules cannot be found in `$USER/hpc-stack/modulefiles/stack`, there may be a separate `hpc-modules` directory, which can be sourced using the command `module use /hpc-modules/modulefiles/stack`. + From here, the user can continue to install and run applications that depend on the HPC-Stack, such as the UFS Short Range Weather (SRW) Application. diff --git a/docs/source/hpc-notes.rst b/docs/source/hpc-notes.rst index 001b143b..ad876a91 100644 --- a/docs/source/hpc-notes.rst +++ b/docs/source/hpc-notes.rst @@ -49,7 +49,7 @@ If you want to port this to a new HPC, you need to follow these steps: 5. If the HPC provides some basic modules for e.g. Git, CMake, etc. they can be loaded in ``config/config_.sh`` -Using the **DOWNLOAD_ONLY** Option +Using the DOWNLOAD_ONLY Option ---------------------------------------- If an HPC (e.g., NOAA RDHPCS Hera) does not allow access to online software via ``wget`` or ``git clone``, you will have to download all the packages using the **DOWNLOAD_ONLY** option in the ``config_custom.sh``. Execute ``build_stack.sh`` as you would on a machine that does allow access to online software with ``DOWNLOAD_ONLY=YES`` and all the packages will be downloaded in the ``pkg`` directory. Transfer the contents of the ``pkg`` directory to the machine you wish to install the hpc-stack and execute ``build_stack.sh``. ``build_stack.sh`` will detect the already downloaded packages and use them rather than fetching them. 
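For orientation, the DOWNLOAD_ONLY workflow described above might look roughly like the sketch below. The transfer command, host name, and paths are placeholders to adapt for a given site, and the exact spelling of the download-only value should follow whatever your ``config_custom.sh`` expects; only the ``build_stack.sh`` flags and the ``pkg`` directory behavior come from the text above.

.. code-block:: console

   # 1. On an internet-connected machine, set the following in config/config_custom.sh:
   #       DOWNLOAD_ONLY=YES
   # 2. Run the build script; it only downloads the package sources into pkg/:
   ./build_stack.sh -p $HOME/opt -c config/config_custom.sh -y stack/stack_custom.yaml
   # 3. Transfer pkg/ to the target machine (host and path are placeholders):
   scp -r pkg/ User.Name@hpc-host:/path/to/hpc-stack/pkg/
   # 4. On the target machine, disable the download-only setting and rerun the build;
   #    build_stack.sh detects the pre-staged sources and uses them instead of fetching:
   ./build_stack.sh -p $HOME/opt -c config/config_custom.sh -y stack/stack_custom.yaml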
From 41f27f0e2657f9d3b4eaed76cff6ff6b09fe8337 Mon Sep 17 00:00:00 2001 From: gspetro Date: Mon, 28 Feb 2022 12:12:30 -0500 Subject: [PATCH 23/30] fix typos --- docs/source/hpc-install.rst | 1 - docs/source/hpc-intro.rst | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index 0bad363d..4a89f5cc 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -10,7 +10,6 @@ Install and Build the HPC-Stack HPC-Stack installation will vary from system to system because there are so many possible combinations of operating systems, compilers, MPI's, and package versions. Installation via an EPIC-provided container is recommended to reduce this variability. However, users may choose a non-container approach to installation if they prefer. ..note:: - MPI stands for Message Passing Interface. An MPI is a standard communication system used in parallel programming. It establishes portable and efficient syntax for the exchange of messages and data between multiple processors that are used by a single computer program. An MPI is required for high-performance computing (HPC). diff --git a/docs/source/hpc-intro.rst b/docs/source/hpc-intro.rst index e2ebbf67..bed3794d 100644 --- a/docs/source/hpc-intro.rst +++ b/docs/source/hpc-intro.rst @@ -4,7 +4,7 @@ Introduction ====================== -**Definition:** The HPC-stack is a repository that provides a unified, shell script-based build system to build the software stack required for numerical weather prediction (NWP) tools such as the `Unified Forecast System (UFS) `__ and the `Joint Effort for Data assimilation Integration (JEDI) `__ framework. +**Definition:** The HPC-Stack is a repository that provides a unified, shell script-based build system to build the software stack required for numerical weather prediction (NWP) tools such as the `Unified Forecast System (UFS) `__ and the `Joint Effort for Data assimilation Integration (JEDI) `__ framework. Background ------------------------ @@ -12,9 +12,9 @@ The `HPC-Stack `__ provides libraries Instructions ------------------------- -`Level 1 `__ platforms (e.g. Cheyenne, Hera) already have the HPC-Stack installed. Users on those platforms do *not* need to install the HPC-Stack before building applications or models that require the HPC-Stack. Users working on systems that fall under `Support Levels 2-4 `_ will need to install the HPC-Stack the first time they try to run applications or models that depend on it. +`Level 1 `__ platforms (e.g., Cheyenne, Hera) already have the HPC-Stack installed. Users on those platforms do *not* need to install the HPC-Stack before building applications or models that require the HPC-Stack. Users working on systems that fall under `Support Levels 2-4 `_ will need to install the HPC-Stack the first time they try to run applications or models that depend on it. -Users can either build the HPC-stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :ref:`Installing the HPC-Stack `. +Users can either build the HPC-Stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :ref:`Installing the HPC-Stack `. 
From 98ba6d1ac8e45dcdcbfae3def3a07554fcad9dd2 Mon Sep 17 00:00:00 2001 From: gspetro Date: Mon, 28 Feb 2022 12:14:19 -0500 Subject: [PATCH 24/30] fix typos --- docs/source/hpc-install.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index 4a89f5cc..bf01e8e4 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -9,7 +9,8 @@ Install and Build the HPC-Stack HPC-Stack installation will vary from system to system because there are so many possible combinations of operating systems, compilers, MPI's, and package versions. Installation via an EPIC-provided container is recommended to reduce this variability. However, users may choose a non-container approach to installation if they prefer. -..note:: +.. note:: + MPI stands for Message Passing Interface. An MPI is a standard communication system used in parallel programming. It establishes portable and efficient syntax for the exchange of messages and data between multiple processors that are used by a single computer program. An MPI is required for high-performance computing (HPC). @@ -18,7 +19,7 @@ HPC-Stack installation will vary from system to system because there are so many Install and Build the HPC-Stack in a Singularity Container =========================================================== -The Earth Prediction Innovation Center (EPIC) provides `several containers `__ available for the installation of the HPC-Stack and Unified Forecast System (UFS) applications: +The Earth Prediction Innovation Center (EPIC) provides `several containers `__ available for the installation of the HPC-Stack (individually or combined with Unified Forecast System (UFS) applications): * docker://noaaepic/ubuntu20.04-gnu9.3 * docker://noaaepic/ubuntu20.04-hpc-stack From e986b20cdb61983bdefb0c01ca287b863cca3908 Mon Sep 17 00:00:00 2001 From: gspetro Date: Mon, 28 Feb 2022 13:22:47 -0500 Subject: [PATCH 25/30] fix typos --- docs/source/hpc-install.rst | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index bf01e8e4..fef9e4c4 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -5,7 +5,7 @@ Install and Build the HPC-Stack ================================ .. attention:: - The HPC-stack is already installed on `Level 1 systems `_ (e.g., Cheyenne, Hera, Orion). Installation is not necessary. + The HPC-Stack is already installed on `Level 1 `__ systems (e.g., Cheyenne, Hera, Orion). Installation is not necessary. HPC-Stack installation will vary from system to system because there are so many possible combinations of operating systems, compilers, MPI's, and package versions. Installation via an EPIC-provided container is recommended to reduce this variability. However, users may choose a non-container approach to installation if they prefer. @@ -29,10 +29,10 @@ The Earth Prediction Innovation Center (EPIC) provides `several containers `_. This will include the installation of dependencies and the installation of the Go programming language. SingularityCE Version 3.7 or above is recommended. +To install the HPC-Stack via Singularity container, first install the Singularity package according to the `Singularity Installation Guide `_. This will include the installation of dependencies and the installation of the Go programming language. SingularityCE Version 3.7 or above is recommended. .. 
warning:: - Docker containers can only be run with root privileges, and users cannot have root privileges on HPC computers. Therefore, it is not possible to build the HPC-stack inside a Docker container on an HPC system. A Docker image may be pulled, but it must be run inside a container such as Singularity. Docker can, however, be used when building the HPC-Stack on a local system. + Docker containers can only be run with root privileges, and users cannot have root privileges on HPC computers. Therefore, it is not possible to build the HPC-Stack inside a Docker container on an HPC system. A Docker image may be pulled, but it must be run inside a container such as Singularity. Docker can, however, be used to build the HPC-Stack on a *local* system. Build and Run the Container @@ -57,7 +57,7 @@ Build and Run the Container .. code-block:: console - singularity shell -e --writable --bind /contrib:/contrib ubuntu20.04-gnu9.3 + singularity shell -e --writable --bind /:/contrib ubuntu20.04-gnu9.3 Build the HPC-Stack @@ -67,16 +67,16 @@ Build the HPC-Stack .. code-block:: console - git clone -b feature/ubuntu20.04 https://github.com/jkbk2004/hpc-stack + git clone https://github.com/NOAA-EMC/hpc-stack cd hpc-stack -#. Set up the build environment. Be sure to change the ``prefix`` argument in the code below to your system's install location (likely within the hpc-stack directory). +#. Set up the build environment. Be sure to change the ``prefix`` argument in the code below to your system's install location (likely within the ``hpc-stack`` directory). .. code-block:: console ./setup_modules.sh -p -c config/config_custom.sh - where is the directory where the software packages will be installed with a default value $HOME/opt. For example, if the hpc-stack is installed in the user's directory: `/home/$USER/hpc-stack/hpc-modules` + where is the directory where the software packages will be installed with a default value $HOME/opt. For example, if the HPC-Stack is installed in the user's directory: `/home/$USER/hpc-stack/hpc-modules` Enter YES/YES/YES when the option is presented. Then modify ``build_stack.sh`` with the following commands: .. code-block:: console @@ -102,7 +102,7 @@ Build the HPC-Stack .. Hint:: - If the modules cannot be found in `$USER/hpc-stack/modulefiles/stack`, there may be a separate `hpc-modules` directory, which can be sourced using the command `module use /hpc-modules/modulefiles/stack`. + If the modules cannot be found in ``$USER/hpc-stack/modulefiles/stack``, there may be a separate ``hpc-modules`` directory, which can be sourced using the command ``module use /hpc-modules/modulefiles/stack``. From here, the user can continue to install and run applications that depend on the HPC-Stack, such as the UFS Short Range Weather (SRW) Application. @@ -119,10 +119,10 @@ To install the HPC-Stack locally, the following pre-requisites must be installed * **Python 3:** Can be obtained either from the `main distributor `_ or from `Anaconda `_. * **Compilers:** Distributions of Fortran, C, and C++ compilers that work for your system. -* **Message Passing Interface (MPI)** libraries for multi-processor and multi-core communications, configured to work with your corresponding Fortran, C/C++ compilers. +* **Message Passing Interface (MPI)** libraries for multi-processor and multi-core communications, configured to work with your corresponding Fortran, C, and C++ compilers. 
* **Programs and software packages:** `Lmod `_, `CMake `_, `make `_, `wget `_, `curl `_, `git `_, and the `TIFF library `_. -To determine whether these prerequisites are installed, query the environment variables (for ``Lmod``) or the location and version of the packages (for ``cmake``, ``make``, ``wget``, ``curl``, ``git``). A few examples: +To determine whether these prerequisites are installed, query the environment variables (for ``Lmod``) or the location and version of the packages (for ``cmake``, ``make``, ``wget``, ``curl``, ``git``). For example: .. code-block:: console @@ -130,7 +130,7 @@ To determine whether these prerequisites are installed, query the environment va which cmake cmake --version -Methods for determining whether ``libtiff`` is installed vary between the systems. Users can try the following approaches: +Methods for determining whether ``libtiff`` is installed vary between systems. Users can try the following approaches: .. code-block:: console @@ -228,7 +228,7 @@ where: .. _NonConHPCBuild: -Build the HPC-stack +Build the HPC-Stack -------------------- Now all that remains is to build the stack: From d7897532b490b3038f83387c50e6644591bd04dd Mon Sep 17 00:00:00 2001 From: gspetro Date: Mon, 28 Feb 2022 15:40:49 -0500 Subject: [PATCH 26/30] fix formatting throughout --- docs/source/hpc-components.rst | 170 +++++++++++++++++---------------- docs/source/hpc-install.rst | 29 +++--- docs/source/hpc-notes.rst | 68 +++++-------- docs/source/hpc-parameters.rst | 20 ++-- docs/source/hpc-prereqs.rst | 2 +- 5 files changed, 139 insertions(+), 150 deletions(-) diff --git a/docs/source/hpc-components.rst b/docs/source/hpc-components.rst index f15286d0..d8962a87 100644 --- a/docs/source/hpc-components.rst +++ b/docs/source/hpc-components.rst @@ -5,87 +5,91 @@ HPC-Stack Components ===================== -The HPC-Stack packages are built in :numref:`Step %s ` using the `build_stack.sh` script. The following software can optionally be built with the scripts under `libs`. 
- -* Compilers and MPI libraries - - - [GNU/GCC](https://gcc.gnu.org/) - - [Intel](https://intel.com) - - [OpenMPI](https://www.open-mpi.org/) - - [MPICH](https://www.mpich.org/) - - `hpc-` Meta-modules for all the above as well as Intel and IMPI - -* HPC Stack - Third Party Libraries - - - [CMake](https://cmake.org/) - - [Udunits](https://www.unidata.ucar.edu/software/udunits/) - - [PNG](http://www.libpng.org/pub/png/) - - [JPEG](https://jpeg.org/) - - [Jasper](https://github.com/jasper-software/jasper) - - [SZip](https://support.hdfgroup.org/doc_resource/SZIP/) - - [Zlib](http://www.zlib.net/) - - [HDF5](https://www.hdfgroup.org/solutions/hdf5/) - - [PNetCDF](https://parallel-netcdf.github.io/) - - [NetCDF](https://www.unidata.ucar.edu/software/netcdf/) - - [ParallelIO](https://github.com/NCAR/ParallelIO) - - [nccmp](https://gitlab.com/remikz/nccmp) - - [nco](http://nco.sourceforge.net/) - - [CDO](https://code.mpimet.mpg.de/projects/cdo) - - [FFTW](http://www.fftw.org/) - - [GPTL](https://jmrosinski.github.io/GPTL/) - - [Tau2]() - - [Boost](https://beta.boost.org/) - - [Eigen](http://eigen.tuxfamily.org/) - - [GSL-Lite](http://github.com/gsl-lite/gsl-lite) - - [JSON for C++](https://github.com/nlohmann/json/) - - [JSON Schema Validator for C++](https://github.com/pboettch/json-schema-validator) - - [pybind11](https://github.com/pybind/pybind11) - - [MADIS](https://madis-data.ncep.noaa.gov) - - [SQLite](https://www.sqlite.org) - - [PROJ](https://proj.org) - - [GEOS](https://www.osgeo.org/projects/geos) - -* UFS Dependencies - - - [ESMF](https://www.earthsystemcog.org/projects/esmf/) - - [FMS](https://github.com/noaa-gfdl/fms.git) - -* NCEP Libraries - - - [NCEPLIBS-bacio](https://github.com/noaa-emc/nceplibs-bacio.git) - - [NCEPLIBS-sigio](https://github.com/noaa-emc/nceplibs-sigio.git) - - [NCEPLIBS-sfcio](https://github.com/noaa-emc/nceplibs-sfcio.git) - - [NCEPLIBS-gfsio](https://github.com/noaa-emc/nceplibs-gfsio.git) - - [NCEPLIBS-w3nco](https://github.com/noaa-emc/nceplibs-w3nco.git) - - [NCEPLIBS-sp](https://github.com/noaa-emc/nceplibs-sp.git) - - [NCEPLIBS-ip](https://github.com/noaa-emc/nceplibs-ip.git) - - [NCEPLIBS-ip2](https://github.com/noaa-emc/nceplibs-ip2.git) - - [NCEPLIBS-g2](https://github.com/noaa-emc/nceplibs-g2.git) - - [NCEPLIBS-g2c](https://github.com/noaa-emc/nceplibs-g2c.git) - - [NCEPLIBS-g2tmpl](https://github.com/noaa-emc/nceplibs-g2tmpl.git) - - [NCEPLIBS-nemsio](https://github.com/noaa-emc/nceplibs-nemsio.git) - - [NCEPLIBS-nemsiogfs](https://github.com/noaa-emc/nceplibs-nemsiogfs.git) - - [NCEPLIBS-w3emc](https://github.com/noaa-emc/nceplibs-w3emc.git) - - [NCEPLIBS-landsfcutil](https://github.com/noaa-emc/nceplibs-landsfcutil.git) - - [NCEPLIBS-bufr](https://github.com/noaa-emc/nceplibs-bufr.git) - - [NCEPLIBS-wgrib2](https://github.com/noaa-emc/nceplibs-wgrib2.git) - - [NCEPLIBS-prod_util](https://github.com/noaa-emc/nceplibs-prod_util.git) - - [NCEPLIBS-grib_util](https://github.com/noaa-emc/nceplibs-grib_util.git) - - [NCEPLIBS-ncio](https://github.com/noaa-emc/nceplibs-ncio.git) - - [NCEPLIBS-wrf_io](https://github.com/noaa-emc/nceplibs-wrf_io.git) - - [EMC_crtm](https://github.com/noaa-emc/EMC_crtm.git) - - [EMC_post](https://github.com/noaa-emc/EMC_post.git) - -* JEDI Dependencies - - - [ecbuild](https://github.com/ecmwf/ecbuild.git) - - [eckit](https://github.com/ecmwf/eckit.git) - - [fckit](https://github.com/ecmwf/fckit.git) - - [atlas](https://github.com/ecmwf/atlas.git) - -* Python and Virtual Environments - - - 
[Miniconda3](https://docs.conda.io/en/latest/) - - [r2d2](https://github.com/jcsda-internal/r2d2.git) - +The HPC-Stack packages are built in :numref:`Step %s ` using the ``build_stack.sh`` script. The following software can optionally be built with the scripts under ``libs``. + +* **Compilers and MPI libraries** + + * `GNU/GCC `__ + * `Intel `__ + * `OpenMPI `__ + * `MPICH `__ + * ``hpc-`` Meta-modules for all the above as well as Intel and IMPI + + +* **HPC Stack - Third Party Libraries** + + * `CMake `__ + * `Udunits `__ + * `PNG `__ + * `JPEG `__ + * `Jasper `__ + * `SZip `__ + * `Zlib `__ + * `HDF5 `__ + * `PNetCDF `__ + * `NetCDF `__ + * `ParallelIO `__ + * `nccmp `__ + * `nco `__ + * `CDO `__ + * `FFTW `__ + * `GPTL `__ + * Tau2 + * `Boost `__ + * `Eigen `__ + * `GSL-Lite `__ + * `JSON for C++ `__ + * `JSON Schema Validator for C++ `__ + * `pybind11 `__ + * `MADIS `__ + * `SQLite `__ + * `PROJ `__ + * `GEOS `__ + + +* **UFS Dependencies** + + * `ESMF `__ + * `FMS `__ + + +* **NCEP Libraries** + + * `NCEPLIBS-bacio `__ + * `NCEPLIBS-sigio `__ + * `NCEPLIBS-sfcio `__ + * `NCEPLIBS-gfsio `__ + * `NCEPLIBS-w3nco `__ + * `NCEPLIBS-sp `__ + * `NCEPLIBS-ip `__ + * `NCEPLIBS-ip2 `__ + * `NCEPLIBS-g2 `__ + * `NCEPLIBS-g2c `__ + * `NCEPLIBS-g2tmpl `__ + * `NCEPLIBS-nemsio `__ + * `NCEPLIBS-nemsiogfs `__ + * `NCEPLIBS-w3emc `__ + * `NCEPLIBS-landsfcutil `__ + * `NCEPLIBS-bufr `__ + * `NCEPLIBS-wgrib2 `__ + * `NCEPLIBS-prod_util `__ + * `NCEPLIBS-grib_util `__ + * `NCEPLIBS-ncio `__ + * `NCEPLIBS-wrf_io `__ + * `EMC_crtm `__ + * `EMC_post `__ + + +* **JEDI Dependencies** + + * `ecbuild `__ + * `eckit `__ + * `fckit `__ + * `atlas `__ + + +* **Python and Virtual Environments** + + * `Miniconda3 `__ + * `r2d2 `__ diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index fef9e4c4..3a5fadf3 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -53,11 +53,13 @@ Build and Run the Container mkdir contrib cd .. -#. Start the container and run an interactive shell within it. This command also binds the local working directory to the container so that data can be shared between them. +#. From the local working directory, start the container and run an interactive shell within it. This command also binds the local working directory to the container so that data can be shared between them. .. code-block:: console singularity shell -e --writable --bind /:/contrib ubuntu20.04-gnu9.3 + + Make sure to update ```` with the name of your local working directory. Build the HPC-Stack @@ -76,7 +78,7 @@ Build the HPC-Stack ./setup_modules.sh -p -c config/config_custom.sh - where is the directory where the software packages will be installed with a default value $HOME/opt. For example, if the HPC-Stack is installed in the user's directory: `/home/$USER/hpc-stack/hpc-modules` + Here, ```` is the directory where the software packages will be installed with a default value $HOME/opt. For example, if the HPC-Stack is installed in the user's directory, the prefix might be ``/home/$USER/hpc-stack/hpc-modules``. Enter YES/YES/YES when the option is presented. Then modify ``build_stack.sh`` with the following commands: .. code-block:: console @@ -91,7 +93,7 @@ Build the HPC-Stack ./build_stack.sh -p -c config/config_custom.sh -y stack/stack_custom.yaml -m -#. Load the required modules, making sure to change the `` to the location of the module files. +#. Load the required modules, making sure to change the ```` to the location of the module files. .. 
code-block:: console @@ -148,11 +150,11 @@ If compilers or MPI's need to be installed, consult the :ref:`HPC-Stack Prerequi Configure the Build --------------------- -Choose the COMPILER, MPI, and PYTHON version, and specify any other aspects of the build that you would like. For Level 1 systems, a default configuration can be found in the applicable ``config/config_.sh`` file. For Level 2-4 systems, selections can be made by editing the config/config_custom.sh file to reflect the appropriate compiler, mpi, and python choices for your system. If Lmod is installed on your system, you can view options using the ``module avail`` command. +Choose the COMPILER, MPI, and PYTHON version, and specify any other aspects of the build that you would like. For Level 1 systems, a default configuration can be found in the applicable ``config/config_.sh`` file. For Level 2-4 systems, selections can be made by editing the config/config_custom.sh file to reflect the appropriate compiler, mpi, and python choices for your system. If Lmod is installed on your system, you can view package options using the ``module avail`` command. Some of the parameter settings available are: -* HPC_COMPILER: This defines the vendor and version of the compiler you wish to use for this build. The format is the same as what you would typically use in a module load command. For example, ``HPC_COMPILER=intel/2020``. Use ``gcc -v`` to determine your compiler and version. +* HPC_COMPILER: This defines the vendor and version of the compiler you wish to use for this build. The format is the same as what you would typically use in a ``module load`` command. For example, ``HPC_COMPILER=intel/2020``. Use ``gcc -v`` to determine your compiler and version. * HPC_MPI: This is the MPI library you wish to use. The format is the same as for HPC_COMPILER. For example: ``HPC_MPI=impi/2020``. * HPC_PYTHON: This is the Python interpreter to use for the build. The format is the same as for HPC_COMPILER, for example: ``HPC_PYTHON=python/3.7.5``. Use ``python --version`` to determine the current version of Python. @@ -160,7 +162,7 @@ Other variables include USE_SUDO, DOWNLOAD_ONLY, NOTE, PKGDIR, LOGDIR, OVERWRITE .. note:: - If you only want to install select components of the stack, you can edit the ``stack/stack_custom.yaml`` file to omit unwanted components. The ``stack/stack_custom.yaml`` file lists the software packages to be built along with their version, options, compiler flags, and any other package-specific options. A full listing of components is available in the :ref:`HPC-Stack Components ` section. + If you only want to install select components of the HPC-Stack, you can edit the ``stack/stack_custom.yaml`` file to omit unwanted components. The ``stack/stack_custom.yaml`` file lists the software packages to be built along with their version, options, compiler flags, and any other package-specific options. A full listing of components is available in the :ref:`HPC-Stack Components ` section. .. _NonConSetUp: @@ -179,7 +181,7 @@ After preparing the system configuration in ``./config/config_.sh``, r where: -```` is the directory where the software packages will be installed during the hpc-stack build. The default value is $HOME/opt. The software installation trees will branch directly off of , while the module files will be located in the /modulefiles subdirectory. +```` is the directory where the software packages will be installed during the HPC-Stack build. The default value is $HOME/opt. 
The software installation trees will branch directly off of ````, while the module files will be located in the ``/modulefiles`` subdirectory. .. attention:: @@ -189,14 +191,14 @@ where: **Additional Options:** -The compiler and mpi modules can be handled separately from the rest of the build in order to exploit site-specific installations that maximize performance. In this case, the compiler and mpi modules are preceded by an hpc- label. For example, to load the Intel compiler module and the Intel MPI (IMPI) software library, enter: +The compiler and mpi modules can be handled separately from the rest of the build in order to exploit site-specific installations that maximize performance. In this case, the compiler and mpi modules are preceded by an ``hpc-`` label. For example, to load the Intel compiler module and the Intel MPI (IMPI) software library, enter: .. code-block:: console module load hpc-intel/2020 module load hpc-impi/2020 -These hpc- modules are really meta-modules that load the compiler/mpi library and modify the MODULEPATH so that the user has access to the software packages that will be built in :numref:`Step %s `. On HPC systems, these meta-modules load the native modules provided by the system administrators. +These ``hpc-`` modules are really meta-modules that load the compiler/mpi library and modify the MODULEPATH so that the user has access to the software packages that will be built in :numref:`Step %s `. On HPC systems, these meta-modules load the native modules provided by the system administrators. In short, you may prefer not to load the compiler or MPI modules directly. Instead, loading the hpc- meta-modules as demonstrated above will provide everything needed to load software libraries. @@ -222,9 +224,10 @@ It may also be necessary to initialize ``Lmod`` when using a user-specific ``Lmo module use <$HOME>/ where: + * ```` is the top directory where Lmod is installed * ``, ...,`` is column-separated list of modules to load by default -* <$HOME>/ is the directory where additional custom modules may be built with Lmod (e.g., $HOME/modulefiles). +* ``<$HOME>/`` is the directory where additional custom modules may be built with Lmod (e.g., $HOME/modulefiles). .. _NonConHPCBuild: @@ -235,12 +238,12 @@ Now all that remains is to build the stack: .. code-block:: console - ./build_stack.sh -p -c -y -m + ./build_stack.sh -p -c -y -m -Here the -m option is only required when you need to build your own modules *and* LMod is used for managing the software stack. It should be omitted otherwise. and are the same as in :numref:`Step %s `, namely a reference to the absolute-path installation prefix and a corresponding configuration file in the ``config`` directory. As in :numref:`Step %s `, if this argument is omitted, the default is to use ``$HOME/opt`` and ``config/config_custom.sh`` respectively. ```` represents a user configurable yaml file containing a list of packages that need to be built in the stack along with their versions and package options. The default value of ```` is ``stack/stack_custom.yaml``. +Here the -m option is only required when you need to build your own modules *and* LMod is used for managing the software stack. It should be omitted otherwise. ```` and ```` are the same as in :numref:`Step %s `, namely a reference to the absolute-path installation prefix and a corresponding configuration file in the ``config`` directory. 
As in :numref:`Step %s `, if this argument is omitted, the default is to use ``$HOME/opt`` and ``config/config_custom.sh`` respectively. ```` represents a user configurable yaml file containing a list of packages that need to be built in the stack along with their versions and package options. The default value of ```` is ``stack/stack_custom.yaml``.
 
 .. warning::
 
-   Steps :numref:`Step %s `, :numref:`Step %s `, and :numref:`Step %s ` need to be repeated for each compiler/MPI combination that you wish to install.** The new packages will be installed alongside any previously-existing packages that may already have been built from other compiler/MPI combinations.
+   Steps :numref:`Step %s `, :numref:`Step %s `, and :numref:`Step %s ` need to be repeated for each compiler/MPI combination that you wish to install. The new packages will be installed alongside any previously-existing packages that may already have been built from other compiler/MPI combinations.
 
 From here, the user can continue to install and run applications that depend on the HPC-Stack.
diff --git a/docs/source/hpc-notes.rst b/docs/source/hpc-notes.rst
index ad876a91..3dea53ac 100644
--- a/docs/source/hpc-notes.rst
+++ b/docs/source/hpc-notes.rst
@@ -8,72 +8,66 @@ HPC-Stack Additional Notes
 
 Setting compiler flags and other options
 -----------------------------------------
-Often it is necessary to specify compiler flags (e.g. ``gfortran-10
--fallow-argument-mismatch``) to the packages via ``FFLAGS``. There are 2
-ways this can be achieved.
+Often it is necessary to specify compiler flags (e.g., ``gfortran-10 -fallow-argument-mismatch``) for the packages via ``FFLAGS``. There are two ways this can be achieved:
 
-1. For all packages: One can define variable e.g. ``STACK_FFLAGS=-fallow-argument-mismatch`` in the config file ``config_custom.sh``. This will append ``STACK_FFLAGS`` to ``FFLAGS`` in every build script under libs.
+#. **For all packages:** One can define a variable, e.g., ``STACK_FFLAGS=-fallow-argument-mismatch``, in the config file ``config_custom.sh``. This will append ``STACK_FFLAGS`` to ``FFLAGS`` in every build script under libs.
 
-2. Package specific flags: To compile only the specific package under ``libs`` with the above compiler flag, one can define variable ``FFLAGS=-fallow-argument-mismatch`` in the ```` section of the
-YAML file ``stack_custom.yaml``. This will append ``STACK__FFLAGS`` to ``FFLAGS`` in the build script for that ```` only.
+#. **Package specific flags:** To compile only the specific package under ``libs`` with the above compiler flag, one can define a variable ``FFLAGS=-fallow-argument-mismatch`` in the ```` section of the YAML file ``stack_custom.yaml``. This will append ``STACK__FFLAGS`` to ``FFLAGS`` in the build script for that package only.
 
 Adding a New Library or Package
 --------------------------------
 If you want to add a new library to the stack you need to follow these steps:
 
-1. Write a new build script in libs, using exising scripts as a template
+#. Write a new build script in ``libs``, using existing scripts as a template.
 
-2. Define a new section in the ``yaml`` file for that library/package in config directory
+#. Define a new section in the ``yaml`` file for that library/package in the ``config`` directory.
 
-3. If the package is a python virtual environment, add a ``requirements.txt`` or ``environment.yml`` file containing the listing the python packages required to install the package. These files should be named and placed in ``pyvenv/package_name.txt`` and ``pyvenv/package_name.yml``.
``VENVTYPE=pyvenv`` will use the ``pyvenv/package_name.txt`` and ``VENVTYPE=condaenv`` will use ``pyvenv/package_name.yml`` +#. If the package is a python virtual environment, add a ``requirements.txt`` or ``environment.yml`` file listing the python packages required to install the package. These files should be named and placed in ``pyvenv/package_name.txt`` and ``pyvenv/package_name.yml``. ``VENVTYPE=pyvenv`` will use the ``pyvenv/package_name.txt`` and ``VENVTYPE=condaenv`` will use ``pyvenv/package_name.yml``. -4. Add a call to the new build script in ``build_stack.sh`` +#. Add a call to the new build script in ``build_stack.sh``. -5. Create a new module template at the appropriate place in the modulefiles directory, using exising files as a template +#. Create a new module template at the appropriate place in the modulefiles directory, using exising files as a template. -6. Update the `HPC Components ` file to include the name of the new library or package +#. Update the :ref:`HPC Components ` file to include the name of the new library or package. Configuring for a new HPC --------------------------- If you want to port this to a new HPC, you need to follow these steps: -1. Write a new config file ``config/config_.sh``, using existing configs as a template. Also create a new yaml file ``config/stack_.yaml``, using existing yaml files as a template. +#. Write a new config file ``config/config_.sh``, using existing config files as a template. Also create a new yaml file ``config/stack_.yaml``, using existing yaml files as a template. -2. Add/remove basic modules for that HPC. +#. Add/remove basic modules for that HPC. -3. Choose the appropriate Compiler/MPI combination. +#. Choose the appropriate Compiler/MPI combination. -4. If a template modulefile does not exist for that Compiler/MPI combinattion, create module templates at the appropriate place in the modulefiles directory, using existing files as a template. E.g. ``hpc-ips`` or ``hpc-smpi``. +#. If a template modulefile does not exist for that Compiler/MPI combinattion, create module templates at the appropriate place in the ``modulefiles`` directory, using existing files as a template (e.g., ``hpc-ips`` or ``hpc-smpi``). -5. If the HPC provides some basic modules for e.g. Git, CMake, etc. they can be loaded in ``config/config_.sh`` +#. If the new HPC system provides some basic modules for e.g., Git, CMake, etc., they can be loaded in ``config/config_.sh``. Using the DOWNLOAD_ONLY Option ---------------------------------------- -If an HPC (e.g., NOAA RDHPCS Hera) does not allow access to online software via ``wget`` or ``git clone``, you will have to download all the packages using the **DOWNLOAD_ONLY** option in the ``config_custom.sh``. Execute ``build_stack.sh`` as you would on a machine that does allow access to online software with ``DOWNLOAD_ONLY=YES`` and all the packages will be downloaded in the ``pkg`` directory. Transfer the contents of the ``pkg`` directory to the machine you wish to install the hpc-stack and execute ``build_stack.sh``. ``build_stack.sh`` will detect the already downloaded packages and use them rather than fetching them. +If an HPC (e.g., NOAA RDHPCS Hera) does not allow access to online software via ``wget`` or ``git clone``, you will have to download all the packages using the ``DOWNLOAD_ONLY`` option in the ``config_custom.sh``. 
Execute ``build_stack.sh`` as you would on a machine that does allow access to online software with ``DOWNLOAD_ONLY=YES`` and all the packages will be downloaded in the ``pkg`` directory. Transfer the contents of the ``pkg`` directory to the machine where you wish to install the hpc-stack, and execute ``build_stack.sh``. The ``build_stack.sh`` script will detect the already-downloaded packages and use them rather than fetching them. Using the HPC-stack --------------------- -* If Lmod is used to manage the software stack, to use the HPC-stack, - you need to activate the stack. This is done by loading the ``hpc`` - module under ``$PREFIX/modulefiles/stack`` as follows: +* If Lmod is used to manage the software stack, you will need to activate the HPC-Stack in order to use it. This is done by loading the ``hpc`` module under ``$PREFIX/modulefiles/stack`` as follows: .. code-block:: console module use $PREFIX/modulefiles/stack module load hpc/1.0.0 -This will put the ``hpc-`` module in your ``MODULEPATH``, -which can be loaded as: +This will put the ``hpc-`` module in your ``MODULEPATH``, which can be loaded as: .. code-block:: console module load hpc-/ -* If the HPC-stack is not managed via modules, you need to add ``$PREFIX`` to the PATH as follows: +* If the HPC-Stack is not managed via modules, you need to add ``$PREFIX`` to the PATH as follows: .. code-block:: console @@ -84,17 +78,18 @@ which can be loaded as: Known Workaround for Certain Installations of Lmod ---------------------------------------------------- -* On some machine's (e.g., **WCOSS_DELL_P3**), LMod is built to disable loading of default modulefiles and requires the user to load the module with an explicit version of the module. e.g. ``module load netcdf/4.7.4`` instead of ``module load netcdf``. The latter looks for the ``default`` module which is either the latest version or a version that is marked as default. To circumvent this, it is necessary to place the following lines in ``modulefiles/stack/hpc/hpc.lua`` prior to executing ``setup_modules.sh`` or in ``$PREFIX/modulefiles/stack/hpc/1.0.0.lua`` after executing ``setup_modules.sh``. +* On some machines (e.g., WCOSS_DELL_P3), LMod is built to disable loading of default modulefiles and requires the user to load the module with an explicit version of the module (e.g., ``module load netcdf/4.7.4`` instead of ``module load netcdf``). The latter looks for the ``default`` module which is either the latest version or a version that is marked as default. To circumvent this, it is necessary to place the following lines in ``modulefiles/stack/hpc/hpc.lua`` prior to executing ``setup_modules.sh`` or in ``$PREFIX/modulefiles/stack/hpc/1.0.0.lua`` after executing ``setup_modules.sh``. .. code-block:: console - -- https://lmod.readthedocs.io/en/latest/090_configuring_lmod.html setenv("LMOD_EXACT_MATCH", "no") setenv("LMOD_EXTENDED_DEFAULT", "yes") + See more on the `Lmod website `__ + Known Issues -=============== +--------------------- * NetCDF-C++ does not build with LLVM Clang. It can be disabled by setting ``disable_cxx: YES`` in the stack file under the NetCDF section. @@ -102,19 +97,6 @@ Known Issues Disclaimer -============= - -The United States Department of Commerce (DOC) GitHub project code is -provided on an "as is" basis and the user assumes responsibility for -its use. DOC has relinquished control of the information and no longer -has responsibility to protect the integrity, confidentiality, or -availability of the information. 
Any claims against the Department of -Commerce stemming from the use of its GitHub project will be governed -by all applicable Federal law. Any reference to specific commercial -products, processes, or services by service mark, trademark, -manufacturer, or otherwise, does not constitute or imply their -endorsement, recommendation or favoring by the Department of -Commerce. The Department of Commerce seal and logo, or the seal and -logo of a DOC bureau, shall not be used in any manner to imply -endorsement of any commercial product or activity by DOC or the United -States Government. +--------------- + +The United States Department of Commerce (DOC) GitHub project code is provided on an "as is" basis and the user assumes responsibility for its use. DOC has relinquished control of the information and no longer has responsibility to protect the integrity, confidentiality, or availability of the information. Any claims against the Department of Commerce stemming from the use of its GitHub project will be governed by all applicable Federal law. Any reference to specific commercial products, processes, or services by service mark, trademark, manufacturer, or otherwise, does not constitute or imply their endorsement, recommendation or favoring by the Department of Commerce. The Department of Commerce seal and logo, or the seal and logo of a DOC bureau, shall not be used in any manner to imply endorsement of any commercial product or activity by DOC or the United States Government. diff --git a/docs/source/hpc-parameters.rst b/docs/source/hpc-parameters.rst index 03cbf977..ce89a23d 100644 --- a/docs/source/hpc-parameters.rst +++ b/docs/source/hpc-parameters.rst @@ -32,7 +32,7 @@ Compiler & MPI * ``openmpi/4.1.2`` .. note:: - For example, when using Intel-based compilers and Intel's implementation of the MPI interface (IMPI), the ``config/config_custom.sh`` should contain the following specifications: + For example, when using Intel-based compilers and Intel's implementation of the MPI interface, the ``config/config_custom.sh`` should contain the following specifications: .. code-block:: console @@ -47,7 +47,7 @@ Compiler & MPI This will set the C, Fortran, and C++ compilers and MPI's. .. note:: - To verify that your chosen mpi build (e.g., mpiicc) is based on the corresponding serial compiler (e.g., icc), use the ``-show`` option to query the MPI's. For example, + To verify that your chosen MPI build (e.g., mpiicc) is based on the corresponding serial compiler (e.g., icc), use the ``-show`` option to query the MPI's. For example, .. code-block:: console @@ -68,32 +68,32 @@ Other Parameters The Python interpretor you wish to use for this build. The format is the same as for ``HPC_COMPILER``, for example: ``HPC_PYTHON=python/3.7.5``. ``USE_SUDO``: (Default: “”) - If PREFIX is set to a value that requires root permission to write to, such as ``/opt/modules``, then this flag should be enabled. For example, ``USE_SUDO=Y``. + If the directory where the software packages will be installed (````) requires root permission to write to, such as ``/opt/modules``, then this flag should be enabled. For example, ``USE_SUDO=Y``. ``DOWNLOAD_ONLY``: (Default: “”) - The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the softwares e.g. GitHub. 
+ The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the software (e.g., GitHub). .. note:: - To enable a boolean flag use a single-digit ``Y`` or ``T``. To disable, use ``N`` or ``F`` (case insensitive) + To enable a boolean flag, use a single-digit ``Y`` or ``T``. To disable, use ``N`` or ``F`` (case insensitive). ``PKGDIR``: (Default: “”) - is the directory where tarred or zipped software files will be downloaded and compiled. Unlike PREFIX, this is a relative path, based on the root path of the repository. Individual software packages can be downloaded manually to this directory and untarred, but this is not required. Build scripts will look for directory ``pkg/pkgName-pkgVersion`` e.g., ``pkg/hdf5-1_10_3``. + is the directory where tarred or zipped software files will be downloaded and compiled. Unlike ````, this is a relative path based on the root path of the repository. Individual software packages can be downloaded manually to this directory and untarred, but this is not required. Build scripts will look for the directory ``pkg/`` (e.g., ``pkg/hdf5-1_10_3``). ``LOGDIR``: (Default: “”) The directory where log files from the build will be written, relative to the root path of the repository. ``OVERWRITE``: (Default: “”) - If set, this flag will cause the build script to remove the current installation, if any exists, and replace it with the new version of each software package in question. If this is not set, the build will bypass software packages that are already installed. + If set to ``T``, this flag will cause the build script to remove the current installation, if any exists, and replace it with the new version of each software package in question. If this variable is not set, the build will bypass software packages that are already installed. ``NTHREADS``: (Default: “”) - The number of threads to use for parallel builds + The number of threads to use for parallel builds. ``MAKE_CHECK``: (Default: “”) - Run make check after build + Run make check after build. ``MAKE_VERBOSE``: (Default: “”) - Print out extra information to the log files during the build + Print out extra information to the log files during the build. ``VENVTYPE``: (Default: “”) Set the type of python environment to build. Value depends on whether using pip or conda. Set ``VENVTYPE=pyvenv`` when using pip and ``VENVTYPE=condaenv`` when using Miniconda for creating virtual environments. Default is ``pyvenv``. 
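For orientation only, a ``config/config_custom.sh`` might set a combination of the parameters described above along the following lines. The values are illustrative placeholders drawn from the option lists in this section, not recommendations; the ``export`` form is assumed here, and the exact set of variables in any given config file may differ.

.. code-block:: console

   export HPC_COMPILER="intel/2020"    # compiler, in module-load format
   export HPC_MPI="impi/2020"          # MPI library matching that compiler
   export HPC_PYTHON="python/3.7.5"    # Python interpreter used for the build
   export USE_SUDO=N                   # writing to the install prefix needs no root access
   export DOWNLOAD_ONLY=N              # download and build in a single pass
   export PKGDIR=pkg                   # relative directory where source archives are staged
   export LOGDIR=log                   # relative directory where build logs are written
   export OVERWRITE=N                  # skip packages that are already installed
   export NTHREADS=4                   # threads for parallel builds
   export MAKE_CHECK=N                 # do not run "make check" after each build
   export MAKE_VERBOSE=N               # keep build logs terse
   export VENVTYPE=pyvenv              # pip-based Python virtual environments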
diff --git a/docs/source/hpc-prereqs.rst b/docs/source/hpc-prereqs.rst index a8dd4c15..33720e32 100644 --- a/docs/source/hpc-prereqs.rst +++ b/docs/source/hpc-prereqs.rst @@ -30,5 +30,5 @@ Compilers: MPI's * `OpenMPI `__ * `MPICH `__ - * `IntelMPI (IMPI) `__ + * `IntelMPI `__ From 6505ea6ec7810e1b9a8129ea7634830b0354b477 Mon Sep 17 00:00:00 2001 From: gspetro Date: Mon, 28 Feb 2022 16:35:25 -0500 Subject: [PATCH 27/30] fix typos/formatting --- docs/source/hpc-install.rst | 35 ++++++++++++++++------------------ docs/source/hpc-notes.rst | 10 +++++++--- docs/source/hpc-parameters.rst | 32 ++++++++++++++++--------------- 3 files changed, 40 insertions(+), 37 deletions(-) diff --git a/docs/source/hpc-install.rst b/docs/source/hpc-install.rst index 3a5fadf3..0fff20e0 100644 --- a/docs/source/hpc-install.rst +++ b/docs/source/hpc-install.rst @@ -11,7 +11,7 @@ HPC-Stack installation will vary from system to system because there are so many .. note:: - MPI stands for Message Passing Interface. An MPI is a standard communication system used in parallel programming. It establishes portable and efficient syntax for the exchange of messages and data between multiple processors that are used by a single computer program. An MPI is required for high-performance computing (HPC). + MPI stands for Message Passing Interface. An MPI is a standardized communication system used in parallel programming. It establishes portable and efficient syntax for the exchange of messages and data between multiple processors that are used by a single computer program. An MPI is required for high-performance computing (HPC). .. _SingularityInstall: @@ -19,12 +19,12 @@ HPC-Stack installation will vary from system to system because there are so many Install and Build the HPC-Stack in a Singularity Container =========================================================== -The Earth Prediction Innovation Center (EPIC) provides `several containers `__ available for the installation of the HPC-Stack (individually or combined with Unified Forecast System (UFS) applications): +The Earth Prediction Innovation Center (EPIC) provides `several containers `__ available for the installation of the HPC-Stack either individually or combined with Unified Forecast System (UFS) applications: -* docker://noaaepic/ubuntu20.04-gnu9.3 -* docker://noaaepic/ubuntu20.04-hpc-stack -* docker://noaaepic/ubuntu20.04-epic-srwapp -* docker://noaaepic/ubuntu20.04-epic-mrwapp +* ``__ +* ``__ +* ``__ +* ``__ Install Singularity ----------------------- @@ -32,7 +32,7 @@ Install Singularity To install the HPC-Stack via Singularity container, first install the Singularity package according to the `Singularity Installation Guide `_. This will include the installation of dependencies and the installation of the Go programming language. SingularityCE Version 3.7 or above is recommended. .. warning:: - Docker containers can only be run with root privileges, and users cannot have root privileges on HPC computers. Therefore, it is not possible to build the HPC-Stack inside a Docker container on an HPC system. A Docker image may be pulled, but it must be run inside a container such as Singularity. Docker can, however, be used to build the HPC-Stack on a *local* system. + Docker containers can only be run with root privileges, and users cannot have root privileges on HPC's. Therefore, it is not possible to build the HPC-Stack inside a Docker container on an HPC system. A Docker image may be pulled, but it must be run inside a container such as Singularity. 
Docker can, however, be used to build the HPC-Stack on a *local* system. Build and Run the Container @@ -65,7 +65,7 @@ Build and Run the Container Build the HPC-Stack -------------------- -#. Clone the hpc-stack repository (from inside the singularity shell above). +#. Clone the HPC-Stack repository (from inside the Singularity shell initialized above). .. code-block:: console @@ -78,7 +78,8 @@ Build the HPC-Stack ./setup_modules.sh -p -c config/config_custom.sh - Here, ```` is the directory where the software packages will be installed with a default value $HOME/opt. For example, if the HPC-Stack is installed in the user's directory, the prefix might be ``/home/$USER/hpc-stack/hpc-modules``. + Here, ```` is the directory where the software packages will be installed with a default value of ``$HOME/opt``. For example, if the HPC-Stack is installed in the user's directory, the prefix might be ``/home/$USER/hpc-stack/hpc-modules``. + Enter YES/YES/YES when the option is presented. Then modify ``build_stack.sh`` with the following commands: .. code-block:: console @@ -98,14 +99,10 @@ Build the HPC-Stack .. code-block:: console source /usr/share/lmod/lmod/init/bash - module use /modulefiles/stack + module use /hpc-modules/modulefiles/stack module load hpc hpc-gnu hpc-openmpi module avail -.. Hint:: - - If the modules cannot be found in ``$USER/hpc-stack/modulefiles/stack``, there may be a separate ``hpc-modules`` directory, which can be sourced using the command ``module use /hpc-modules/modulefiles/stack``. - From here, the user can continue to install and run applications that depend on the HPC-Stack, such as the UFS Short Range Weather (SRW) Application. @@ -130,7 +127,7 @@ To determine whether these prerequisites are installed, query the environment va echo $LMOD_PKG which cmake - cmake --version + cmake --version Methods for determining whether ``libtiff`` is installed vary between systems. Users can try the following approaches: @@ -150,7 +147,7 @@ If compilers or MPI's need to be installed, consult the :ref:`HPC-Stack Prerequi Configure the Build --------------------- -Choose the COMPILER, MPI, and PYTHON version, and specify any other aspects of the build that you would like. For Level 1 systems, a default configuration can be found in the applicable ``config/config_.sh`` file. For Level 2-4 systems, selections can be made by editing the config/config_custom.sh file to reflect the appropriate compiler, mpi, and python choices for your system. If Lmod is installed on your system, you can view package options using the ``module avail`` command. +Choose the COMPILER, MPI, and PYTHON version, and specify any other aspects of the build that you would like. For `Level 1 `__ systems, a default configuration can be found in the applicable ``config/config_.sh`` file. For Level 2-4 systems, selections can be made by editing the ``config/config_custom.sh`` file to reflect the appropriate compiler, MPI, and Python choices for your system. If Lmod is installed on your system, you can view package options using the ``module avail`` command. Some of the parameter settings available are: @@ -191,14 +188,14 @@ where: **Additional Options:** -The compiler and mpi modules can be handled separately from the rest of the build in order to exploit site-specific installations that maximize performance. In this case, the compiler and mpi modules are preceded by an ``hpc-`` label. 
For example, to load the Intel compiler module and the Intel MPI (IMPI) software library, enter: +The compiler and MPI modules can be handled separately from the rest of the build in order to exploit site-specific installations that maximize performance. In this case, the compiler and MPI modules are preceded by an ``hpc-`` label. For example, to load the Intel compiler module and the Intel MPI (IMPI) software library, enter: .. code-block:: console module load hpc-intel/2020 module load hpc-impi/2020 -These ``hpc-`` modules are really meta-modules that load the compiler/mpi library and modify the MODULEPATH so that the user has access to the software packages that will be built in :numref:`Step %s `. On HPC systems, these meta-modules load the native modules provided by the system administrators. +These ``hpc-`` modules are really meta-modules that load the compiler/MPI library and modify the MODULEPATH so that the user has access to the software packages that will be built in :numref:`Step %s `. On HPC systems, these meta-modules load the native modules provided by the system administrators. In short, you may prefer not to load the compiler or MPI modules directly. Instead, loading the hpc- meta-modules as demonstrated above will provide everything needed to load software libraries. @@ -226,7 +223,7 @@ It may also be necessary to initialize ``Lmod`` when using a user-specific ``Lmo where: * ```` is the top directory where Lmod is installed -* ``, ...,`` is column-separated list of modules to load by default +* ``, ...,`` is a comma-separated list of modules to load by default * ``<$HOME>/`` is the directory where additional custom modules may be built with Lmod (e.g., $HOME/modulefiles). .. _NonConHPCBuild: diff --git a/docs/source/hpc-notes.rst b/docs/source/hpc-notes.rst index 3dea53ac..7be1a6ed 100644 --- a/docs/source/hpc-notes.rst +++ b/docs/source/hpc-notes.rst @@ -5,7 +5,9 @@ HPC-Stack Additional Notes =========================== -Setting compiler flags and other options +.. _Flags: + +Setting Compiler Flags and Other Options ----------------------------------------- Often it is necessary to specify compiler flags (e.g., ``gfortran-10 -fallow-argument-mismatch`` for the packages via ``FFLAGS``. There are 2 ways this can be achieved: @@ -46,12 +48,14 @@ If you want to port this to a new HPC, you need to follow these steps: #. If the new HPC system provides some basic modules for e.g., Git, CMake, etc., they can be loaded in ``config/config_.sh``. +.. _DownloadOnly: + Using the DOWNLOAD_ONLY Option ---------------------------------------- -If an HPC (e.g., NOAA RDHPCS Hera) does not allow access to online software via ``wget`` or ``git clone``, you will have to download all the packages using the ``DOWNLOAD_ONLY`` option in the ``config_custom.sh``. Execute ``build_stack.sh`` as you would on a machine that does allow access to online software with ``DOWNLOAD_ONLY=YES`` and all the packages will be downloaded in the ``pkg`` directory. Transfer the contents of the ``pkg`` directory to the machine where you wish to install the hpc-stack, and execute ``build_stack.sh``. The ``build_stack.sh`` script will detect the already-downloaded packages and use them rather than fetching them. +If an HPC (e.g., NOAA RDHPCS Hera) does not allow access to online software via ``wget`` or ``git clone``, you will have to download all the packages using the ``DOWNLOAD_ONLY`` option in the ``config_custom.sh``. 
Execute ``build_stack.sh`` as you would on a machine that does allow access to online software with ``DOWNLOAD_ONLY=YES`` and all the packages will be downloaded in the ``pkg`` directory. Transfer the contents of the ``pkg`` directory to the machine where you wish to install the HPC-Stack, and execute ``build_stack.sh``. The ``build_stack.sh`` script will detect the already-downloaded packages and use them rather than fetching them. -Using the HPC-stack +Using the HPC-Stack --------------------- * If Lmod is used to manage the software stack, you will need to activate the HPC-Stack in order to use it. This is done by loading the ``hpc`` module under ``$PREFIX/modulefiles/stack`` as follows: diff --git a/docs/source/hpc-parameters.rst b/docs/source/hpc-parameters.rst index ce89a23d..d2df7713 100644 --- a/docs/source/hpc-parameters.rst +++ b/docs/source/hpc-parameters.rst @@ -8,7 +8,7 @@ Build Parameters Compiler & MPI ---------------- -``HPC_COMPILER``: (Default: “”) +``HPC_COMPILER``: This defines the vendor and version of the compiler you wish to use for this build. The format is the same as what you would typically use in a module load command. For example, ``HPC_COMPILER=intel/2020``. Options include: * ``gnu/6.5.0`` @@ -19,7 +19,9 @@ Compiler & MPI * ``intel/2020.2`` * ``intel/2021.3.0`` -``HPC_MPI``: (Default: “”) + For information on setting compiler flags, see :numref:`Section %s Additional Notes `. + +``HPC_MPI``: The MPI library you wish to use for this build. The format is the same as for HPC_COMPILER; for example: ``HPC_MPI=impi/2020``. Current MPI types accepted are openmpi, mpich, impi, cray, and cray*. Options include: * ``impi/2020`` @@ -64,36 +66,36 @@ Compiler & MPI Other Parameters -------------------- -``HPC_PYTHON``: (Default: “”) +``HPC_PYTHON``: The Python interpretor you wish to use for this build. The format is the same as for ``HPC_COMPILER``, for example: ``HPC_PYTHON=python/3.7.5``. -``USE_SUDO``: (Default: “”) - If the directory where the software packages will be installed (````) requires root permission to write to, such as ``/opt/modules``, then this flag should be enabled. For example, ``USE_SUDO=Y``. +``USE_SUDO``: + If the directory where the software packages will be installed (``$PREFIX``) requires root permission to write to, such as ``/opt/modules``, then this flag should be enabled. For example, ``USE_SUDO=Y``. -``DOWNLOAD_ONLY``: (Default: “”) - The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the software (e.g., GitHub). +``DOWNLOAD_ONLY``: + The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the software (e.g., GitHub). For more information, see :ref:`Additional Notes `. .. note:: To enable a boolean flag, use a single-digit ``Y`` or ``T``. To disable, use ``N`` or ``F`` (case insensitive). -``PKGDIR``: (Default: “”) - is the directory where tarred or zipped software files will be downloaded and compiled. Unlike ````, this is a relative path based on the root path of the repository. Individual software packages can be downloaded manually to this directory and untarred, but this is not required. Build scripts will look for the directory ``pkg/`` (e.g., ``pkg/hdf5-1_10_3``). 
@@ -64,36 +66,36 @@ Compiler & MPI
 Other Parameters
 --------------------
 
-``HPC_PYTHON``: (Default: “”)
+``HPC_PYTHON``:
   The Python interpreter you wish to use for this build. The format is the same as for ``HPC_COMPILER``, for example: ``HPC_PYTHON=python/3.7.5``.
 
-``USE_SUDO``: (Default: “”)
-  If the directory where the software packages will be installed (````) requires root permission to write to, such as ``/opt/modules``, then this flag should be enabled. For example, ``USE_SUDO=Y``.
+``USE_SUDO``:
+  If the directory where the software packages will be installed (``$PREFIX``) requires root permission to write to, such as ``/opt/modules``, then this flag should be enabled. For example, ``USE_SUDO=Y``.
 
-``DOWNLOAD_ONLY``: (Default: “”)
-  The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the software (e.g., GitHub).
+``DOWNLOAD_ONLY``:
+  The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the software (e.g., GitHub). For more information, see :ref:`Additional Notes `.
 
 .. note::
    To enable a boolean flag, use a single-digit ``Y`` or ``T``. To disable, use ``N`` or ``F`` (case insensitive).
 
-``PKGDIR``: (Default: “”)
-  is the directory where tarred or zipped software files will be downloaded and compiled. Unlike ````, this is a relative path based on the root path of the repository. Individual software packages can be downloaded manually to this directory and untarred, but this is not required. Build scripts will look for the directory ``pkg/`` (e.g., ``pkg/hdf5-1_10_3``).
+``PKGDIR``:
+  The directory where tarred or zipped software files will be downloaded and compiled. Unlike ``$PREFIX``, this is a relative path based on the root path of the repository. Individual software packages can be downloaded manually to this directory and untarred, but this is not required. Build scripts will look for the directory ``pkg/`` (e.g., ``pkg/hdf5-1_10_3``).
 
-``LOGDIR``: (Default: “”)
+``LOGDIR``:
   The directory where log files from the build will be written, relative to the root path of the repository.
 
-``OVERWRITE``: (Default: “”)
+``OVERWRITE``:
   If set to ``T``, this flag will cause the build script to remove the current installation, if any exists, and replace it with the new version of each software package in question. If this variable is not set, the build will bypass software packages that are already installed.
 
-``NTHREADS``: (Default: “”)
+``NTHREADS``:
   The number of threads to use for parallel builds.
 
-``MAKE_CHECK``: (Default: “”)
+``MAKE_CHECK``:
   Run ``make check`` after the build.
 
-``MAKE_VERBOSE``: (Default: “”)
+``MAKE_VERBOSE``:
   Print out extra information to the log files during the build.
 
-``VENVTYPE``: (Default: “”)
+``VENVTYPE``:
   Set the type of Python environment to build. The value depends on whether you are using pip or conda: set ``VENVTYPE=pyvenv`` when using pip and ``VENVTYPE=condaenv`` when using Miniconda for creating virtual environments. Default is ``pyvenv``.
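To keep the remaining knobs in one place, here is a hedged sketch of how they might be set together. It again assumes a plain Bash configuration file, and every value (including the ``log`` directory name) is illustrative rather than a recommended or default setting.

.. code-block:: console

   # Illustrative settings only -- adjust for your system and site policies.
   export HPC_PYTHON="python/3.7.5"
   export USE_SUDO=N        # installing under a user-writable prefix
   export PKGDIR=pkg        # relative to the repository root
   export LOGDIR=log        # hypothetical log directory, relative to the repository root
   export OVERWRITE=F       # skip packages that are already installed
   export NTHREADS=4        # threads for parallel builds
   export MAKE_CHECK=N
   export MAKE_VERBOSE=N
   export VENVTYPE=pyvenv   # pip-based virtual environments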
From 26d067286a3e3281ea1fda5ab7bc22fb2452c8ed Mon Sep 17 00:00:00 2001
From: gspetro
Date: Mon, 28 Feb 2022 16:39:14 -0500
Subject: [PATCH 28/30] fix typos/formatting

---
 docs/source/hpc-notes.rst      | 2 +-
 docs/source/hpc-parameters.rst | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/source/hpc-notes.rst b/docs/source/hpc-notes.rst
index 7be1a6ed..f891854f 100644
--- a/docs/source/hpc-notes.rst
+++ b/docs/source/hpc-notes.rst
@@ -89,7 +89,7 @@ Known Workaround for Certain Installations of Lmod
     setenv("LMOD_EXACT_MATCH", "no")
     setenv("LMOD_EXTENDED_DEFAULT", "yes")
 
-   See more on the `Lmod website `__
+   See more on the `Lmod website `__.
 
 Known Issues
diff --git a/docs/source/hpc-parameters.rst b/docs/source/hpc-parameters.rst
index d2df7713..0a2fdbc2 100644
--- a/docs/source/hpc-parameters.rst
+++ b/docs/source/hpc-parameters.rst
@@ -73,7 +73,7 @@ Other Parameters
   If the directory where the software packages will be installed (``$PREFIX``) requires root permission to write to, such as ``/opt/modules``, then this flag should be enabled. For example, ``USE_SUDO=Y``.
 
 ``DOWNLOAD_ONLY``:
-  The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the software (e.g., GitHub). For more information, see :ref:`Additional Notes `.
+  The stack allows the option to download the source code for all the software without performing the installation. This is especially useful for installing the stack on machines that do not allow internet connectivity to websites hosting the software (e.g., GitHub). For more information, see :numref:`Section %s Additional Notes `.
 
 .. note::

From 66d3cd635c836930e821a6a500572e81505f25d3 Mon Sep 17 00:00:00 2001
From: gspetro
Date: Mon, 28 Feb 2022 16:42:40 -0500
Subject: [PATCH 29/30] moved disclaimer from Additional Notes to Intro

---
 docs/source/hpc-intro.rst | 3 +++
 docs/source/hpc-notes.rst | 4 ----
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/docs/source/hpc-intro.rst b/docs/source/hpc-intro.rst
index bed3794d..8585aa1e 100644
--- a/docs/source/hpc-intro.rst
+++ b/docs/source/hpc-intro.rst
@@ -16,7 +16,10 @@ Instructions
 Users can either build the HPC-Stack on their local system or use the centrally maintained stacks on each HPC platform. For a detailed description of installation options, see :ref:`Installing the HPC-Stack `.
 
+Disclaimer
+---------------
+
 The United States Department of Commerce (DOC) GitHub project code is provided on an "as is" basis and the user assumes responsibility for its use. DOC has relinquished control of the information and no longer has responsibility to protect the integrity, confidentiality, or availability of the information. Any claims against the Department of Commerce stemming from the use of its GitHub project will be governed by all applicable Federal law. Any reference to specific commercial products, processes, or services by service mark, trademark, manufacturer, or otherwise, does not constitute or imply their endorsement, recommendation or favoring by the Department of Commerce. The Department of Commerce seal and logo, or the seal and logo of a DOC bureau, shall not be used in any manner to imply endorsement of any commercial product or activity by DOC or the United States Government.
 
diff --git a/docs/source/hpc-notes.rst b/docs/source/hpc-notes.rst
index f891854f..482f565d 100644
--- a/docs/source/hpc-notes.rst
+++ b/docs/source/hpc-notes.rst
@@ -100,7 +100,3 @@ Known Issues
 * Json-schema-validator does not build with LLVM Clang. It can be disabled in the stack file in the json-schema-validator-section.
 
-Disclaimer
----------------
-
-The United States Department of Commerce (DOC) GitHub project code is provided on an "as is" basis and the user assumes responsibility for its use. DOC has relinquished control of the information and no longer has responsibility to protect the integrity, confidentiality, or availability of the information. Any claims against the Department of Commerce stemming from the use of its GitHub project will be governed by all applicable Federal law. Any reference to specific commercial products, processes, or services by service mark, trademark, manufacturer, or otherwise, does not constitute or imply their endorsement, recommendation or favoring by the Department of Commerce. The Department of Commerce seal and logo, or the seal and logo of a DOC bureau, shall not be used in any manner to imply endorsement of any commercial product or activity by DOC or the United States Government.
From ec4e502f0397fdcbcd315d9a4b42922344f8a11b Mon Sep 17 00:00:00 2001
From: gspetro
Date: Tue, 1 Mar 2022 12:46:21 -0500
Subject: [PATCH 30/30] fix readme

---
 docs/README         | 24 +++++------------------
 docs/source/conf.py |  2 +-
 2 files changed, 6 insertions(+), 20 deletions(-)

diff --git a/docs/README b/docs/README
index 30617076..e8e20c48 100644
--- a/docs/README
+++ b/docs/README
@@ -1,28 +1,14 @@
-Steps to build and use the Sphinx documentation tool:
+To build HTML docs:
 
-1) Get Sphinx and sphinxcontrib-bibtex installed on your desktop from
-   http://www.sphinx-doc.org/en/master/usage/installation.html
-   https://sphinxcontrib-bibtex.readthedocs.io/en/latest/quickstart.html#installation
+From the docs directory:
 
-2) Create a Sphinx documentation root directory:
-   % mkdir docs
-   % cd docs
+make clean && sphinx-build -b html source build
 
-3) Initialize your Sphinx project (set up an initial directory structure) using
-   % sphinx-quickstart
-
-   See http://www.sphinx-doc.org/en/master/usage/quickstart.html or
-   https://sphinx-rtd-tutorial.readthedocs.io/en/latest/sphinx-quickstart.html
-
-   for help. You can answer (ENTER) to most of the questions.
-
-To build html:
-
-From the directory above source and build, the sphinx project directory:
+OR simply run:
 
 make html
 
-Sphinx uses Latex to export the documentation as a PDF file. To build pdf:
+Sphinx uses LaTeX to export the documentation as a PDF file. To build the PDF, run:
 
 make latexpdf
 
 It will generate a PDF file in ./build/latex/.pdf
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 42d3700d..9922392a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -167,7 +167,7 @@ def setup(app):
 #  dir menu entry, description, category)
 texinfo_documents = [
     (master_doc, 'HPC-Stack', 'HPC-Stack Users Guide',
-     author, 'HPC-Stack', 'One line description of project.',
+     author, 'HPC-Stack', 'How to build and run the HPC-Stack',
      'Miscellaneous'),
 ]
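To make the updated README instructions concrete, here is a short, hedged walk-through; the output locations are standard Sphinx defaults rather than paths guaranteed by this patch.

.. code-block:: console

   cd docs
   make clean && sphinx-build -b html source build   # writes the HTML pages directly into build/
   # or, for most purposes equivalently:
   make html                                         # Sphinx "make mode" writes into build/html/
   make latexpdf                                     # needs a LaTeX toolchain; the PDF lands under build/latex/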